Usage of org.apache.hadoop.hive.metastore.model.MWMPool in the Apache Hive project: class ObjectStore, method alterPool.
/**
 * Applies the changes carried by a {@link WMNullablePool} to the stored pool at
 * {@code poolPath} within the pool's resource plan. Only fields the caller explicitly
 * set on the Thrift object are copied; the scheduling policy can additionally be
 * cleared. A pool-path change relocates every descendant pool as well.
 *
 * @param pool the (possibly partial) new pool state; unset fields are left untouched
 * @param poolPath path identifying the existing pool to alter
 * @throws NoSuchObjectException if the resource plan or pool cannot be found
 * @throws InvalidOperationException if an invalid scheduling policy is supplied
 */
@Override
public void alterPool(WMNullablePool pool, String poolPath) throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException {
  boolean success = false;
  try {
    openTransaction();
    MWMResourcePlan plan = getMWMResourcePlan(pool.getResourcePlanName(), true);
    MWMPool mPool = getPool(plan, poolPath);
    pm.retrieve(mPool);
    if (pool.isSetAllocFraction()) {
      mPool.setAllocFraction(pool.getAllocFraction());
    }
    if (pool.isSetQueryParallelism()) {
      mPool.setQueryParallelism(pool.getQueryParallelism());
    }
    // The nullable-pool wrapper uses a second flag to distinguish "leave the policy
    // alone" from "explicitly set it (possibly to null)".
    if (pool.isSetIsSetSchedulingPolicy() && pool.isIsSetSchedulingPolicy()) {
      if (!pool.isSetSchedulingPolicy()) {
        // Explicit request to clear the scheduling policy.
        mPool.setSchedulingPolicy(null);
      } else {
        String policy = pool.getSchedulingPolicy();
        if (!MetaStoreUtils.isValidSchedulingPolicy(policy)) {
          throw new InvalidOperationException("Invalid scheduling policy " + policy);
        }
        mPool.setSchedulingPolicy(policy);
      }
    }
    if (pool.isSetPoolPath() && !pool.getPoolPath().equals(mPool.getPath())) {
      // Renaming the pool: move every descendant first, then update this pool's path.
      moveDescendents(plan, mPool.getPath(), pool.getPoolPath());
      mPool.setPath(pool.getPoolPath());
    }
    success = commitTransaction();
  } finally {
    rollbackAndCleanup(success, (Query) null);
  }
}
Usage of org.apache.hadoop.hive.metastore.model.MWMPool in the Apache Hive project: class ObjectStore, method getResourcePlanErrors.
/**
 * Validates a resource plan and collects all problems found.
 *
 * Checks performed:
 * <ul>
 *   <li>plan-level query parallelism, if set, must be positive;</li>
 *   <li>each pool's query parallelism, if set, must be positive;</li>
 *   <li>each pool's scheduling policy must be valid per
 *       {@link MetaStoreUtils#isValidSchedulingPolicy};</li>
 *   <li>the sum of pool parallelisms must not exceed the plan parallelism
 *       (a mere mismatch is only a warning);</li>
 *   <li>the root's children alloc fractions must sum to 1.0 (within 0.001),
 *       any other parent's children must sum to strictly less than 1;</li>
 *   <li>every pool referenced as a parent must actually exist.</li>
 * </ul>
 *
 * @param mResourcePlan the persisted resource plan to validate
 * @return a response whose error/warning lists are always non-null (possibly empty)
 */
private WMValidateResourcePlanResponse getResourcePlanErrors(MWMResourcePlan mResourcePlan) {
  WMValidateResourcePlanResponse response = new WMValidateResourcePlanResponse();
  // Use typed lists; the original raw `new ArrayList()` produced unchecked warnings.
  response.setErrors(new ArrayList<>());
  response.setWarnings(new ArrayList<>());
  Integer rpParallelism = mResourcePlan.getQueryParallelism();
  if (rpParallelism != null && rpParallelism < 1) {
    response.addToErrors("Query parallelism should for resource plan be positive. Got: " + rpParallelism);
  }
  int totalQueryParallelism = 0;
  Map<String, PoolData> poolInfo = new HashMap<>();
  for (MWMPool pool : mResourcePlan.getPools()) {
    PoolData currentPoolData = getPoolData(poolInfo, pool.getPath());
    currentPoolData.found = true;
    String parent = getParentPath(pool.getPath(), "");
    PoolData parentPoolData = getPoolData(poolInfo, parent);
    parentPoolData.hasChildren = true;
    parentPoolData.totalChildrenAllocFraction += pool.getAllocFraction();
    // BUG FIX: the old else-branch unconditionally unboxed getQueryParallelism(),
    // throwing NPE for pools with no parallelism set. A null parallelism is simply
    // skipped: it is neither an error nor a contribution to the total.
    Integer poolParallelism = pool.getQueryParallelism();
    if (poolParallelism != null) {
      if (poolParallelism < 1) {
        response.addToErrors("Invalid query parallelism for pool: " + pool.getPath());
      } else {
        totalQueryParallelism += poolParallelism;
      }
    }
    if (!MetaStoreUtils.isValidSchedulingPolicy(pool.getSchedulingPolicy())) {
      response.addToErrors("Invalid scheduling policy " + pool.getSchedulingPolicy() + " for pool: " + pool.getPath());
    }
  }
  if (rpParallelism != null) {
    if (rpParallelism < totalQueryParallelism) {
      response.addToErrors("Sum of all pools' query parallelism: " + totalQueryParallelism + " exceeds resource plan query parallelism: " + rpParallelism);
    } else if (rpParallelism != totalQueryParallelism) {
      // Under-allocation is tolerated but worth flagging.
      response.addToWarnings("Sum of all pools' query parallelism: " + totalQueryParallelism + " is less than resource plan query parallelism: " + rpParallelism);
    }
  }
  for (Entry<String, PoolData> entry : poolInfo.entrySet()) {
    final PoolData poolData = entry.getValue();
    final boolean isRoot = entry.getKey().isEmpty();
    // Special case for root parent: it has no stored pool object, so mark it found
    // and require its children's fractions to sum to exactly 1.0 (within epsilon).
    if (isRoot) {
      poolData.found = true;
      if (!poolData.hasChildren) {
        response.addToErrors("Root has no children");
      } else if (Math.abs(1.0 - poolData.totalChildrenAllocFraction) > 0.001) {
        response.addToErrors("Sum of root children pools' alloc fraction should be 1.0 got: " + poolData.totalChildrenAllocFraction + " for pool: " + entry.getKey());
      }
    }
    if (!poolData.found) {
      response.addToErrors("Pool does not exists but has children: " + entry.getKey());
    }
    if (poolData.hasChildren) {
      // Non-root parents must keep some fraction for themselves.
      if (!isRoot && 1.0 <= poolData.totalChildrenAllocFraction) {
        response.addToErrors("Sum of children pools' alloc fraction should be less than 1 got: " + poolData.totalChildrenAllocFraction + " for pool: " + entry.getKey());
      }
    }
  }
  return response;
}
Usage of org.apache.hadoop.hive.metastore.model.MWMPool in the Apache Hive project: class ObjectStore, method createOrUpdateWMMapping.
/**
 * Creates a new workload-management mapping, or updates the pool of an existing one.
 *
 * @param mapping the mapping to persist; entity type/name are normalized before use
 * @param update  false to insert a brand-new mapping, true to repoint an existing one
 * @throws NoSuchObjectException if the resource plan or pool does not exist, or (in
 *         update mode) no mapping matches the given entity type/name
 * @throws InvalidOperationException if the plan is not in a modifiable state
 */
@Override
public void createOrUpdateWMMapping(WMMapping mapping, boolean update) throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException {
  EntityType entityType = EntityType.valueOf(mapping.getEntityType().trim().toUpperCase());
  String entityName = normalizeIdentifier(mapping.getEntityName());
  boolean commited = false;
  Query query = null;
  try {
    openTransaction();
    MWMResourcePlan resourcePlan = getMWMResourcePlan(mapping.getResourcePlanName(), true);
    MWMPool pool = null;
    if (mapping.isSetPoolPath()) {
      pool = getPool(resourcePlan, mapping.getPoolPath());
    }
    if (!update) {
      MWMMapping mMapping = new MWMMapping(resourcePlan, entityType, entityName, pool, mapping.getOrdering());
      pm.makePersistent(mMapping);
    } else {
      query = pm.newQuery(MWMMapping.class, "resourcePlan == rp && entityType == type " + "&& entityName == name");
      query.declareParameters("MWMResourcePlan rp, java.lang.String type, java.lang.String name");
      query.setUnique(true);
      MWMMapping mMapping = (MWMMapping) query.execute(resourcePlan, entityType.toString(), entityName);
      // BUG FIX: a unique query returns null when nothing matches; the old code then
      // NPE'd on setPool. Surface it as the already-declared NoSuchObjectException.
      if (mMapping == null) {
        throw new NoSuchObjectException(
            "Cannot find mapping for entity " + entityName + " of type " + entityType);
      }
      mMapping.setPool(pool);
    }
    commited = commitTransaction();
  } finally {
    rollbackAndCleanup(commited, query);
  }
}
Usage of org.apache.hadoop.hive.metastore.model.MWMPool in the Apache Hive project: class ObjectStore, method moveDescendents.
/**
 * Re-parents every descendant of the pool at {@code path} so its prefix becomes
 * {@code newPoolPath}. Descendants are matched by the {@code path + "."} prefix,
 * which excludes the moved pool itself and any sibling whose name merely starts
 * with the same characters.
 *
 * @param resourcePlan plan owning the pools
 * @param path         current path of the pool being moved
 * @param newPoolPath  new path to substitute for {@code path}
 * @throws NoSuchObjectException if the parent of {@code newPoolPath} does not exist
 */
private void moveDescendents(MWMResourcePlan resourcePlan, String path, String newPoolPath) throws NoSuchObjectException {
  if (!poolParentExists(resourcePlan, newPoolPath)) {
    throw new NoSuchObjectException("Pool path is invalid, the parent does not exist");
  }
  boolean success = false;
  Query query = null;
  openTransaction();
  try {
    query = pm.newQuery(MWMPool.class, "resourcePlan == rp && path.startsWith(poolPath)");
    query.declareParameters("MWMResourcePlan rp, java.lang.String poolPath");
    // Trailing "." keeps the match anchored at a path-component boundary.
    String prefix = path + ".";
    @SuppressWarnings("unchecked")
    List<MWMPool> descendants = (List<MWMPool>) query.execute(resourcePlan, prefix);
    pm.retrieveAll(descendants);
    // Swap the old prefix for the new one; the substring keeps the "." separator.
    int cut = path.length();
    for (MWMPool child : descendants) {
      child.setPath(newPoolPath + child.getPath().substring(cut));
    }
    success = commitTransaction();
  } finally {
    rollbackAndCleanup(success, query);
  }
}
Usage of org.apache.hadoop.hive.metastore.model.MWMPool in the Apache Hive project: class ObjectStore, method createWMTriggerToPoolMapping.
/**
 * Links an existing trigger to an existing pool within a resource plan.
 * Updates both sides of the many-to-many association and persists both objects.
 *
 * @param resourcePlanName plan containing the pool and the trigger
 * @param triggerName      name of the trigger to attach
 * @param poolPath         path of the pool to attach the trigger to
 * @throws NoSuchObjectException if the plan, pool, or trigger cannot be found
 */
@Override
public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath) throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException {
  boolean success = false;
  try {
    openTransaction();
    MWMResourcePlan plan = getMWMResourcePlan(resourcePlanName, true);
    MWMPool pool = getPool(plan, poolPath);
    MWMTrigger trigger = getTrigger(plan, triggerName);
    // Keep both sides of the bidirectional association in sync.
    pool.getTriggers().add(trigger);
    trigger.getPools().add(pool);
    pm.makePersistent(pool);
    pm.makePersistent(trigger);
    success = commitTransaction();
  } finally {
    rollbackAndCleanup(success, (Query) null);
  }
}
Aggregations