Use of org.apache.hadoop.hive.metastore.model.MWMPool in project hive by apache.
The class ObjectStore, method dropWMTriggerToPoolMapping.
@Override
public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath)
    throws NoSuchObjectException, InvalidOperationException, MetaException {
  boolean commited = false;
  try {
    openTransaction();
    MWMResourcePlan resourcePlan = getMWMResourcePlan(resourcePlanName, true);
    MWMPool pool = getPool(resourcePlan, poolPath);
    MWMTrigger trigger = getTrigger(resourcePlan, triggerName);
    // Detach the trigger from the pool on both sides of the many-to-many relationship.
    pool.getTriggers().remove(trigger);
    trigger.getPools().remove(pool);
    pm.makePersistent(pool);
    pm.makePersistent(trigger);
    commited = commitTransaction();
  } finally {
    rollbackAndCleanup(commited, (Query) null);
  }
}
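
For context, a minimal caller-side sketch follows. The detachTrigger helper and the idea of folding the checked exceptions into a boolean result are assumptions for illustration, not part of ObjectStore; the store argument is assumed to be an already-configured ObjectStore instance, and the plan, trigger, and pool named by the caller are assumed to exist in the metastore.

import org.apache.hadoop.hive.metastore.ObjectStore;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

public class TriggerMappingExample {

  // Hypothetical helper: detach a trigger from a pool and report whether it succeeded.
  static boolean detachTrigger(ObjectStore store, String planName, String triggerName, String poolPath) {
    try {
      store.dropWMTriggerToPoolMapping(planName, triggerName, poolPath);
      return true;
    } catch (NoSuchObjectException e) {
      // The plan, trigger, or pool was not found.
      return false;
    } catch (InvalidOperationException | MetaException e) {
      // The mapping exists but could not be removed.
      return false;
    }
  }
}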
Use of org.apache.hadoop.hive.metastore.model.MWMPool in project hive by apache.
The class ObjectStore, method handleSimpleAlter.
private WMFullResourcePlan handleSimpleAlter(String name, WMNullableResourcePlan changes,
    boolean canActivateDisabled, boolean canDeactivate)
    throws InvalidOperationException, NoSuchObjectException, MetaException {
  MWMResourcePlan plan = name == null ? getActiveMWMResourcePlan() : getMWMResourcePlan(name, !changes.isSetStatus());
  boolean hasNameChange = changes.isSetName() && !changes.getName().equals(name);
  // Verify that field changes are consistent with what Hive does. Note: we could handle this.
  if (changes.isSetIsSetQueryParallelism() || changes.isSetIsSetDefaultPoolPath() || hasNameChange) {
    if (changes.isSetStatus()) {
      throw new InvalidOperationException("Cannot change values during status switch.");
    } else if (plan.getStatus() != MWMResourcePlan.Status.DISABLED) {
      throw new InvalidOperationException("Resource plan must be disabled to edit it.");
    }
  }
  // Handle rename and other changes.
  if (changes.isSetName()) {
    String newName = normalizeIdentifier(changes.getName());
    if (newName.isEmpty()) {
      throw new InvalidOperationException("Cannot rename to empty value.");
    }
    if (!newName.equals(plan.getName())) {
      plan.setName(newName);
    }
  }
  if (changes.isSetIsSetQueryParallelism() && changes.isIsSetQueryParallelism()) {
    if (changes.isSetQueryParallelism()) {
      if (changes.getQueryParallelism() <= 0) {
        throw new InvalidOperationException("queryParallelism should be positive.");
      }
      plan.setQueryParallelism(changes.getQueryParallelism());
    } else {
      // The field was marked as set but carries no value: reset parallelism to "unset".
      plan.setQueryParallelism(null);
    }
  }
  if (changes.isSetIsSetDefaultPoolPath() && changes.isIsSetDefaultPoolPath()) {
    if (changes.isSetDefaultPoolPath()) {
      MWMPool pool = getPool(plan, changes.getDefaultPoolPath());
      plan.setDefaultPool(pool);
    } else {
      // The field was marked as set but carries no value: clear the default pool.
      plan.setDefaultPool(null);
    }
  }
  // Handle the status change.
  if (changes.isSetStatus()) {
    return switchStatus(name, plan, changes.getStatus().name(), canActivateDisabled, canDeactivate);
  }
  return null;
}
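
A caller-side sketch of the Thrift change object this method consumes is shown below. The concrete values (a parallelism of 8, clearing the default pool) are illustrative assumptions; the point is that the isSetQueryParallelism and isSetDefaultPoolPath flags are what handleSimpleAlter checks to tell a deliberate change apart from a field that was simply left unset.

import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;

public class AlterPlanExample {

  // Hypothetical sketch: a change set that raises query parallelism to 8 and resets
  // the default pool. The target plan must be DISABLED, as enforced above.
  static WMNullableResourcePlan buildChanges() {
    WMNullableResourcePlan changes = new WMNullableResourcePlan();
    // Mark queryParallelism as explicitly provided and give it a positive value.
    changes.setIsSetQueryParallelism(true);
    changes.setQueryParallelism(8);
    // Mark defaultPoolPath as explicitly provided but leave its value unset, which
    // handleSimpleAlter interprets as "clear the default pool".
    changes.setIsSetDefaultPoolPath(true);
    return changes;
  }
}

In ObjectStore such a change set reaches handleSimpleAlter through the public alterResourcePlan entry point.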
Use of org.apache.hadoop.hive.metastore.model.MWMPool in project hive by apache.
The class ObjectStore, method createPool.
@Override
public void createPool(WMPool pool)
    throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException {
  boolean commited = false;
  try {
    openTransaction();
    MWMResourcePlan resourcePlan = getMWMResourcePlan(pool.getResourcePlanName(), true);
    if (!poolParentExists(resourcePlan, pool.getPoolPath())) {
      throw new NoSuchObjectException("Pool path is invalid, the parent does not exist");
    }
    String policy = pool.getSchedulingPolicy();
    if (!MetaStoreUtils.isValidSchedulingPolicy(policy)) {
      throw new InvalidOperationException("Invalid scheduling policy " + policy);
    }
    // Map the API-level WMPool onto the datanucleus model object and persist it.
    MWMPool mPool = new MWMPool(resourcePlan, pool.getPoolPath(), pool.getAllocFraction(), pool.getQueryParallelism(), policy);
    pm.makePersistent(mPool);
    commited = commitTransaction();
  } catch (Exception e) {
    checkForConstraintException(e, "Pool already exists: ");
    throw e;
  } finally {
    rollbackAndCleanup(commited, (Query) null);
  }
}
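
A caller-side sketch of building the WMPool argument follows. The plan name, pool path, allocation fraction, parallelism, and scheduling policy are illustrative assumptions; the parent pool must already exist and the policy must be accepted by MetaStoreUtils.isValidSchedulingPolicy, exactly as createPool checks above.

import org.apache.hadoop.hive.metastore.api.WMPool;

public class CreatePoolExample {

  // Hypothetical sketch: describe a "root.batch" pool for a resource plan named "plan1".
  static WMPool describeBatchPool() {
    WMPool pool = new WMPool();
    pool.setResourcePlanName("plan1");   // existing resource plan (assumed)
    pool.setPoolPath("root.batch");      // parent pool "root" must already exist
    pool.setAllocFraction(0.5);          // fraction of cluster resources for this pool
    pool.setQueryParallelism(4);         // at most 4 concurrent queries in the pool
    pool.setSchedulingPolicy("fair");    // assumed to be a policy isValidSchedulingPolicy accepts
    return pool;
  }
}

The resulting object would then be passed to createPool, which validates it and persists the corresponding MWMPool inside a metastore transaction.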