Use of org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse in project hive by apache.
Class ObjectStore, method getResourcePlanErrors.
private WMValidateResourcePlanResponse getResourcePlanErrors(MWMResourcePlan mResourcePlan) {
WMValidateResourcePlanResponse response = new WMValidateResourcePlanResponse();
response.setErrors(new ArrayList<>());
response.setWarnings(new ArrayList<>());
Integer rpParallelism = mResourcePlan.getQueryParallelism();
if (rpParallelism != null && rpParallelism < 1) {
response.addToErrors("Query parallelism should for resource plan be positive. Got: " + rpParallelism);
}
int totalQueryParallelism = 0;
Map<String, PoolData> poolInfo = new HashMap<>();
for (MWMPool pool : mResourcePlan.getPools()) {
PoolData currentPoolData = getPoolData(poolInfo, pool.getPath());
currentPoolData.found = true;
String parent = getParentPath(pool.getPath(), "");
PoolData parentPoolData = getPoolData(poolInfo, parent);
parentPoolData.hasChildren = true;
parentPoolData.totalChildrenAllocFraction += pool.getAllocFraction();
Integer poolParallelism = pool.getQueryParallelism();
if (poolParallelism != null && poolParallelism < 1) {
response.addToErrors("Invalid query parallelism for pool: " + pool.getPath());
} else if (poolParallelism != null) {
// Only pools that declare a parallelism contribute to the total; a null value would
// otherwise cause an unboxing NullPointerException here.
totalQueryParallelism += poolParallelism;
}
if (!MetaStoreUtils.isValidSchedulingPolicy(pool.getSchedulingPolicy())) {
response.addToErrors("Invalid scheduling policy " + pool.getSchedulingPolicy() + " for pool: " + pool.getPath());
}
}
if (rpParallelism != null) {
if (rpParallelism < totalQueryParallelism) {
response.addToErrors("Sum of all pools' query parallelism: " + totalQueryParallelism + " exceeds resource plan query parallelism: " + rpParallelism);
} else if (rpParallelism != totalQueryParallelism) {
response.addToWarnings("Sum of all pools' query parallelism: " + totalQueryParallelism + " is less than resource plan query parallelism: " + rpParallelism);
}
}
for (Entry<String, PoolData> entry : poolInfo.entrySet()) {
final PoolData poolData = entry.getValue();
final boolean isRoot = entry.getKey().isEmpty();
// Special case for root parent
if (isRoot) {
poolData.found = true;
if (!poolData.hasChildren) {
response.addToErrors("Root has no children");
// TODO: change fractions to use decimal? somewhat brittle
} else if (Math.abs(1.0 - poolData.totalChildrenAllocFraction) > 0.00001) {
response.addToErrors("Sum of root children pools' alloc fraction should be 1.0 got: " + poolData.totalChildrenAllocFraction + " for pool: " + entry.getKey());
}
}
if (!poolData.found) {
response.addToErrors("Pool does not exists but has children: " + entry.getKey());
}
if (poolData.hasChildren) {
if (!isRoot && (poolData.totalChildrenAllocFraction - 1.0) > 0.00001) {
response.addToErrors("Sum of children pools' alloc fraction should be less than 1 got: " + poolData.totalChildrenAllocFraction + " for pool: " + entry.getKey());
}
}
}
// Trigger and action expressions are not validated here, since counters are not
// available and grammar check is there in the language itself.
return response;
}
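A plan is considered usable when the error list is empty; warnings (such as under-allocated parallelism) are informational. The following standalone sketch is not from the Hive source; it only uses the Thrift accessors visible above, and the class name ResponseInspectionSketch is made up for illustration.

import java.util.ArrayList;

import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;

public class ResponseInspectionSketch {
  public static void main(String[] args) {
    WMValidateResourcePlanResponse response = new WMValidateResourcePlanResponse();
    response.setErrors(new ArrayList<>());
    response.setWarnings(new ArrayList<>());
    // Warnings do not make a plan unusable; only errors do.
    response.addToWarnings("Sum of all pools' query parallelism: 3 is less than resource plan query parallelism: 4");
    boolean usable = response.getErrors().isEmpty();
    System.out.println("usable=" + usable + ", warnings=" + response.getWarnings());
  }
}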
Use of org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse in project hive by apache.
Class ObjectStore, method switchStatus.
private WMFullResourcePlan switchStatus(String name, MWMResourcePlan mResourcePlan, String status, boolean canActivateDisabled, boolean canDeactivate) throws InvalidOperationException {
Status currentStatus = mResourcePlan.getStatus();
Status newStatus = null;
try {
newStatus = Status.valueOf(status);
} catch (IllegalArgumentException e) {
throw new InvalidOperationException("Invalid status: " + status);
}
if (newStatus == currentStatus) {
return null;
}
boolean doActivate = false, doValidate = false;
switch(currentStatus) {
case ACTIVE:
// No status change for active resource plan, first activate another plan.
if (!canDeactivate) {
throw new InvalidOperationException("Resource plan " + name + " is active; activate another plan first, or disable workload management.");
}
break;
case DISABLED:
assert newStatus == Status.ACTIVE || newStatus == Status.ENABLED;
doValidate = true;
doActivate = (newStatus == Status.ACTIVE);
if (doActivate && !canActivateDisabled) {
throw new InvalidOperationException("Resource plan " + name + " is disabled and should be enabled before activation (or in the same command)");
}
break;
case ENABLED:
if (newStatus == Status.DISABLED) {
mResourcePlan.setStatus(newStatus);
// A simple case.
return null;
}
assert newStatus == Status.ACTIVE;
doActivate = true;
break;
default:
throw new AssertionError("Unexpected status " + currentStatus);
}
if (doValidate) {
// Note: this may use additional inputs from the caller, e.g. maximum query
// parallelism in the cluster based on physical constraints.
WMValidateResourcePlanResponse response = getResourcePlanErrors(mResourcePlan);
if (!response.getErrors().isEmpty()) {
throw new InvalidOperationException("ResourcePlan: " + name + " is invalid: " + response.getErrors());
}
}
if (doActivate) {
// Deactivate currently active resource plan.
deactivateActiveResourcePlan(mResourcePlan.getNs());
mResourcePlan.setStatus(newStatus);
return fullFromMResourcePlan(mResourcePlan);
} else {
mResourcePlan.setStatus(newStatus);
}
return null;
}
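The switch above is effectively a small state machine over Status. As a reference, here is an illustrative standalone restatement of the transitions switchStatus permits, given the two caller-supplied flags; the enum and method below are not part of Hive.

public class StatusTransitionSketch {
  enum Status { ACTIVE, ENABLED, DISABLED }

  // Returns true when switchStatus would proceed (possibly after validation) instead of throwing.
  static boolean isTransitionAllowed(Status from, Status to,
      boolean canActivateDisabled, boolean canDeactivate) {
    if (from == to) {
      return true; // no-op; switchStatus returns null without changing anything
    }
    switch (from) {
      case ACTIVE:
        // An active plan is only switched away from when another plan takes over
        // or workload management is disabled.
        return canDeactivate;
      case DISABLED:
        // DISABLED -> ENABLED is allowed (after validation); DISABLED -> ACTIVE also
        // requires enable-and-activate in the same command.
        return to == Status.ENABLED || canActivateDisabled;
      case ENABLED:
        // ENABLED -> DISABLED and ENABLED -> ACTIVE are both allowed.
        return true;
      default:
        return false;
    }
  }
}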
Use of org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse in project hive by apache.
Class ObjectStore, method validateResourcePlan.
@Override
public WMValidateResourcePlanResponse validateResourcePlan(String name, String ns) throws NoSuchObjectException, InvalidObjectException, MetaException {
name = normalizeIdentifier(name);
boolean committed = false;
Query query = null;
try {
query = createGetResourcePlanQuery();
MWMResourcePlan mResourcePlan = (MWMResourcePlan) query.execute(name, ns);
if (mResourcePlan == null) {
throw new NoSuchObjectException("Cannot find resourcePlan: " + name + " in " + ns);
}
WMValidateResourcePlanResponse result = getResourcePlanErrors(mResourcePlan);
committed = commitTransaction();
return result;
} finally {
rollbackAndCleanup(committed, query);
}
}
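The committed flag plus rollbackAndCleanup in the finally block is the usual ObjectStore transaction idiom. A rough sketch of what that cleanup amounts to is shown below; it is illustrative only, and the real ObjectStore.rollbackAndCleanup may differ in detail.

// Illustrative sketch, not the actual helper: roll back if commitTransaction() was never
// reached, and close the JDO query either way.
private void rollbackAndCleanupSketch(boolean committed, javax.jdo.Query query) {
  try {
    if (!committed) {
      rollbackTransaction();
    }
  } finally {
    if (query != null) {
      query.closeAll();
    }
  }
}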
Use of org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse in project hive by apache.
Class DDLTask, method alterResourcePlan.
private int alterResourcePlan(Hive db, AlterResourcePlanDesc desc) throws HiveException {
if (desc.shouldValidate()) {
WMValidateResourcePlanResponse result = db.validateResourcePlan(desc.getResourcePlanName());
try (DataOutputStream out = getOutputStream(desc.getResFile())) {
formatter.showErrors(out, result);
} catch (IOException e) {
throw new HiveException(e);
}
return 0;
}
WMNullableResourcePlan resourcePlan = desc.getResourcePlan();
final WorkloadManager wm = WorkloadManager.getInstance();
final TezSessionPoolManager pm = TezSessionPoolManager.getInstance();
boolean isActivate = false, isInTest = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST);
if (resourcePlan.getStatus() != null) {
isActivate = resourcePlan.getStatus() == WMResourcePlanStatus.ACTIVE;
}
WMFullResourcePlan appliedRp = db.alterResourcePlan(desc.getResourcePlanName(), resourcePlan, desc.isEnableActivate(), desc.isForceDeactivate(), desc.isReplace());
boolean mustHaveAppliedChange = isActivate || desc.isForceDeactivate();
if (!mustHaveAppliedChange && !desc.isReplace()) {
// The modification cannot affect an active plan.
return 0;
}
if (appliedRp == null && !mustHaveAppliedChange) {
// Replacing an inactive plan.
return 0;
}
if (wm == null && isInTest) {
// Skip for tests if WM is not present.
return 0;
}
if ((appliedRp == null) != desc.isForceDeactivate()) {
throw new HiveException("Cannot get a resource plan to apply; or non-null plan on disable");
// TODO: shut down HS2?
}
assert appliedRp == null || appliedRp.getPlan().getStatus() == WMResourcePlanStatus.ACTIVE;
handleWorkloadManagementServiceChange(wm, pm, isActivate, appliedRp);
return 0;
}
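In the validate-only branch, formatter.showErrors writes the response into desc.getResFile(). A minimal plain-text rendering of the same data could look like the sketch below; this is illustrative only, and the actual MetaDataFormatter implementations may format the output differently.

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;

final class ShowValidationErrorsSketch {
  static void showErrors(DataOutputStream out, WMValidateResourcePlanResponse response)
      throws IOException {
    // One line per message: errors first, then warnings.
    for (String error : response.getErrors()) {
      out.writeBytes("error: " + error + "\n");
    }
    for (String warning : response.getWarnings()) {
      out.writeBytes("warning: " + warning + "\n");
    }
  }
}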