Example usage of org.apache.hadoop.hive.ql.exec.tez.WorkloadManager in the Apache Hive project:
the setupTriggers method of the TestTriggersMoveWorkloadManager class.
/**
 * Installs the given triggers into a fresh two-pool resource plan and applies it.
 *
 * <p>The plan defines a "BI" pool (80% allocation, parallelism 1, the default pool)
 * and an "ETL" pool (20% allocation, parallelism 1). Each trigger is registered on
 * the plan and bound to its corresponding pool.
 *
 * @param biTriggers  triggers to attach to the "BI" pool
 * @param etlTriggers triggers to attach to the "ETL" pool
 * @throws Exception if the asynchronous plan update fails or times out
 */
private void setupTriggers(final List<Trigger> biTriggers, final List<Trigger> etlTriggers) throws Exception {
WorkloadManager workloadManager = WorkloadManager.getInstance();
// Two pools: "BI" gets 80% of resources, "ETL" the remaining 20%.
WMFullResourcePlan fullPlan = new WMFullResourcePlan(plan(),
    Lists.newArrayList(pool("BI", 1, 0.8f), pool("ETL", 1, 0.2f)));
fullPlan.getPlan().setDefaultPoolPath("BI");
// Register every trigger and bind it to its pool.
for (Trigger biTrigger : biTriggers) {
fullPlan.addToTriggers(wmTriggerFromTrigger(biTrigger));
fullPlan.addToPoolTriggers(new WMPoolTrigger("BI", biTrigger.getName()));
}
for (Trigger etlTrigger : etlTriggers) {
fullPlan.addToTriggers(wmTriggerFromTrigger(etlTrigger));
fullPlan.addToPoolTriggers(new WMPoolTrigger("ETL", etlTrigger.getName()));
}
// Push the plan and bound the wait so a hung update fails the test quickly.
workloadManager.updateResourcePlanAsync(fullPlan).get(10, TimeUnit.SECONDS);
}
Example usage of org.apache.hadoop.hive.ql.exec.tez.WorkloadManager in the Apache Hive project:
the setupTriggers method of the TestTriggersWorkloadManager class.
/**
 * Installs the given triggers into a single-pool resource plan and applies it.
 *
 * <p>The plan "rp" has one pool, "llap", which receives the full allocation
 * (fraction 1.0, query parallelism 1) and serves as the default pool. Every
 * trigger is registered on the plan and bound to the "llap" pool.
 *
 * @param triggers triggers to attach to the "llap" pool
 * @throws Exception if the asynchronous plan update fails or times out
 */
@Override
protected void setupTriggers(final List<Trigger> triggers) throws Exception {
// A single pool that owns all resources.
WMPool llapPool = new WMPool("rp", "llap");
llapPool.setAllocFraction(1.0f);
llapPool.setQueryParallelism(1);
WMFullResourcePlan fullPlan =
    new WMFullResourcePlan(new WMResourcePlan("rp"), Lists.newArrayList(llapPool));
fullPlan.getPlan().setDefaultPoolPath("llap");
// Register each trigger and bind it to the pool.
for (Trigger trigger : triggers) {
fullPlan.addToTriggers(wmTriggerFromTrigger(trigger));
fullPlan.addToPoolTriggers(new WMPoolTrigger("llap", trigger.getName()));
}
// Apply the plan; bound the wait so a hung update fails the test quickly.
WorkloadManager.getInstance().updateResourcePlanAsync(fullPlan).get(10, TimeUnit.SECONDS);
}
Example usage of org.apache.hadoop.hive.ql.exec.tez.WorkloadManager in the Apache Hive project:
the alterResourcePlan method of the DDLTask class.
/**
 * Executes an ALTER RESOURCE PLAN statement.
 *
 * <p>In validate-only mode, validation errors are written to the descriptor's
 * result file and no change is made. Otherwise the plan is altered in the
 * metastore, and — when the change could affect the active plan (activation,
 * forced deactivation, or replacement of the active plan) — the new state is
 * propagated to the workload management runtime.
 *
 * @param db   handle used to validate/alter the plan in the metastore
 * @param desc parsed description of the ALTER RESOURCE PLAN statement
 * @return 0 on success (DDL task convention)
 * @throws HiveException if writing the validation output fails, or if the
 *         applied plan state is inconsistent with the requested change
 */
private int alterResourcePlan(Hive db, AlterResourcePlanDesc desc) throws HiveException {
if (desc.shouldValidate()) {
WMValidateResourcePlanResponse result = db.validateResourcePlan(desc.getResourcePlanName());
try (DataOutputStream out = getOutputStream(desc.getResFile())) {
formatter.showErrors(out, result);
} catch (IOException e) {
throw new HiveException(e);
}
// Fixed: removed a stray empty statement (';') that followed the try block.
return 0;
}
WMNullableResourcePlan resourcePlan = desc.getResourcePlan();
final WorkloadManager wm = WorkloadManager.getInstance();
final TezSessionPoolManager pm = TezSessionPoolManager.getInstance();
boolean isActivate = false, isInTest = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST);
if (resourcePlan.getStatus() != null) {
isActivate = resourcePlan.getStatus() == WMResourcePlanStatus.ACTIVE;
}
// Persist the change; appliedRp is the plan now in effect (null when none is active).
WMFullResourcePlan appliedRp = db.alterResourcePlan(desc.getResourcePlanName(), resourcePlan, desc.isEnableActivate(), desc.isForceDeactivate(), desc.isReplace());
boolean mustHaveAppliedChange = isActivate || desc.isForceDeactivate();
if (!mustHaveAppliedChange && !desc.isReplace()) {
// The modification cannot affect an active plan.
return 0;
}
if (appliedRp == null && !mustHaveAppliedChange) {
// Replacing an inactive plan.
return 0;
}
if (wm == null && isInTest) {
// Skip for tests if WM is not present.
return 0;
}
// appliedRp must be null exactly when we forced a deactivation; anything else
// means the metastore and the requested state transition disagree.
if ((appliedRp == null) != desc.isForceDeactivate()) {
throw new HiveException("Cannot get a resource plan to apply; or non-null plan on disable");
// TODO: shut down HS2?
}
assert appliedRp == null || appliedRp.getPlan().getStatus() == WMResourcePlanStatus.ACTIVE;
// Propagate activation/deactivation to the WM and Tez session pool services.
handleWorkloadManagementServiceChange(wm, pm, isActivate, appliedRp);
return 0;
}
Aggregations