Example 1 with WMPool

Use of org.apache.hadoop.hive.metastore.api.WMPool in project hive by apache.

From the class DDLSemanticAnalyzer, method analyzeCreatePool:

private void analyzeCreatePool(ASTNode ast) throws SemanticException {
    // TODO: allow defaults for e.g. scheduling policy.
    if (ast.getChildCount() < 3) {
        throw new SemanticException("Expected more arguments: " + ast.toStringTree());
    }
    String rpName = unescapeIdentifier(ast.getChild(0).getText());
    String poolPath = poolPath(ast.getChild(1));
    WMPool pool = new WMPool(rpName, poolPath);
    for (int i = 2; i < ast.getChildCount(); ++i) {
        Tree child = ast.getChild(i);
        if (child.getChildCount() != 1) {
            throw new SemanticException("Expected 1 parameter for: " + child.getText());
        }
        String param = child.getChild(0).getText();
        switch(child.getType()) {
            case HiveParser.TOK_ALLOC_FRACTION:
                pool.setAllocFraction(Double.parseDouble(param));
                break;
            case HiveParser.TOK_QUERY_PARALLELISM:
                pool.setQueryParallelism(Integer.parseInt(param));
                break;
            case HiveParser.TOK_SCHEDULING_POLICY:
                String schedulingPolicyStr = PlanUtils.stripQuotes(param);
                if (!MetaStoreUtils.isValidSchedulingPolicy(schedulingPolicyStr)) {
                    throw new SemanticException("Invalid scheduling policy " + schedulingPolicyStr);
                }
                pool.setSchedulingPolicy(schedulingPolicyStr);
                break;
            case HiveParser.TOK_PATH:
                throw new SemanticException("Invalid parameter path in create pool");
        }
    }
    if (!pool.isSetAllocFraction()) {
        throw new SemanticException("alloc_fraction should be specified for a pool");
    }
    if (!pool.isSetQueryParallelism()) {
        throw new SemanticException("query_parallelism should be specified for a pool");
    }
    CreateOrAlterWMPoolDesc desc = new CreateOrAlterWMPoolDesc(pool, poolPath, false);
    addServiceOutput();
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
Also used: DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork), CreateOrAlterWMPoolDesc (org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc), CommonTree (org.antlr.runtime.tree.CommonTree), Tree (org.antlr.runtime.tree.Tree), WMPool (org.apache.hadoop.hive.metastore.api.WMPool), SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint), NotNullConstraint (org.apache.hadoop.hive.ql.metadata.NotNullConstraint), DefaultConstraint (org.apache.hadoop.hive.ql.metadata.DefaultConstraint), SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint), SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint), SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint)
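
Because WMPool is a Thrift-generated struct, allocFraction, queryParallelism, and schedulingPolicy are optional fields, which is what the isSetAllocFraction()/isSetQueryParallelism() validation above relies on. A minimal sketch of that behavior, assuming only the metastore API jar on the classpath (the plan and pool names here are made up):

import org.apache.hadoop.hive.metastore.api.WMPool;

public class WMPoolOptionalFields {
    public static void main(String[] args) {
        // Required Thrift fields go through the constructor.
        WMPool pool = new WMPool("myplan", "root.bi");
        // Optional fields start out unset, so the analyzer's checks would fail here.
        System.out.println(pool.isSetAllocFraction());     // false
        System.out.println(pool.isSetQueryParallelism());  // false
        pool.setAllocFraction(0.8);
        pool.setQueryParallelism(4);
        System.out.println(pool.isSetAllocFraction());     // true
        System.out.println(pool.isSetQueryParallelism());  // true
    }
}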

Example 2 with WMPool

Use of org.apache.hadoop.hive.metastore.api.WMPool in project hive by apache.

From the class ObjectStore, method fromMPool:

private WMPool fromMPool(MWMPool mPool, String rpName) {
    WMPool result = new WMPool(rpName, mPool.getPath());
    assert mPool.getAllocFraction() != null;
    result.setAllocFraction(mPool.getAllocFraction());
    assert mPool.getQueryParallelism() != null;
    result.setQueryParallelism(mPool.getQueryParallelism());
    result.setSchedulingPolicy(mPool.getSchedulingPolicy());
    return result;
}
Also used: WMPool (org.apache.hadoop.hive.metastore.api.WMPool), MWMPool (org.apache.hadoop.hive.metastore.model.MWMPool)
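
The asserts matter because the model class returns these values as boxed types; a null coming back from the datastore would otherwise surface only as a NullPointerException at auto-unboxing time, inside the double/int setters. A minimal sketch of that hazard in plain Java (no Hive dependencies; allocFraction stands in for mPool.getAllocFraction()):

public class UnboxingHazard {
    public static void main(String[] args) {
        Double allocFraction = null; // stand-in for a null column value
        try {
            // Auto-unboxing a null reference throws before any setter runs.
            double unboxed = allocFraction;
            System.out.println(unboxed);
        } catch (NullPointerException e) {
            System.out.println("NPE on auto-unboxing");
        }
    }
}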

Example 3 with WMPool

Use of org.apache.hadoop.hive.metastore.api.WMPool in project hive by apache.

From the class TestTriggersMoveWorkloadManager, method setupTriggers:

private void setupTriggers(final List<Trigger> biTriggers, final List<Trigger> etlTriggers) throws Exception {
    WorkloadManager wm = WorkloadManager.getInstance();
    WMPool biPool = pool("BI", 1, 0.8f);
    WMPool etlPool = pool("ETL", 1, 0.2f);
    WMFullResourcePlan plan = new WMFullResourcePlan(plan(), Lists.newArrayList(biPool, etlPool));
    plan.getPlan().setDefaultPoolPath("BI");
    for (Trigger trigger : biTriggers) {
        plan.addToTriggers(wmTriggerFromTrigger(trigger));
        plan.addToPoolTriggers(new WMPoolTrigger("BI", trigger.getName()));
    }
    for (Trigger trigger : etlTriggers) {
        plan.addToTriggers(wmTriggerFromTrigger(trigger));
        plan.addToPoolTriggers(new WMPoolTrigger("ETL", trigger.getName()));
    }
    wm.updateResourcePlanAsync(plan).get(10, TimeUnit.SECONDS);
}
Also used: WMFullResourcePlan (org.apache.hadoop.hive.metastore.api.WMFullResourcePlan), WMPoolTrigger (org.apache.hadoop.hive.metastore.api.WMPoolTrigger), ExecutionTrigger (org.apache.hadoop.hive.ql.wm.ExecutionTrigger), Trigger (org.apache.hadoop.hive.ql.wm.Trigger), WMPool (org.apache.hadoop.hive.metastore.api.WMPool), WorkloadManager (org.apache.hadoop.hive.ql.exec.tez.WorkloadManager)
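
The pool(...) and plan(...) helpers are not shown in this snippet. A plausible reconstruction, based on how WMPool and WMResourcePlan are wired up in Example 4 below (the helper names, the "rp" plan name, and the argument order are assumptions, not the test's actual code):

import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;

class TriggerTestHelpers {
    // Hypothetical helper: a pool with the given path, parallelism, and fraction.
    static WMPool pool(String path, int queryParallelism, float allocFraction) {
        WMPool pool = new WMPool("rp", path);
        pool.setQueryParallelism(queryParallelism);
        pool.setAllocFraction(allocFraction);
        return pool;
    }

    // Hypothetical helper: a bare resource plan for the pools to hang off.
    static WMResourcePlan plan() {
        return new WMResourcePlan("rp");
    }
}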

Example 4 with WMPool

Use of org.apache.hadoop.hive.metastore.api.WMPool in project hive by apache.

From the class TestTriggersWorkloadManager, method setupTriggers:

@Override
protected void setupTriggers(final List<Trigger> triggers) throws Exception {
    WorkloadManager wm = WorkloadManager.getInstance();
    WMPool pool = new WMPool("rp", "llap");
    pool.setAllocFraction(1.0f);
    pool.setQueryParallelism(1);
    WMFullResourcePlan rp = new WMFullResourcePlan(new WMResourcePlan("rp"), Lists.newArrayList(pool));
    rp.getPlan().setDefaultPoolPath("llap");
    for (Trigger trigger : triggers) {
        rp.addToTriggers(wmTriggerFromTrigger(trigger));
        rp.addToPoolTriggers(new WMPoolTrigger("llap", trigger.getName()));
    }
    wm.updateResourcePlanAsync(rp).get(10, TimeUnit.SECONDS);
}
Also used: WMFullResourcePlan (org.apache.hadoop.hive.metastore.api.WMFullResourcePlan), WMPoolTrigger (org.apache.hadoop.hive.metastore.api.WMPoolTrigger), Trigger (org.apache.hadoop.hive.ql.wm.Trigger), WMResourcePlan (org.apache.hadoop.hive.metastore.api.WMResourcePlan), WMPool (org.apache.hadoop.hive.metastore.api.WMPool), WorkloadManager (org.apache.hadoop.hive.ql.exec.tez.WorkloadManager)
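
Two details worth noting here: setAllocFraction takes a double, so the 1.0f float literal widens implicitly; and the single pool's path ("llap") is also set as the plan's default pool path, so every query maps to that pool.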

Example 5 with WMPool

Use of org.apache.hadoop.hive.metastore.api.WMPool in project hive by apache.

From the class WorkloadManager, method applyNewResourcePlanOnMasterThread:

private void applyNewResourcePlanOnMasterThread(EventState e, WmThreadSyncWork syncWork, HashSet<String> poolsToRedistribute) {
    int totalQueryParallelism = 0;
    WMFullResourcePlan plan = e.resourcePlanToApply;
    if (plan == null) {
        // NULL plan means WM is disabled via a command; it could still be reenabled.
        LOG.info("Disabling workload management because the resource plan has been removed");
        this.rpName = null;
        this.defaultPool = null;
        this.userPoolMapping = new UserPoolMapping(null, null);
    } else {
        this.rpName = plan.getPlan().getName();
        this.defaultPool = plan.getPlan().getDefaultPoolPath();
        this.userPoolMapping = new UserPoolMapping(plan.getMappings(), defaultPool);
    }
    // Note: we assume here that plan has been validated beforehand, so we don't verify
    // that fractions or query parallelism add up, etc.
    Map<String, PoolState> oldPools = pools;
    pools = new HashMap<>();
    ArrayList<List<WMPool>> poolsByLevel = new ArrayList<>();
    if (plan != null) {
        // first distribute them by levels, then add level by level.
        for (WMPool pool : plan.getPools()) {
            String fullName = pool.getPoolPath();
            int ix = StringUtils.countMatches(fullName, POOL_SEPARATOR_STR);
            while (poolsByLevel.size() <= ix) {
                // We expect all the levels to have items.
                poolsByLevel.add(new LinkedList<WMPool>());
            }
            poolsByLevel.get(ix).add(pool);
        }
    }
    for (int level = 0; level < poolsByLevel.size(); ++level) {
        List<WMPool> poolsOnLevel = poolsByLevel.get(level);
        for (WMPool pool : poolsOnLevel) {
            String fullName = pool.getPoolPath();
            int qp = pool.getQueryParallelism();
            double fraction = pool.getAllocFraction();
            if (level > 0) {
                String parentName = fullName.substring(0, fullName.lastIndexOf(POOL_SEPARATOR));
                PoolState parent = pools.get(parentName);
                fraction = parent.finalFraction * fraction;
                parent.finalFractionRemaining -= fraction;
            }
            PoolState state = oldPools == null ? null : oldPools.remove(fullName);
            if (state == null) {
                state = new PoolState(fullName, qp, fraction, pool.getSchedulingPolicy());
            } else {
                // This will also take care of the queries if query parallelism changed.
                state.update(qp, fraction, syncWork, e, pool.getSchedulingPolicy());
                poolsToRedistribute.add(fullName);
            }
            state.setTriggers(new LinkedList<Trigger>());
            LOG.info("Adding Hive pool: " + state);
            pools.put(fullName, state);
            totalQueryParallelism += qp;
        }
    }
    // GLOBAL - all pools inherit
    if (plan != null && plan.isSetTriggers() && plan.isSetPoolTriggers()) {
        Map<String, Trigger> triggers = new HashMap<>();
        for (WMTrigger trigger : plan.getTriggers()) {
            ExecutionTrigger execTrigger = ExecutionTrigger.fromWMTrigger(trigger);
            triggers.put(trigger.getTriggerName(), execTrigger);
        }
        for (WMPoolTrigger poolTrigger : plan.getPoolTriggers()) {
            PoolState pool = pools.get(poolTrigger.getPool());
            Trigger trigger = triggers.get(poolTrigger.getTrigger());
            pool.triggers.add(trigger);
            poolsToRedistribute.add(pool.fullName);
            LOG.info("Adding pool " + pool.fullName + " trigger " + trigger);
        }
    }
    if (oldPools != null && !oldPools.isEmpty()) {
        // Looks like some pools were removed; kill running queries, re-queue the queued ones.
        for (PoolState oldPool : oldPools.values()) {
            oldPool.destroy(syncWork, e.getRequests, e.toReuse);
        }
    }
    LOG.info("Updating with " + totalQueryParallelism + " total query parallelism");
    int deltaSessions = totalQueryParallelism - this.totalQueryParallelism;
    this.totalQueryParallelism = totalQueryParallelism;
    // Nothing to do.
    if (deltaSessions == 0)
        return;
    if (deltaSessions < 0) {
        // First, see if we have sessions that we were planning to restart/kill; get rid of those.
        deltaSessions = transferSessionsToDestroy(syncWork.toKillQuery.keySet(), syncWork.toDestroyNoRestart, deltaSessions);
        deltaSessions = transferSessionsToDestroy(syncWork.toRestartInUse, syncWork.toDestroyNoRestart, deltaSessions);
    }
    if (deltaSessions != 0) {
        failOnFutureFailure(tezAmPool.resizeAsync(deltaSessions, syncWork.toDestroyNoRestart));
    }
}
Also used: ExecutionTrigger (org.apache.hadoop.hive.ql.wm.ExecutionTrigger), IdentityHashMap (java.util.IdentityHashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), WMPool (org.apache.hadoop.hive.metastore.api.WMPool), WMTrigger (org.apache.hadoop.hive.metastore.api.WMTrigger), WMFullResourcePlan (org.apache.hadoop.hive.metastore.api.WMFullResourcePlan), Trigger (org.apache.hadoop.hive.ql.wm.Trigger), WMPoolTrigger (org.apache.hadoop.hive.metastore.api.WMPoolTrigger), List (java.util.List), LinkedList (java.util.LinkedList)
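
The poolsByLevel bucketing derives a pool's nesting depth by counting path separators, which guarantees parents are registered before their children; the fraction math then turns each pool's relative allocFraction into an absolute share (for example, a child with allocFraction 0.5 under a parent whose final fraction is 0.8 ends up with 0.4). A standalone sketch of the level computation, assuming '.' as the pool separator as in Hive's pool paths:

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

public class PoolLevels {
    private static final char POOL_SEPARATOR = '.';

    public static void main(String[] args) {
        List<List<String>> poolsByLevel = new ArrayList<>();
        for (String path : new String[] { "root", "root.bi", "root.etl", "root.bi.adhoc" }) {
            // Level = separator count: "root" is 0, "root.bi.adhoc" is 2.
            int level = (int) path.chars().filter(c -> c == POOL_SEPARATOR).count();
            while (poolsByLevel.size() <= level) {
                poolsByLevel.add(new LinkedList<>());
            }
            poolsByLevel.get(level).add(path);
        }
        // Prints [[root], [root.bi, root.etl], [root.bi.adhoc]]: each level is
        // processed in full before the next, so a child can always find its parent.
        System.out.println(poolsByLevel);
    }
}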

Aggregations

WMPool (org.apache.hadoop.hive.metastore.api.WMPool): 8
WMFullResourcePlan (org.apache.hadoop.hive.metastore.api.WMFullResourcePlan): 4
WMPoolTrigger (org.apache.hadoop.hive.metastore.api.WMPoolTrigger): 3
Trigger (org.apache.hadoop.hive.ql.wm.Trigger): 3
WMResourcePlan (org.apache.hadoop.hive.metastore.api.WMResourcePlan): 2
WorkloadManager (org.apache.hadoop.hive.ql.exec.tez.WorkloadManager): 2
ExecutionTrigger (org.apache.hadoop.hive.ql.wm.ExecutionTrigger): 2
ArrayList (java.util.ArrayList): 1
HashMap (java.util.HashMap): 1
IdentityHashMap (java.util.IdentityHashMap): 1
LinkedList (java.util.LinkedList): 1
List (java.util.List): 1
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1
CommonTree (org.antlr.runtime.tree.CommonTree): 1
Tree (org.antlr.runtime.tree.Tree): 1
SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint): 1
SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint): 1
SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint): 1
SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint): 1
WMTrigger (org.apache.hadoop.hive.metastore.api.WMTrigger): 1