
Example 6 with WMTrigger

Use of org.apache.hadoop.hive.metastore.api.WMTrigger in project hive by apache.

Class WorkloadManager, method applyNewResourcePlanOnMasterThread:

private void applyNewResourcePlanOnMasterThread(EventState e, WmThreadSyncWork syncWork, HashSet<String> poolsToRedistribute) {
    int totalQueryParallelism = 0;
    WMFullResourcePlan plan = e.resourcePlanToApply;
    if (plan == null) {
        // NULL plan means WM is disabled via a command; it could still be re-enabled.
        LOG.info("Disabling workload management because the resource plan has been removed");
        this.rpName = null;
        this.defaultPool = null;
        this.userPoolMapping = new UserPoolMapping(null, null);
    } else {
        this.rpName = plan.getPlan().getName();
        this.defaultPool = plan.getPlan().getDefaultPoolPath();
        this.userPoolMapping = new UserPoolMapping(plan.getMappings(), defaultPool);
    }
    // Note: we assume here that plan has been validated beforehand, so we don't verify
    // that fractions or query parallelism add up, etc.
    Map<String, PoolState> oldPools = pools;
    pools = new HashMap<>();
    ArrayList<List<WMPool>> poolsByLevel = new ArrayList<>();
    if (plan != null) {
        // first distribute them by levels, then add level by level.
        for (WMPool pool : plan.getPools()) {
            String fullName = pool.getPoolPath();
            int ix = StringUtils.countMatches(fullName, POOL_SEPARATOR_STR);
            while (poolsByLevel.size() <= ix) {
                // We expect all the levels to have items.
                poolsByLevel.add(new LinkedList<WMPool>());
            }
            poolsByLevel.get(ix).add(pool);
        }
    }
    for (int level = 0; level < poolsByLevel.size(); ++level) {
        List<WMPool> poolsOnLevel = poolsByLevel.get(level);
        for (WMPool pool : poolsOnLevel) {
            String fullName = pool.getPoolPath();
            int qp = pool.getQueryParallelism();
            double fraction = pool.getAllocFraction();
            if (level > 0) {
                String parentName = fullName.substring(0, fullName.lastIndexOf(POOL_SEPARATOR));
                PoolState parent = pools.get(parentName);
                fraction = parent.finalFraction * fraction;
                parent.finalFractionRemaining -= fraction;
            }
            PoolState state = oldPools == null ? null : oldPools.remove(fullName);
            if (state == null) {
                state = new PoolState(fullName, qp, fraction, pool.getSchedulingPolicy());
            } else {
                // This will also take care of the queries if query parallelism changed.
                state.update(qp, fraction, syncWork, e, pool.getSchedulingPolicy());
                poolsToRedistribute.add(fullName);
            }
            state.setTriggers(new LinkedList<Trigger>());
            LOG.info("Adding Hive pool: " + state);
            pools.put(fullName, state);
            totalQueryParallelism += qp;
        }
    }
    // GLOBAL - all pools inherit
    if (plan != null && plan.isSetTriggers() && plan.isSetPoolTriggers()) {
        Map<String, Trigger> triggers = new HashMap<>();
        for (WMTrigger trigger : plan.getTriggers()) {
            ExecutionTrigger execTrigger = ExecutionTrigger.fromWMTrigger(trigger);
            triggers.put(trigger.getTriggerName(), execTrigger);
        }
        for (WMPoolTrigger poolTrigger : plan.getPoolTriggers()) {
            PoolState pool = pools.get(poolTrigger.getPool());
            Trigger trigger = triggers.get(poolTrigger.getTrigger());
            pool.triggers.add(trigger);
            poolsToRedistribute.add(pool.fullName);
            LOG.info("Adding pool " + pool.fullName + " trigger " + trigger);
        }
    }
    if (oldPools != null && !oldPools.isEmpty()) {
        // Looks like some pools were removed; kill running queries, re-queue the queued ones.
        for (PoolState oldPool : oldPools.values()) {
            oldPool.destroy(syncWork, e.getRequests, e.toReuse);
        }
    }
    LOG.info("Updating with " + totalQueryParallelism + " total query parallelism");
    int deltaSessions = totalQueryParallelism - this.totalQueryParallelism;
    this.totalQueryParallelism = totalQueryParallelism;
    // Nothing to do.
    if (deltaSessions == 0)
        return;
    if (deltaSessions < 0) {
        // First, see if we have sessions that we were planning to restart/kill; get rid of those.
        deltaSessions = transferSessionsToDestroy(syncWork.toKillQuery.keySet(), syncWork.toDestroyNoRestart, deltaSessions);
        deltaSessions = transferSessionsToDestroy(syncWork.toRestartInUse, syncWork.toDestroyNoRestart, deltaSessions);
    }
    if (deltaSessions != 0) {
        failOnFutureFailure(tezAmPool.resizeAsync(deltaSessions, syncWork.toDestroyNoRestart));
    }
}
Also used : ExecutionTrigger(org.apache.hadoop.hive.ql.wm.ExecutionTrigger) IdentityHashMap(java.util.IdentityHashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) WMPool(org.apache.hadoop.hive.metastore.api.WMPool) WMTrigger(org.apache.hadoop.hive.metastore.api.WMTrigger) WMFullResourcePlan(org.apache.hadoop.hive.metastore.api.WMFullResourcePlan) Trigger(org.apache.hadoop.hive.ql.wm.Trigger) WMPoolTrigger(org.apache.hadoop.hive.metastore.api.WMPoolTrigger) List(java.util.List) LinkedList(java.util.LinkedList)
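
The allocation math in this example composes fractions down the pool tree: a child's absolute cluster share is its declared allocFraction multiplied by the parent's share, and the parent's remaining share is reduced by that amount. Below is a minimal standalone sketch of that bookkeeping only, using made-up pool paths, "." as the separator, and plain maps instead of the actual PoolState class.

import java.util.HashMap;
import java.util.Map;

public class PoolFractionSketch {
    public static void main(String[] args) {
        // Declared allocFraction per pool path; parents are listed before children,
        // mirroring the level-by-level processing in applyNewResourcePlanOnMasterThread.
        String[][] pools = {
            { "default", "0.5" },
            { "bi", "0.5" },
            { "bi.interactive", "0.7" } // 0.7 of its parent "bi", not of the whole cluster
        };
        Map<String, Double> absoluteFraction = new HashMap<>();
        for (String[] p : pools) {
            String path = p[0];
            double alloc = Double.parseDouble(p[1]);
            int sep = path.lastIndexOf('.');
            // Root pools keep their declared fraction; children scale by the parent's share.
            double abs = sep < 0 ? alloc : absoluteFraction.get(path.substring(0, sep)) * alloc;
            absoluteFraction.put(path, abs);
            System.out.println(path + " -> " + abs);
        }
        // Prints: default -> 0.5, bi -> 0.5, bi.interactive -> 0.35
    }
}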

Example 7 with WMTrigger

Use of org.apache.hadoop.hive.metastore.api.WMTrigger in project hive by apache.

Class DDLTask, method createOrDropTriggerToPoolMapping:

private int createOrDropTriggerToPoolMapping(Hive db, CreateOrDropTriggerToPoolMappingDesc desc) throws HiveException {
    if (!desc.isUnmanagedPool()) {
        db.createOrDropTriggerToPoolMapping(desc.getResourcePlanName(), desc.getTriggerName(), desc.getPoolPath(), desc.shouldDrop());
    } else {
        assert desc.getPoolPath() == null;
        WMTrigger trigger = new WMTrigger(desc.getResourcePlanName(), desc.getTriggerName());
        // If we are dropping from unmanaged, unset the flag; and vice versa
        trigger.setIsInUnmanaged(!desc.shouldDrop());
        db.alterWMTrigger(trigger);
    }
    return 0;
}
Also used : WMTrigger(org.apache.hadoop.hive.metastore.api.WMTrigger)
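
Stripped of the DDL plumbing, the unmanaged branch just flips the isInUnmanaged flag on a WMTrigger and persists it through the metastore. A hedged sketch of that branch in isolation follows; the plan and trigger names ("my_rp", "slow_query") are placeholders, not values from the original example.

import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;

final class UnmanagedTriggerMappingSketch {
    // drop == false adds the trigger to the unmanaged (non-WM) scope;
    // drop == true removes it, mirroring the negation in the DDLTask above.
    static void mapTriggerToUnmanaged(Hive db, boolean drop) throws HiveException {
        WMTrigger trigger = new WMTrigger("my_rp", "slow_query");
        trigger.setIsInUnmanaged(!drop);
        db.alterWMTrigger(trigger);
    }
}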

Example 8 with WMTrigger

Use of org.apache.hadoop.hive.metastore.api.WMTrigger in project hive by apache.

Class DDLSemanticAnalyzer, method analyzeAlterTrigger:

private void analyzeAlterTrigger(ASTNode ast) throws SemanticException {
    if (ast.getChildCount() != 4) {
        throw new SemanticException("Invalid syntax for alter trigger statement");
    }
    String rpName = unescapeIdentifier(ast.getChild(0).getText());
    String triggerName = unescapeIdentifier(ast.getChild(1).getText());
    String triggerExpression = buildTriggerExpression((ASTNode) ast.getChild(2));
    String actionExpression = buildTriggerActionExpression((ASTNode) ast.getChild(3));
    WMTrigger trigger = new WMTrigger(rpName, triggerName);
    trigger.setTriggerExpression(triggerExpression);
    trigger.setActionExpression(actionExpression);
    AlterWMTriggerDesc desc = new AlterWMTriggerDesc(trigger);
    addServiceOutput();
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
Also used : DDLWork(org.apache.hadoop.hive.ql.plan.DDLWork) WMTrigger(org.apache.hadoop.hive.metastore.api.WMTrigger) AlterWMTriggerDesc(org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc)
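
Apart from the AST handling, the analyzer's output is a WMTrigger carrying a trigger expression and an action expression, wrapped in an AlterWMTriggerDesc. The sketch below shows only that payload; the plan name, trigger name, and both expressions are illustrative placeholders, and the expression syntax is validated elsewhere.

import org.apache.hadoop.hive.metastore.api.WMTrigger;

final class AlterTriggerPayloadSketch {
    // Builds the same kind of object the analyzer hands to AlterWMTriggerDesc.
    static WMTrigger buildTrigger() {
        WMTrigger trigger = new WMTrigger("my_rp", "highly_parallel");
        trigger.setTriggerExpression("TOTAL_TASKS > 10000");
        trigger.setActionExpression("MOVE TO etl");
        return trigger;
    }
}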

Example 9 with WMTrigger

Use of org.apache.hadoop.hive.metastore.api.WMTrigger in project hive by apache.

Class TestJsonRPFormatter, method addTrigger:

private void addTrigger(WMFullResourcePlan fullRp, String triggerName, String action, String expr, String poolName) {
    WMTrigger trigger = new WMTrigger(fullRp.getPlan().getName(), triggerName);
    trigger.setActionExpression(action);
    trigger.setTriggerExpression(expr);
    fullRp.addToTriggers(trigger);
    WMPoolTrigger pool2Trigger = new WMPoolTrigger(poolName, triggerName);
    fullRp.addToPoolTriggers(pool2Trigger);
}
Also used : WMTrigger(org.apache.hadoop.hive.metastore.api.WMTrigger) WMPoolTrigger(org.apache.hadoop.hive.metastore.api.WMPoolTrigger)
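
For context, a hypothetical caller inside the same test class might look like the sketch below. It assumes the Thrift-generated constructors WMResourcePlan(String name) and WMFullResourcePlan(WMResourcePlan plan, List<WMPool> pools); the plan name, trigger name, expressions, and pool name are placeholders.

import java.util.ArrayList;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;

// Would live in TestJsonRPFormatter, since addTrigger is a private helper there.
private void buildSampleResourcePlan() {
    WMFullResourcePlan fullRp = new WMFullResourcePlan(new WMResourcePlan("test_rp"), new ArrayList<WMPool>());
    addTrigger(fullRp, "slow_query", "KILL", "ELAPSED_TIME > 10000", "default");
    // fullRp now carries one WMTrigger and one WMPoolTrigger linking it to pool "default".
}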

Example 10 with WMTrigger

Use of org.apache.hadoop.hive.metastore.api.WMTrigger in project hive by apache.

Class ObjectStore, method fromMWMTrigger:

private WMTrigger fromMWMTrigger(MWMTrigger mTrigger, String resourcePlanName) {
    WMTrigger trigger = new WMTrigger();
    trigger.setResourcePlanName(resourcePlanName);
    trigger.setTriggerName(mTrigger.getName());
    trigger.setTriggerExpression(mTrigger.getTriggerExpression());
    trigger.setActionExpression(mTrigger.getActionExpression());
    trigger.setIsInUnmanaged(mTrigger.getIsInUnmanaged());
    return trigger;
}
Also used : MWMTrigger(org.apache.hadoop.hive.metastore.model.MWMTrigger) WMTrigger(org.apache.hadoop.hive.metastore.api.WMTrigger)
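
Once converted out of the JDO model, a WMTrigger like this is typically turned into a runtime trigger through the same ExecutionTrigger.fromWMTrigger factory used in Example 6. A hedged sketch follows; the names and expressions are placeholders, and if the expression or action syntax is not what the factory expects, it will fail to parse.

import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.ql.wm.ExecutionTrigger;
import org.apache.hadoop.hive.ql.wm.Trigger;

final class TriggerConversionSketch {
    // Uses only the setters shown in the examples above; values are illustrative.
    static Trigger toRuntimeTrigger() {
        WMTrigger trigger = new WMTrigger("my_rp", "slow_query");
        trigger.setTriggerExpression("ELAPSED_TIME > 10000");
        trigger.setActionExpression("KILL");
        return ExecutionTrigger.fromWMTrigger(trigger);
    }
}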

Aggregations

WMTrigger (org.apache.hadoop.hive.metastore.api.WMTrigger): 11
ExecutionTrigger (org.apache.hadoop.hive.ql.wm.ExecutionTrigger): 4
Trigger (org.apache.hadoop.hive.ql.wm.Trigger): 4
ArrayList (java.util.ArrayList): 3
WMFullResourcePlan (org.apache.hadoop.hive.metastore.api.WMFullResourcePlan): 3
LinkedList (java.util.LinkedList): 2
List (java.util.List): 2
WMPoolTrigger (org.apache.hadoop.hive.metastore.api.WMPoolTrigger): 2
WMResourcePlan (org.apache.hadoop.hive.metastore.api.WMResourcePlan): 2
MWMTrigger (org.apache.hadoop.hive.metastore.model.MWMTrigger): 2
DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork): 2
HashMap (java.util.HashMap): 1
IdentityHashMap (java.util.IdentityHashMap): 1
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1
Query (javax.jdo.Query): 1
InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException): 1
WMPool (org.apache.hadoop.hive.metastore.api.WMPool): 1
MStringList (org.apache.hadoop.hive.metastore.model.MStringList): 1
MWMResourcePlan (org.apache.hadoop.hive.metastore.model.MWMResourcePlan): 1
AlterWMTriggerDesc (org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc): 1