Search in sources:

Example 1 with WMNullableResourcePlan

Use of org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan in project hive by apache.

From the class DDLSemanticAnalyzer, the method analyzeAlterResourcePlan:

/**
 * Analyzes an ALTER RESOURCE PLAN statement and queues the corresponding DDL task.
 * Handles three shapes: a bare ENABLE (rejected with a hint), a bare DISABLE
 * (force-deactivates whichever plan is enabled), and plan-specific alterations
 * (activate/enable/disable/replace/rename/validate/query-parallelism/default-pool).
 *
 * @param ast the ALTER RESOURCE PLAN AST node; child 0 is either a status token
 *            (TOK_ENABLE/TOK_DISABLE) or the plan name identifier
 * @throws SemanticException on any syntax violation in the AST
 */
private void analyzeAlterResourcePlan(ASTNode ast) throws SemanticException {
    if (ast.getChildCount() < 1) {
        throw new SemanticException("Incorrect syntax");
    }
    Tree nameOrGlobal = ast.getChild(0);
    switch(nameOrGlobal.getType()) {
        case HiveParser.TOK_ENABLE:
            // This command exists solely to output this message. TODO: can we do it w/o an error?
            throw new SemanticException("Activate a resource plan to enable workload management");
        case HiveParser.TOK_DISABLE:
            // Global DISABLE: build a template plan rather than naming a specific one.
            WMNullableResourcePlan anyRp = new WMNullableResourcePlan();
            // NOTE(review): status ENABLED on the template combined with the ctor flags below
            // presumably means "force-deactivate the currently active plan" — the six positional
            // booleans of AlterResourcePlanDesc are not visible here; confirm against its signature.
            anyRp.setStatus(WMResourcePlanStatus.ENABLED);
            AlterResourcePlanDesc desc = new AlterResourcePlanDesc(anyRp, null, false, false, true, false);
            addServiceOutput();
            rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
            return;
        // Continue to handle changes to a specific plan.
        default:
    }
    // Plan-specific form requires at least a name plus one alteration token.
    if (ast.getChildCount() < 2) {
        throw new SemanticException("Invalid syntax for ALTER RESOURCE PLAN statement");
    }
    String rpName = unescapeIdentifier(ast.getChild(0).getText());
    // Nullable thrift struct: fields are accumulated as alteration tokens are parsed;
    // unset fields mean "leave unchanged" on the metastore side.
    WMNullableResourcePlan resourcePlan = new WMNullableResourcePlan();
    boolean isEnableActivate = false, isReplace = false;
    boolean validate = false;
    for (int i = 1; i < ast.getChildCount(); ++i) {
        Tree child = ast.getChild(i);
        switch(child.getType()) {
            case HiveParser.TOK_VALIDATE:
                validate = true;
                break;
            case HiveParser.TOK_ACTIVATE:
                // Status is ENABLED only if a TOK_ENABLE was already seen in this same
                // statement, i.e. the combined "ENABLE ACTIVATE" form.
                if (resourcePlan.getStatus() == WMResourcePlanStatus.ENABLED) {
                    isEnableActivate = true;
                }
                if (child.getChildCount() > 1) {
                    throw new SemanticException("Expected 0 or 1 arguments " + ast.toStringTree());
                } else if (child.getChildCount() == 1) {
                    // "ACTIVATE WITH REPLACE" — the only permitted child is TOK_REPLACE.
                    if (child.getChild(0).getType() != HiveParser.TOK_REPLACE) {
                        throw new SemanticException("Incorrect syntax " + ast.toStringTree());
                    }
                    isReplace = true;
                    // Implied.
                    isEnableActivate = false;
                }
                resourcePlan.setStatus(WMResourcePlanStatus.ACTIVE);
                break;
            case HiveParser.TOK_ENABLE:
                // Mirror of the TOK_ACTIVATE branch for the "ACTIVATE ... ENABLE" ordering:
                // status is already ACTIVE, so only the combined-flag bookkeeping changes.
                if (resourcePlan.getStatus() == WMResourcePlanStatus.ACTIVE) {
                    isEnableActivate = !isReplace;
                } else {
                    resourcePlan.setStatus(WMResourcePlanStatus.ENABLED);
                }
                break;
            case HiveParser.TOK_DISABLE:
                resourcePlan.setStatus(WMResourcePlanStatus.DISABLED);
                break;
            case HiveParser.TOK_REPLACE:
                isReplace = true;
                if (child.getChildCount() > 1) {
                    throw new SemanticException("Expected 0 or 1 arguments " + ast.toStringTree());
                } else if (child.getChildCount() == 1) {
                    // Replace is essentially renaming a plan to the name of an existing plan, with backup.
                    resourcePlan.setName(unescapeIdentifier(child.getChild(0).getText()));
                } else {
                    // Bare REPLACE: replace the currently active plan.
                    resourcePlan.setStatus(WMResourcePlanStatus.ACTIVE);
                }
                break;
            case HiveParser.TOK_QUERY_PARALLELISM:
                {
                    if (child.getChildCount() != 1) {
                        throw new SemanticException("Expected one argument");
                    }
                    Tree val = child.getChild(0);
                    // Mark the field as explicitly set first so that the NULL case below
                    // transmits "clear this value" rather than "leave unchanged".
                    resourcePlan.setIsSetQueryParallelism(true);
                    if (val.getType() == HiveParser.TOK_NULL) {
                        resourcePlan.unsetQueryParallelism();
                    } else {
                        resourcePlan.setQueryParallelism(Integer.parseInt(val.getText()));
                    }
                    break;
                }
            case HiveParser.TOK_DEFAULT_POOL:
                {
                    if (child.getChildCount() != 1) {
                        throw new SemanticException("Expected one argument");
                    }
                    Tree val = child.getChild(0);
                    // Same explicit-null-vs-absent handling as query parallelism above.
                    resourcePlan.setIsSetDefaultPoolPath(true);
                    if (val.getType() == HiveParser.TOK_NULL) {
                        resourcePlan.unsetDefaultPoolPath();
                    } else {
                        resourcePlan.setDefaultPoolPath(poolPath(child.getChild(0)));
                    }
                    break;
                }
            case HiveParser.TOK_RENAME:
                if (child.getChildCount() != 1) {
                    throw new SemanticException("Expected one argument");
                }
                resourcePlan.setName(unescapeIdentifier(child.getChild(0).getText()));
                break;
            default:
                throw new SemanticException("Unexpected token in alter resource plan statement: " + child.getType());
        }
    }
    AlterResourcePlanDesc desc = new AlterResourcePlanDesc(resourcePlan, rpName, validate, isEnableActivate, false, isReplace);
    if (validate) {
        // VALIDATE output is fetched back to the client from a local tmp file.
        ctx.setResFile(ctx.getLocalTmpPath());
        desc.setResFile(ctx.getResFile().toString());
    }
    addServiceOutput();
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
    if (validate) {
        setFetchTask(createFetchTask(AlterResourcePlanDesc.getSchema()));
    }
}
Also used : WMNullableResourcePlan(org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan) DDLWork(org.apache.hadoop.hive.ql.plan.DDLWork) CommonTree(org.antlr.runtime.tree.CommonTree) Tree(org.antlr.runtime.tree.Tree) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) NotNullConstraint(org.apache.hadoop.hive.ql.metadata.NotNullConstraint) DefaultConstraint(org.apache.hadoop.hive.ql.metadata.DefaultConstraint) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) AlterResourcePlanDesc(org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc)

Example 2 with WMNullableResourcePlan

Use of org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan in project hive by apache.

From the class DDLTask, the method alterResourcePlan:

/**
 * Executes an ALTER RESOURCE PLAN: either validates a plan (writing errors to the
 * descriptor's result file) or applies the alteration via the metastore and, when
 * the change affects the active plan, propagates it to workload management.
 *
 * Fix: removed a stray empty statement (";") left after the try-with-resources
 * block in the validation branch.
 *
 * @param db   Hive metastore facade used to validate or alter the plan
 * @param desc parsed ALTER RESOURCE PLAN descriptor
 * @return 0 on success (DDL task convention)
 * @throws HiveException on metastore failure, I/O failure while writing the
 *         validation result, or an inconsistent applied-plan state
 */
private int alterResourcePlan(Hive db, AlterResourcePlanDesc desc) throws HiveException {
    if (desc.shouldValidate()) {
        WMValidateResourcePlanResponse result = db.validateResourcePlan(desc.getResourcePlanName());
        try (DataOutputStream out = getOutputStream(desc.getResFile())) {
            formatter.showErrors(out, result);
        } catch (IOException e) {
            throw new HiveException(e);
        }
        return 0;
    }
    WMNullableResourcePlan resourcePlan = desc.getResourcePlan();
    final WorkloadManager wm = WorkloadManager.getInstance();
    final TezSessionPoolManager pm = TezSessionPoolManager.getInstance();
    boolean isActivate = false, isInTest = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST);
    if (resourcePlan.getStatus() != null) {
        isActivate = resourcePlan.getStatus() == WMResourcePlanStatus.ACTIVE;
    }
    WMFullResourcePlan appliedRp = db.alterResourcePlan(desc.getResourcePlanName(), resourcePlan, desc.isEnableActivate(), desc.isForceDeactivate(), desc.isReplace());
    boolean mustHaveAppliedChange = isActivate || desc.isForceDeactivate();
    if (!mustHaveAppliedChange && !desc.isReplace()) {
        // The modification cannot affect an active plan.
        return 0;
    }
    if (appliedRp == null && !mustHaveAppliedChange) {
        // Replacing an inactive plan.
        return 0;
    }
    if (wm == null && isInTest) {
        // Skip for tests if WM is not present.
        return 0;
    }
    // A non-null applied plan is expected exactly when we are NOT force-deactivating.
    if ((appliedRp == null) != desc.isForceDeactivate()) {
        throw new HiveException("Cannot get a resource plan to apply; or non-null plan on disable");
    // TODO: shut down HS2?
    }
    assert appliedRp == null || appliedRp.getPlan().getStatus() == WMResourcePlanStatus.ACTIVE;
    handleWorkloadManagementServiceChange(wm, pm, isActivate, appliedRp);
    return 0;
}
Also used : WMNullableResourcePlan(org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan) WMFullResourcePlan(org.apache.hadoop.hive.metastore.api.WMFullResourcePlan) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) DataOutputStream(java.io.DataOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) WMValidateResourcePlanResponse(org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse) TezSessionPoolManager(org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager) IOException(java.io.IOException) WorkloadManager(org.apache.hadoop.hive.ql.exec.tez.WorkloadManager)

Aggregations

WMNullableResourcePlan (org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan)2 DataOutputStream (java.io.DataOutputStream)1 IOException (java.io.IOException)1 CommonTree (org.antlr.runtime.tree.CommonTree)1 Tree (org.antlr.runtime.tree.Tree)1 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)1 SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint)1 SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint)1 SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint)1 SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint)1 WMFullResourcePlan (org.apache.hadoop.hive.metastore.api.WMFullResourcePlan)1 WMValidateResourcePlanResponse (org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse)1 TezSessionPoolManager (org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager)1 WorkloadManager (org.apache.hadoop.hive.ql.exec.tez.WorkloadManager)1 DefaultConstraint (org.apache.hadoop.hive.ql.metadata.DefaultConstraint)1 HiveException (org.apache.hadoop.hive.ql.metadata.HiveException)1 NotNullConstraint (org.apache.hadoop.hive.ql.metadata.NotNullConstraint)1 AlterResourcePlanDesc (org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc)1 DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork)1