
Example 16 with ASTNode

Use of org.apache.hadoop.hive.ql.parse.ASTNode in project hive by apache: class HiveAuthorizationTaskFactoryImpl, method createShowRoleGrantTask.

@Override
public Task<? extends Serializable> createShowRoleGrantTask(ASTNode ast, Path resultFile, HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs) {
    ASTNode child = (ASTNode) ast.getChild(0);
    // USER is the default when the child token is not one of the expected principal tokens.
    PrincipalType principalType = PrincipalType.USER;
    switch(child.getType()) {
        case HiveParser.TOK_USER:
            principalType = PrincipalType.USER;
            break;
        case HiveParser.TOK_GROUP:
            principalType = PrincipalType.GROUP;
            break;
        case HiveParser.TOK_ROLE:
            principalType = PrincipalType.ROLE;
            break;
    }
    String principalName = BaseSemanticAnalyzer.unescapeIdentifier(child.getChild(0).getText());
    RoleDDLDesc roleDesc = new RoleDDLDesc(principalName, principalType, RoleDDLDesc.RoleOperation.SHOW_ROLE_GRANT, null);
    roleDesc.setResFile(resultFile.toString());
    return TaskFactory.get(new DDLWork(inputs, outputs, roleDesc), conf);
}
Also used: DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), RoleDDLDesc (org.apache.hadoop.hive.ql.plan.RoleDDLDesc), PrincipalType (org.apache.hadoop.hive.metastore.api.PrincipalType)
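
To see the AST this factory method receives, here is a minimal, hedged sketch. It assumes Hive's ql module on the classpath; ParseDriver.parse returning an ASTNode matches Hive 2.x/3.x, and the exact token spelling in the output may vary by version.

import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
import org.apache.hadoop.hive.ql.parse.ParseException;

public class ShowRoleGrantAstDemo {
    public static void main(String[] args) throws ParseException {
        ParseDriver pd = new ParseDriver();
        // Child 0 of the SHOW ROLE GRANT statement node is the
        // TOK_USER/TOK_GROUP/TOK_ROLE node the switch above inspects.
        ASTNode ast = pd.parse("SHOW ROLE GRANT USER bob");
        System.out.println(ast.toStringTree());
    }
}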

Example 17 with ASTNode

Use of org.apache.hadoop.hive.ql.parse.ASTNode in project hive by apache: class MatchPath, method createSelectListRR.

/*
   * add array<struct> to the list of columns
   */
protected static RowResolver createSelectListRR(MatchPath evaluator, PTFInputDef inpDef) throws SemanticException {
    RowResolver rr = new RowResolver();
    RowResolver inputRR = inpDef.getOutputShape().getRr();
    evaluator.inputColumnNamesMap = new HashMap<String, String>();
    ArrayList<String> inputColumnNames = new ArrayList<String>();
    ArrayList<ObjectInspector> inpColOIs = new ArrayList<ObjectInspector>();
    for (ColumnInfo inpCInfo : inputRR.getColumnInfos()) {
        ColumnInfo cInfo = new ColumnInfo(inpCInfo);
        String colAlias = cInfo.getAlias();
        String[] tabColAlias = inputRR.reverseLookup(inpCInfo.getInternalName());
        if (tabColAlias != null) {
            colAlias = tabColAlias[1];
        }
        ASTNode inExpr = PTFTranslator.getASTNode(inpCInfo, inputRR);
        if (inExpr != null) {
            rr.putExpression(inExpr, cInfo);
            colAlias = inExpr.toStringTree().toLowerCase();
        } else {
            colAlias = colAlias == null ? cInfo.getInternalName() : colAlias;
            rr.put(cInfo.getTabAlias(), colAlias, cInfo);
        }
        evaluator.inputColumnNamesMap.put(cInfo.getInternalName(), colAlias);
        inputColumnNames.add(colAlias);
        inpColOIs.add(cInfo.getObjectInspector());
    }
    StandardListObjectInspector pathAttrOI = ObjectInspectorFactory.getStandardListObjectInspector(ObjectInspectorFactory.getStandardStructObjectInspector(inputColumnNames, inpColOIs));
    ColumnInfo pathColumn = new ColumnInfo(PATHATTR_NAME, TypeInfoUtils.getTypeInfoFromObjectInspector(pathAttrOI), null, false, false);
    rr.put(null, PATHATTR_NAME, pathColumn);
    return rr;
}
Also used: PrimitiveObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector), ObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector), StandardListObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.StandardListObjectInspector), StructObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector), ConstantObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector), ArrayList (java.util.ArrayList), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo), RowResolver (org.apache.hadoop.hive.ql.parse.RowResolver)
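
The last step is the interesting one: the input columns are wrapped into a single array<struct> path column. A minimal sketch of that construction, using hypothetical column names sym and price (only classes from hive-serde are assumed):

import java.util.Arrays;

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StandardListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class PathAttrOiDemo {
    public static void main(String[] args) {
        // Build struct<sym:string,price:double>, then wrap it in a list OI,
        // mirroring how createSelectListRR builds pathAttrOI.
        StandardListObjectInspector pathAttrOI =
            ObjectInspectorFactory.getStandardListObjectInspector(
                ObjectInspectorFactory.getStandardStructObjectInspector(
                    Arrays.asList("sym", "price"),
                    Arrays.<ObjectInspector>asList(
                        PrimitiveObjectInspectorFactory.javaStringObjectInspector,
                        PrimitiveObjectInspectorFactory.javaDoubleObjectInspector)));
        // Prints: array<struct<sym:string,price:double>>
        System.out.println(pathAttrOI.getTypeName());
    }
}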

Example 18 with ASTNode

Use of org.apache.hadoop.hive.ql.parse.ASTNode in project hive by apache: class TestTransactionStatement, method testAutoCommit.

@Test
public void testAutoCommit() throws ParseException {
    ASTNode ast = parse("SET AUTOCOMMIT TRUE");
    Assert.assertEquals("AST doesn't match", "(tok_set_autocommit tok_true)", ast.toStringTree());
    ast = parse("SET AUTOCOMMIT FALSE");
    Assert.assertEquals("AST doesn't match", "(tok_set_autocommit tok_false)", ast.toStringTree());
}
Also used: ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), Test (org.junit.Test)
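
The parse(...) helper these tests call is not shown above. A plausible minimal version, an assumption rather than the actual test code (ParseDriver.parse returning an ASTNode matches Hive 2.x/3.x):

import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
import org.apache.hadoop.hive.ql.parse.ParseException;

public class ParseHelper {
    // Hypothetical stand-in for the tests' parse(...) helper.
    static ASTNode parse(String command) throws ParseException {
        ASTNode root = new ParseDriver().parse(command);
        // ParseDriver wraps the statement in a root node; unwrap one level
        // so assertions see the statement token (e.g. tok_commit) directly.
        return (ASTNode) root.getChild(0);
    }
}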

Example 19 with ASTNode

Use of org.apache.hadoop.hive.ql.parse.ASTNode in project hive by apache: class TestTransactionStatement, method testTxnCommitRollback.

@Test
public void testTxnCommitRollback() throws ParseException {
    ASTNode ast = parse("COMMIT");
    Assert.assertEquals("AST doesn't match", "tok_commit", ast.toStringTree());
    ast = parse("COMMIT WORK");
    Assert.assertEquals("AST doesn't match", "tok_commit", ast.toStringTree());
    ast = parse("ROLLBACK");
    Assert.assertEquals("AST doesn't match", "tok_rollback", ast.toStringTree());
    ast = parse("ROLLBACK WORK");
    Assert.assertEquals("AST doesn't match", "tok_rollback", ast.toStringTree());
}
Also used: ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), Test (org.junit.Test)

Example 20 with ASTNode

Use of org.apache.hadoop.hive.ql.parse.ASTNode in project hive by apache: class HiveGBOpConvUtil, method getGBInfo.

// For each GB op in the logical GB this should be called separately;
// otherwise GB evaluators and expr nodes may get shared among multiple GB ops
private static GBInfo getGBInfo(HiveAggregate aggRel, OpAttr inputOpAf, HiveConf hc) throws SemanticException {
    GBInfo gbInfo = new GBInfo();
    // 0. Collect AggRel output col Names
    gbInfo.outputColNames.addAll(aggRel.getRowType().getFieldNames());
    // 1. Collect GB Keys
    RelNode aggInputRel = aggRel.getInput();
    ExprNodeConverter exprConv = new ExprNodeConverter(inputOpAf.tabAlias, aggInputRel.getRowType(), new HashSet<Integer>(), aggRel.getCluster().getTypeFactory(), true);
    ExprNodeDesc tmpExprNodeDesc;
    for (int i : aggRel.getGroupSet()) {
        RexInputRef iRef = new RexInputRef(i, aggInputRel.getRowType().getFieldList().get(i).getType());
        tmpExprNodeDesc = iRef.accept(exprConv);
        gbInfo.gbKeys.add(tmpExprNodeDesc);
        gbInfo.gbKeyColNamesInInput.add(aggInputRel.getRowType().getFieldNames().get(i));
        gbInfo.gbKeyTypes.add(tmpExprNodeDesc.getTypeInfo());
    }
    // 2. Collect Grouping Set info
    if (aggRel.indicator) {
        // 2.1 Translate Grouping set col bitset
        ImmutableList<ImmutableBitSet> lstGrpSet = aggRel.getGroupSets();
        int bitmap = 0;
        for (ImmutableBitSet grpSet : lstGrpSet) {
            bitmap = 0;
            for (Integer bitIdx : grpSet.asList()) {
                bitmap = SemanticAnalyzer.setBit(bitmap, bitIdx);
            }
            gbInfo.grpSets.add(bitmap);
        }
        Collections.sort(gbInfo.grpSets);
        // 2.2 Check if GRpSet require additional MR Job
        gbInfo.grpSetRqrAdditionalMRJob = gbInfo.grpSets.size() > hc.getIntVar(HiveConf.ConfVars.HIVE_NEW_JOB_GROUPING_SET_CARDINALITY);
        // 2.3 Check if GROUPING_ID needs to be projected out
        if (!aggRel.getAggCallList().isEmpty() && (aggRel.getAggCallList().get(aggRel.getAggCallList().size() - 1).getAggregation() == HiveGroupingID.INSTANCE)) {
            gbInfo.grpIdFunctionNeeded = true;
        }
    }
    // 3. Walk through UDAF & Collect Distinct Info
    Set<Integer> distinctRefs = new HashSet<Integer>();
    Map<Integer, Integer> distParamInRefsToOutputPos = new HashMap<Integer, Integer>();
    for (AggregateCall aggCall : aggRel.getAggCallList()) {
        if ((aggCall.getAggregation() == HiveGroupingID.INSTANCE) || !aggCall.isDistinct()) {
            continue;
        }
        List<Integer> argLst = new ArrayList<Integer>(aggCall.getArgList());
        List<String> argNames = HiveCalciteUtil.getFieldNames(argLst, aggInputRel);
        ExprNodeDesc distinctExpr;
        for (int i = 0; i < argLst.size(); i++) {
            if (!distinctRefs.contains(argLst.get(i))) {
                distinctRefs.add(argLst.get(i));
                distinctExpr = HiveCalciteUtil.getExprNode(argLst.get(i), aggInputRel, exprConv);
                // Only distinct nodes that are NOT part of the key should be added to distExprNodes
                if (ExprNodeDescUtils.indexOf(distinctExpr, gbInfo.gbKeys) < 0) {
                    distParamInRefsToOutputPos.put(argLst.get(i), gbInfo.distExprNodes.size());
                    gbInfo.distExprNodes.add(distinctExpr);
                    gbInfo.distExprNames.add(argNames.get(i));
                    gbInfo.distExprTypes.add(distinctExpr.getTypeInfo());
                }
            }
        }
    }
    // 4. Walk through UDAF & Collect UDAF Info
    Set<Integer> deDupedNonDistIrefsSet = new HashSet<Integer>();
    for (AggregateCall aggCall : aggRel.getAggCallList()) {
        if (aggCall.getAggregation() == HiveGroupingID.INSTANCE) {
            continue;
        }
        UDAFAttrs udafAttrs = new UDAFAttrs();
        List<ExprNodeDesc> argExps = HiveCalciteUtil.getExprNodes(aggCall.getArgList(), aggInputRel, inputOpAf.tabAlias);
        udafAttrs.udafParams.addAll(argExps);
        udafAttrs.udafName = aggCall.getAggregation().getName();
        udafAttrs.argList = aggCall.getArgList();
        udafAttrs.isDistinctUDAF = aggCall.isDistinct();
        List<Integer> argLst = new ArrayList<Integer>(aggCall.getArgList());
        List<Integer> distColIndicesOfUDAF = new ArrayList<Integer>();
        List<Integer> distUDAFParamsIndxInDistExprs = new ArrayList<Integer>();
        for (int i = 0; i < argLst.size(); i++) {
            // NOTE: a distinct expr can be part of the GB key
            if (udafAttrs.isDistinctUDAF) {
                ExprNodeDesc argExpr = argExps.get(i);
                Integer found = ExprNodeDescUtils.indexOf(argExpr, gbInfo.gbKeys);
                distColIndicesOfUDAF.add(found < 0 ? distParamInRefsToOutputPos.get(argLst.get(i)) + gbInfo.gbKeys.size() + (gbInfo.grpSets.size() > 0 ? 1 : 0) : found);
                distUDAFParamsIndxInDistExprs.add(distParamInRefsToOutputPos.get(argLst.get(i)));
            } else {
                // TODO: this seems wrong (following what Hive Regular does)
                if (!distParamInRefsToOutputPos.containsKey(argLst.get(i)) && !deDupedNonDistIrefsSet.contains(argLst.get(i))) {
                    deDupedNonDistIrefsSet.add(argLst.get(i));
                    gbInfo.deDupedNonDistIrefs.add(udafAttrs.udafParams.get(i));
                }
            }
        }
        if (udafAttrs.isDistinctUDAF) {
            gbInfo.containsDistinctAggr = true;
            udafAttrs.udafParamsIndxInGBInfoDistExprs = distUDAFParamsIndxInDistExprs;
            gbInfo.distColIndices.add(distColIndicesOfUDAF);
        }
        // special handling for count, similar to PlanModifierForASTConv::replaceEmptyGroupAggr()
        udafAttrs.udafEvaluator = SemanticAnalyzer.getGenericUDAFEvaluator(udafAttrs.udafName, new ArrayList<ExprNodeDesc>(udafAttrs.udafParams), new ASTNode(), udafAttrs.isDistinctUDAF, udafAttrs.udafParams.isEmpty() && "count".equalsIgnoreCase(udafAttrs.udafName));
        gbInfo.udafAttrs.add(udafAttrs);
    }
    // 5. Gather GB memory thresholds
    gbInfo.groupByMemoryUsage = HiveConf.getFloatVar(hc, HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY);
    gbInfo.memoryThreshold = HiveConf.getFloatVar(hc, HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD);
    // 6. Gather GB physical pipeline mode (based on user config & grouping-set size)
    gbInfo.gbPhysicalPipelineMode = getAggOPMode(hc, gbInfo);
    return gbInfo;
}
Also used: ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), AggregateCall (org.apache.calcite.rel.core.AggregateCall), RelNode (org.apache.calcite.rel.RelNode), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), RexInputRef (org.apache.calcite.rex.RexInputRef), ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc), HashSet (java.util.HashSet)
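
Step 2.1 above folds each grouping set into an int bitmap. A self-contained sketch of that encoding, assuming SemanticAnalyzer.setBit simply sets the given bit (plain Java, no Hive dependencies):

public class GroupingSetBitmapDemo {
    // Assumed behavior of SemanticAnalyzer.setBit: set bit bitIdx of bitmap.
    static int setBit(int bitmap, int bitIdx) {
        return bitmap | (1 << bitIdx);
    }

    public static void main(String[] args) {
        // Grouping set {0, 2}: GB keys 0 and 2 participate in this set.
        int bitmap = 0;
        for (int bitIdx : new int[] {0, 2}) {
            bitmap = setBit(bitmap, bitIdx);
        }
        // Prints 101: bits 0 and 2 are set.
        System.out.println(Integer.toBinaryString(bitmap));
    }
}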

Aggregations

ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode): 44
ArrayList (java.util.ArrayList): 8
DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork): 5
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 5
WindowingException (com.sap.hadoop.windowing.WindowingException): 4
InputInfo (com.sap.hadoop.windowing.query2.translate.QueryTranslationInfo.InputInfo): 4
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 4
PrincipalDesc (org.apache.hadoop.hive.ql.plan.PrincipalDesc): 4
PrivilegeObjectDesc (org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc): 4
HashMap (java.util.HashMap): 3
LinkedHashMap (java.util.LinkedHashMap): 3
RexNode (org.apache.calcite.rex.RexNode): 3
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 3
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 3
ObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector): 3
ArgDef (com.sap.hadoop.windowing.query2.definition.ArgDef): 2
QueryInputDef (com.sap.hadoop.windowing.query2.definition.QueryInputDef): 2
IOException (java.io.IOException): 2
BigDecimal (java.math.BigDecimal): 2
RexInputRef (org.apache.calcite.rex.RexInputRef): 2