
Example 86 with ASTNode

Use of org.apache.hadoop.hive.ql.parse.ASTNode in project hive by apache.

The class DescTableAnalyzer, method getPartitionSpec:

private Map<String, String> getPartitionSpec(Hive db, ASTNode node, TableName tableName) throws SemanticException {
    // if this node has only one child, then no partition spec is specified.
    if (node.getChildCount() == 1) {
        return null;
    }
    // if the AST has more than two children, the second *has to* be a partition spec
    if (node.getChildCount() > 2 && (((ASTNode) node.getChild(1)).getType() != HiveParser.TOK_PARTSPEC)) {
        throw new SemanticException(((ASTNode) node.getChild(1)).getType() + " is not a partition specification");
    }
    if (((ASTNode) node.getChild(1)).getType() == HiveParser.TOK_PARTSPEC) {
        ASTNode partNode = (ASTNode) node.getChild(1);
        Table tab = null;
        try {
            tab = db.getTable(tableName.getNotEmptyDbTable());
        } catch (InvalidTableException e) {
            throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName.getNotEmptyDbTable()), e);
        } catch (HiveException e) {
            throw new SemanticException(e.getMessage(), e);
        }
        Map<String, String> partitionSpec = null;
        try {
            partitionSpec = getValidatedPartSpec(tab, partNode, db.getConf(), false);
        } catch (SemanticException e) {
            // return null, continue processing for DESCRIBE table key
            return null;
        }
        if (partitionSpec != null) {
            Partition part = null;
            try {
                part = db.getPartition(tab, partitionSpec, false);
            } catch (HiveException e) {
                // return null, continue processing for DESCRIBE table key
                return null;
            }
            if (part == null) {
                throw new SemanticException(ErrorMsg.INVALID_PARTITION.getMsg(partitionSpec.toString()));
            }
            return partitionSpec;
        }
    }
    return null;
}
Also used: Partition (org.apache.hadoop.hive.ql.metadata.Partition), Table (org.apache.hadoop.hive.ql.metadata.Table), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), InvalidTableException (org.apache.hadoop.hive.ql.metadata.InvalidTableException), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)
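
The node passed in here is the raw DESCRIBE AST, so the child layout the method depends on is easy to inspect by parsing a statement and dumping the tree. A minimal sketch, assuming the classic ParseDriver.parse(String) API from org.apache.hadoop.hive.ql.parse (newer Hive versions wrap the result differently) and a sample table name:

ParseDriver pd = new ParseDriver();
// With a PARTITION clause the statement node gains a second child of type
// TOK_PARTSPEC, which is exactly what getPartitionSpec checks for.
ASTNode tree = pd.parse("DESCRIBE src PARTITION (ds = '2008-04-08')");
System.out.println(tree.dump());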

Example 87 with ASTNode

Use of org.apache.hadoop.hive.ql.parse.ASTNode in project hive by apache.

The class ShowLocksAnalyzer, method analyzeInternal:

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    ctx.setResFile(ctx.getLocalTmpPath());
    String tableName = null;
    Map<String, String> partitionSpec = null;
    boolean isExtended = false;
    if (root.getChildCount() >= 1) {
        // table for which show locks is being executed
        for (int i = 0; i < root.getChildCount(); i++) {
            ASTNode child = (ASTNode) root.getChild(i);
            if (child.getType() == HiveParser.TOK_TABTYPE) {
                tableName = DDLUtils.getFQName((ASTNode) child.getChild(0));
                // get partition metadata if partition specified
                if (child.getChildCount() == 2) {
                    ASTNode partitionSpecNode = (ASTNode) child.getChild(1);
                    partitionSpec = getValidatedPartSpec(getTable(tableName), partitionSpecNode, conf, false);
                }
            } else if (child.getType() == HiveParser.KW_EXTENDED) {
                isExtended = true;
            }
        }
    }
    assert txnManager != null : "Transaction manager should be set before calling analyze";
    ShowLocksDesc desc = new ShowLocksDesc(ctx.getResFile(), tableName, partitionSpec, isExtended, txnManager.useNewShowLocksFormat());
    Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
    rootTasks.add(task);
    task.setFetchSource(true);
    setFetchTask(createFetchTask(desc.getSchema()));
    // Need to initialize the lock manager
    ctx.setNeedLockMgr(true);
}
Also used: DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode)
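
The loop over root's children is the standard way these DDL analyzers pick up optional clauses in any order. A condensed sketch of the same idiom, with a hypothetical helper name, using only the ASTNode and HiveParser calls already shown above:

// Hypothetical helper: true if the statement carried the EXTENDED keyword.
private static boolean hasExtendedKeyword(ASTNode root) {
    for (int i = 0; i < root.getChildCount(); i++) {
        ASTNode child = (ASTNode) root.getChild(i);
        if (child.getType() == HiveParser.KW_EXTENDED) {
            return true;
        }
    }
    return false;
}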

Example 88 with ASTNode

Use of org.apache.hadoop.hive.ql.parse.ASTNode in project hive by apache.

The class ASTConverter, method buildUDTFAST:

private ASTNode buildUDTFAST(String functionName, List<ASTNode> children) {
    ASTNode node = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_FUNCTION, "TOK_FUNCTION");
    node.addChild((ASTNode) ParseDriver.adaptor.create(HiveParser.Identifier, functionName));
    for (ASTNode c : children) {
        ParseDriver.adaptor.addChild(node, c);
    }
    return node;
}
Also used: ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode)
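
Because ParseDriver.adaptor is the shared ANTLR tree adaptor, a node assembled this way can be checked with the usual tree methods. A minimal usage sketch from inside the same class (the method is private), assuming "explode" as a sample function name and toStringTree() behaving as in stock ANTLR:

List<ASTNode> args = new ArrayList<>();
args.add((ASTNode) ParseDriver.adaptor.create(HiveParser.Identifier, "myCol"));
ASTNode fn = buildUDTFAST("explode", args);
// Prints something like: (TOK_FUNCTION explode myCol)
System.out.println(fn.toStringTree());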

Example 89 with ASTNode

Use of org.apache.hadoop.hive.ql.parse.ASTNode in project hive by apache.

The class ASTConverter, method convert:

private ASTNode convert() throws CalciteSemanticException {
    /*
     * 1. Walk RelNode Graph; note from, where, gBy.. nodes.
     */
    new QBVisitor().go(root);
    /*
     * 2. convert from node.
     */
    QueryBlockInfo qb = convertSource(from);
    schema = qb.schema;
    hiveAST.from = ASTBuilder.construct(HiveParser.TOK_FROM, "TOK_FROM").add(qb.ast).node();
    /*
     * 3. convert filterNode
     */
    if (where != null) {
        ASTNode cond = where.getCondition().accept(new RexVisitor(schema, false, root.getCluster().getRexBuilder()));
        hiveAST.where = ASTBuilder.where(cond);
        planMapper.link(cond, where);
        planMapper.link(cond, RelTreeSignature.of(where));
    }
    /*
     * 4. GBy
     */
    if (groupBy != null) {
        ASTBuilder b;
        boolean groupingSetsExpression = false;
        Group aggregateType = groupBy.getGroupType();
        switch(aggregateType) {
            case SIMPLE:
                b = ASTBuilder.construct(HiveParser.TOK_GROUPBY, "TOK_GROUPBY");
                break;
            case ROLLUP:
            case CUBE:
            case OTHER:
                b = ASTBuilder.construct(HiveParser.TOK_GROUPING_SETS, "TOK_GROUPING_SETS");
                groupingSetsExpression = true;
                break;
            default:
                throw new CalciteSemanticException("Group type not recognized");
        }
        HiveAggregate hiveAgg = (HiveAggregate) groupBy;
        if (hiveAgg.getAggregateColumnsOrder() != null) {
            // Aggregation columns may have been sorted in specific order
            for (int pos : hiveAgg.getAggregateColumnsOrder()) {
                addRefToBuilder(b, groupBy.getGroupSet().nth(pos));
            }
            for (int pos = 0; pos < groupBy.getGroupCount(); pos++) {
                if (!hiveAgg.getAggregateColumnsOrder().contains(pos)) {
                    addRefToBuilder(b, groupBy.getGroupSet().nth(pos));
                }
            }
        } else {
            // Aggregation columns have not been reordered
            for (int i : groupBy.getGroupSet()) {
                addRefToBuilder(b, i);
            }
        }
        // Grouping sets expressions
        if (groupingSetsExpression) {
            for (ImmutableBitSet groupSet : groupBy.getGroupSets()) {
                ASTBuilder expression = ASTBuilder.construct(HiveParser.TOK_GROUPING_SETS_EXPRESSION, "TOK_GROUPING_SETS_EXPRESSION");
                for (int i : groupSet) {
                    addRefToBuilder(expression, i);
                }
                b.add(expression);
            }
        }
        if (!groupBy.getGroupSet().isEmpty()) {
            hiveAST.groupBy = b.node();
        }
        schema = new Schema(schema, groupBy);
    }
    /*
     * 5. Having
     */
    if (having != null) {
        ASTNode cond = having.getCondition().accept(new RexVisitor(schema, false, root.getCluster().getRexBuilder()));
        hiveAST.having = ASTBuilder.having(cond);
    }
    /*
     * 6. Project
     */
    ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_SELECT, "TOK_SELECT");
    if (select instanceof Project) {
        List<RexNode> childExps = ((Project) select).getProjects();
        if (childExps.isEmpty()) {
            RexLiteral r = select.getCluster().getRexBuilder().makeExactLiteral(new BigDecimal(1));
            ASTNode selectExpr = ASTBuilder.selectExpr(ASTBuilder.literal(r), "1");
            b.add(selectExpr);
        } else {
            int i = 0;
            for (RexNode r : childExps) {
                ASTNode expr = r.accept(new RexVisitor(schema, r instanceof RexLiteral, select.getCluster().getRexBuilder()));
                String alias = select.getRowType().getFieldNames().get(i++);
                ASTNode selectExpr = ASTBuilder.selectExpr(expr, alias);
                b.add(selectExpr);
            }
        }
        hiveAST.select = b.node();
    } else {
        // select is UDTF
        HiveTableFunctionScan udtf = (HiveTableFunctionScan) select;
        List<ASTNode> children = new ArrayList<>();
        RexCall call = (RexCall) udtf.getCall();
        for (RexNode r : call.getOperands()) {
            ASTNode expr = r.accept(new RexVisitor(schema, r instanceof RexLiteral, select.getCluster().getRexBuilder()));
            children.add(expr);
        }
        ASTBuilder sel = ASTBuilder.construct(HiveParser.TOK_SELEXPR, "TOK_SELEXPR");
        ASTNode function = buildUDTFAST(call.getOperator().getName(), children);
        sel.add(function);
        for (String alias : udtf.getRowType().getFieldNames()) {
            sel.add(HiveParser.Identifier, alias);
        }
        b.add(sel);
        hiveAST.select = b.node();
    }
    /*
     * 7. Order By. A RelNode has no pointer to its parent, so we walk the tree
     * top down; but the ORDER BY at each block really belongs to its src/from,
     * hence the sort needs to be passed in to each block from its parent.
     * 8. Limit
     */
    convertOrderToASTNode(orderLimit);
    return hiveAST.getAST();
}
Also used: Group (org.apache.calcite.rel.core.Aggregate.Group), RexLiteral (org.apache.calcite.rex.RexLiteral), ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), HiveTableFunctionScan (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableFunctionScan), ArrayList (java.util.ArrayList), BigDecimal (java.math.BigDecimal), RexCall (org.apache.calcite.rex.RexCall), Project (org.apache.calcite.rel.core.Project), HiveAggregate (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException), RexNode (org.apache.calcite.rex.RexNode)
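
Most of convert() leans on the fluent ASTBuilder pattern visible above: construct(tokenType, text), chained add(...) calls, then node(). A condensed sketch of the empty-projection branch (SELECT 1), assuming a Calcite RexBuilder named rexBuilder is in scope, as one is inside convert():

// Build a literal 1, wrap it as a select expression, and attach it to
// a TOK_SELECT node, mirroring the childExps.isEmpty() branch above.
RexLiteral one = rexBuilder.makeExactLiteral(java.math.BigDecimal.ONE);
ASTNode selectExpr = ASTBuilder.selectExpr(ASTBuilder.literal(one), "1");
ASTNode selectNode = ASTBuilder.construct(HiveParser.TOK_SELECT, "TOK_SELECT")
        .add(selectExpr)
        .node();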

Example 90 with ASTNode

Use of org.apache.hadoop.hive.ql.parse.ASTNode in project hive by apache.

The class ExprNodeConverter, method visitOver:

@Override
public ExprNodeDesc visitOver(RexOver over) {
    if (!deep) {
        return null;
    }
    final RexWindow window = over.getWindow();
    final WindowSpec windowSpec = new WindowSpec();
    final PartitioningSpec partitioningSpec = getPSpec(window);
    windowSpec.setPartitioning(partitioningSpec);
    final WindowFrameSpec windowFrameSpec = getWindowRange(window);
    windowSpec.setWindowFrame(windowFrameSpec);
    WindowFunctionSpec wfs = new WindowFunctionSpec();
    wfs.setWindowSpec(windowSpec);
    final Schema schema = new Schema(tabAlias, inputRowType.getFieldList());
    final ASTNode wUDAFAst = new ASTConverter.RexVisitor(schema).visitOver(over);
    wfs.setExpression(wUDAFAst);
    ASTNode nameNode = (ASTNode) wUDAFAst.getChild(0);
    wfs.setName(nameNode.getText());
    for (int i = 1; i < wUDAFAst.getChildCount() - 1; i++) {
        ASTNode child = (ASTNode) wUDAFAst.getChild(i);
        wfs.addArg(child);
    }
    if (wUDAFAst.getText().equals("TOK_FUNCTIONSTAR")) {
        wfs.setStar(true);
    }
    String columnAlias = getWindowColumnAlias();
    wfs.setAlias(columnAlias);
    this.windowFunctionSpecs.add(wfs);
    return new ExprNodeColumnDesc(TypeConverter.convert(over.getType()), columnAlias, tabAlias, false);
}
Also used: RexVisitor (org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.RexVisitor), Schema (org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.Schema), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc), WindowFunctionSpec (org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowFunctionSpec), RexWindow (org.apache.calcite.rex.RexWindow), DateString (org.apache.calcite.util.DateString), ByteString (org.apache.calcite.avatica.util.ByteString), TimestampString (org.apache.calcite.util.TimestampString), TimeString (org.apache.calcite.util.TimeString), NlsString (org.apache.calcite.util.NlsString), WindowSpec (org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowSpec), PartitioningSpec (org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitioningSpec), WindowFrameSpec (org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowFrameSpec)
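
The child layout visitOver relies on (function name at child 0, arguments in the middle, and a final child that the loop bound deliberately skips, presumably the window specification) can be made explicit with a small helper. A hypothetical sketch using only the ASTNode accessors from the snippet:

// Hypothetical helper mirroring visitOver's traversal of the function AST.
private static void printWindowCall(ASTNode wUDAFAst) {
    System.out.println("function: " + ((ASTNode) wUDAFAst.getChild(0)).getText());
    for (int i = 1; i < wUDAFAst.getChildCount() - 1; i++) {
        System.out.println("arg " + i + ": " + ((ASTNode) wUDAFAst.getChild(i)).getText());
    }
}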

Aggregations

ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode): 116 usages
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 37 usages
DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork): 24 usages
ArrayList (java.util.ArrayList): 21 usages
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 13 usages
HashMap (java.util.HashMap): 11 usages
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 11 usages
Table (org.apache.hadoop.hive.ql.metadata.Table): 10 usages
Node (org.apache.hadoop.hive.ql.lib.Node): 9 usages
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 8 usages
TableName (org.apache.hadoop.hive.common.TableName): 7 usages
ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo): 7 usages
RowResolver (org.apache.hadoop.hive.ql.parse.RowResolver): 7 usages
ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity): 6 usages
RelNode (org.apache.calcite.rel.RelNode): 5 usages
SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint): 5 usages
Context (org.apache.hadoop.hive.ql.Context): 5 usages
ParseDriver (org.apache.hadoop.hive.ql.parse.ParseDriver): 5 usages
SemanticAnalyzer (org.apache.hadoop.hive.ql.parse.SemanticAnalyzer): 5 usages
WindowingException (com.sap.hadoop.windowing.WindowingException): 4 usages