
Example 1 with PartitionedTableFunctionSpec

Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserPTFInvocationSpec.PartitionedTableFunctionSpec in project flink by apache.

In class HiveParserSemanticAnalyzer, method processPTF:

/*
 * - invoked during FROM AST tree processing, on encountering a PTF invocation.
 * - tree form is
 *   ^(TOK_PTBLFUNCTION name partitionTableFunctionSource partitioningSpec? arguments*)
 * - setup a HiveParserPTFInvocationSpec for this top level PTF invocation.
 */
private void processPTF(HiveParserQB qb, HiveParserASTNode ptf) throws SemanticException {
    PartitionedTableFunctionSpec ptfSpec = processPTFChain(qb, ptf);
    if (ptfSpec.getAlias() != null) {
        qb.addAlias(ptfSpec.getAlias());
    }
    HiveParserPTFInvocationSpec spec = new HiveParserPTFInvocationSpec();
    spec.setFunction(ptfSpec);
    qb.addPTFNodeToSpec(ptf, spec);
}
Also used: PartitionedTableFunctionSpec (org.apache.flink.table.planner.delegation.hive.copy.HiveParserPTFInvocationSpec.PartitionedTableFunctionSpec)
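For orientation, here is a minimal sketch of the wrapping step that processPTF performs once processPTFChain has returned: the PartitionedTableFunctionSpec describing the function call becomes the top-level function of a new HiveParserPTFInvocationSpec. The class name PtfInvocationSpecSketch and the sample values "noop" / "np" are illustrative; the sketch only uses the constructors and setters visible in the examples on this page and assumes flink-table-planner is on the classpath. AST node, input, partitioning, and QB registration are intentionally omitted.

import org.apache.flink.table.planner.delegation.hive.copy.HiveParserPTFInvocationSpec;
import org.apache.flink.table.planner.delegation.hive.copy.HiveParserPTFInvocationSpec.PartitionedTableFunctionSpec;

public class PtfInvocationSpecSketch {

    public static void main(String[] args) {
        // Hypothetical PTF invocation "noop(...) as np"; only the fields covered by
        // the setters shown in the examples above are filled in.
        PartitionedTableFunctionSpec ptfSpec = new PartitionedTableFunctionSpec();
        ptfSpec.setName("noop");
        ptfSpec.setAlias("np");

        // processPTF wraps the chain's top-level function in an invocation spec.
        HiveParserPTFInvocationSpec spec = new HiveParserPTFInvocationSpec();
        spec.setFunction(ptfSpec);

        System.out.println("alias that processPTF would register with the QB: " + ptfSpec.getAlias());
    }
}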

Example 2 with PartitionedTableFunctionSpec

Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserPTFInvocationSpec.PartitionedTableFunctionSpec in project flink by apache.

In class HiveParserSemanticAnalyzer, method processPTFChain:

/*
 * - tree form is
 *   ^(TOK_PTBLFUNCTION name alias? partitionTableFunctionSource partitioningSpec? arguments*)
 * - a partitionTableFunctionSource can be a tableReference, a SubQuery or another
 *   PTF invocation.
 */
private PartitionedTableFunctionSpec processPTFChain(HiveParserQB qb, HiveParserASTNode ptf)
        throws SemanticException {
    int childCount = ptf.getChildCount();
    if (childCount < 2) {
        throw new SemanticException(
                HiveParserUtils.generateErrorMessage(ptf, "Not enough Children " + childCount));
    }
    PartitionedTableFunctionSpec ptfSpec = new PartitionedTableFunctionSpec();
    ptfSpec.setAstNode(ptf);
    // name
    HiveParserASTNode nameNode = (HiveParserASTNode) ptf.getChild(0);
    ptfSpec.setName(nameNode.getText());
    int inputIdx = 1;
    // alias
    HiveParserASTNode secondChild = (HiveParserASTNode) ptf.getChild(1);
    if (secondChild.getType() == HiveASTParser.Identifier) {
        ptfSpec.setAlias(secondChild.getText());
        inputIdx++;
    }
    // input
    HiveParserASTNode inputNode = (HiveParserASTNode) ptf.getChild(inputIdx);
    ptfSpec.setInput(processPTFSource(qb, inputNode));
    int argStartIdx = inputIdx + 1;
    // partitioning spec
    int pSpecIdx = inputIdx + 1;
    HiveParserASTNode pSpecNode =
            ptf.getChildCount() > inputIdx ? (HiveParserASTNode) ptf.getChild(pSpecIdx) : null;
    if (pSpecNode != null && pSpecNode.getType() == HiveASTParser.TOK_PARTITIONINGSPEC) {
        PartitioningSpec partitioning = processPTFPartitionSpec(pSpecNode);
        ptfSpec.setPartitioning(partitioning);
        argStartIdx++;
    }
    // arguments
    for (int i = argStartIdx; i < ptf.getChildCount(); i++) {
        ptfSpec.addArg((HiveParserASTNode) ptf.getChild(i));
    }
    return ptfSpec;
}
Also used: PartitionedTableFunctionSpec (org.apache.flink.table.planner.delegation.hive.copy.HiveParserPTFInvocationSpec.PartitionedTableFunctionSpec), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException), PartitioningSpec (org.apache.flink.table.planner.delegation.hive.copy.HiveParserPTFInvocationSpec.PartitioningSpec)
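The only subtle part of processPTFChain is the index bookkeeping around the two optional children: the alias that may follow the name, and the partitioning spec that may follow the input. The self-contained sketch below replays that scan over a plain list of child labels to show how inputIdx and argStartIdx shift; the class name PtfChildIndexSketch and the labels NAME/ALIAS/INPUT/PARTITIONING are illustrative stand-ins, not Flink or Hive API.

import java.util.Arrays;
import java.util.List;

public class PtfChildIndexSketch {

    // Replays the child layout handled above: name alias? input partitioningSpec? arg*
    static void scan(List<String> children) {
        // Mirrors the "childCount < 2" check: at least a name and an input are required.
        if (children.size() < 2) {
            throw new IllegalArgumentException("Not enough children: " + children.size());
        }
        String name = children.get(0);
        int inputIdx = 1;
        // Optional alias directly after the name shifts the input index by one.
        String alias = null;
        if ("ALIAS".equals(children.get(1))) {
            alias = children.get(1);
            inputIdx++;
        }
        String input = children.get(inputIdx);
        int argStartIdx = inputIdx + 1;
        // Optional partitioning spec directly after the input shifts where arguments start.
        boolean partitioned = argStartIdx < children.size()
                && "PARTITIONING".equals(children.get(argStartIdx));
        if (partitioned) {
            argStartIdx++;
        }
        List<String> ptfArgs = children.subList(argStartIdx, children.size());
        System.out.printf("name=%s alias=%s input=%s partitioned=%s args=%s%n",
                name, alias, input, partitioned, ptfArgs);
    }

    public static void main(String[] args) {
        // With alias and partitioning spec present.
        scan(Arrays.asList("NAME", "ALIAS", "INPUT", "PARTITIONING", "ARG1", "ARG2"));
        // Minimal form: just the function name and its input.
        scan(Arrays.asList("NAME", "INPUT"));
    }
}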

Aggregations

PartitionedTableFunctionSpec (org.apache.flink.table.planner.delegation.hive.copy.HiveParserPTFInvocationSpec.PartitionedTableFunctionSpec): 2 usages
PartitioningSpec (org.apache.flink.table.planner.delegation.hive.copy.HiveParserPTFInvocationSpec.PartitioningSpec): 1 usage
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 1 usage