Search in sources:

Example 11 with CalciteSemanticException

Use of org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException in project hive by apache.

The class HiveSemiJoin, method copy.

@Override
public SemiJoin copy(RelTraitSet traitSet, RexNode condition, RelNode left, RelNode right, JoinRelType joinType, boolean semiJoinDone) {
    try {
        final JoinInfo joinInfo = JoinInfo.of(left, right, condition);
        HiveSemiJoin semijoin = new HiveSemiJoin(getCluster(), traitSet, left, right, condition, joinInfo.leftKeys, joinInfo.rightKeys);
        // If available, copy state to registry for optimization rules
        HiveRulesRegistry registry = semijoin.getCluster().getPlanner().getContext().unwrap(HiveRulesRegistry.class);
        if (registry != null) {
            registry.copyPushedPredicates(this, semijoin);
        }
        return semijoin;
    } catch (InvalidRelException | CalciteSemanticException e) {
        // Checked exceptions cannot escape copy(); treat as an internal error.
        throw new AssertionError(e);
    }
}
Also used: JoinInfo(org.apache.calcite.rel.core.JoinInfo) InvalidRelException(org.apache.calcite.rel.InvalidRelException) HiveRulesRegistry(org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException)
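A note on the pattern: RelNode.copy in Calcite declares no checked exceptions, so the two checked exceptions above (InvalidRelException, CalciteSemanticException) are rethrown as AssertionError. The registry is recovered through the planner Context's unwrap method. Below is a minimal, self-contained sketch of that unwrap pattern using Calcite's public Contexts API; HiveRulesRegistryStandIn is a hypothetical stand-in for HiveRulesRegistry.

import org.apache.calcite.plan.Context;
import org.apache.calcite.plan.Contexts;

public class RegistryContextSketch {

    // Hypothetical stand-in for org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry.
    static class HiveRulesRegistryStandIn {
    }

    public static void main(String[] args) {
        HiveRulesRegistryStandIn registry = new HiveRulesRegistryStandIn();
        // Contexts.of(...) builds a planner Context that answers unwrap()
        // for the runtime classes of the objects it wraps.
        Context context = Contexts.of(registry);
        // This mirrors what HiveSemiJoin.copy does via
        // getCluster().getPlanner().getContext().unwrap(HiveRulesRegistry.class).
        HiveRulesRegistryStandIn found = context.unwrap(HiveRulesRegistryStandIn.class);
        if (found != null) {
            System.out.println("registry is available to optimization rules");
        }
    }
}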

Example 12 with CalciteSemanticException

Use of org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException in project hive by apache.

The class PlanModifierForASTConv, method renameTopLevelSelectInResultSchema.

public static RelNode renameTopLevelSelectInResultSchema(final RelNode rootRel, Pair<RelNode, RelNode> topSelparentPair, List<FieldSchema> resultSchema) throws CalciteSemanticException {
    RelNode parentOforiginalProjRel = topSelparentPair.getKey();
    HiveProject originalProjRel = (HiveProject) topSelparentPair.getValue();
    // Assumption: top portion of tree could only be
    // (limit)?(OB)?(Project)....
    List<RexNode> rootChildExps = originalProjRel.getChildExps();
    if (resultSchema.size() != rootChildExps.size()) {
        // Safeguard against potential issues in CBO RowResolver construction. Disable CBO for now.
        LOG.error(PlanModifierUtil.generateInvalidSchemaMessage(originalProjRel, resultSchema, 0));
        throw new CalciteSemanticException("Result Schema didn't match Optimized Op Tree Schema");
    }
    List<String> newSelAliases = new ArrayList<String>();
    String colAlias;
    for (int i = 0; i < rootChildExps.size(); i++) {
        colAlias = resultSchema.get(i).getName();
        colAlias = getNewColAlias(newSelAliases, colAlias);
        newSelAliases.add(colAlias);
    }
    HiveProject replacementProjectRel = HiveProject.create(originalProjRel.getInput(), originalProjRel.getChildExps(), newSelAliases);
    if (rootRel == originalProjRel) {
        return replacementProjectRel;
    } else {
        parentOforiginalProjRel.replaceInput(0, replacementProjectRel);
        return rootRel;
    }
}
Also used: RelNode(org.apache.calcite.rel.RelNode) HiveProject(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject) ArrayList(java.util.ArrayList) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) RexNode(org.apache.calcite.rex.RexNode)
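The helper getNewColAlias is referenced but not shown; its job is to keep the generated select aliases unique when the result schema contains duplicate column names. A hypothetical sketch of that de-duplication (not the actual Hive implementation):

import java.util.ArrayList;
import java.util.List;

public class AliasDedupSketch {

    // Return colAlias unchanged if unused, otherwise suffix it until unique.
    static String getNewColAlias(List<String> existingAliases, String colAlias) {
        String candidate = colAlias;
        int suffix = 1;
        while (existingAliases.contains(candidate)) {
            candidate = colAlias + "_" + suffix++;
        }
        return candidate;
    }

    public static void main(String[] args) {
        List<String> aliases = new ArrayList<>();
        for (String name : new String[] { "key", "key", "value" }) {
            aliases.add(getNewColAlias(aliases, name));
        }
        // Prints [key, key_1, value]
        System.out.println(aliases);
    }
}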

Example 13 with CalciteSemanticException

Use of org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException in project hive by apache.

The class HiveMaterializedViewsRegistry, method createTableScan.

private static RelNode createTableScan(Table viewTable) {
    // 0. Recreate cluster
    final RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(null);
    final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
    final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
    // 1. Create column schema
    final RowResolver rr = new RowResolver();
    // 1.1 Add column info for non-partition columns (ObjectInspector fields)
    StructObjectInspector rowObjectInspector;
    try {
        rowObjectInspector = (StructObjectInspector) viewTable.getDeserializer().getObjectInspector();
    } catch (SerDeException e) {
        // Bail out
        return null;
    }
    List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
    ColumnInfo colInfo;
    String colName;
    ArrayList<ColumnInfo> cInfoLst = new ArrayList<ColumnInfo>();
    for (int i = 0; i < fields.size(); i++) {
        colName = fields.get(i).getFieldName();
        colInfo = new ColumnInfo(fields.get(i).getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()), null, false);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
    }
    ArrayList<ColumnInfo> nonPartitionColumns = new ArrayList<ColumnInfo>(cInfoLst);
    // 1.2 Add column info corresponding to partition columns
    ArrayList<ColumnInfo> partitionColumns = new ArrayList<ColumnInfo>();
    for (FieldSchema part_col : viewTable.getPartCols()) {
        colName = part_col.getName();
        colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
        partitionColumns.add(colInfo);
    }
    // 1.3 Build row type from field <type, name>
    RelDataType rowType;
    try {
        rowType = TypeConverter.getType(cluster, rr, null);
    } catch (CalciteSemanticException e) {
        // Bail out
        return null;
    }
    // 2. Build RelOptAbstractTable
    String fullyQualifiedTabName = viewTable.getDbName();
    if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
        fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName();
    } else {
        fullyQualifiedTabName = viewTable.getTableName();
    }
    RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, rowType, viewTable, nonPartitionColumns, partitionColumns, new ArrayList<VirtualColumn>(), SessionState.get().getConf(), new HashMap<String, PrunedPartitionList>(), new AtomicInteger());
    RelNode tableRel;
    // 3. Build operator
    if (obtainTableType(viewTable) == TableType.DRUID) {
        // Build Druid query
        String address = HiveConf.getVar(SessionState.get().getConf(), HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS);
        String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE);
        Set<String> metrics = new HashSet<>();
        List<RelDataType> druidColTypes = new ArrayList<>();
        List<String> druidColNames = new ArrayList<>();
        for (RelDataTypeField field : rowType.getFieldList()) {
            druidColTypes.add(field.getType());
            druidColNames.add(field.getName());
            if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
                // the time column is neither dimension nor metric
                continue;
            }
            if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
                // VARCHAR columns are Druid dimensions, not metrics
                continue;
            }
            metrics.add(field.getName());
        }
        List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
        DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false), dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
        final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(), null, false, false);
        tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, druidTable, ImmutableList.<RelNode>of(scan));
    } else {
        // Build Hive Table Scan Rel
        tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(), null, false, false);
    }
    return tableRel;
}
Also used: RelOptCluster(org.apache.calcite.plan.RelOptCluster) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) DruidTable(org.apache.calcite.adapter.druid.DruidTable) RelDataType(org.apache.calcite.rel.type.RelDataType) RowResolver(org.apache.hadoop.hive.ql.parse.RowResolver) RelOptPlanner(org.apache.calcite.plan.RelOptPlanner) PrunedPartitionList(org.apache.hadoop.hive.ql.parse.PrunedPartitionList) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) RexBuilder(org.apache.calcite.rex.RexBuilder) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) HashSet(java.util.HashSet) HiveTableScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan) TableScan(org.apache.calcite.rel.core.TableScan) DruidSchema(org.apache.calcite.adapter.druid.DruidSchema) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) RelOptHiveTable(org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable) HiveRelNode(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode) RelNode(org.apache.calcite.rel.RelNode) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) Interval(org.joda.time.Interval)
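The loop over rowType.getFieldList() encodes the Druid column classification: the default timestamp column ("__time" in Calcite's Druid adapter) is neither dimension nor metric, VARCHAR fields are treated as dimensions, and every remaining field becomes a metric. An illustrative, self-contained restatement of that rule over plain name/type pairs:

import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;

public class DruidColumnClassifierSketch {

    static final String TIME_COLUMN = "__time"; // DruidTable.DEFAULT_TIMESTAMP_COLUMN

    // Given field name -> SQL type name, collect the metric columns.
    static Set<String> metrics(Map<String, String> fields) {
        Set<String> metrics = new LinkedHashSet<>();
        for (Map.Entry<String, String> field : fields.entrySet()) {
            if (TIME_COLUMN.equals(field.getKey())) {
                continue; // timestamp column: neither dimension nor metric
            }
            if ("VARCHAR".equals(field.getValue())) {
                continue; // string columns are Druid dimensions
            }
            metrics.add(field.getKey());
        }
        return metrics;
    }

    public static void main(String[] args) {
        Map<String, String> fields = new LinkedHashMap<>();
        fields.put("__time", "TIMESTAMP");
        fields.put("page", "VARCHAR");
        fields.put("clicks", "BIGINT");
        // Prints [clicks]
        System.out.println(metrics(fields));
    }
}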

Example 14 with CalciteSemanticException

Use of org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException in project hive by apache.

The class HiveMaterializedViewsRegistry, method createMaterializedViewScan.

private static RelNode createMaterializedViewScan(HiveConf conf, Table viewTable) {
    // 0. Recreate cluster
    final RelOptPlanner planner = CalcitePlanner.createPlanner(conf);
    final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl(new HiveTypeSystemImpl()));
    final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
    // 1. Create column schema
    final RowResolver rr = new RowResolver();
    // 1.1 Add column info for non-partition columns (ObjectInspector fields)
    StructObjectInspector rowObjectInspector;
    try {
        rowObjectInspector = (StructObjectInspector) viewTable.getDeserializer().getObjectInspector();
    } catch (SerDeException e) {
        // Bail out
        return null;
    }
    List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
    ColumnInfo colInfo;
    String colName;
    ArrayList<ColumnInfo> cInfoLst = new ArrayList<ColumnInfo>();
    for (int i = 0; i < fields.size(); i++) {
        colName = fields.get(i).getFieldName();
        colInfo = new ColumnInfo(fields.get(i).getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()), null, false);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
    }
    ArrayList<ColumnInfo> nonPartitionColumns = new ArrayList<ColumnInfo>(cInfoLst);
    // 1.2 Add column info corresponding to partition columns
    ArrayList<ColumnInfo> partitionColumns = new ArrayList<ColumnInfo>();
    for (FieldSchema part_col : viewTable.getPartCols()) {
        colName = part_col.getName();
        colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
        partitionColumns.add(colInfo);
    }
    // 1.3 Build row type from field <type, name>
    RelDataType rowType;
    try {
        rowType = TypeConverter.getType(cluster, rr, null);
    } catch (CalciteSemanticException e) {
        // Bail out
        return null;
    }
    // 2. Build RelOptAbstractTable
    String fullyQualifiedTabName = viewTable.getDbName();
    if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
        fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName();
    } else {
        fullyQualifiedTabName = viewTable.getTableName();
    }
    RelNode tableRel;
    // 3. Build operator
    if (obtainTableType(viewTable) == TableType.DRUID) {
        // Build Druid query
        String address = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS);
        String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE);
        Set<String> metrics = new HashSet<>();
        List<RelDataType> druidColTypes = new ArrayList<>();
        List<String> druidColNames = new ArrayList<>();
        // @NOTE this code is very similar to the code at org/apache/hadoop/hive/ql/parse/CalcitePlanner.java:2362
        // @TODO it would be nice to refactor this
        RelDataTypeFactory dtFactory = cluster.getRexBuilder().getTypeFactory();
        for (RelDataTypeField field : rowType.getFieldList()) {
            if (DruidTable.DEFAULT_TIMESTAMP_COLUMN.equals(field.getName())) {
                // Druid's time column is always not null.
                druidColTypes.add(dtFactory.createTypeWithNullability(field.getType(), false));
            } else {
                druidColTypes.add(field.getType());
            }
            druidColNames.add(field.getName());
            if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
                // the time column is neither dimension nor metric
                continue;
            }
            if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
                // VARCHAR columns are Druid dimensions, not metrics
                continue;
            }
            metrics.add(field.getName());
        }
        List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
        rowType = dtFactory.createStructType(druidColTypes, druidColNames);
        RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, rowType, viewTable, nonPartitionColumns, partitionColumns, new ArrayList<>(), conf, new HashMap<>(), new HashMap<>(), new AtomicInteger());
        DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false), dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals, null, null);
        final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(), null, false, false);
        tableRel = DruidQuery.create(cluster, cluster.traitSetOf(BindableConvention.INSTANCE), optTable, druidTable, ImmutableList.<RelNode>of(scan), ImmutableMap.of());
    } else {
        // Build Hive Table Scan Rel
        RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, rowType, viewTable, nonPartitionColumns, partitionColumns, new ArrayList<>(), conf, new HashMap<>(), new HashMap<>(), new AtomicInteger());
        tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(), null, false, false);
    }
    return tableRel;
}
Also used: RelOptCluster(org.apache.calcite.plan.RelOptCluster) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) DruidTable(org.apache.calcite.adapter.druid.DruidTable) RelDataType(org.apache.calcite.rel.type.RelDataType) RowResolver(org.apache.hadoop.hive.ql.parse.RowResolver) RelOptPlanner(org.apache.calcite.plan.RelOptPlanner) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) RelDataTypeFactory(org.apache.calcite.rel.type.RelDataTypeFactory) RexBuilder(org.apache.calcite.rex.RexBuilder) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) HashSet(java.util.HashSet) HiveTableScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan) TableScan(org.apache.calcite.rel.core.TableScan) DruidSchema(org.apache.calcite.adapter.druid.DruidSchema) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) RelOptHiveTable(org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable) HiveRelNode(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode) RelNode(org.apache.calcite.rel.RelNode) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) HiveTypeSystemImpl(org.apache.hadoop.hive.ql.optimizer.calcite.HiveTypeSystemImpl) StructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) Interval(org.joda.time.Interval)
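Compared to Example 13, this variant additionally forces the Druid time column to NOT NULL and rebuilds the row type with createStructType. A minimal sketch of that nullability adjustment using only Calcite's public type-factory API (the field names here are illustrative):

import java.util.ArrayList;
import java.util.List;
import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.sql.type.SqlTypeName;

public class NotNullTimeColumnSketch {

    public static void main(String[] args) {
        RelDataTypeFactory factory = new JavaTypeFactoryImpl();
        RelDataType rowType = factory.builder()
                .add("__time", factory.createTypeWithNullability(
                        factory.createSqlType(SqlTypeName.TIMESTAMP), true))
                .add("clicks", SqlTypeName.BIGINT)
                .build();
        List<RelDataType> types = new ArrayList<>();
        List<String> names = new ArrayList<>();
        for (RelDataTypeField field : rowType.getFieldList()) {
            boolean isTimeColumn = "__time".equals(field.getName());
            // Declare the time column NOT NULL, as in the example above.
            types.add(isTimeColumn
                    ? factory.createTypeWithNullability(field.getType(), false)
                    : field.getType());
            names.add(field.getName());
        }
        RelDataType adjusted = factory.createStructType(types, names);
        // Prints false: the rebuilt time column is no longer nullable.
        System.out.println(adjusted.getFieldList().get(0).getType().isNullable());
    }
}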

Example 15 with CalciteSemanticException

Use of org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException in project hive by apache.

The class ASTConverter, method convert.

private ASTNode convert() throws CalciteSemanticException {
    /*
     * 1. Walk RelNode Graph; note from, where, gBy.. nodes.
     */
    new QBVisitor().go(root);
    /*
     * 2. convert from node.
     */
    QueryBlockInfo qb = convertSource(from);
    schema = qb.schema;
    hiveAST.from = ASTBuilder.construct(HiveParser.TOK_FROM, "TOK_FROM").add(qb.ast).node();
    /*
     * 3. convert filterNode
     */
    if (where != null) {
        ASTNode cond = where.getCondition().accept(new RexVisitor(schema, false, root.getCluster().getRexBuilder()));
        hiveAST.where = ASTBuilder.where(cond);
    }
    /*
     * 4. GBy
     */
    if (groupBy != null) {
        ASTBuilder b;
        boolean groupingSetsExpression = false;
        Group aggregateType = groupBy.getGroupType();
        switch(aggregateType) {
            case SIMPLE:
                b = ASTBuilder.construct(HiveParser.TOK_GROUPBY, "TOK_GROUPBY");
                break;
            case ROLLUP:
                b = ASTBuilder.construct(HiveParser.TOK_ROLLUP_GROUPBY, "TOK_ROLLUP_GROUPBY");
                break;
            case CUBE:
                b = ASTBuilder.construct(HiveParser.TOK_CUBE_GROUPBY, "TOK_CUBE_GROUPBY");
                break;
            case OTHER:
                b = ASTBuilder.construct(HiveParser.TOK_GROUPING_SETS, "TOK_GROUPING_SETS");
                groupingSetsExpression = true;
                break;
            default:
                throw new CalciteSemanticException("Group type not recognized");
        }
        HiveAggregate hiveAgg = (HiveAggregate) groupBy;
        for (int pos : hiveAgg.getAggregateColumnsOrder()) {
            RexInputRef iRef = new RexInputRef(groupBy.getGroupSet().nth(pos), groupBy.getCluster().getTypeFactory().createSqlType(SqlTypeName.ANY));
            b.add(iRef.accept(new RexVisitor(schema, false, root.getCluster().getRexBuilder())));
        }
        for (int pos = 0; pos < groupBy.getGroupCount(); pos++) {
            if (!hiveAgg.getAggregateColumnsOrder().contains(pos)) {
                RexInputRef iRef = new RexInputRef(groupBy.getGroupSet().nth(pos), groupBy.getCluster().getTypeFactory().createSqlType(SqlTypeName.ANY));
                b.add(iRef.accept(new RexVisitor(schema, false, root.getCluster().getRexBuilder())));
            }
        }
        // Grouping sets expressions
        if (groupingSetsExpression) {
            for (ImmutableBitSet groupSet : groupBy.getGroupSets()) {
                ASTBuilder expression = ASTBuilder.construct(HiveParser.TOK_GROUPING_SETS_EXPRESSION, "TOK_GROUPING_SETS_EXPRESSION");
                for (int i : groupSet) {
                    RexInputRef iRef = new RexInputRef(i, groupBy.getCluster().getTypeFactory().createSqlType(SqlTypeName.ANY));
                    expression.add(iRef.accept(new RexVisitor(schema, false, root.getCluster().getRexBuilder())));
                }
                b.add(expression);
            }
        }
        if (!groupBy.getGroupSet().isEmpty()) {
            hiveAST.groupBy = b.node();
        }
        schema = new Schema(schema, groupBy);
    }
    /*
     * 5. Having
     */
    if (having != null) {
        ASTNode cond = having.getCondition().accept(new RexVisitor(schema, false, root.getCluster().getRexBuilder()));
        hiveAST.having = ASTBuilder.having(cond);
    }
    /*
     * 6. Project
     */
    ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_SELECT, "TOK_SELECT");
    if (select instanceof Project) {
        List<RexNode> childExps = ((Project) select).getChildExps();
        if (childExps.isEmpty()) {
            RexLiteral r = select.getCluster().getRexBuilder().makeExactLiteral(new BigDecimal(1));
            ASTNode selectExpr = ASTBuilder.selectExpr(ASTBuilder.literal(r), "1");
            b.add(selectExpr);
        } else {
            int i = 0;
            for (RexNode r : childExps) {
                ASTNode expr = r.accept(new RexVisitor(schema, r instanceof RexLiteral, select.getCluster().getRexBuilder()));
                String alias = select.getRowType().getFieldNames().get(i++);
                ASTNode selectExpr = ASTBuilder.selectExpr(expr, alias);
                b.add(selectExpr);
            }
        }
        hiveAST.select = b.node();
    } else {
        // select is a UDTF
        HiveTableFunctionScan udtf = (HiveTableFunctionScan) select;
        List<ASTNode> children = new ArrayList<>();
        RexCall call = (RexCall) udtf.getCall();
        for (RexNode r : call.getOperands()) {
            ASTNode expr = r.accept(new RexVisitor(schema, r instanceof RexLiteral, select.getCluster().getRexBuilder()));
            children.add(expr);
        }
        ASTBuilder sel = ASTBuilder.construct(HiveParser.TOK_SELEXPR, "TOK_SELEXPR");
        ASTNode function = buildUDTFAST(call.getOperator().getName(), children);
        sel.add(function);
        for (String alias : udtf.getRowType().getFieldNames()) {
            sel.add(HiveParser.Identifier, alias);
        }
        b.add(sel);
        hiveAST.select = b.node();
    }
    /*
     * 7. Order By. A RelNode has no pointer to its parent, hence we traverse
     * top-down; but the Order By of each block really belongs to its
     * src/from, hence the sort is passed in to each block from its parent.
     * 8. Limit
     */
    convertOrderLimitToASTNode((HiveSortLimit) orderLimit);
    return hiveAST.getAST();
}
Also used: Group(org.apache.calcite.rel.core.Aggregate.Group) RexLiteral(org.apache.calcite.rex.RexLiteral) ImmutableBitSet(org.apache.calcite.util.ImmutableBitSet) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) HiveTableFunctionScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableFunctionScan) ArrayList(java.util.ArrayList) BigDecimal(java.math.BigDecimal) RexCall(org.apache.calcite.rex.RexCall) Project(org.apache.calcite.rel.core.Project) HiveAggregate(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate) ASTNode(org.apache.hadoop.hive.ql.parse.ASTNode) RexInputRef(org.apache.calcite.rex.RexInputRef) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) RexNode(org.apache.calcite.rex.RexNode)
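In Hive's CalcitePlanner, a CalciteSemanticException escaping this conversion typically causes the optimizer to abandon the CBO plan and re-plan without it. A hypothetical, self-contained illustration of that caller-side fallback pattern (ConvertFailure stands in for CalciteSemanticException):

public class CboFallbackSketch {

    // Stand-in for org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException.
    static class ConvertFailure extends Exception {
        ConvertFailure(String msg) {
            super(msg);
        }
    }

    // Stand-in for ASTConverter.convert(); always fails for the demo.
    static String convert() throws ConvertFailure {
        throw new ConvertFailure("Group type not recognized");
    }

    public static void main(String[] args) {
        String ast;
        try {
            ast = convert();
        } catch (ConvertFailure e) {
            System.out.println("CBO failed (" + e.getMessage() + "); falling back to non-CBO planning");
            ast = null;
        }
        System.out.println("ast = " + ast);
    }
}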

Aggregations

CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException): 19
ArrayList (java.util.ArrayList): 11
RexNode (org.apache.calcite.rex.RexNode): 10
RelNode (org.apache.calcite.rel.RelNode): 9
RelDataType (org.apache.calcite.rel.type.RelDataType): 7
RexBuilder (org.apache.calcite.rex.RexBuilder): 7
HashSet (java.util.HashSet): 6
RelOptCluster (org.apache.calcite.plan.RelOptCluster): 6
RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField): 5
ImmutableList (com.google.common.collect.ImmutableList): 4
ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet): 4
JoinPredicateInfo (org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo): 4
HiveJoin (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin): 4
HiveRelNode (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode): 4
BigDecimal (java.math.BigDecimal): 3
AggregateCall (org.apache.calcite.rel.core.AggregateCall): 3
RexInputRef (org.apache.calcite.rex.RexInputRef): 3
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 3
ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo): 3
RelOptHiveTable (org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable): 3