
Example 6 with HiveTableScan

Use of org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan in project hive by apache.

The class ASTBuilder, method table:

public static ASTNode table(final RelNode scan) {
    HiveTableScan hts = null;
    if (scan instanceof HiveJdbcConverter) {
        hts = ((HiveJdbcConverter) scan).getTableScan().getHiveTableScan();
    } else if (scan instanceof DruidQuery) {
        hts = (HiveTableScan) ((DruidQuery) scan).getTableScan();
    } else {
        hts = (HiveTableScan) scan;
    }
    assert hts != null;
    RelOptHiveTable hTbl = (RelOptHiveTable) hts.getTable();
    ASTBuilder tableNameBuilder = ASTBuilder.construct(HiveParser.TOK_TABNAME, "TOK_TABNAME")
            .add(HiveParser.Identifier, hTbl.getHiveTableMD().getDbName())
            .add(HiveParser.Identifier, hTbl.getHiveTableMD().getTableName());
    if (hTbl.getHiveTableMD().getMetaTable() != null) {
        tableNameBuilder.add(HiveParser.Identifier, hTbl.getHiveTableMD().getMetaTable());
    }
    ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_TABREF, "TOK_TABREF").add(tableNameBuilder);
    if (hTbl.getHiveTableMD().getAsOfTimestamp() != null) {
        ASTBuilder asOfBuilder = ASTBuilder.construct(HiveParser.TOK_AS_OF_TIME, "TOK_AS_OF_TIME")
                .add(HiveParser.StringLiteral, hTbl.getHiveTableMD().getAsOfTimestamp());
        b.add(asOfBuilder);
    }
    if (hTbl.getHiveTableMD().getAsOfVersion() != null) {
        ASTBuilder asOfBuilder = ASTBuilder.construct(HiveParser.TOK_AS_OF_VERSION, "TOK_AS_OF_VERSION")
                .add(HiveParser.Number, hTbl.getHiveTableMD().getAsOfVersion());
        b.add(asOfBuilder);
    }
    ASTBuilder propList = ASTBuilder.construct(HiveParser.TOK_TABLEPROPLIST, "TOK_TABLEPROPLIST");
    if (scan instanceof DruidQuery) {
        // Passing query spec, column names and column types to be used as part of Hive Physical execution
        DruidQuery dq = (DruidQuery) scan;
        // Adding Query specs to be used by org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
        propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
                .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_JSON + "\"")
                .add(HiveParser.StringLiteral, "\"" + SemanticAnalyzer.escapeSQLString(dq.getQueryString()) + "\""));
        // Adding column names used later by org.apache.hadoop.hive.druid.serde.DruidSerDe
        propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
                .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_FIELD_NAMES + "\"")
                .add(HiveParser.StringLiteral, "\"" + dq.getRowType().getFieldNames().stream().map(Object::toString).collect(Collectors.joining(",")) + "\""));
        // Adding column types used later by org.apache.hadoop.hive.druid.serde.DruidSerDe
        propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
                .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_FIELD_TYPES + "\"")
                .add(HiveParser.StringLiteral, "\"" + dq.getRowType().getFieldList().stream().map(e -> TypeConverter.convert(e.getType()).getTypeName()).collect(Collectors.joining(",")) + "\""));
        propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
                .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_TYPE + "\"")
                .add(HiveParser.StringLiteral, "\"" + dq.getQueryType().getQueryName() + "\""));
    } else if (scan instanceof HiveJdbcConverter) {
        HiveJdbcConverter jdbcConverter = (HiveJdbcConverter) scan;
        final String query = jdbcConverter.generateSql();
        LOGGER.debug("Generated SQL query: " + System.lineSeparator() + query);
        propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
                .add(HiveParser.StringLiteral, "\"" + Constants.JDBC_QUERY + "\"")
                .add(HiveParser.StringLiteral, "\"" + SemanticAnalyzer.escapeSQLString(query) + "\""));
        // Whether we can split the query
        propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
                .add(HiveParser.StringLiteral, "\"" + Constants.JDBC_SPLIT_QUERY + "\"")
                .add(HiveParser.StringLiteral, "\"" + jdbcConverter.splittingAllowed() + "\""));
        // Adding column names of the JDBC query result
        propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
                .add(HiveParser.StringLiteral, "\"" + Constants.JDBC_QUERY_FIELD_NAMES + "\"")
                .add(HiveParser.StringLiteral, "\"" + scan.getRowType().getFieldNames().stream().map(Object::toString).collect(Collectors.joining(",")) + "\""));
        // Adding column types of the JDBC query result
        propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
                .add(HiveParser.StringLiteral, "\"" + Constants.JDBC_QUERY_FIELD_TYPES + "\"")
                .add(HiveParser.StringLiteral, "\"" + scan.getRowType().getFieldList().stream().map(e -> TypeConverter.convert(e.getType()).getTypeName()).collect(Collectors.joining(",")) + "\""));
    }
    if (hts.isInsideView()) {
        // We need to carry the insideView information from calcite into the ast.
        propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
                .add(HiveParser.StringLiteral, "\"insideView\"")
                .add(HiveParser.StringLiteral, "\"TRUE\""));
    }
    if (hts.getTableScanTrait() != null) {
        // We need to carry the fetchDeletedRows information from calcite into the ast.
        propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
                .add(HiveParser.StringLiteral, String.format("\"%s\"", hts.getTableScanTrait().getPropertyKey()))
                .add(HiveParser.StringLiteral, "\"TRUE\""));
    }
    b.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTIES, "TOK_TABLEPROPERTIES").add(propList));
    // NOTE: Calcite considers tables to be equal if their names are the same. Hence
    // we need to provide Calcite the fully qualified table name (dbname.tblname)
    // and not the user-provided aliases.
    // However, in Hive the DB name cannot appear in the select list; for a join
    // between tables whose names differ only in DB name, Hive requires the user
    // to introduce explicit aliases for the tables.
    b.add(HiveParser.Identifier, hts.getTableAlias());
    return b.node();
}
Also used : SemanticAnalyzer(org.apache.hadoop.hive.ql.parse.SemanticAnalyzer) HiveIntervalDayTime(org.apache.hadoop.hive.common.type.HiveIntervalDayTime) LoggerFactory(org.slf4j.LoggerFactory) BaseSemanticAnalyzer(org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer) HiveTableScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan) BigDecimal(java.math.BigDecimal) TimeString(org.apache.calcite.util.TimeString) HiveIntervalYearMonth(org.apache.hadoop.hive.common.type.HiveIntervalYearMonth) DruidQuery(org.apache.calcite.adapter.druid.DruidQuery) Constants(org.apache.hadoop.hive.conf.Constants) Logger(org.slf4j.Logger) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) DateString(org.apache.calcite.util.DateString) RexLiteral(org.apache.calcite.rex.RexLiteral) HiveParser(org.apache.hadoop.hive.ql.parse.HiveParser) TimestampString(org.apache.calcite.util.TimestampString) RelOptHiveTable(org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable) HiveJdbcConverter(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter) ASTNode(org.apache.hadoop.hive.ql.parse.ASTNode) RelNode(org.apache.calcite.rel.RelNode) Collectors(java.util.stream.Collectors) ParseDriver(org.apache.hadoop.hive.ql.parse.ParseDriver) RexDynamicParam(org.apache.calcite.rex.RexDynamicParam) TimestampTZUtil(org.apache.hadoop.hive.common.type.TimestampTZUtil) JoinRelType(org.apache.calcite.rel.core.JoinRelType)
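The property-list construction above repeats the same three-call pattern for every table property. As a minimal sketch only (the helper name below is invented for illustration and is not part of ASTBuilder), the recurring pattern looks like this:

private static ASTBuilder tableProperty(String key, String value) {
    // Each TOK_TABLEPROPERTY node carries two double-quoted StringLiteral children:
    // the property key and the property value.
    return ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
            .add(HiveParser.StringLiteral, "\"" + key + "\"")
            .add(HiveParser.StringLiteral, "\"" + value + "\"");
}

// Hypothetical usage, mirroring the Druid branch above:
// propList.add(tableProperty(Constants.DRUID_QUERY_JSON,
//         SemanticAnalyzer.escapeSQLString(dq.getQueryString())));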

Example 7 with HiveTableScan

Use of org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan in project hive by apache.

The class HiveInsertOnlyScanWriteIdRule, method matches:

@Override
public boolean matches(RelOptRuleCall call) {
    HiveTableScan tableScan = call.rel(0);
    Table tableMD = ((RelOptHiveTable) tableScan.getTable()).getHiveTableMD();
    return !tableMD.isMaterializedView() && AcidUtils.isInsertOnlyTable(tableMD);
}
Also used : RelOptHiveTable(org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable) Table(org.apache.hadoop.hive.ql.metadata.Table) HiveTableScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan)

Example 8 with HiveTableScan

Use of org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan in project hive by apache.

The class HiveInsertOnlyScanWriteIdRule, method onMatch:

@Override
public void onMatch(RelOptRuleCall call) {
    HiveTableScan tableScan = call.rel(0);
    RelNode newTableScan = call.builder()
            .push(tableScan.setTableScanTrait(HiveTableScan.HiveTableScanTrait.FetchInsertOnlyBucketIds))
            .build();
    call.transformTo(newTableScan);
}
Also used : RelNode(org.apache.calcite.rel.RelNode) HiveTableScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan)
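Examples 7 and 8 are the two halves of the same Calcite rule: matches decides whether the rule fires, onMatch performs the rewrite. A minimal sketch of how such a rule is typically wired together, assuming a single-operand constructor (the class name and constructor below are assumptions for illustration, not the actual Hive class definition):

public class InsertOnlyScanWriteIdRuleSketch extends RelOptRule {

    public InsertOnlyScanWriteIdRuleSketch() {
        // Fire on every HiveTableScan; matches() narrows the candidates and
        // onMatch() performs the rewrite.
        super(operand(HiveTableScan.class, any()));
    }

    @Override
    public boolean matches(RelOptRuleCall call) {
        // Example 7: only insert-only tables that are not materialized views.
        HiveTableScan tableScan = call.rel(0);
        Table tableMD = ((RelOptHiveTable) tableScan.getTable()).getHiveTableMD();
        return !tableMD.isMaterializedView() && AcidUtils.isInsertOnlyTable(tableMD);
    }

    @Override
    public void onMatch(RelOptRuleCall call) {
        // Example 8: re-emit the scan with the FetchInsertOnlyBucketIds trait set.
        HiveTableScan tableScan = call.rel(0);
        call.transformTo(call.builder()
                .push(tableScan.setTableScanTrait(HiveTableScan.HiveTableScanTrait.FetchInsertOnlyBucketIds))
                .build());
    }
}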

Example 9 with HiveTableScan

Use of org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan in project hive by apache.

The class FilterSelectivityEstimator, method isPartitionPredicate:

private boolean isPartitionPredicate(RexNode expr, RelNode r) {
    if (r instanceof Project) {
        expr = RelOptUtil.pushFilterPastProject(expr, (Project) r);
        return isPartitionPredicate(expr, ((Project) r).getInput());
    } else if (r instanceof Filter) {
        return isPartitionPredicate(expr, ((Filter) r).getInput());
    } else if (r instanceof HiveTableScan) {
        RelOptHiveTable table = (RelOptHiveTable) ((HiveTableScan) r).getTable();
        ImmutableBitSet cols = RelOptUtil.InputFinder.bits(expr);
        return table.containsPartitionColumnsOnly(cols);
    }
    return false;
}
Also used : Project(org.apache.calcite.rel.core.Project) RelOptHiveTable(org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable) ImmutableBitSet(org.apache.calcite.util.ImmutableBitSet) Filter(org.apache.calcite.rel.core.Filter) HiveTableScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan)
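The final branch reduces the question to a bit-set containment check over the scan's column indices. A minimal, self-contained sketch of that check in isolation (the column indices and the helper below are assumptions for illustration, not Hive's actual implementation of containsPartitionColumnsOnly):

import org.apache.calcite.util.ImmutableBitSet;

public class PartitionPredicateSketch {

    // True when every column referenced by the predicate is a partition column.
    static boolean referencesPartitionColumnsOnly(ImmutableBitSet referencedCols,
            ImmutableBitSet partitionCols) {
        return partitionCols.contains(referencedCols);
    }

    public static void main(String[] args) {
        // Suppose columns 2 and 3 of the scan are partition columns (e.g. ds, hr).
        ImmutableBitSet partitionCols = ImmutableBitSet.of(2, 3);
        // A predicate such as ds = '2024-01-01' references column 2 only.
        ImmutableBitSet predicateCols = ImmutableBitSet.of(2);
        System.out.println(referencesPartitionColumnsOnly(predicateCols, partitionCols)); // true
    }
}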

Example 10 with HiveTableScan

Use of org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan in project hive by apache.

The class HiveMaterializedViewsRegistry, method createTableScan:

private static RelNode createTableScan(Table viewTable) {
    // 0. Recreate cluster
    final RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(null);
    final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
    final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
    // 1. Create column schema
    final RowResolver rr = new RowResolver();
    // 1.1 Add column info for non-partition cols (Object Inspector fields)
    StructObjectInspector rowObjectInspector;
    try {
        rowObjectInspector = (StructObjectInspector) viewTable.getDeserializer().getObjectInspector();
    } catch (SerDeException e) {
        // Bail out
        return null;
    }
    List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
    ColumnInfo colInfo;
    String colName;
    ArrayList<ColumnInfo> cInfoLst = new ArrayList<ColumnInfo>();
    for (int i = 0; i < fields.size(); i++) {
        colName = fields.get(i).getFieldName();
        colInfo = new ColumnInfo(fields.get(i).getFieldName(),
                TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()),
                null, false);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
    }
    ArrayList<ColumnInfo> nonPartitionColumns = new ArrayList<ColumnInfo>(cInfoLst);
    // 1.2 Add column info corresponding to partition columns
    ArrayList<ColumnInfo> partitionColumns = new ArrayList<ColumnInfo>();
    for (FieldSchema part_col : viewTable.getPartCols()) {
        colName = part_col.getName();
        colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
        partitionColumns.add(colInfo);
    }
    // 1.3 Build row type from field <type, name>
    RelDataType rowType;
    try {
        rowType = TypeConverter.getType(cluster, rr, null);
    } catch (CalciteSemanticException e) {
        // Bail out
        return null;
    }
    // 2. Build RelOptAbstractTable
    String fullyQualifiedTabName = viewTable.getDbName();
    if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
        fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName();
    } else {
        fullyQualifiedTabName = viewTable.getTableName();
    }
    RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, rowType, viewTable,
            nonPartitionColumns, partitionColumns, new ArrayList<VirtualColumn>(),
            SessionState.get().getConf(), new HashMap<String, PrunedPartitionList>(), new AtomicInteger());
    RelNode tableRel;
    // 3. Build operator
    if (obtainTableType(viewTable) == TableType.DRUID) {
        // Build Druid query
        String address = HiveConf.getVar(SessionState.get().getConf(), HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS);
        String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE);
        Set<String> metrics = new HashSet<>();
        List<RelDataType> druidColTypes = new ArrayList<>();
        List<String> druidColNames = new ArrayList<>();
        for (RelDataTypeField field : rowType.getFieldList()) {
            druidColTypes.add(field.getType());
            druidColNames.add(field.getName());
            if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
                // timestamp
                continue;
            }
            if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
                // dimension
                continue;
            }
            metrics.add(field.getName());
        }
        List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
        DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false), dataSource,
                RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
        final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
                optTable, viewTable.getTableName(), null, false, false);
        tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
                druidTable, ImmutableList.<RelNode>of(scan));
    } else {
        // Build Hive Table Scan Rel
        tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
                viewTable.getTableName(), null, false, false);
    }
    return tableRel;
}
Also used : RelOptCluster(org.apache.calcite.plan.RelOptCluster) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) DruidTable(org.apache.calcite.adapter.druid.DruidTable) RelDataType(org.apache.calcite.rel.type.RelDataType) RowResolver(org.apache.hadoop.hive.ql.parse.RowResolver) RelOptPlanner(org.apache.calcite.plan.RelOptPlanner) PrunedPartitionList(org.apache.hadoop.hive.ql.parse.PrunedPartitionList) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) RexBuilder(org.apache.calcite.rex.RexBuilder) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) HashSet(java.util.HashSet) HiveTableScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan) TableScan(org.apache.calcite.rel.core.TableScan) DruidSchema(org.apache.calcite.adapter.druid.DruidSchema) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) RelOptHiveTable(org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable) HiveRelNode(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode) RelNode(org.apache.calcite.rel.RelNode) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) Interval(org.joda.time.Interval)
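Note that createTableScan returns null when the deserializer cannot be obtained or the row type cannot be converted. A hedged sketch of a call site (the surrounding registry code is assumed for illustration and is not shown in the source):

// Hypothetical caller inside HiveMaterializedViewsRegistry:
RelNode viewScan = createTableScan(viewTable);
if (viewScan == null) {
    // Deserializer or type conversion failed ("Bail out" branches above);
    // skip registering this materialized view.
    return;
}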

Aggregations

HiveTableScan (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan): 18 usages
RelOptHiveTable (org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable): 12 usages
RelNode (org.apache.calcite.rel.RelNode): 7 usages
RexNode (org.apache.calcite.rex.RexNode): 6 usages
ImmutableBitSet (org.apache.calcite.util.ImmutableBitSet): 5 usages
ArrayList (java.util.ArrayList): 4 usages
HashMap (java.util.HashMap): 4 usages
HashSet (java.util.HashSet): 4 usages
RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField): 4 usages
RexBuilder (org.apache.calcite.rex.RexBuilder): 4 usages
Table (org.apache.hadoop.hive.ql.metadata.Table): 4 usages
DruidQuery (org.apache.calcite.adapter.druid.DruidQuery): 3 usages
RelOptCluster (org.apache.calcite.plan.RelOptCluster): 3 usages
Project (org.apache.calcite.rel.core.Project): 3 usages
HiveRelNode (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode): 3 usages
BitSet (java.util.BitSet): 2 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2 usages
DruidSchema (org.apache.calcite.adapter.druid.DruidSchema): 2 usages
DruidTable (org.apache.calcite.adapter.druid.DruidTable): 2 usages
JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl): 2 usages