Example 1 with RelOptPlanner

Use of org.apache.calcite.plan.RelOptPlanner in project hive by apache.

From the class HiveMaterializedViewFilterScanRule, the method apply:

protected void apply(RelOptRuleCall call, Project project, Filter filter, TableScan scan) {
    RelOptPlanner planner = call.getPlanner();
    List<RelOptMaterialization> materializations = (planner instanceof VolcanoPlanner)
            ? ((VolcanoPlanner) planner).getMaterializations()
            : ImmutableList.<RelOptMaterialization>of();
    if (!materializations.isEmpty()) {
        RelNode root = project.copy(project.getTraitSet(),
                Collections.singletonList(filter.copy(filter.getTraitSet(),
                        Collections.singletonList((RelNode) scan))));
        // Costing is done in transformTo(), so we call it repeatedly with all applicable
        // materialized views; the cheapest one will be picked.
        List<RelOptMaterialization> applicableMaterializations =
                VolcanoPlanner.getApplicableMaterializations(root, materializations);
        for (RelOptMaterialization materialization : applicableMaterializations) {
            List<RelNode> subs = new MaterializedViewSubstitutionVisitor(
                    materialization.queryRel, root, relBuilderFactory)
                    .go(materialization.tableRel);
            for (RelNode s : subs) {
                call.transformTo(s);
            }
        }
    }
}
Also used: RelNode (org.apache.calcite.rel.RelNode), RelOptMaterialization (org.apache.calcite.plan.RelOptMaterialization), VolcanoPlanner (org.apache.calcite.plan.volcano.VolcanoPlanner), RelOptPlanner (org.apache.calcite.plan.RelOptPlanner)
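
The rewrite above only fires when materializations have already been registered with the planner. A minimal sketch of that wiring, assuming a VolcanoPlanner and only the standard RelOptPlanner registration API; the helper class and method names below are illustrative, not Hive code:

import java.util.List;
import org.apache.calcite.plan.RelOptMaterialization;
import org.apache.calcite.plan.volcano.VolcanoPlanner;
import org.apache.calcite.rel.RelNode;

// Hypothetical helper, not part of Hive: registers materialized views so that
// getMaterializations() in the rule above returns a non-empty list.
public final class MaterializationRegistrationSketch {

    public static RelNode planWithMaterializations(VolcanoPlanner planner, RelNode root,
            List<RelOptMaterialization> materializations) {
        for (RelOptMaterialization materialization : materializations) {
            // Each registered materialization becomes a substitution candidate.
            planner.addMaterialization(materialization);
        }
        planner.setRoot(root);
        // findBestExp() compares the original plan against any rewrites the
        // rule submits via call.transformTo(...), keeping the cheapest one.
        return planner.findBestExp();
    }
}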

Example 2 with RelOptPlanner

Use of org.apache.calcite.plan.RelOptPlanner in project drill by apache.

From the class DefaultSqlHandler, the method transform:

/**
   * Transform a RelNode into a new RelNode, targeting the provided set of traits. Also logs the outcome if requested.
   *
   * @param plannerType
   *          The type of Planner to use.
   * @param phase
   *          The transformation phase we're running.
   * @param input
   *          The original RelNode.
   * @param targetTraits
   *          The traits we are targeting for output.
   * @param log
   *          Whether to log the planning phase.
   * @return The transformed RelNode.
   */
protected RelNode transform(PlannerType plannerType, PlannerPhase phase, RelNode input, RelTraitSet targetTraits, boolean log) {
    final Stopwatch watch = Stopwatch.createStarted();
    final RuleSet rules = config.getRules(phase);
    final RelTraitSet toTraits = targetTraits.simplify();
    final RelNode output;
    switch(plannerType) {
        case HEP_BOTTOM_UP:
        case HEP:
            {
                final HepProgramBuilder hepPgmBldr = new HepProgramBuilder();
                if (plannerType == PlannerType.HEP_BOTTOM_UP) {
                    hepPgmBldr.addMatchOrder(HepMatchOrder.BOTTOM_UP);
                }
                for (RelOptRule rule : rules) {
                    hepPgmBldr.addRuleInstance(rule);
                }
                final HepPlanner planner = new HepPlanner(hepPgmBldr.build(), context.getPlannerSettings());
                JaninoRelMetadataProvider relMetadataProvider = JaninoRelMetadataProvider.of(DrillDefaultRelMetadataProvider.INSTANCE);
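                // Make the Janino-generated metadata provider the one used by
                // RelMetadataQuery instances created on this thread.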
                RelMetadataQuery.THREAD_PROVIDERS.set(relMetadataProvider);
                // Modify RelMetaProvider for every RelNode in the SQL operator Rel tree.
                input.accept(new MetaDataProviderModifier(relMetadataProvider));
                planner.setRoot(input);
                if (!input.getTraitSet().equals(targetTraits)) {
                    planner.changeTraits(input, toTraits);
                }
                output = planner.findBestExp();
                break;
            }
        case VOLCANO:
        default:
            {
                // as weird as it seems, the cluster's only planner is the volcano planner.
                final RelOptPlanner planner = input.getCluster().getPlanner();
                final Program program = Programs.of(rules);
                Preconditions.checkArgument(planner instanceof VolcanoPlanner,
                        "Cluster is expected to be constructed using VolcanoPlanner. "
                                + "Was actually of type %s.", planner.getClass().getName());
                output = program.run(planner, input, toTraits);
                break;
            }
    }
    if (log) {
        log(plannerType, phase, output, logger, watch);
    }
    return output;
}
Also used: RuleSet (org.apache.calcite.tools.RuleSet), Program (org.apache.calcite.tools.Program), RelNode (org.apache.calcite.rel.RelNode), Stopwatch (com.google.common.base.Stopwatch), HepProgramBuilder (org.apache.calcite.plan.hep.HepProgramBuilder), JaninoRelMetadataProvider (org.apache.calcite.rel.metadata.JaninoRelMetadataProvider), VolcanoPlanner (org.apache.calcite.plan.volcano.VolcanoPlanner), RelTraitSet (org.apache.calcite.plan.RelTraitSet), HepPlanner (org.apache.calcite.plan.hep.HepPlanner), RelOptPlanner (org.apache.calcite.plan.RelOptPlanner), RelOptRule (org.apache.calcite.plan.RelOptRule)
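
A hedged sketch of a call site for this helper, assuming a logical pass with the HEP planner followed by a physical pass with Volcano; the PlannerPhase values and the Prel.DRILL_PHYSICAL convention are assumptions for illustration, not necessarily Drill's exact call sequence:

// Hypothetical handler excerpt (assumed phases and traits, see above).
protected RelNode planLogicalThenPhysical(RelNode relNode) {
    // Rule-based logical rewrites, applied bottom-up by the HEP planner.
    final RelNode logical = transform(PlannerType.HEP_BOTTOM_UP, PlannerPhase.LOGICAL,
            relNode, relNode.getTraitSet(), true);
    // Cost-based physical planning with Volcano, targeting the physical convention.
    return transform(PlannerType.VOLCANO, PlannerPhase.PHYSICAL,
            logical, logical.getTraitSet().plus(Prel.DRILL_PHYSICAL), true);
}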

Example 3 with RelOptPlanner

Use of org.apache.calcite.plan.RelOptPlanner in project hive by apache.

From the class HiveMaterializedViewsRegistry, the method createTableScan:

private static RelNode createTableScan(Table viewTable) {
    // 0. Recreate cluster
    final RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(null);
    final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
    final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
    // 1. Create column schema
    final RowResolver rr = new RowResolver();
    // 1.1 Add column info for non-partition columns (ObjectInspector fields)
    StructObjectInspector rowObjectInspector;
    try {
        rowObjectInspector = (StructObjectInspector) viewTable.getDeserializer().getObjectInspector();
    } catch (SerDeException e) {
        // Bail out
        return null;
    }
    List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
    ColumnInfo colInfo;
    String colName;
    ArrayList<ColumnInfo> cInfoLst = new ArrayList<ColumnInfo>();
    for (int i = 0; i < fields.size(); i++) {
        colName = fields.get(i).getFieldName();
        colInfo = new ColumnInfo(fields.get(i).getFieldName(),
                TypeInfoUtils.getTypeInfoFromObjectInspector(
                        fields.get(i).getFieldObjectInspector()),
                null, false);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
    }
    ArrayList<ColumnInfo> nonPartitionColumns = new ArrayList<ColumnInfo>(cInfoLst);
    // 1.2 Add column info corresponding to partition columns
    ArrayList<ColumnInfo> partitionColumns = new ArrayList<ColumnInfo>();
    for (FieldSchema part_col : viewTable.getPartCols()) {
        colName = part_col.getName();
        colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
        partitionColumns.add(colInfo);
    }
    // 1.3 Build row type from field <type, name>
    RelDataType rowType;
    try {
        rowType = TypeConverter.getType(cluster, rr, null);
    } catch (CalciteSemanticException e) {
        // Bail out
        return null;
    }
    // 2. Build RelOptAbstractTable
    String fullyQualifiedTabName = viewTable.getDbName();
    if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
        fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName();
    } else {
        fullyQualifiedTabName = viewTable.getTableName();
    }
    RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, rowType,
            viewTable, nonPartitionColumns, partitionColumns, new ArrayList<VirtualColumn>(),
            SessionState.get().getConf(), new HashMap<String, PrunedPartitionList>(),
            new AtomicInteger());
    RelNode tableRel;
    // 3. Build operator
    if (obtainTableType(viewTable) == TableType.DRUID) {
        // Build Druid query
        String address = HiveConf.getVar(SessionState.get().getConf(), HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS);
        String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE);
        Set<String> metrics = new HashSet<>();
        List<RelDataType> druidColTypes = new ArrayList<>();
        List<String> druidColNames = new ArrayList<>();
        for (RelDataTypeField field : rowType.getFieldList()) {
            druidColTypes.add(field.getType());
            druidColNames.add(field.getName());
            if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
                // the timestamp column is Druid's time dimension, not a metric
                continue;
            }
            if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
                // VARCHAR columns are treated as dimensions, not metrics
                continue;
            }
            // every remaining column is registered as a metric
            metrics.add(field.getName());
        }
        List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
        DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
                dataSource, RelDataTypeImpl.proto(rowType), metrics,
                DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
        final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
                optTable, viewTable.getTableName(), null, false, false);
        tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
                optTable, druidTable, ImmutableList.<RelNode>of(scan));
    } else {
        // Build Hive Table Scan Rel
        tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
                optTable, viewTable.getTableName(), null, false, false);
    }
    return tableRel;
}
Also used: RelOptCluster (org.apache.calcite.plan.RelOptCluster), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), ArrayList (java.util.ArrayList), ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo), DruidTable (org.apache.calcite.adapter.druid.DruidTable), RelDataType (org.apache.calcite.rel.type.RelDataType), RowResolver (org.apache.hadoop.hive.ql.parse.RowResolver), RelOptPlanner (org.apache.calcite.plan.RelOptPlanner), PrunedPartitionList (org.apache.hadoop.hive.ql.parse.PrunedPartitionList), JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl), RexBuilder (org.apache.calcite.rex.RexBuilder), CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException), SerDeException (org.apache.hadoop.hive.serde2.SerDeException), HashSet (java.util.HashSet), HiveTableScan (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan), TableScan (org.apache.calcite.rel.core.TableScan), DruidSchema (org.apache.calcite.adapter.druid.DruidSchema), RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField), RelOptHiveTable (org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable), HiveRelNode (org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode), RelNode (org.apache.calcite.rel.RelNode), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), StructObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector), Interval (org.joda.time.Interval)
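
The returned scan is only half of a materialization. A hedged sketch of how the registry could pair it with a plan for the view's defining query; queryRel is assumed to be built elsewhere, and the four-argument RelOptMaterialization constructor matches recent Calcite releases (older releases take fewer arguments):

// Hypothetical pairing, not Hive code: combines the scan built above with the
// view's defining query plan for use by the rewriting shown in Example 1.
private static RelOptMaterialization toMaterialization(Table viewTable, RelNode queryRel) {
    RelNode tableRel = createTableScan(viewTable);
    if (tableRel == null) {
        // createTableScan() bails out with null when the table's metadata
        // cannot be interpreted; skip registering this view in that case.
        return null;
    }
    return new RelOptMaterialization(tableRel, queryRel, null,
            ImmutableList.of(viewTable.getDbName(), viewTable.getTableName()));
}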

Aggregations

RelOptPlanner (org.apache.calcite.plan.RelOptPlanner): 3
RelNode (org.apache.calcite.rel.RelNode): 3
VolcanoPlanner (org.apache.calcite.plan.volcano.VolcanoPlanner): 2
Stopwatch (com.google.common.base.Stopwatch): 1
ArrayList (java.util.ArrayList): 1
HashSet (java.util.HashSet): 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 1
DruidSchema (org.apache.calcite.adapter.druid.DruidSchema): 1
DruidTable (org.apache.calcite.adapter.druid.DruidTable): 1
JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl): 1
RelOptCluster (org.apache.calcite.plan.RelOptCluster): 1
RelOptMaterialization (org.apache.calcite.plan.RelOptMaterialization): 1
RelOptRule (org.apache.calcite.plan.RelOptRule): 1
RelTraitSet (org.apache.calcite.plan.RelTraitSet): 1
HepPlanner (org.apache.calcite.plan.hep.HepPlanner): 1
HepProgramBuilder (org.apache.calcite.plan.hep.HepProgramBuilder): 1
TableScan (org.apache.calcite.rel.core.TableScan): 1
JaninoRelMetadataProvider (org.apache.calcite.rel.metadata.JaninoRelMetadataProvider): 1
RelDataType (org.apache.calcite.rel.type.RelDataType): 1
RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField): 1