use of org.apache.calcite.plan.RelOptPlanner in project hive by apache.
the class HiveMaterializedViewFilterScanRule method apply.
protected void apply(RelOptRuleCall call, Project project, Filter filter, TableScan scan) {
  RelOptPlanner planner = call.getPlanner();
  List<RelOptMaterialization> materializations = (planner instanceof VolcanoPlanner)
      ? ((VolcanoPlanner) planner).getMaterializations()
      : ImmutableList.<RelOptMaterialization>of();
  if (!materializations.isEmpty()) {
    RelNode root = project.copy(project.getTraitSet(),
        Collections.singletonList(filter.copy(filter.getTraitSet(),
            Collections.singletonList((RelNode) scan))));
    // Costing is done in transformTo(), so we call it repeatedly with all applicable
    // materialized views and the cheapest one will be picked
    List<RelOptMaterialization> applicableMaterializations =
        VolcanoPlanner.getApplicableMaterializations(root, materializations);
    for (RelOptMaterialization materialization : applicableMaterializations) {
      List<RelNode> subs = new MaterializedViewSubstitutionVisitor(
          materialization.queryRel, root, relBuilderFactory).go(materialization.tableRel);
      for (RelNode s : subs) {
        call.transformTo(s);
      }
    }
  }
}
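For context, a minimal sketch (not Hive code) of how materializations become visible to the planner that this rule inspects: a materialized view is registered with the VolcanoPlanner as a RelOptMaterialization, after which getMaterializations() returns it and VolcanoPlanner.getApplicableMaterializations() prunes the list to views that can serve the query. The RelNodes viewScanRel and viewQueryRel and the table name below are hypothetical placeholders.

// Sketch only: registering a materialized view with a VolcanoPlanner.
// viewScanRel  - scan over the table that stores the view (hypothetical)
// viewQueryRel - plan of the view's defining query (hypothetical)
VolcanoPlanner planner = new VolcanoPlanner();
planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
RelOptMaterialization materialization = new RelOptMaterialization(
    viewScanRel, viewQueryRel, null, ImmutableList.of("default", "mv_example"));
planner.addMaterialization(materialization);
// The rule above then retrieves it via ((VolcanoPlanner) planner).getMaterializations()
// and narrows it with VolcanoPlanner.getApplicableMaterializations(root, materializations).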
use of org.apache.calcite.plan.RelOptPlanner in project drill by apache.
the class DefaultSqlHandler method transform.
/**
 * Transform RelNode to a new RelNode, targeting the provided set of traits. Also will log the outcome if asked.
 *
 * @param plannerType
 *          The type of Planner to use.
 * @param phase
 *          The transformation phase we're running.
 * @param input
 *          The original RelNode
 * @param targetTraits
 *          The traits we are targeting for output.
 * @param log
 *          Whether to log the planning phase.
 * @return The transformed RelNode.
 */
protected RelNode transform(PlannerType plannerType, PlannerPhase phase, RelNode input, RelTraitSet targetTraits, boolean log) {
  final Stopwatch watch = Stopwatch.createStarted();
  final RuleSet rules = config.getRules(phase);
  final RelTraitSet toTraits = targetTraits.simplify();
  final RelNode output;
  switch (plannerType) {
    case HEP_BOTTOM_UP:
    case HEP: {
      final HepProgramBuilder hepPgmBldr = new HepProgramBuilder();
      if (plannerType == PlannerType.HEP_BOTTOM_UP) {
        hepPgmBldr.addMatchOrder(HepMatchOrder.BOTTOM_UP);
      }
      for (RelOptRule rule : rules) {
        hepPgmBldr.addRuleInstance(rule);
      }
      final HepPlanner planner = new HepPlanner(hepPgmBldr.build(), context.getPlannerSettings());
      JaninoRelMetadataProvider relMetadataProvider =
          JaninoRelMetadataProvider.of(DrillDefaultRelMetadataProvider.INSTANCE);
      RelMetadataQuery.THREAD_PROVIDERS.set(relMetadataProvider);
      // Modify RelMetaProvider for every RelNode in the SQL operator Rel tree.
      input.accept(new MetaDataProviderModifier(relMetadataProvider));
      planner.setRoot(input);
      if (!input.getTraitSet().equals(targetTraits)) {
        planner.changeTraits(input, toTraits);
      }
      output = planner.findBestExp();
      break;
    }
    case VOLCANO:
    default: {
      // as weird as it seems, the cluster's only planner is the volcano planner.
      final RelOptPlanner planner = input.getCluster().getPlanner();
      final Program program = Programs.of(rules);
      Preconditions.checkArgument(planner instanceof VolcanoPlanner,
          "Cluster is expected to be constructed using VolcanoPlanner. Was actually of type %s.",
          planner.getClass().getName());
      output = program.run(planner, input, toTraits);
      break;
    }
  }
  if (log) {
    log(plannerType, phase, output, logger, watch);
  }
  return output;
}
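The HEP branch above can be exercised on its own. A minimal sketch (not Drill code) follows, using two Calcite core rules purely as placeholders for the phase's rule set; inputRel stands for whatever plan is being rewritten.

// Sketch only: the HEP path in isolation.
HepProgramBuilder hepBuilder = new HepProgramBuilder();
hepBuilder.addMatchOrder(HepMatchOrder.BOTTOM_UP);        // mirrors PlannerType.HEP_BOTTOM_UP
hepBuilder.addRuleInstance(FilterMergeRule.INSTANCE);     // illustrative rule
hepBuilder.addRuleInstance(ProjectMergeRule.INSTANCE);    // illustrative rule
HepPlanner hepPlanner = new HepPlanner(hepBuilder.build());
hepPlanner.setRoot(inputRel);                             // inputRel: the plan to rewrite (hypothetical)
RelNode rewritten = hepPlanner.findBestExp();             // applies the program until no rule fires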
use of org.apache.calcite.plan.RelOptPlanner in project hive by apache.
the class HiveMaterializedViewsRegistry method createTableScan.
private static RelNode createTableScan(Table viewTable) {
  // 0. Recreate cluster
  final RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(null);
  final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
  final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  // 1. Create column schema
  final RowResolver rr = new RowResolver();
  // 1.1 Add Column info for non-partition cols (Object Inspector fields)
  StructObjectInspector rowObjectInspector;
  try {
    rowObjectInspector = (StructObjectInspector) viewTable.getDeserializer().getObjectInspector();
  } catch (SerDeException e) {
    // Bail out
    return null;
  }
  List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
  ColumnInfo colInfo;
  String colName;
  ArrayList<ColumnInfo> cInfoLst = new ArrayList<ColumnInfo>();
  for (int i = 0; i < fields.size(); i++) {
    colName = fields.get(i).getFieldName();
    colInfo = new ColumnInfo(fields.get(i).getFieldName(),
        TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()),
        null, false);
    rr.put(null, colName, colInfo);
    cInfoLst.add(colInfo);
  }
  ArrayList<ColumnInfo> nonPartitionColumns = new ArrayList<ColumnInfo>(cInfoLst);
  // 1.2 Add column info corresponding to partition columns
  ArrayList<ColumnInfo> partitionColumns = new ArrayList<ColumnInfo>();
  for (FieldSchema part_col : viewTable.getPartCols()) {
    colName = part_col.getName();
    colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true);
    rr.put(null, colName, colInfo);
    cInfoLst.add(colInfo);
    partitionColumns.add(colInfo);
  }
  // 1.3 Build row type from field <type, name>
  RelDataType rowType;
  try {
    rowType = TypeConverter.getType(cluster, rr, null);
  } catch (CalciteSemanticException e) {
    // Bail out
    return null;
  }
  // 2. Build RelOptAbstractTable
  String fullyQualifiedTabName = viewTable.getDbName();
  if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
    fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName();
  } else {
    fullyQualifiedTabName = viewTable.getTableName();
  }
  RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, rowType, viewTable,
      nonPartitionColumns, partitionColumns, new ArrayList<VirtualColumn>(), SessionState.get().getConf(),
      new HashMap<String, PrunedPartitionList>(), new AtomicInteger());
  RelNode tableRel;
  // 3. Build operator
  if (obtainTableType(viewTable) == TableType.DRUID) {
    // Build Druid query
    String address = HiveConf.getVar(SessionState.get().getConf(),
        HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS);
    String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE);
    Set<String> metrics = new HashSet<>();
    List<RelDataType> druidColTypes = new ArrayList<>();
    List<String> druidColNames = new ArrayList<>();
    for (RelDataTypeField field : rowType.getFieldList()) {
      druidColTypes.add(field.getType());
      druidColNames.add(field.getName());
      if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
        // timestamp
        continue;
      }
      if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
        // dimension
        continue;
      }
      metrics.add(field.getName());
    }
    List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
    DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false), dataSource,
        RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
    final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
        optTable, viewTable.getTableName(), null, false, false);
    tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
        druidTable, ImmutableList.<RelNode>of(scan));
  } else {
    // Build Hive Table Scan Rel
    tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
        viewTable.getTableName(), null, false, false);
  }
  return tableRel;
}
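Step 0 above (recreating the cluster) is where RelOptPlanner enters this method. A minimal plain-Calcite sketch of that step, with HiveVolcanoPlanner.createPlanner(null) replaced by a bare VolcanoPlanner for illustration:

// Sketch only: building a RelOptCluster that new RelNodes (e.g. the HiveTableScan
// or DruidQuery above) can be created against.
VolcanoPlanner planner = new VolcanoPlanner();
planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);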