Use of org.apache.calcite.plan.RelOptCluster in project flink by apache.
The class FlinkRelDecorrelator, method createValueGenerator.
/**
 * Creates a RelNode tree that produces a list of correlated variables.
 *
 * @param correlations correlated variables to generate
 * @param valueGenFieldOffset offset in the output at which the generated columns will start
 * @param mapCorVarToOutputPos output positions of the generated correlated variables
 * @return RelNode the root of the resultant RelNode tree
 */
private RelNode createValueGenerator(Iterable<Correlation> correlations, int valueGenFieldOffset, SortedMap<Correlation, Integer> mapCorVarToOutputPos) {
  final Map<RelNode, List<Integer>> mapNewInputToOutputPos = new HashMap<>();
  final Map<RelNode, Integer> mapNewInputToNewOffset = new HashMap<>();
  // Add to map all the referenced positions (relative to each input rel).
  for (Correlation corVar : correlations) {
    final int oldCorVarOffset = corVar.field;
    final RelNode oldInput = getCorRel(corVar);
    assert oldInput != null;
    final Frame frame = map.get(oldInput);
    assert frame != null;
    final RelNode newInput = frame.r;
    final List<Integer> newLocalOutputPosList;
    if (!mapNewInputToOutputPos.containsKey(newInput)) {
      newLocalOutputPosList = Lists.newArrayList();
    } else {
      newLocalOutputPosList = mapNewInputToOutputPos.get(newInput);
    }
    final int newCorVarOffset = frame.oldToNewOutputPos.get(oldCorVarOffset);
    // Add all unique positions referenced.
    if (!newLocalOutputPosList.contains(newCorVarOffset)) {
      newLocalOutputPosList.add(newCorVarOffset);
    }
    mapNewInputToOutputPos.put(newInput, newLocalOutputPosList);
  }
  int offset = 0;
  // Project only the correlated fields out of each input rel
  // and join the projects together.
  // To make sure the plan does not change in terms of join order,
  // join these rels based on their occurrence in the cor var list,
  // which is sorted.
  final Set<RelNode> joinedInputRelSet = Sets.newHashSet();
  RelNode r = null;
  for (Correlation corVar : correlations) {
    final RelNode oldInput = getCorRel(corVar);
    assert oldInput != null;
    final RelNode newInput = map.get(oldInput).r;
    assert newInput != null;
    if (!joinedInputRelSet.contains(newInput)) {
      RelNode project = RelOptUtil.createProject(newInput, mapNewInputToOutputPos.get(newInput));
      RelNode distinct = RelOptUtil.createDistinctRel(project);
      RelOptCluster cluster = distinct.getCluster();
      joinedInputRelSet.add(newInput);
      mapNewInputToNewOffset.put(newInput, offset);
      offset += distinct.getRowType().getFieldCount();
      if (r == null) {
        r = distinct;
      } else {
        r = LogicalJoin.create(r, distinct, cluster.getRexBuilder().makeLiteral(true), ImmutableSet.<CorrelationId>of(), JoinRelType.INNER);
      }
    }
  }
  // Translate the positions of referenced variables to be relative to
  // the join output, leaving room for valueGenFieldOffset because
  // value generators are joined with the original left input of the rel
  // referencing correlated variables.
  for (Correlation corVar : correlations) {
    // The first input of a Correlator is always the rel defining
    // the correlated variables.
    final RelNode oldInput = getCorRel(corVar);
    assert oldInput != null;
    final Frame frame = map.get(oldInput);
    final RelNode newInput = frame.r;
    assert newInput != null;
    final List<Integer> newLocalOutputPosList = mapNewInputToOutputPos.get(newInput);
    final int newLocalOutputPos = frame.oldToNewOutputPos.get(corVar.field);
    // newOutputPos is the index of the cor var in the referenced
    // position list plus the offset of the referenced position list
    // of each newInput.
    final int newOutputPos = newLocalOutputPosList.indexOf(newLocalOutputPos) + mapNewInputToNewOffset.get(newInput) + valueGenFieldOffset;
    if (mapCorVarToOutputPos.containsKey(corVar)) {
      assert mapCorVarToOutputPos.get(corVar) == newOutputPos;
    }
    mapCorVarToOutputPos.put(corVar, newOutputPos);
  }
  return r;
}
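Within this method, the cluster is used only to reach a RexBuilder for the always-true join condition that chains the distinct inputs together. A minimal sketch of that pattern, assuming two arbitrary inputs (the helper name crossJoin is illustrative; the LogicalJoin.create signature is the same one called above):

import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.CorrelationId;
import org.apache.calcite.rel.core.JoinRelType;
import org.apache.calcite.rel.logical.LogicalJoin;
import org.apache.calcite.rex.RexNode;
import com.google.common.collect.ImmutableSet;

// Cross-joins two inputs with a literal TRUE condition, mirroring how
// the value generator chains the distinct projections of each input.
static RelNode crossJoin(RelNode left, RelNode right) {
  final RelOptCluster cluster = left.getCluster();
  final RexNode alwaysTrue = cluster.getRexBuilder().makeLiteral(true);
  return LogicalJoin.create(left, right, alwaysTrue, ImmutableSet.<CorrelationId>of(), JoinRelType.INNER);
}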
Use of org.apache.calcite.plan.RelOptCluster in project flink by apache.
The class FlinkRelDecorrelator, method decorrelateQuery.
/**
 * Decorrelates a query.
 *
 * <p>This is the main entry point to {@code FlinkRelDecorrelator}.
 *
 * @param rootRel Root node of the query
 * @return Equivalent query with all {@link LogicalCorrelate} instances removed
 */
public static RelNode decorrelateQuery(RelNode rootRel) {
  final CorelMap corelMap = new CorelMapBuilder().build(rootRel);
  if (!corelMap.hasCorrelation()) {
    return rootRel;
  }
  final RelOptCluster cluster = rootRel.getCluster();
  final FlinkRelDecorrelator decorrelator = new FlinkRelDecorrelator(cluster, corelMap, cluster.getPlanner().getContext());
  RelNode newRootRel = decorrelator.removeCorrelationViaRule(rootRel);
  if (SQL2REL_LOGGER.isDebugEnabled()) {
    SQL2REL_LOGGER.debug(RelOptUtil.dumpPlan("Plan after removing Correlator", newRootRel, false, SqlExplainLevel.EXPPLAN_ATTRIBUTES));
  }
  if (!decorrelator.cm.mapCorVarToCorRel.isEmpty()) {
    newRootRel = decorrelator.decorrelate(newRootRel);
  }
  return newRootRel;
}
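A hedged usage sketch of this entry point (convertedRel stands for the output of SQL-to-rel conversion and is assumed to be in scope):

// Rewrites away all LogicalCorrelate nodes; by the early return above,
// the input comes back unchanged when no correlation is found.
RelNode decorrelated = FlinkRelDecorrelator.decorrelateQuery(convertedRel);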
Use of org.apache.calcite.plan.RelOptCluster in project storm by apache.
The class TridentModifyRule, method convert.
@Override
public RelNode convert(RelNode rel) {
  final TableModify tableModify = (TableModify) rel;
  final RelNode input = tableModify.getInput();
  final RelOptCluster cluster = tableModify.getCluster();
  final RelTraitSet traitSet = tableModify.getTraitSet().replace(TridentLogicalConvention.INSTANCE);
  final RelOptTable relOptTable = tableModify.getTable();
  final Prepare.CatalogReader catalogReader = tableModify.getCatalogReader();
  final RelNode convertedInput = convert(input, input.getTraitSet().replace(TridentLogicalConvention.INSTANCE));
  final TableModify.Operation operation = tableModify.getOperation();
  final List<String> updateColumnList = tableModify.getUpdateColumnList();
  final List<RexNode> sourceExpressionList = tableModify.getSourceExpressionList();
  final boolean flattened = tableModify.isFlattened();
  final Table table = tableModify.getTable().unwrap(Table.class);
  switch (table.getJdbcTableType()) {
    case STREAM:
      if (operation != TableModify.Operation.INSERT) {
        throw new UnsupportedOperationException(String.format("Streams doesn't support %s modify operation", operation));
      }
      return new TridentStreamInsertRel(cluster, traitSet, relOptTable, catalogReader, convertedInput, operation, updateColumnList, sourceExpressionList, flattened);
    default:
      throw new IllegalArgumentException(String.format("Unsupported table type: %s", table.getJdbcTableType()));
  }
}
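The unwrap call above is the standard way to reach the schema-level Table behind a RelOptTable. A minimal sketch of that pattern with an illustrative helper name (unlike the code above, it guards against a null unwrap result):

import org.apache.calcite.plan.RelOptTable;
import org.apache.calcite.schema.Schema;
import org.apache.calcite.schema.Table;

// unwrap() returns null when the RelOptTable cannot expose the requested
// interface, so check before calling through to getJdbcTableType().
static Schema.TableType jdbcTableType(RelOptTable relOptTable) {
  final Table table = relOptTable.unwrap(Table.class);
  return table == null ? null : table.getJdbcTableType();
}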
Use of org.apache.calcite.plan.RelOptCluster in project hive by apache.
The class HiveMaterializedViewsRegistry, method createTableScan.
private static RelNode createTableScan(Table viewTable) {
  // 0. Recreate cluster
  final RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(null);
  final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
  final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  // 1. Create column schema
  final RowResolver rr = new RowResolver();
  // 1.1 Add column info for non-partition cols (Object Inspector fields)
  StructObjectInspector rowObjectInspector;
  try {
    rowObjectInspector = (StructObjectInspector) viewTable.getDeserializer().getObjectInspector();
  } catch (SerDeException e) {
    // Bail out
    return null;
  }
  List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
  ColumnInfo colInfo;
  String colName;
  ArrayList<ColumnInfo> cInfoLst = new ArrayList<ColumnInfo>();
  for (int i = 0; i < fields.size(); i++) {
    colName = fields.get(i).getFieldName();
    colInfo = new ColumnInfo(fields.get(i).getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()), null, false);
    rr.put(null, colName, colInfo);
    cInfoLst.add(colInfo);
  }
  ArrayList<ColumnInfo> nonPartitionColumns = new ArrayList<ColumnInfo>(cInfoLst);
  // 1.2 Add column info corresponding to partition columns
  ArrayList<ColumnInfo> partitionColumns = new ArrayList<ColumnInfo>();
  for (FieldSchema part_col : viewTable.getPartCols()) {
    colName = part_col.getName();
    colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true);
    rr.put(null, colName, colInfo);
    cInfoLst.add(colInfo);
    partitionColumns.add(colInfo);
  }
  // 1.3 Build row type from field <type, name>
  RelDataType rowType;
  try {
    rowType = TypeConverter.getType(cluster, rr, null);
  } catch (CalciteSemanticException e) {
    // Bail out
    return null;
  }
  // 2. Build RelOptAbstractTable
  String fullyQualifiedTabName = viewTable.getDbName();
  if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
    fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName();
  } else {
    fullyQualifiedTabName = viewTable.getTableName();
  }
  RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, rowType, viewTable, nonPartitionColumns, partitionColumns, new ArrayList<VirtualColumn>(), SessionState.get().getConf(), new HashMap<String, PrunedPartitionList>(), new AtomicInteger());
  RelNode tableRel;
  // 3. Build operator
  if (obtainTableType(viewTable) == TableType.DRUID) {
    // Build Druid query
    String address = HiveConf.getVar(SessionState.get().getConf(), HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS);
    String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE);
    Set<String> metrics = new HashSet<>();
    List<RelDataType> druidColTypes = new ArrayList<>();
    List<String> druidColNames = new ArrayList<>();
    for (RelDataTypeField field : rowType.getFieldList()) {
      druidColTypes.add(field.getType());
      druidColNames.add(field.getName());
      if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
        // timestamp
        continue;
      }
      if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
        // dimension
        continue;
      }
      metrics.add(field.getName());
    }
    List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
    DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false), dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
    final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(), null, false, false);
    tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, druidTable, ImmutableList.<RelNode>of(scan));
  } else {
    // Build Hive Table Scan Rel
    tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(), null, false, false);
  }
  return tableRel;
}
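Both Hive examples bootstrap a standalone RelOptCluster from scratch rather than inheriting one from an existing RelNode. The bare recipe, using only core Calcite classes (a plain VolcanoPlanner stands in for HiveVolcanoPlanner here):

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.plan.volcano.VolcanoPlanner;
import org.apache.calcite.rex.RexBuilder;

// A RelOptCluster is the per-query planning context: it ties together the
// planner, the type factory and the RexBuilder used by every RelNode.
RelOptPlanner planner = new VolcanoPlanner();
RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);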
Use of org.apache.calcite.plan.RelOptCluster in project hive by apache.
The class TestCBORuleFiredOnlyOnce, method testRuleFiredOnlyOnce.
@Test
public void testRuleFiredOnlyOnce() {
  HiveConf conf = new HiveConf();
  // Create HepPlanner
  HepProgramBuilder programBuilder = new HepProgramBuilder();
  programBuilder.addMatchOrder(HepMatchOrder.TOP_DOWN);
  programBuilder = programBuilder.addRuleCollection(ImmutableList.<RelOptRule>of(DummyRule.INSTANCE));
  // Create rules registry so that a rule is not triggered more than once
  HiveRulesRegistry registry = new HiveRulesRegistry();
  HivePlannerContext context = new HivePlannerContext(null, registry, null);
  HepPlanner planner = new HepPlanner(programBuilder.build(), context);
  // Cluster
  RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
  RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  // Create MD provider
  HiveDefaultRelMetadataProvider mdProvider = new HiveDefaultRelMetadataProvider(conf);
  List<RelMetadataProvider> list = Lists.newArrayList();
  list.add(mdProvider.getMetadataProvider());
  planner.registerMetadataProviders(list);
  RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list);
  final RelNode node = new DummyNode(cluster, cluster.traitSet());
  node.getCluster().setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, planner));
  planner.setRoot(node);
  planner.findBestExp();
  // Matches 3 times: twice on the original node, once on the new node created by the rule
  assertEquals(3, DummyRule.INSTANCE.numberMatches);
  // It is fired only once: on the original node
  assertEquals(1, DummyRule.INSTANCE.numberOnMatch);
}
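Stripped of the Hive-specific registry and metadata wiring, the HepPlanner recipe this test exercises is short. A minimal sketch, where MyRule.INSTANCE and inputRel are assumed to exist:

import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgram;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;

// Build a program, seed the planner with a root rel, and run it;
// findBestExp() returns the transformed tree.
HepProgram program = new HepProgramBuilder().addRuleInstance(MyRule.INSTANCE).build();
HepPlanner hepPlanner = new HepPlanner(program);
hepPlanner.setRoot(inputRel);
RelNode transformed = hepPlanner.findBestExp();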