Use of org.apache.calcite.plan.RelOptCluster in project hive by apache.
Class HiveSortLimit, method create:
/**
 * Creates a HiveSortLimit.
 *
 * @param input     Input relational expression
 * @param collation Collation (ordered list of sort specifications)
 * @param offset    Expression for the number of rows to discard before
 *                  returning the first row
 * @param fetch     Expression for the number of rows to fetch
 */
public static HiveSortLimit create(RelNode input, RelCollation collation, RexNode offset, RexNode fetch) {
  RelOptCluster cluster = input.getCluster();
  collation = RelCollationTraitDef.INSTANCE.canonize(collation);
  RelTraitSet traitSet = TraitsUtil.getSortTraitSet(cluster, input.getTraitSet(), collation);
  return new HiveSortLimit(cluster, traitSet, input, collation, offset, fetch);
}
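For context, a minimal sketch of how a caller might supply the offset and fetch expressions. The literal-building calls are standard Calcite RexBuilder API, but the surrounding variables (input, collation) and the LIMIT 10 OFFSET 5 scenario are illustrative assumptions, not taken from the Hive source.

// Hypothetical usage: ORDER BY <collation> LIMIT 10 OFFSET 5 on top of an existing plan.
RexBuilder rexBuilder = input.getCluster().getRexBuilder();
RexNode offset = rexBuilder.makeExactLiteral(BigDecimal.valueOf(5));   // rows to skip
RexNode fetch = rexBuilder.makeExactLiteral(BigDecimal.valueOf(10));   // rows to return
HiveSortLimit sortLimit = HiveSortLimit.create(input, collation, offset, fetch);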
Use of org.apache.calcite.plan.RelOptCluster in project hive by apache.
Class HiveMaterializedViewsRegistry, method createTableScan:
private static RelNode createTableScan(Table viewTable) {
  // 0. Recreate cluster
  final RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(null);
  final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
  final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  // 1. Create column schema
  final RowResolver rr = new RowResolver();
  // 1.1 Add column info for non-partition cols (ObjectInspector fields)
  StructObjectInspector rowObjectInspector;
  try {
    rowObjectInspector = (StructObjectInspector) viewTable.getDeserializer().getObjectInspector();
  } catch (SerDeException e) {
    // Bail out
    return null;
  }
  List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
  ColumnInfo colInfo;
  String colName;
  ArrayList<ColumnInfo> cInfoLst = new ArrayList<ColumnInfo>();
  for (int i = 0; i < fields.size(); i++) {
    colName = fields.get(i).getFieldName();
    colInfo = new ColumnInfo(fields.get(i).getFieldName(),
        TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()),
        null, false);
    rr.put(null, colName, colInfo);
    cInfoLst.add(colInfo);
  }
  ArrayList<ColumnInfo> nonPartitionColumns = new ArrayList<ColumnInfo>(cInfoLst);
  // 1.2 Add column info corresponding to partition columns
  ArrayList<ColumnInfo> partitionColumns = new ArrayList<ColumnInfo>();
  for (FieldSchema part_col : viewTable.getPartCols()) {
    colName = part_col.getName();
    colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true);
    rr.put(null, colName, colInfo);
    cInfoLst.add(colInfo);
    partitionColumns.add(colInfo);
  }
  // 1.3 Build row type from field <type, name>
  RelDataType rowType;
  try {
    rowType = TypeConverter.getType(cluster, rr, null);
  } catch (CalciteSemanticException e) {
    // Bail out
    return null;
  }
  // 2. Build RelOptAbstractTable
  String fullyQualifiedTabName = viewTable.getDbName();
  if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
    fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName();
  } else {
    fullyQualifiedTabName = viewTable.getTableName();
  }
  RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, rowType, viewTable,
      nonPartitionColumns, partitionColumns, new ArrayList<VirtualColumn>(),
      SessionState.get().getConf(), new HashMap<String, PrunedPartitionList>(), new AtomicInteger());
  RelNode tableRel;
  // 3. Build operator
  if (obtainTableType(viewTable) == TableType.DRUID) {
    // Build Druid query
    String address = HiveConf.getVar(SessionState.get().getConf(),
        HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS);
    String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE);
    Set<String> metrics = new HashSet<>();
    List<RelDataType> druidColTypes = new ArrayList<>();
    List<String> druidColNames = new ArrayList<>();
    for (RelDataTypeField field : rowType.getFieldList()) {
      druidColTypes.add(field.getType());
      druidColNames.add(field.getName());
      if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
        // timestamp
        continue;
      }
      if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
        // dimension
        continue;
      }
      metrics.add(field.getName());
    }
    List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
    DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false), dataSource,
        RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
    final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
        optTable, viewTable.getTableName(), null, false, false);
    tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
        optTable, druidTable, ImmutableList.<RelNode>of(scan));
  } else {
    // Build Hive Table Scan Rel
    tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
        optTable, viewTable.getTableName(), null, false, false);
  }
  return tableRel;
}
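The Druid branch above classifies each field of the row type by a simple convention: the designated timestamp column drives Druid's time dimension, VARCHAR columns become dimensions, and everything else is registered as a metric. A standalone restatement of that rule, as a sketch (the helper name isDruidMetric is invented for illustration):

// Hypothetical helper mirroring the classification loop above.
static boolean isDruidMetric(RelDataTypeField field) {
  if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
    return false; // the timestamp column is Druid's time dimension
  }
  if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
    return false; // string-typed columns are treated as dimensions
  }
  return true; // all remaining columns are aggregated as metrics
}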
Use of org.apache.calcite.plan.RelOptCluster in project flink by apache.
Class FlinkAggregateExpandDistinctAggregatesRule, method rewriteUsingGroupingSets:
/*
public RelBuilder convertSingletonDistinct(RelBuilder relBuilder,
    Aggregate aggregate, Set<Pair<List<Integer>, Integer>> argLists) {
  // For example,
  //   SELECT deptno, COUNT(*), SUM(bonus), MIN(DISTINCT sal)
  //   FROM emp
  //   GROUP BY deptno
  //
  // becomes
  //
  //   SELECT deptno, SUM(cnt), SUM(bonus), MIN(sal)
  //   FROM (
  //     SELECT deptno, COUNT(*) as cnt, SUM(bonus), sal
  //     FROM EMP
  //     GROUP BY deptno, sal)  // Aggregate B
  //   GROUP BY deptno          // Aggregate A
  relBuilder.push(aggregate.getInput());
  final List<Pair<RexNode, String>> projects = new ArrayList<>();
  final Map<Integer, Integer> sourceOf = new HashMap<>();
  SortedSet<Integer> newGroupSet = new TreeSet<>();
  final List<RelDataTypeField> childFields =
      relBuilder.peek().getRowType().getFieldList();
  final boolean hasGroupBy = aggregate.getGroupSet().size() > 0;
  // Add the distinct aggregate column(s) to the group-by columns,
  // if not already a part of the group-by
  newGroupSet.addAll(aggregate.getGroupSet().asList());
  for (Pair<List<Integer>, Integer> argList : argLists) {
    newGroupSet.addAll(argList.getKey());
  }
  // Re-map the arguments to the aggregate A. These arguments will get
  // remapped because of the intermediate aggregate B generated as part of the
  // transformation.
  for (int arg : newGroupSet) {
    sourceOf.put(arg, projects.size());
    projects.add(RexInputRef.of2(arg, childFields));
  }
  // Generate the intermediate aggregate B
  final List<AggregateCall> aggCalls = aggregate.getAggCallList();
  final List<AggregateCall> newAggCalls = new ArrayList<>();
  final List<Integer> fakeArgs = new ArrayList<>();
  final Map<AggregateCall, Integer> callArgMap = new HashMap<>();
  // First identify the real arguments, then use the rest for fake arguments
  // e.g. if real arguments are 0, 1, 3. Then the fake arguments will be 2, 4
  for (final AggregateCall aggCall : aggCalls) {
    if (!aggCall.isDistinct()) {
      for (int arg : aggCall.getArgList()) {
        if (!sourceOf.containsKey(arg)) {
          sourceOf.put(arg, projects.size());
        }
      }
    }
  }
  int fakeArg0 = 0;
  for (final AggregateCall aggCall : aggCalls) {
    // We will deal with non-distinct aggregates below
    if (!aggCall.isDistinct()) {
      boolean isGroupKeyUsedInAgg = false;
      for (int arg : aggCall.getArgList()) {
        if (sourceOf.containsKey(arg)) {
          isGroupKeyUsedInAgg = true;
          break;
        }
      }
      if (aggCall.getArgList().size() == 0 || isGroupKeyUsedInAgg) {
        while (sourceOf.get(fakeArg0) != null) {
          ++fakeArg0;
        }
        fakeArgs.add(fakeArg0);
      }
    }
  }
  for (final AggregateCall aggCall : aggCalls) {
    if (!aggCall.isDistinct()) {
      for (int arg : aggCall.getArgList()) {
        if (!sourceOf.containsKey(arg)) {
          sourceOf.remove(arg);
        }
      }
    }
  }
  // Compute the remapped arguments using fake arguments for non-distinct
  // aggregates with no arguments e.g. count(*).
  int fakeArgIdx = 0;
  for (final AggregateCall aggCall : aggCalls) {
    // Project the column corresponding to the distinct aggregate. Project
    // as-is all the non-distinct aggregates
    if (!aggCall.isDistinct()) {
      final AggregateCall newCall =
          AggregateCall.create(aggCall.getAggregation(), false,
              aggCall.getArgList(), -1,
              ImmutableBitSet.of(newGroupSet).cardinality(),
              relBuilder.peek(), null, aggCall.name);
      newAggCalls.add(newCall);
      if (newCall.getArgList().size() == 0) {
        int fakeArg = fakeArgs.get(fakeArgIdx);
        callArgMap.put(newCall, fakeArg);
        sourceOf.put(fakeArg, projects.size());
        projects.add(
            Pair.of((RexNode) new RexInputRef(fakeArg, newCall.getType()),
                newCall.getName()));
        ++fakeArgIdx;
      } else {
        for (int arg : newCall.getArgList()) {
          if (sourceOf.containsKey(arg)) {
            int fakeArg = fakeArgs.get(fakeArgIdx);
            callArgMap.put(newCall, fakeArg);
            sourceOf.put(fakeArg, projects.size());
            projects.add(
                Pair.of((RexNode) new RexInputRef(fakeArg, newCall.getType()),
                    newCall.getName()));
            ++fakeArgIdx;
          } else {
            sourceOf.put(arg, projects.size());
            projects.add(
                Pair.of((RexNode) new RexInputRef(arg, newCall.getType()),
                    newCall.getName()));
          }
        }
      }
    }
  }
  // Generate the aggregate B (see the reference example above)
  relBuilder.push(
      aggregate.copy(
          aggregate.getTraitSet(), relBuilder.build(),
          false, ImmutableBitSet.of(newGroupSet), null, newAggCalls));
  // Convert the existing aggregate to aggregate A (see the reference example above)
  final List<AggregateCall> newTopAggCalls =
      Lists.newArrayList(aggregate.getAggCallList());
  // Use the remapped arguments for the (non)distinct aggregate calls
  for (int i = 0; i < newTopAggCalls.size(); i++) {
    // Re-map arguments.
    final AggregateCall aggCall = newTopAggCalls.get(i);
    final int argCount = aggCall.getArgList().size();
    final List<Integer> newArgs = new ArrayList<>(argCount);
    final AggregateCall newCall;
    for (int j = 0; j < argCount; j++) {
      final Integer arg = aggCall.getArgList().get(j);
      if (callArgMap.containsKey(aggCall)) {
        newArgs.add(sourceOf.get(callArgMap.get(aggCall)));
      } else {
        newArgs.add(sourceOf.get(arg));
      }
    }
    if (aggCall.isDistinct()) {
      newCall =
          AggregateCall.create(aggCall.getAggregation(), false, newArgs,
              -1, aggregate.getGroupSet().cardinality(), relBuilder.peek(),
              aggCall.getType(), aggCall.name);
    } else {
      // If aggregate B had a COUNT aggregate call the corresponding aggregate at
      // aggregate A must be SUM. For other aggregates, it remains the same.
      if (aggCall.getAggregation() instanceof SqlCountAggFunction) {
        if (aggCall.getArgList().size() == 0) {
          newArgs.add(sourceOf.get(callArgMap.get(aggCall)));
        }
        if (hasGroupBy) {
          SqlSumAggFunction sumAgg = new SqlSumAggFunction(null);
          newCall =
              AggregateCall.create(sumAgg, false, newArgs, -1,
                  aggregate.getGroupSet().cardinality(), relBuilder.peek(),
                  aggCall.getType(), aggCall.getName());
        } else {
          SqlSumEmptyIsZeroAggFunction sumAgg = new SqlSumEmptyIsZeroAggFunction();
          newCall =
              AggregateCall.create(sumAgg, false, newArgs, -1,
                  aggregate.getGroupSet().cardinality(), relBuilder.peek(),
                  aggCall.getType(), aggCall.getName());
        }
      } else {
        newCall =
            AggregateCall.create(aggCall.getAggregation(), false, newArgs, -1,
                aggregate.getGroupSet().cardinality(),
                relBuilder.peek(), aggCall.getType(), aggCall.name);
      }
    }
    newTopAggCalls.set(i, newCall);
  }
  // Populate the group-by keys with the remapped arguments for aggregate A
  newGroupSet.clear();
  for (int arg : aggregate.getGroupSet()) {
    newGroupSet.add(sourceOf.get(arg));
  }
  relBuilder.push(
      aggregate.copy(aggregate.getTraitSet(),
          relBuilder.build(), aggregate.indicator,
          ImmutableBitSet.of(newGroupSet), null, newTopAggCalls));
  return relBuilder;
}
*/
@SuppressWarnings("DanglingJavadoc")
private void rewriteUsingGroupingSets(RelOptRuleCall call, Aggregate aggregate,
    Set<Pair<List<Integer>, Integer>> argLists) {
  final Set<ImmutableBitSet> groupSetTreeSet = new TreeSet<>(ImmutableBitSet.ORDERING);
  groupSetTreeSet.add(aggregate.getGroupSet());
  for (Pair<List<Integer>, Integer> argList : argLists) {
    groupSetTreeSet.add(
        ImmutableBitSet.of(argList.left)
            .setIf(argList.right, argList.right >= 0)
            .union(aggregate.getGroupSet()));
  }
  final ImmutableList<ImmutableBitSet> groupSets = ImmutableList.copyOf(groupSetTreeSet);
  final ImmutableBitSet fullGroupSet = ImmutableBitSet.union(groupSets);
  final List<AggregateCall> distinctAggCalls = new ArrayList<>();
  for (Pair<AggregateCall, String> aggCall : aggregate.getNamedAggCalls()) {
    if (!aggCall.left.isDistinct()) {
      distinctAggCalls.add(aggCall.left.rename(aggCall.right));
    }
  }
  final RelBuilder relBuilder = call.builder();
  relBuilder.push(aggregate.getInput());
  relBuilder.aggregate(
      relBuilder.groupKey(fullGroupSet, groupSets.size() > 1, groupSets), distinctAggCalls);
  final RelNode distinct = relBuilder.peek();
  final int groupCount = fullGroupSet.cardinality();
  final int indicatorCount = groupSets.size() > 1 ? groupCount : 0;
  final RelOptCluster cluster = aggregate.getCluster();
  final RexBuilder rexBuilder = cluster.getRexBuilder();
  final RelDataTypeFactory typeFactory = cluster.getTypeFactory();
  final RelDataType booleanType =
      typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.BOOLEAN), false);
  final List<Pair<RexNode, String>> predicates = new ArrayList<>();
  final Map<ImmutableBitSet, Integer> filters = new HashMap<>();

  /** Function to register a filter for a group set. */
  class Registrar {
    RexNode group = null;

    private int register(ImmutableBitSet groupSet) {
      if (group == null) {
        group = makeGroup(groupCount - 1);
      }
      final RexNode node = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, group,
          rexBuilder.makeExactLiteral(toNumber(remap(fullGroupSet, groupSet))));
      predicates.add(Pair.of(node, toString(groupSet)));
      return groupCount + indicatorCount + distinctAggCalls.size() + predicates.size() - 1;
    }

    private RexNode makeGroup(int i) {
      final RexInputRef ref = rexBuilder.makeInputRef(booleanType, groupCount + i);
      final RexNode kase = rexBuilder.makeCall(SqlStdOperatorTable.CASE, ref,
          rexBuilder.makeExactLiteral(BigDecimal.ZERO),
          rexBuilder.makeExactLiteral(TWO.pow(i)));
      if (i == 0) {
        return kase;
      } else {
        return rexBuilder.makeCall(SqlStdOperatorTable.PLUS, makeGroup(i - 1), kase);
      }
    }

    private BigDecimal toNumber(ImmutableBitSet bitSet) {
      BigDecimal n = BigDecimal.ZERO;
      for (int key : bitSet) {
        n = n.add(TWO.pow(key));
      }
      return n;
    }

    private String toString(ImmutableBitSet bitSet) {
      final StringBuilder buf = new StringBuilder("$i");
      for (int key : bitSet) {
        buf.append(key).append('_');
      }
      return buf.substring(0, buf.length() - 1);
    }
  }
  final Registrar registrar = new Registrar();
  for (ImmutableBitSet groupSet : groupSets) {
    filters.put(groupSet, registrar.register(groupSet));
  }
  if (!predicates.isEmpty()) {
    List<Pair<RexNode, String>> nodes = new ArrayList<>();
    for (RelDataTypeField f : relBuilder.peek().getRowType().getFieldList()) {
      final RexNode node = rexBuilder.makeInputRef(f.getType(), f.getIndex());
      nodes.add(Pair.of(node, f.getName()));
    }
    nodes.addAll(predicates);
    relBuilder.project(Pair.left(nodes), Pair.right(nodes));
  }
  int x = groupCount + indicatorCount;
  final List<AggregateCall> newCalls = new ArrayList<>();
  for (AggregateCall aggCall : aggregate.getAggCallList()) {
    final int newFilterArg;
    final List<Integer> newArgList;
    final SqlAggFunction aggregation;
    if (!aggCall.isDistinct()) {
      aggregation = SqlStdOperatorTable.MIN;
      newArgList = ImmutableIntList.of(x++);
      newFilterArg = filters.get(aggregate.getGroupSet());
    } else {
      aggregation = aggCall.getAggregation();
      newArgList = remap(fullGroupSet, aggCall.getArgList());
      newFilterArg = filters.get(
          ImmutableBitSet.of(aggCall.getArgList())
              .setIf(aggCall.filterArg, aggCall.filterArg >= 0)
              .union(aggregate.getGroupSet()));
    }
    final AggregateCall newCall = AggregateCall.create(aggregation, false, newArgList,
        newFilterArg, aggregate.getGroupCount(), distinct, null, aggCall.name);
    newCalls.add(newCall);
  }
  relBuilder.aggregate(
      relBuilder.groupKey(remap(fullGroupSet, aggregate.getGroupSet()), aggregate.indicator,
          remap(fullGroupSet, aggregate.getGroupSets())),
      newCalls);
  relBuilder.convert(aggregate.getRowType(), true);
  call.transformTo(relBuilder.build());
}
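To see what the grouping-sets rewrite produces, an illustrative example in the same comment style as the disabled method above (table and column names are invented):

// Illustrative only. A query mixing distinct and non-distinct aggregates,
//
//   SELECT deptno, SUM(bonus), COUNT(DISTINCT sal) FROM emp GROUP BY deptno
//
// is evaluated over GROUPING SETS ((deptno), (deptno, sal)). The Registrar
// encodes each grouping set as a number by summing 2^key over its remapped
// bits (e.g. the set {0, 2} encodes as 1 + 4 = 5) and emits one predicate per
// set. The top aggregate then uses FILTER: SUM(bonus) reads its pre-aggregated
// value (via MIN) from rows of the (deptno) set, while COUNT(sal) counts rows
// of the (deptno, sal) set, where sal is already distinct per group.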
Use of org.apache.calcite.plan.RelOptCluster in project flink by apache.
Class FlinkRelDecorrelator, method createValueGenerator:
/**
 * Creates a RelNode tree that produces a list of correlated variables.
 *
 * @param correlations         correlated variables to generate
 * @param valueGenFieldOffset  offset in the output at which the generated
 *                             columns will start
 * @param mapCorVarToOutputPos output positions for the generated correlated
 *                             variables (populated by this method)
 * @return the root of the resulting RelNode tree
 */
private RelNode createValueGenerator(Iterable<Correlation> correlations, int valueGenFieldOffset,
    SortedMap<Correlation, Integer> mapCorVarToOutputPos) {
  final Map<RelNode, List<Integer>> mapNewInputToOutputPos = new HashMap<>();
  final Map<RelNode, Integer> mapNewInputToNewOffset = new HashMap<>();
  // Add to map all the referenced positions (relative to each input rel).
  for (Correlation corVar : correlations) {
    final int oldCorVarOffset = corVar.field;
    final RelNode oldInput = getCorRel(corVar);
    assert oldInput != null;
    final Frame frame = map.get(oldInput);
    assert frame != null;
    final RelNode newInput = frame.r;
    final List<Integer> newLocalOutputPosList;
    if (!mapNewInputToOutputPos.containsKey(newInput)) {
      newLocalOutputPosList = Lists.newArrayList();
    } else {
      newLocalOutputPosList = mapNewInputToOutputPos.get(newInput);
    }
    final int newCorVarOffset = frame.oldToNewOutputPos.get(oldCorVarOffset);
    // Add all unique positions referenced.
    if (!newLocalOutputPosList.contains(newCorVarOffset)) {
      newLocalOutputPosList.add(newCorVarOffset);
    }
    mapNewInputToOutputPos.put(newInput, newLocalOutputPosList);
  }
  int offset = 0;
  // Project only the correlated fields out of each input rel
  // and join the project rels together.
  // To make sure the plan does not change in terms of join order,
  // join these rels based on their occurrence in the cor var list, which
  // is sorted.
  final Set<RelNode> joinedInputRelSet = Sets.newHashSet();
  RelNode r = null;
  for (Correlation corVar : correlations) {
    final RelNode oldInput = getCorRel(corVar);
    assert oldInput != null;
    final RelNode newInput = map.get(oldInput).r;
    assert newInput != null;
    if (!joinedInputRelSet.contains(newInput)) {
      RelNode project = RelOptUtil.createProject(newInput, mapNewInputToOutputPos.get(newInput));
      RelNode distinct = RelOptUtil.createDistinctRel(project);
      RelOptCluster cluster = distinct.getCluster();
      joinedInputRelSet.add(newInput);
      mapNewInputToNewOffset.put(newInput, offset);
      offset += distinct.getRowType().getFieldCount();
      if (r == null) {
        r = distinct;
      } else {
        r = LogicalJoin.create(r, distinct, cluster.getRexBuilder().makeLiteral(true),
            ImmutableSet.<CorrelationId>of(), JoinRelType.INNER);
      }
    }
  }
  // Compute the output positions of the columns referencing correlated variables.
  for (Correlation corVar : correlations) {
    // The first input of a Correlator is always the rel defining
    // the correlated variables.
    final RelNode oldInput = getCorRel(corVar);
    assert oldInput != null;
    final Frame frame = map.get(oldInput);
    final RelNode newInput = frame.r;
    assert newInput != null;
    final List<Integer> newLocalOutputPosList = mapNewInputToOutputPos.get(newInput);
    final int newLocalOutputPos = frame.oldToNewOutputPos.get(corVar.field);
    // newOutputPos is the index of the cor var in the referenced
    // position list plus the offset of the referenced position list of
    // each newInput.
    final int newOutputPos = newLocalOutputPosList.indexOf(newLocalOutputPos)
        + mapNewInputToNewOffset.get(newInput) + valueGenFieldOffset;
    if (mapCorVarToOutputPos.containsKey(corVar)) {
      assert mapCorVarToOutputPos.get(corVar) == newOutputPos;
    }
    mapCorVarToOutputPos.put(corVar, newOutputPos);
  }
  return r;
}
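To make the generated tree's shape concrete, a sketch (the input names A and B are invented): when the correlated variables reference two different inputs, the value generator cross-joins the de-duplicated projections of the referenced columns, and each variable's output position is recovered by the arithmetic in the final loop.

// Illustrative plan shape, not code from the decorrelator:
//
//   LogicalJoin(condition = true, type = INNER)
//     Distinct(Project(A, [cor-var columns referenced in A]))
//     Distinct(Project(B, [cor-var columns referenced in B]))
//
// output position of a cor var = index within its input's projected column list
//                                + that input's field offset in the join
//                                + valueGenFieldOffset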
Use of org.apache.calcite.plan.RelOptCluster in project flink by apache.
Class FlinkRelDecorrelator, method decorrelateQuery:
//~ Methods ----------------------------------------------------------------

/**
 * Decorrelates a query.
 *
 * <p>This is the main entry point to {@code FlinkRelDecorrelator}.
 *
 * @param rootRel Root node of the query
 * @return Equivalent query with all
 * {@link LogicalCorrelate} instances removed
 */
public static RelNode decorrelateQuery(RelNode rootRel) {
  final CorelMap corelMap = new CorelMapBuilder().build(rootRel);
  if (!corelMap.hasCorrelation()) {
    return rootRel;
  }
  final RelOptCluster cluster = rootRel.getCluster();
  final FlinkRelDecorrelator decorrelator =
      new FlinkRelDecorrelator(cluster, corelMap, cluster.getPlanner().getContext());
  RelNode newRootRel = decorrelator.removeCorrelationViaRule(rootRel);
  if (SQL2REL_LOGGER.isDebugEnabled()) {
    SQL2REL_LOGGER.debug(RelOptUtil.dumpPlan("Plan after removing Correlator", newRootRel,
        false, SqlExplainLevel.EXPPLAN_ATTRIBUTES));
  }
  if (!decorrelator.cm.mapCorVarToCorRel.isEmpty()) {
    newRootRel = decorrelator.decorrelate(newRootRel);
  }
  return newRootRel;
}
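A minimal usage sketch; the way rootRel is obtained here (via Calcite's SqlToRelConverter) is an assumed context, not part of the Flink source.

// Hypothetical call site: decorrelate a freshly converted logical plan.
RelNode rootRel = sqlToRelConverter.convertQuery(validatedSqlNode, false, true).rel;  // assumed setup
RelNode decorrelated = FlinkRelDecorrelator.decorrelateQuery(rootRel);
// When the plan contains no LogicalCorrelate, the original rootRel is returned unchanged.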