Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexNode in project druid by druid-io.
Class DruidPlanner, method planWithBindableConvention:
private PlannerResult planWithBindableConvention(final SqlExplain explain, final RelRoot root)
    throws RelConversionException {
  BindableRel bindableRel = (BindableRel) planner.transform(
      Rules.BINDABLE_CONVENTION_RULES,
      planner.getEmptyTraitSet().replace(BindableConvention.INSTANCE).plus(root.collation),
      root.rel);
  if (!root.isRefTrivial()) {
    // Add a projection on top to accommodate root.fields.
    final List<RexNode> projects = new ArrayList<>();
    final RexBuilder rexBuilder = bindableRel.getCluster().getRexBuilder();
    for (int field : Pair.left(root.fields)) {
      projects.add(rexBuilder.makeInputRef(bindableRel, field));
    }
    bindableRel = new Bindables.BindableProject(
        bindableRel.getCluster(),
        bindableRel.getTraitSet(),
        bindableRel,
        projects,
        root.validatedRowType);
  }
  if (explain != null) {
    return planExplanation(bindableRel, explain);
  } else {
    final BindableRel theRel = bindableRel;
    final DataContext dataContext =
        plannerContext.createDataContext((JavaTypeFactory) planner.getTypeFactory());
    final Supplier<Sequence<Object[]>> resultsSupplier = new Supplier<Sequence<Object[]>>() {
      @Override
      public Sequence<Object[]> get() {
        final Enumerable enumerable = theRel.bind(dataContext);
        return Sequences.simple(enumerable);
      }
    };
    return new PlannerResult(resultsSupplier, root.validatedRowType);
  }
}
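The projection step above can be read in isolation: when root.isRefTrivial() is false, one input reference is built per ordinal in root.fields. A minimal sketch of that pattern follows; the helper name and parameters are assumptions introduced purely for illustration and are not part of DruidPlanner.

import java.util.ArrayList;
import java.util.List;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;

// Hypothetical helper: builds one input reference per requested ordinal,
// mirroring the root.fields projection in the snippet above.
static List<RexNode> projectFields(RelNode input, List<Integer> fieldOrdinals) {
  final RexBuilder rexBuilder = input.getCluster().getRexBuilder();
  final List<RexNode> projects = new ArrayList<>();
  for (int field : fieldOrdinals) {
    // makeInputRef(RelNode, int) takes the field type from the input's row type,
    // so the resulting project only reorders or narrows columns, without casts.
    projects.add(rexBuilder.makeInputRef(input, field));
  }
  return projects;
}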
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexNode in project drill by apache.
Class LimitUnionExchangeTransposeRule, method onMatch:
@Override
public void onMatch(RelOptRuleCall call) {
  final LimitPrel limit = (LimitPrel) call.rel(0);
  final UnionExchangePrel unionExchangePrel = (UnionExchangePrel) call.rel(1);
  RelNode child = unionExchangePrel.getInput();
  final int offset = limit.getOffset() != null ? Math.max(0, RexLiteral.intValue(limit.getOffset())) : 0;
  final int fetch = Math.max(0, RexLiteral.intValue(limit.getFetch()));
  // The child Limit uses a conservative approach: offset 0 and
  // fetch = parent limit offset + parent limit fetch.
  final RexNode childFetch = limit.getCluster().getRexBuilder()
      .makeExactLiteral(BigDecimal.valueOf(offset + fetch));
  final RelNode limitUnderExchange =
      new LimitPrel(child.getCluster(), child.getTraitSet(), child, null, childFetch);
  final RelNode newUnionExch =
      new UnionExchangePrel(unionExchangePrel.getCluster(), unionExchangePrel.getTraitSet(), limitUnderExchange);
  final RelNode limitAboveExchange =
      new LimitPrel(limit.getCluster(), limit.getTraitSet(), newUnionExch, limit.getOffset(), limit.getFetch(), true);
  call.transformTo(limitAboveExchange);
}
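The conservative fetch for the pushed-down limit is simply parent offset + parent fetch, with the offset reset to zero. A small sketch of that computation, using a hypothetical helper name that is not part of the Drill rule:

import java.math.BigDecimal;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexLiteral;
import org.apache.calcite.rex.RexNode;

// Hypothetical helper: computes the fetch literal for the limit that is pushed
// below the exchange (offset 0, fetch = parent offset + parent fetch).
static RexNode conservativeChildFetch(RexBuilder rexBuilder, RexNode offsetNode, RexNode fetchNode) {
  final int offset = offsetNode != null ? Math.max(0, RexLiteral.intValue(offsetNode)) : 0;
  final int fetch = Math.max(0, RexLiteral.intValue(fetchNode));
  return rexBuilder.makeExactLiteral(BigDecimal.valueOf(offset + fetch));
}

For example, LIMIT 10 OFFSET 5 pushes fetch 15 and offset 0 below the exchange; the limit re-applied above the exchange then trims the merged stream back to 10 rows starting at offset 5.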
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexNode in project drill by apache.
Class MongoPushDownFilterForScan, method onMatch:
@Override
public void onMatch(RelOptRuleCall call) {
  final ScanPrel scan = (ScanPrel) call.rel(1);
  final FilterPrel filter = (FilterPrel) call.rel(0);
  final RexNode condition = filter.getCondition();
  MongoGroupScan groupScan = (MongoGroupScan) scan.getGroupScan();
  if (groupScan.isFilterPushedDown()) {
    return;
  }
  LogicalExpression conditionExp = DrillOptiq.toDrill(
      new DrillParseContext(PrelUtil.getPlannerSettings(call.getPlanner())), scan, condition);
  MongoFilterBuilder mongoFilterBuilder = new MongoFilterBuilder(groupScan, conditionExp);
  MongoScanSpec newScanSpec = mongoFilterBuilder.parseTree();
  if (newScanSpec == null) {
    // No filter pushdown, so there is nothing to apply.
    return;
  }
  MongoGroupScan newGroupsScan = null;
  try {
    newGroupsScan = new MongoGroupScan(groupScan.getUserName(), groupScan.getStoragePlugin(),
        newScanSpec, groupScan.getColumns());
  } catch (IOException e) {
    logger.error(e.getMessage(), e);
    throw new DrillRuntimeException(e.getMessage(), e);
  }
  newGroupsScan.setFilterPushedDown(true);
  final ScanPrel newScanPrel = ScanPrel.create(scan, filter.getTraitSet(), newGroupsScan, scan.getRowType());
  if (mongoFilterBuilder.isAllExpressionsConverted()) {
    /*
     * Since we could convert the entire filter condition expression into a
     * Mongo filter, we can eliminate the filter operator altogether.
     */
    call.transformTo(newScanPrel);
  } else {
    call.transformTo(filter.copy(filter.getTraitSet(), ImmutableList.of((RelNode) newScanPrel)));
  }
}
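The rule is driven by a Filter-over-Scan match pattern, which is why call.rel(0) is the filter and call.rel(1) is the scan beneath it. Below is a sketch of how such a pattern is typically declared with Calcite's classic rule API; the class name is hypothetical and this is not Drill's actual rule definition.

import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.rel.core.Filter;
import org.apache.calcite.rel.core.TableScan;

// Sketch of a Filter-over-Scan match pattern. The Drill rule above matches
// FilterPrel over ScanPrel in the same way.
class FilterOnScanRuleSketch extends RelOptRule {
  FilterOnScanRuleSketch() {
    // call.rel(0) will be the Filter, call.rel(1) the TableScan underneath it.
    super(operand(Filter.class, operand(TableScan.class, none())), "FilterOnScanRuleSketch");
  }

  @Override
  public void onMatch(RelOptRuleCall call) {
    final Filter filter = call.rel(0);
    final TableScan scan = call.rel(1);
    // A real rule would translate filter.getCondition() into the datasource's
    // native predicate and call call.transformTo(...) with the rewritten scan.
  }
}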
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexNode in project flink by apache.
Class FlinkRelDecorrelator, method decorrelateRel:
/**
 * Rewrites a LogicalProject.
 *
 * @param rel the project rel to rewrite
 */
public Frame decorrelateRel(LogicalProject rel) {
  //
  // Rewrite logic:
  //
  // 1. Pass along any correlated variables coming from the input.
  //
  final RelNode oldInput = rel.getInput();
  Frame frame = getInvoke(oldInput, rel);
  if (frame == null) {
    // If the input has not been rewritten, do not rewrite this rel.
    return null;
  }
  final List<RexNode> oldProjects = rel.getProjects();
  final List<RelDataTypeField> relOutput = rel.getRowType().getFieldList();
  // LogicalProject projects the original expressions,
  // plus any correlated variables the input wants to pass along.
  final List<Pair<RexNode, String>> projects = Lists.newArrayList();
  // If this LogicalProject has correlated references, create a value generator
  // and produce the correlated variables in the new output.
  if (cm.mapRefRelToCorVar.containsKey(rel)) {
    decorrelateInputWithValueGenerator(rel);
    // The old input should be mapped to the LogicalJoin created by
    // rewriteInputWithValueGenerator().
    frame = map.get(oldInput);
  }
  // LogicalProject projects the original expressions.
  final Map<Integer, Integer> mapOldToNewOutputPos = Maps.newHashMap();
  int newPos;
  for (newPos = 0; newPos < oldProjects.size(); newPos++) {
    projects.add(newPos,
        Pair.of(decorrelateExpr(oldProjects.get(newPos)), relOutput.get(newPos).getName()));
    mapOldToNewOutputPos.put(newPos, newPos);
  }
  // Project any correlated variables the input wants to pass along.
  final SortedMap<Correlation, Integer> mapCorVarToOutputPos = new TreeMap<>();
  for (Map.Entry<Correlation, Integer> entry : frame.corVarOutputPos.entrySet()) {
    projects.add(RexInputRef.of2(entry.getValue(), frame.r.getRowType().getFieldList()));
    mapCorVarToOutputPos.put(entry.getKey(), newPos);
    newPos++;
  }
  RelNode newProject = RelOptUtil.createProject(frame.r, projects, false);
  return register(rel, newProject, mapOldToNewOutputPos, mapCorVarToOutputPos);
}
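The pass-through loop at the end illustrates a common decorrelation step: append an input reference for every extra column that must survive the new Project. A minimal sketch of that step, with a hypothetical helper name and the column positions passed in explicitly rather than taken from a Frame:

import java.util.List;
import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.rex.RexInputRef;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.util.Pair;

// Hypothetical helper: appends an (expression, field name) pair for each extra
// input column that must be passed through the new Project unchanged.
static void passThroughColumns(List<Pair<RexNode, String>> projects,
                               List<Integer> inputPositions,
                               List<RelDataTypeField> inputFields) {
  for (int pos : inputPositions) {
    // of2 returns the RexInputRef paired with the underlying field's name.
    projects.add(RexInputRef.of2(pos, inputFields));
  }
}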
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexNode in project flink by apache.
Class FlinkRelDecorrelator, method decorrelateRel:
/**
 * Rewrites a Correlator into a left outer join.
 *
 * @param rel Correlator
 */
public Frame decorrelateRel(LogicalCorrelate rel) {
  //
  // Rewrite logic:
  //
  // The original left input will be joined with the new right input that
  // has generated correlated variables propagated up. For any generated
  // cor vars that are not used in the join key, pass them along to be
  // joined later with the CorrelatorRels that produce them.
  //
  // The right input to the Correlator should produce correlated variables.
  final RelNode oldLeft = rel.getInput(0);
  final RelNode oldRight = rel.getInput(1);
  final Frame leftFrame = getInvoke(oldLeft, rel);
  final Frame rightFrame = getInvoke(oldRight, rel);
  if (leftFrame == null || rightFrame == null) {
    // If any input has not been rewritten, do not rewrite this rel.
    return null;
  }
  if (rightFrame.corVarOutputPos.isEmpty()) {
    return null;
  }
  assert rel.getRequiredColumns().cardinality() <= rightFrame.corVarOutputPos.keySet().size();
  // Change the correlator rel into a join.
  // Join all the correlated variables produced by this correlator rel
  // with the values generated and propagated from the right input.
  final SortedMap<Correlation, Integer> corVarOutputPos = new TreeMap<>(rightFrame.corVarOutputPos);
  final List<RexNode> conditions = new ArrayList<>();
  final List<RelDataTypeField> newLeftOutput = leftFrame.r.getRowType().getFieldList();
  int newLeftFieldCount = newLeftOutput.size();
  final List<RelDataTypeField> newRightOutput = rightFrame.r.getRowType().getFieldList();
  for (Map.Entry<Correlation, Integer> rightOutputPos : Lists.newArrayList(corVarOutputPos.entrySet())) {
    final Correlation corVar = rightOutputPos.getKey();
    if (!corVar.corr.equals(rel.getCorrelationId())) {
      continue;
    }
    final int newLeftPos = leftFrame.oldToNewOutputPos.get(corVar.field);
    final int newRightPos = rightOutputPos.getValue();
    conditions.add(rexBuilder.makeCall(
        SqlStdOperatorTable.EQUALS,
        RexInputRef.of(newLeftPos, newLeftOutput),
        new RexInputRef(newLeftFieldCount + newRightPos, newRightOutput.get(newRightPos).getType())));
    // Remove this cor var from the output position mapping.
    corVarOutputPos.remove(corVar);
  }
  // Update the output positions for the cor vars: only pass on the cor
  // vars that are not used in the join key.
  for (Correlation corVar : corVarOutputPos.keySet()) {
    int newPos = corVarOutputPos.get(corVar) + newLeftFieldCount;
    corVarOutputPos.put(corVar, newPos);
  }
  // Then add any cor var from the left input; output positions do not need to change.
  corVarOutputPos.putAll(leftFrame.corVarOutputPos);
  // Create the mapping between the output of the old correlation rel
  // and the new join rel.
  final Map<Integer, Integer> mapOldToNewOutputPos = Maps.newHashMap();
  int oldLeftFieldCount = oldLeft.getRowType().getFieldCount();
  int oldRightFieldCount = oldRight.getRowType().getFieldCount();
  assert rel.getRowType().getFieldCount() == oldLeftFieldCount + oldRightFieldCount;
  // Left input positions are not changed.
  mapOldToNewOutputPos.putAll(leftFrame.oldToNewOutputPos);
  // Right input positions are shifted by newLeftFieldCount.
  for (int i = 0; i < oldRightFieldCount; i++) {
    mapOldToNewOutputPos.put(i + oldLeftFieldCount,
        rightFrame.oldToNewOutputPos.get(i) + newLeftFieldCount);
  }
  final RexNode condition = RexUtil.composeConjunction(rexBuilder, conditions, false);
  RelNode newJoin = LogicalJoin.create(leftFrame.r, rightFrame.r, condition,
      ImmutableSet.<CorrelationId>of(), rel.getJoinType().toJoinType());
  return register(rel, newJoin, mapOldToNewOutputPos, corVarOutputPos);
}
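The join condition built in the loop above is one left = right equality per correlated column, with right-side positions offset by the width of the left input, and the equalities AND-ed together at the end. A minimal sketch of that construction, assuming a hypothetical helper that receives the left/right position pairs directly:

import java.util.ArrayList;
import java.util.List;
import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexInputRef;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexUtil;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;

// Hypothetical helper: builds the equi-join condition used when a correlate is
// rewritten as a join, one equality per correlated column.
static RexNode buildJoinCondition(RexBuilder rexBuilder,
                                  List<RelDataTypeField> leftFields,
                                  List<RelDataTypeField> rightFields,
                                  List<int[]> leftRightPairs) {
  final int leftFieldCount = leftFields.size();
  final List<RexNode> conditions = new ArrayList<>();
  for (int[] pair : leftRightPairs) {
    final int leftPos = pair[0];
    final int rightPos = pair[1];
    // Right-side references are shifted by the left input's field count,
    // because the join concatenates left and right row types.
    conditions.add(rexBuilder.makeCall(
        SqlStdOperatorTable.EQUALS,
        RexInputRef.of(leftPos, leftFields),
        new RexInputRef(leftFieldCount + rightPos, rightFields.get(rightPos).getType())));
  }
  // Combines the equalities into a single conjunction; with nullOnEmpty = false,
  // an empty list yields a TRUE literal, i.e. an unconditioned join.
  return RexUtil.composeConjunction(rexBuilder, conditions, false);
}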