Use of org.apache.calcite.rex.RexLiteral in project hive by apache.
Class HivePreFilteringRule, method extractCommonOperands.
private static List<RexNode> extractCommonOperands(RexBuilder rexBuilder, RexNode condition, int maxCNFNodeCount) {
  assert condition.getKind() == SqlKind.OR;
  Multimap<String, RexNode> reductionCondition = LinkedHashMultimap.create();
  // Data structure to control whether a certain reference is present in every operand
  Set<String> refsInAllOperands = null;
  // 1. We extract the information necessary to create the predicate for the new filter;
  // currently we support comparison functions, IN and BETWEEN
  ImmutableList<RexNode> operands = RexUtil.flattenOr(((RexCall) condition).getOperands());
  for (int i = 0; i < operands.size(); i++) {
    final RexNode operand = operands.get(i);
    final RexNode operandCNF = RexUtil.toCnf(rexBuilder, maxCNFNodeCount, operand);
    final List<RexNode> conjunctions = RelOptUtil.conjunctions(operandCNF);
    Set<String> refsInCurrentOperand = Sets.newHashSet();
    for (RexNode conjunction : conjunctions) {
      // We do not know what it is; we bail out for safety
      if (!(conjunction instanceof RexCall) || !HiveCalciteUtil.isDeterministic(conjunction)) {
        return new ArrayList<>();
      }
      RexCall conjCall = (RexCall) conjunction;
      RexNode ref = null;
      if (COMPARISON.contains(conjCall.getOperator().getKind())) {
        if (conjCall.operands.get(0) instanceof RexInputRef && conjCall.operands.get(1) instanceof RexLiteral) {
          ref = conjCall.operands.get(0);
        } else if (conjCall.operands.get(1) instanceof RexInputRef && conjCall.operands.get(0) instanceof RexLiteral) {
          ref = conjCall.operands.get(1);
        } else {
          // We do not know what it is; we bail out for safety
          return new ArrayList<>();
        }
      } else if (conjCall.getOperator().getKind().equals(SqlKind.IN)) {
        ref = conjCall.operands.get(0);
      } else if (conjCall.getOperator().getKind().equals(SqlKind.BETWEEN)) {
        ref = conjCall.operands.get(1);
      } else {
        // We do not know what it is; we bail out for safety
        return new ArrayList<>();
      }
      String stringRef = ref.toString();
      reductionCondition.put(stringRef, conjCall);
      refsInCurrentOperand.add(stringRef);
    }
    // Update the references that are present in every operand seen so far
    if (i == 0) {
      refsInAllOperands = refsInCurrentOperand;
    } else {
      refsInAllOperands = Sets.intersection(refsInAllOperands, refsInCurrentOperand);
    }
    // If no reference is common to all operands seen so far, bail out
    if (refsInAllOperands.isEmpty()) {
      return new ArrayList<>();
    }
  }
  // 2. We gather the common factors and return them
  List<RexNode> commonOperands = new ArrayList<>();
  for (String ref : refsInAllOperands) {
    commonOperands.add(RexUtil.composeDisjunction(rexBuilder, reductionCondition.get(ref), false));
  }
  return commonOperands;
}
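The method returns one disjunction per column reference that appears in every OR operand; those factors can then be applied as an additional pre-filter without changing the result of the original condition. Below is a minimal, self-contained sketch of the same common-factor bookkeeping, using plain JDK collections and strings in place of RexNode and the Guava multimap. The CommonFactorSketch class, its crude string-based "column" extraction, and the sample predicates are illustrative assumptions, not Hive code.

// Sketch: for a condition of the form OR(AND(...), AND(...), ...), collect the
// conjuncts of each OR operand keyed by the column they reference, intersect the
// key sets across operands, and emit one disjunction per surviving key.
import java.util.*;

public class CommonFactorSketch {

  // Each inner list is one OR operand, already split into conjuncts such as "a=1".
  static List<String> extractCommonOperands(List<List<String>> orOperands) {
    Map<String, List<String>> reductionCondition = new LinkedHashMap<>();
    Set<String> refsInAllOperands = null;
    for (List<String> conjunctions : orOperands) {
      Set<String> refsInCurrentOperand = new HashSet<>();
      for (String conjunct : conjunctions) {
        String ref = conjunct.split("[<>=]")[0];   // crude "column" extraction
        reductionCondition.computeIfAbsent(ref, k -> new ArrayList<>()).add(conjunct);
        refsInCurrentOperand.add(ref);
      }
      if (refsInAllOperands == null) {
        refsInAllOperands = refsInCurrentOperand;
      } else {
        refsInAllOperands.retainAll(refsInCurrentOperand);
      }
      if (refsInAllOperands.isEmpty()) {
        return new ArrayList<>();                  // no common reference, bail out
      }
    }
    if (refsInAllOperands == null) {
      return new ArrayList<>();                    // empty input, nothing to factor
    }
    List<String> commonOperands = new ArrayList<>();
    for (String ref : refsInAllOperands) {
      commonOperands.add("OR(" + String.join(", ", reductionCondition.get(ref)) + ")");
    }
    return commonOperands;
  }

  public static void main(String[] args) {
    // (a=1 AND b=2) OR (a=3 AND c=4): only the factor on "a" can be pulled above the OR.
    System.out.println(extractCommonOperands(Arrays.asList(
        Arrays.asList("a=1", "b=2"),
        Arrays.asList("a=3", "c=4"))));            // prints [OR(a=1, a=3)]
  }
}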
Use of org.apache.calcite.rex.RexLiteral in project flink by apache.
Class FlinkRelDecorrelator, method decorrelateRel.
/**
 * Rewrites a {@link LogicalAggregate}.
 *
 * @param rel Aggregate to rewrite
 */
public Frame decorrelateRel(LogicalAggregate rel) {
  if (rel.getGroupType() != Aggregate.Group.SIMPLE) {
    throw new AssertionError(Bug.CALCITE_461_FIXED);
  }
  // Aggregate itself should not reference cor vars.
  assert !cm.mapRefRelToCorVar.containsKey(rel);
  final RelNode oldInput = rel.getInput();
  final Frame frame = getInvoke(oldInput, rel);
  if (frame == null) {
    // If input has not been rewritten, do not rewrite this rel.
    return null;
  }
  final RelNode newInput = frame.r;
  // Map from newInput output positions to the output positions of the new Project below.
  Map<Integer, Integer> mapNewInputToProjOutputPos = Maps.newHashMap();
  final int oldGroupKeyCount = rel.getGroupSet().cardinality();
  // Project projects the original expressions,
  // plus any correlated variables the input wants to pass along.
  final List<Pair<RexNode, String>> projects = Lists.newArrayList();
  List<RelDataTypeField> newInputOutput = newInput.getRowType().getFieldList();
  int newPos = 0;
  // oldInput has the original group by keys in the front.
  final NavigableMap<Integer, RexLiteral> omittedConstants = new TreeMap<>();
  for (int i = 0; i < oldGroupKeyCount; i++) {
    final RexLiteral constant = projectedLiteral(newInput, i);
    if (constant != null) {
      // Exclude constants. Aggregate({true}) occurs because Aggregate({})
      // would generate 1 row even when applied to an empty table.
      omittedConstants.put(i, constant);
      continue;
    }
    int newInputPos = frame.oldToNewOutputPos.get(i);
    projects.add(RexInputRef.of2(newInputPos, newInputOutput));
    mapNewInputToProjOutputPos.put(newInputPos, newPos);
    newPos++;
  }
  final SortedMap<Correlation, Integer> mapCorVarToOutputPos = new TreeMap<>();
  if (!frame.corVarOutputPos.isEmpty()) {
    // Project the correlated variables the input wants to pass along,
    // right after the group keys (nominally starting at position oldGroupKeyCount).
    for (Map.Entry<Correlation, Integer> entry : frame.corVarOutputPos.entrySet()) {
      projects.add(RexInputRef.of2(entry.getValue(), newInputOutput));
      mapCorVarToOutputPos.put(entry.getKey(), newPos);
      mapNewInputToProjOutputPos.put(entry.getValue(), newPos);
      newPos++;
    }
  }
  // add the remaining fields
  final int newGroupKeyCount = newPos;
  for (int i = 0; i < newInputOutput.size(); i++) {
    if (!mapNewInputToProjOutputPos.containsKey(i)) {
      projects.add(RexInputRef.of2(i, newInputOutput));
      mapNewInputToProjOutputPos.put(i, newPos);
      newPos++;
    }
  }
  assert newPos == newInputOutput.size();
  // This Project will be what the old input maps to,
  // replacing any previous mapping from old input.
  RelNode newProject = RelOptUtil.createProject(newInput, projects, false);
  // update mappings:
  // oldInput ----> newInput
  //
  //                newProject
  //                   |
  // oldInput ----> newInput
  //
  // is transformed to
  //
  // oldInput ----> newProject
  //                   |
  //                newInput
  Map<Integer, Integer> combinedMap = Maps.newHashMap();
  for (Integer oldInputPos : frame.oldToNewOutputPos.keySet()) {
    combinedMap.put(oldInputPos, mapNewInputToProjOutputPos.get(frame.oldToNewOutputPos.get(oldInputPos)));
  }
  register(oldInput, newProject, combinedMap, mapCorVarToOutputPos);
  // now it's time to rewrite the Aggregate
  final ImmutableBitSet newGroupSet = ImmutableBitSet.range(newGroupKeyCount);
  List<AggregateCall> newAggCalls = Lists.newArrayList();
  List<AggregateCall> oldAggCalls = rel.getAggCallList();
  int oldInputOutputFieldCount = rel.getGroupSet().cardinality();
  int newInputOutputFieldCount = newGroupSet.cardinality();
  int i = -1;
  for (AggregateCall oldAggCall : oldAggCalls) {
    ++i;
    List<Integer> oldAggArgs = oldAggCall.getArgList();
    List<Integer> aggArgs = Lists.newArrayList();
    // Map each aggregate argument to its new position via combinedMap.
    for (int oldPos : oldAggArgs) {
      aggArgs.add(combinedMap.get(oldPos));
    }
    final int filterArg = oldAggCall.filterArg < 0 ? oldAggCall.filterArg : combinedMap.get(oldAggCall.filterArg);
    newAggCalls.add(oldAggCall.adaptTo(newProject, aggArgs, filterArg, oldGroupKeyCount, newGroupKeyCount));
    // The old to new output position mapping will be the same as that
    // of newProject, plus any aggregates that the oldAgg produces.
    combinedMap.put(oldInputOutputFieldCount + i, newInputOutputFieldCount + i);
  }
  relBuilder.push(LogicalAggregate.create(newProject, false, newGroupSet, null, newAggCalls));
  if (!omittedConstants.isEmpty()) {
    final List<RexNode> postProjects = new ArrayList<>(relBuilder.fields());
    for (Map.Entry<Integer, RexLiteral> entry : omittedConstants.descendingMap().entrySet()) {
      postProjects.add(entry.getKey() + frame.corVarOutputPos.size(), entry.getValue());
    }
    relBuilder.project(postProjects);
  }
  // Aggregate does not change input ordering, so correlated variables
  // will be located at the same position as in the input newProject.
  return register(rel, relBuilder.build(), combinedMap, mapCorVarToOutputPos);
}
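Much of decorrelateRel is position bookkeeping: frame.oldToNewOutputPos maps old input fields to newInput fields, mapNewInputToProjOutputPos maps newInput fields to the new Project's output, and combinedMap is their composition. A minimal sketch of that composition follows, using plain JDK maps and made-up positions rather than the Flink/Calcite types; the MappingCompositionSketch class and its sample data are assumptions for illustration only.

// Sketch: compose two position maps the same way the loop that fills combinedMap does.
import java.util.HashMap;
import java.util.Map;

public class MappingCompositionSketch {
  public static void main(String[] args) {
    Map<Integer, Integer> oldToNewOutputPos = new HashMap<>();
    oldToNewOutputPos.put(0, 2);   // old field 0 now lives at newInput position 2
    oldToNewOutputPos.put(1, 0);   // old field 1 now lives at newInput position 0

    Map<Integer, Integer> mapNewInputToProjOutputPos = new HashMap<>();
    mapNewInputToProjOutputPos.put(2, 0);   // newInput position 2 is projected first
    mapNewInputToProjOutputPos.put(0, 1);   // newInput position 0 is projected second

    // combinedMap: old input position -> output position of the new Project.
    Map<Integer, Integer> combinedMap = new HashMap<>();
    for (Integer oldPos : oldToNewOutputPos.keySet()) {
      combinedMap.put(oldPos, mapNewInputToProjOutputPos.get(oldToNewOutputPos.get(oldPos)));
    }
    System.out.println(combinedMap);   // {0=0, 1=1}
  }
}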
Use of org.apache.calcite.rex.RexLiteral in project druid by druid-io.
Class GroupByRules, method translateAggregateCall.
/**
 * Translate an AggregateCall to Druid equivalents.
 *
 * @return translated aggregation, or null if translation failed.
 */
private static Aggregation translateAggregateCall(final PlannerContext plannerContext, final RowSignature sourceRowSignature,
    final Project project, final AggregateCall call, final DruidOperatorTable operatorTable,
    final List<Aggregation> existingAggregations, final int aggNumber, final boolean approximateCountDistinct) {
  final List<DimFilter> filters = Lists.newArrayList();
  final List<String> rowOrder = sourceRowSignature.getRowOrder();
  final String name = aggOutputName(aggNumber);
  final SqlKind kind = call.getAggregation().getKind();
  final SqlTypeName outputType = call.getType().getSqlTypeName();
  if (call.filterArg >= 0) {
    // AGG(xxx) FILTER(WHERE yyy)
    if (project == null) {
      // We need some kind of projection to support filtered aggregations.
      return null;
    }
    final RexNode expression = project.getChildExps().get(call.filterArg);
    final DimFilter filter = Expressions.toFilter(operatorTable, plannerContext, sourceRowSignature, expression);
    if (filter == null) {
      return null;
    }
    filters.add(filter);
  }
  if (kind == SqlKind.COUNT && call.getArgList().isEmpty()) {
    // COUNT(*)
    return Aggregation.create(new CountAggregatorFactory(name)).filter(makeFilter(filters, sourceRowSignature));
  } else if (kind == SqlKind.COUNT && call.isDistinct()) {
    // COUNT(DISTINCT x)
    return approximateCountDistinct
        ? APPROX_COUNT_DISTINCT.toDruidAggregation(name, sourceRowSignature, operatorTable, plannerContext,
            existingAggregations, project, call, makeFilter(filters, sourceRowSignature))
        : null;
  } else if (kind == SqlKind.COUNT || kind == SqlKind.SUM || kind == SqlKind.SUM0
      || kind == SqlKind.MIN || kind == SqlKind.MAX || kind == SqlKind.AVG) {
    // Built-in agg, not distinct, not COUNT(*)
    boolean forceCount = false;
    final FieldOrExpression input;
    final int inputField = Iterables.getOnlyElement(call.getArgList());
    final RexNode rexNode = Expressions.fromFieldAccess(sourceRowSignature, project, inputField);
    final FieldOrExpression foe = FieldOrExpression.fromRexNode(operatorTable, plannerContext, rowOrder, rexNode);
    if (foe != null) {
      input = foe;
    } else if (rexNode.getKind() == SqlKind.CASE && ((RexCall) rexNode).getOperands().size() == 3) {
      // Possibly a CASE-style filtered aggregation. Styles supported:
      //   A: SUM(CASE WHEN x = 'foo' THEN cnt END) => operands (x = 'foo', cnt, null)
      //   B: SUM(CASE WHEN x = 'foo' THEN 1 ELSE 0 END) => operands (x = 'foo', 1, 0)
      //   C: COUNT(CASE WHEN x = 'foo' THEN 'dummy' END) => operands (x = 'foo', 'dummy', null)
      // If the null and non-null args are switched, "flip" is set, which negates the filter.
      final RexCall caseCall = (RexCall) rexNode;
      final boolean flip = RexLiteral.isNullLiteral(caseCall.getOperands().get(1))
          && !RexLiteral.isNullLiteral(caseCall.getOperands().get(2));
      final RexNode arg1 = caseCall.getOperands().get(flip ? 2 : 1);
      final RexNode arg2 = caseCall.getOperands().get(flip ? 1 : 2);
      // Operand 1: Filter
      final DimFilter filter = Expressions.toFilter(operatorTable, plannerContext, sourceRowSignature, caseCall.getOperands().get(0));
      if (filter == null) {
        return null;
      } else {
        filters.add(flip ? new NotDimFilter(filter) : filter);
      }
      if (call.getAggregation().getKind() == SqlKind.COUNT && arg1 instanceof RexLiteral
          && !RexLiteral.isNullLiteral(arg1) && RexLiteral.isNullLiteral(arg2)) {
        // Case C
        forceCount = true;
        input = null;
      } else if (call.getAggregation().getKind() == SqlKind.SUM && arg1 instanceof RexLiteral
          && ((Number) RexLiteral.value(arg1)).intValue() == 1 && arg2 instanceof RexLiteral
          && ((Number) RexLiteral.value(arg2)).intValue() == 0) {
        // Case B
        forceCount = true;
        input = null;
      } else if (RexLiteral.isNullLiteral(arg2)) {
        // Maybe case A
        input = FieldOrExpression.fromRexNode(operatorTable, plannerContext, rowOrder, arg1);
        if (input == null) {
          return null;
        }
      } else {
        // Can't translate CASE into a filter.
        return null;
      }
    } else {
      // Can't translate operand.
      return null;
    }
    if (!forceCount) {
      Preconditions.checkNotNull(input, "WTF?! input was null for non-COUNT aggregation");
    }
    if (forceCount || kind == SqlKind.COUNT) {
      // COUNT(x)
      return Aggregation.create(new CountAggregatorFactory(name)).filter(makeFilter(filters, sourceRowSignature));
    } else {
      // Built-in aggregator that is not COUNT.
      final Aggregation retVal;
      final String fieldName = input.getFieldName();
      final String expression = input.getExpression();
      final boolean isLong = SqlTypeName.INT_TYPES.contains(outputType)
          || SqlTypeName.TIMESTAMP == outputType || SqlTypeName.DATE == outputType;
      if (kind == SqlKind.SUM || kind == SqlKind.SUM0) {
        retVal = isLong
            ? Aggregation.create(new LongSumAggregatorFactory(name, fieldName, expression))
            : Aggregation.create(new DoubleSumAggregatorFactory(name, fieldName, expression));
      } else if (kind == SqlKind.MIN) {
        retVal = isLong
            ? Aggregation.create(new LongMinAggregatorFactory(name, fieldName, expression))
            : Aggregation.create(new DoubleMinAggregatorFactory(name, fieldName, expression));
      } else if (kind == SqlKind.MAX) {
        retVal = isLong
            ? Aggregation.create(new LongMaxAggregatorFactory(name, fieldName, expression))
            : Aggregation.create(new DoubleMaxAggregatorFactory(name, fieldName, expression));
      } else if (kind == SqlKind.AVG) {
        final String sumName = aggInternalName(aggNumber, "sum");
        final String countName = aggInternalName(aggNumber, "count");
        final AggregatorFactory sum = isLong
            ? new LongSumAggregatorFactory(sumName, fieldName, expression)
            : new DoubleSumAggregatorFactory(sumName, fieldName, expression);
        final AggregatorFactory count = new CountAggregatorFactory(countName);
        retVal = Aggregation.create(
            ImmutableList.of(sum, count),
            new ArithmeticPostAggregator(name, "quotient", ImmutableList.<PostAggregator>of(
                new FieldAccessPostAggregator(null, sumName),
                new FieldAccessPostAggregator(null, countName))));
      } else {
        // Not reached.
        throw new ISE("WTF?! Kind[%s] got into the built-in aggregator path somehow?!", kind);
      }
      return retVal.filter(makeFilter(filters, sourceRowSignature));
    }
  } else {
    // Not a built-in aggregator, check operator table.
    final SqlAggregator sqlAggregator = operatorTable.lookupAggregator(call.getAggregation().getName());
    return sqlAggregator != null
        ? sqlAggregator.toDruidAggregation(name, sourceRowSignature, operatorTable, plannerContext,
            existingAggregations, project, call, makeFilter(filters, sourceRowSignature))
        : null;
  }
}
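The CASE branch above distinguishes three shapes of filtered aggregation (cases A, B and C in its comment). The sketch below reproduces that classification over plain strings instead of RexNodes; the CaseAggregationSketch class, its Shape enum, and the string encoding of literals ("null", "1", "0") are assumptions made for illustration and are not part of Druid.

// Sketch: classify CASE WHEN f THEN thenArg ELSE elseArg END under a given aggregation kind.
public class CaseAggregationSketch {

  enum Shape { COUNT_OF_DUMMY /* case C */, SUM_OF_ONE_ZERO /* case B */, FILTERED_AGG /* case A */, UNSUPPORTED }

  static Shape classify(String aggKind, String thenArg, String elseArg) {
    // If the null and non-null arms are switched, "flip" is set; the real code then
    // negates the extracted filter before adding it.
    boolean flip = thenArg.equals("null") && !elseArg.equals("null");
    String arg1 = flip ? elseArg : thenArg;   // the "value" arm
    String arg2 = flip ? thenArg : elseArg;   // the "null"/"0" arm
    if (aggKind.equals("COUNT") && !arg1.equals("null") && arg2.equals("null")) {
      return Shape.COUNT_OF_DUMMY;            // COUNT(CASE WHEN f THEN 'dummy' END)
    } else if (aggKind.equals("SUM") && arg1.equals("1") && arg2.equals("0")) {
      return Shape.SUM_OF_ONE_ZERO;           // SUM(CASE WHEN f THEN 1 ELSE 0 END)
    } else if (arg2.equals("null")) {
      return Shape.FILTERED_AGG;              // SUM(CASE WHEN f THEN cnt END)
    } else {
      return Shape.UNSUPPORTED;
    }
  }

  public static void main(String[] args) {
    System.out.println(classify("SUM", "1", "0"));             // SUM_OF_ONE_ZERO
    System.out.println(classify("SUM", "cnt", "null"));        // FILTERED_AGG
    System.out.println(classify("COUNT", "'dummy'", "null"));  // COUNT_OF_DUMMY
  }
}

Both case B and case C collapse to a plain count under the extracted filter, which is why the real method sets forceCount and later emits a CountAggregatorFactory for them.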
Use of org.apache.calcite.rex.RexLiteral in project druid by druid-io.
Class FloorExtractionOperator, method convert.
@Override
public RowExtraction convert(final DruidOperatorTable operatorTable, final PlannerContext plannerContext,
    final List<String> rowOrder, final RexNode expression) {
  final RexCall call = (RexCall) expression;
  final RexNode arg = call.getOperands().get(0);
  final RowExtraction rex = Expressions.toRowExtraction(operatorTable, plannerContext, rowOrder, arg);
  if (rex == null) {
    return null;
  } else if (call.getOperands().size() == 1) {
    // FLOOR(expr)
    return RowExtraction.of(rex.getColumn(), ExtractionFns.compose(new BucketExtractionFn(1.0, 0.0), rex.getExtractionFn()));
  } else if (call.getOperands().size() == 2) {
    // FLOOR(expr TO timeUnit)
    final RexLiteral flag = (RexLiteral) call.getOperands().get(1);
    final TimeUnitRange timeUnit = (TimeUnitRange) flag.getValue();
    return applyTimestampFloor(rex, TimeUnits.toQueryGranularity(timeUnit, plannerContext.getTimeZone()));
  } else {
    // WTF? FLOOR with 3 arguments?
    return null;
  }
}
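For the two-argument form, the TimeUnitRange stored inside the RexLiteral is turned into a query granularity; the one-argument form becomes a numeric bucketing step. A rough sketch of both branches follows, using plain strings. The FloorSketch class, its unit-to-ISO-period table, and the bucket/timestamp_floor output strings are illustrative stand-ins for BucketExtractionFn and TimeUnits.toQueryGranularity, not Druid's actual mapping code.

// Sketch: FLOOR(expr) vs FLOOR(expr TO unit), modeled as string rewrites.
import java.util.HashMap;
import java.util.Map;

public class FloorSketch {
  private static final Map<String, String> UNIT_TO_PERIOD = new HashMap<>();
  static {
    UNIT_TO_PERIOD.put("SECOND", "PT1S");
    UNIT_TO_PERIOD.put("MINUTE", "PT1M");
    UNIT_TO_PERIOD.put("HOUR", "PT1H");
    UNIT_TO_PERIOD.put("DAY", "P1D");
    UNIT_TO_PERIOD.put("MONTH", "P1M");
    UNIT_TO_PERIOD.put("YEAR", "P1Y");
  }

  static String convert(String column, String... timeUnit) {
    if (timeUnit.length == 0) {
      // FLOOR(expr): bucket the numeric value with size 1 and offset 0.
      return "bucket(" + column + ", size=1.0, offset=0.0)";
    } else if (timeUnit.length == 1) {
      // FLOOR(expr TO unit): floor the timestamp to a period granularity.
      String period = UNIT_TO_PERIOD.get(timeUnit[0]);
      return period == null ? null : "timestamp_floor(" + column + ", " + period + ")";
    } else {
      return null;   // unexpected arity, mirror the "3 arguments" branch
    }
  }

  public static void main(String[] args) {
    System.out.println(convert("m1"));             // bucket(m1, size=1.0, offset=0.0)
    System.out.println(convert("__time", "DAY"));  // timestamp_floor(__time, P1D)
  }
}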
Use of org.apache.calcite.rex.RexLiteral in project herddb by diennea.
Class CalcitePlanner, method planValues.
private PlannerOp planValues(EnumerableValues op) {
  List<List<CompiledSQLExpression>> tuples = new ArrayList<>(op.getTuples().size());
  RelDataType rowType = op.getRowType();
  List<RelDataTypeField> fieldList = rowType.getFieldList();
  Column[] columns = new Column[fieldList.size()];
  for (ImmutableList<RexLiteral> tuple : op.getTuples()) {
    List<CompiledSQLExpression> row = new ArrayList<>(tuple.size());
    for (RexLiteral node : tuple) {
      CompiledSQLExpression exp = SQLExpressionCompiler.compileExpression(node);
      row.add(exp);
    }
    tuples.add(row);
  }
  int i = 0;
  String[] fieldNames = new String[fieldList.size()];
  for (RelDataTypeField field : fieldList) {
    Column col = Column.column(field.getName(), convertToHerdType(field.getType()));
    fieldNames[i] = field.getName();
    columns[i++] = col;
  }
  return new ValuesOp(manager.getNodeId(), fieldNames, columns, tuples);
}
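planValues compiles every RexLiteral of every VALUES row once, up front, and pairs the resulting rows with column metadata derived from the row type. Below is a minimal stand-alone sketch of the same shape of work, using JDK suppliers in place of CompiledSQLExpression and plain strings for the schema; the ValuesSketch class and its sample rows are hypothetical, not herddb code.

// Sketch: turn a VALUES list of literal rows into per-row lists of "compiled" constants
// plus a parallel array of field names.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Supplier;

public class ValuesSketch {
  public static void main(String[] args) {
    List<List<Object>> valuesRows = Arrays.asList(
        Arrays.asList(1, "a"),
        Arrays.asList(2, "b"));
    String[] fieldNames = {"id", "name"};

    // "Compile" each literal into a supplier that just returns the constant,
    // mirroring how each RexLiteral is compiled once into a constant expression.
    List<List<Supplier<Object>>> tuples = new ArrayList<>(valuesRows.size());
    for (List<Object> row : valuesRows) {
      List<Supplier<Object>> compiled = new ArrayList<>(row.size());
      for (Object literal : row) {
        compiled.add(() -> literal);
      }
      tuples.add(compiled);
    }

    System.out.println(fieldNames.length + " columns, " + tuples.size() + " rows");
    System.out.println(tuples.get(0).get(1).get());   // a
  }
}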