Use of org.apache.calcite.rel.core.Project in project druid by druid-io.
The class DruidQuery, method computeSorting:
@Nonnull
private static Sorting computeSorting(
    final PartialDruidQuery partialQuery,
    final PlannerContext plannerContext,
    final RowSignature rowSignature,
    @Nullable final VirtualColumnRegistry virtualColumnRegistry
) {
  final Sort sort = Preconditions.checkNotNull(partialQuery.getSort(), "sort");
  final Project sortProject = partialQuery.getSortProject();
  // Extract limit and offset.
  final OffsetLimit offsetLimit = OffsetLimit.fromSort(sort);
  // Extract orderBy column specs.
  final List<OrderByColumnSpec> orderBys = new ArrayList<>(sort.getChildExps().size());
  for (int sortKey = 0; sortKey < sort.getChildExps().size(); sortKey++) {
    final RexNode sortExpression = sort.getChildExps().get(sortKey);
    final RelFieldCollation collation = sort.getCollation().getFieldCollations().get(sortKey);
    final OrderByColumnSpec.Direction direction;
    final StringComparator comparator;
    if (collation.getDirection() == RelFieldCollation.Direction.ASCENDING) {
      direction = OrderByColumnSpec.Direction.ASCENDING;
    } else if (collation.getDirection() == RelFieldCollation.Direction.DESCENDING) {
      direction = OrderByColumnSpec.Direction.DESCENDING;
    } else {
      throw new ISE("Don't know what to do with direction[%s]", collation.getDirection());
    }
    final SqlTypeName sortExpressionType = sortExpression.getType().getSqlTypeName();
    if (SqlTypeName.NUMERIC_TYPES.contains(sortExpressionType)
        || SqlTypeName.TIMESTAMP == sortExpressionType
        || SqlTypeName.DATE == sortExpressionType) {
      comparator = StringComparators.NUMERIC;
    } else {
      comparator = StringComparators.LEXICOGRAPHIC;
    }
    if (sortExpression.isA(SqlKind.INPUT_REF)) {
      final RexInputRef ref = (RexInputRef) sortExpression;
      final String fieldName = rowSignature.getColumnName(ref.getIndex());
      orderBys.add(new OrderByColumnSpec(fieldName, direction, comparator));
    } else {
      // We don't support sorting by anything other than refs which actually appear in the query result.
      throw new CannotBuildQueryException(sort, sortExpression);
    }
  }
  // Extract any post-sort Projection.
  final Projection projection;
  if (sortProject == null) {
    projection = null;
  } else if (partialQuery.getAggregate() == null) {
    if (virtualColumnRegistry == null) {
      throw new ISE("Must provide 'virtualColumnRegistry' for pre-aggregation Projection!");
    }
    projection = Projection.preAggregation(sortProject, plannerContext, rowSignature, virtualColumnRegistry);
  } else {
    projection = Projection.postAggregation(sortProject, plannerContext, rowSignature, "s");
  }
  return Sorting.create(orderBys, offsetLimit, projection);
}
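The limit and offset here come straight off the Calcite Sort node: Sort exposes nullable offset and fetch expressions, which for plain OFFSET/LIMIT clauses are RexLiterals. The helper below is a minimal sketch of reading them directly; it is illustrative only and is not Druid's OffsetLimit.fromSort, which may handle more cases.

import org.apache.calcite.rel.core.Sort;
import org.apache.calcite.rex.RexLiteral;

final class OffsetLimitSketch {
  // Returns the OFFSET value, defaulting to 0 when the Sort has no offset expression.
  static long offsetOf(Sort sort) {
    if (sort.offset == null) {
      return 0L;
    }
    return ((Number) RexLiteral.value(sort.offset)).longValue();
  }

  // Returns the LIMIT (fetch) value, or null when the query is unlimited.
  static Long fetchOf(Sort sort) {
    if (sort.fetch == null) {
      return null;
    }
    return ((Number) RexLiteral.value(sort.fetch)).longValue();
  }
}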
Use of org.apache.calcite.rel.core.Project in project druid by druid-io.
The class PartialDruidQuery, method withSelectProject:
public PartialDruidQuery withSelectProject(final Project newSelectProject) {
  validateStage(Stage.SELECT_PROJECT);
  // Possibly merge together two projections.
  final Project theProject;
  if (selectProject == null) {
    theProject = newSelectProject;
  } else {
    final List<RexNode> newProjectRexNodes = RelOptUtil.pushPastProject(newSelectProject.getProjects(), selectProject);
    if (RexUtil.isIdentity(newProjectRexNodes, selectProject.getInput().getRowType())) {
      // The projection is gone.
      theProject = null;
    } else {
      final RelBuilder relBuilder = builderSupplier.get();
      relBuilder.push(selectProject.getInput());
      relBuilder.project(newProjectRexNodes, newSelectProject.getRowType().getFieldNames(), true);
      theProject = (Project) relBuilder.build();
    }
  }
  return new PartialDruidQuery(
      builderSupplier, scan, whereFilter, theProject, aggregate,
      aggregateProject, havingFilter, sort, sortProject
  );
}
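The merge relies on RelOptUtil.pushPastProject, which substitutes the bottom projection's expressions into the top projection's input refs. For example, if the existing selectProject computes [x + 1 AS a, y AS b] and the new projection is [$1, $0], the composed expressions are [y, x + 1]. A condensed sketch of that step (an illustrative helper, not part of Druid):

import java.util.List;
import javax.annotation.Nullable;
import org.apache.calcite.plan.RelOptUtil;
import org.apache.calcite.rel.core.Project;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexUtil;

final class ProjectMergeSketch {
  // Compose a Project stacked on another Project into a single expression list over the
  // bottom project's input; returns null when the composition is just the identity mapping.
  @Nullable
  static List<RexNode> compose(Project top, Project bottom) {
    final List<RexNode> composed = RelOptUtil.pushPastProject(top.getProjects(), bottom);
    return RexUtil.isIdentity(composed, bottom.getInput().getRowType()) ? null : composed;
  }
}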
Use of org.apache.calcite.rel.core.Project in project druid by druid-io.
The class DruidJoinRule, method onMatch:
@Override
public void onMatch(RelOptRuleCall call) {
  final Join join = call.rel(0);
  final DruidRel<?> left = call.rel(1);
  final DruidRel<?> right = call.rel(2);
  final RexBuilder rexBuilder = join.getCluster().getRexBuilder();
  final DruidRel<?> newLeft;
  final DruidRel<?> newRight;
  final Filter leftFilter;
  final List<RexNode> newProjectExprs = new ArrayList<>();
  // Already verified to be present in "matches", so just call "get".
  // Can't be final, because we're going to reassign it up to a couple of times.
  ConditionAnalysis conditionAnalysis =
      analyzeCondition(join.getCondition(), join.getLeft().getRowType(), right).get();
  final boolean isLeftDirectAccessPossible = enableLeftScanDirect && (left instanceof DruidQueryRel);
  if (left.getPartialDruidQuery().stage() == PartialDruidQuery.Stage.SELECT_PROJECT
      && (isLeftDirectAccessPossible || left.getPartialDruidQuery().getWhereFilter() == null)) {
    // Swap the left-side projection above the join, so the left side is a simple scan or mapping. This helps us
    // avoid subqueries.
    final RelNode leftScan = left.getPartialDruidQuery().getScan();
    final Project leftProject = left.getPartialDruidQuery().getSelectProject();
    leftFilter = left.getPartialDruidQuery().getWhereFilter();
    // Left-side projection expressions rewritten to be on top of the join.
    newProjectExprs.addAll(leftProject.getProjects());
    newLeft = left.withPartialQuery(PartialDruidQuery.create(leftScan));
    conditionAnalysis = conditionAnalysis.pushThroughLeftProject(leftProject);
  } else {
    // Leave left as-is. Write input refs that do nothing.
    for (int i = 0; i < left.getRowType().getFieldCount(); i++) {
      newProjectExprs.add(rexBuilder.makeInputRef(join.getRowType().getFieldList().get(i).getType(), i));
    }
    newLeft = left;
    leftFilter = null;
  }
  if (right.getPartialDruidQuery().stage() == PartialDruidQuery.Stage.SELECT_PROJECT
      && right.getPartialDruidQuery().getWhereFilter() == null
      && !right.getPartialDruidQuery().getSelectProject().isMapping()
      && conditionAnalysis.onlyUsesMappingsFromRightProject(right.getPartialDruidQuery().getSelectProject())) {
    // Swap the right-side projection above the join, so the right side is a simple scan or mapping. This helps us
    // avoid subqueries.
    final RelNode rightScan = right.getPartialDruidQuery().getScan();
    final Project rightProject = right.getPartialDruidQuery().getSelectProject();
    // Right-side projection expressions rewritten to be on top of the join.
    for (final RexNode rexNode : RexUtil.shift(rightProject.getProjects(), newLeft.getRowType().getFieldCount())) {
      if (join.getJoinType().generatesNullsOnRight()) {
        newProjectExprs.add(makeNullableIfLiteral(rexNode, rexBuilder));
      } else {
        newProjectExprs.add(rexNode);
      }
    }
    newRight = right.withPartialQuery(PartialDruidQuery.create(rightScan));
    conditionAnalysis = conditionAnalysis.pushThroughRightProject(rightProject);
  } else {
    // Leave right as-is. Write input refs that do nothing.
    for (int i = 0; i < right.getRowType().getFieldCount(); i++) {
      newProjectExprs.add(
          rexBuilder.makeInputRef(
              join.getRowType().getFieldList().get(left.getRowType().getFieldCount() + i).getType(),
              newLeft.getRowType().getFieldCount() + i
          )
      );
    }
    newRight = right;
  }
  // Druid join written on top of the new left and right sides.
  final DruidJoinQueryRel druidJoin = DruidJoinQueryRel.create(
      join.copy(
          join.getTraitSet(),
          conditionAnalysis.getCondition(rexBuilder),
          newLeft,
          newRight,
          join.getJoinType(),
          join.isSemiJoinDone()
      ),
      leftFilter,
      left.getPlannerContext()
  );
  final RelBuilder relBuilder = call
      .builder()
      .push(druidJoin)
      .project(RexUtil.fixUp(rexBuilder, newProjectExprs, RelOptUtil.getFieldTypeList(druidJoin.getRowType())));
  call.transformTo(relBuilder.build());
}
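The index arithmetic is the subtle part of this rule: once a side's projection is lifted above the join, its expressions must address the join's combined row, which lays out all left fields followed by all right fields. RexUtil.shift does that re-indexing for the right side. A minimal, illustrative sketch of just that step:

import java.util.List;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexUtil;

final class JoinShiftSketch {
  // Expressions written against the right input alone refer to fields $0..$n-1; above the
  // join they must refer to $leftFieldCount..$leftFieldCount+n-1 instead.
  static List<RexNode> liftRightExprsAboveJoin(List<RexNode> rightExprs, int leftFieldCount) {
    return RexUtil.shift(rightExprs, leftFieldCount);
  }
}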
Use of org.apache.calcite.rel.core.Project in project druid by druid-io.
The class ArrayConcatSqlAggregator, method toDruidAggregation:
@Nullable
@Override
public Aggregation toDruidAggregation(
    PlannerContext plannerContext, RowSignature rowSignature, VirtualColumnRegistry virtualColumnRegistry,
    RexBuilder rexBuilder, String name, AggregateCall aggregateCall, Project project,
    List<Aggregation> existingAggregations, boolean finalizeAggregations
) {
  final List<RexNode> arguments = aggregateCall.getArgList().stream()
      .map(i -> Expressions.fromFieldAccess(rowSignature, project, i))
      .collect(Collectors.toList());
  Integer maxSizeBytes = null;
  if (arguments.size() > 1) {
    RexNode maxBytes = arguments.get(1);
    if (!maxBytes.isA(SqlKind.LITERAL)) {
      // maxBytes must be a literal
      return null;
    }
    maxSizeBytes = ((Number) RexLiteral.value(maxBytes)).intValue();
  }
  final DruidExpression arg = Expressions.toDruidExpression(plannerContext, rowSignature, arguments.get(0));
  final ExprMacroTable macroTable = plannerContext.getExprMacroTable();
  final String fieldName;
  final ColumnType druidType = Calcites.getValueTypeForRelDataTypeFull(aggregateCall.getType());
  if (druidType == null || !druidType.isArray()) {
    // must be an array
    return null;
  }
  final String initialvalue = ExpressionType.fromColumnTypeStrict(druidType).asTypeString() + "[]";
  if (arg.isDirectColumnAccess()) {
    fieldName = arg.getDirectColumn();
  } else {
    VirtualColumn vc = virtualColumnRegistry.getOrCreateVirtualColumnForExpression(plannerContext, arg, druidType);
    fieldName = vc.getOutputName();
  }
  if (aggregateCall.isDistinct()) {
    return Aggregation.create(
        new ExpressionLambdaAggregatorFactory(
            name, ImmutableSet.of(fieldName), null, initialvalue, null, true, false, false,
            StringUtils.format("array_set_add_all(\"__acc\", \"%s\")", fieldName),
            StringUtils.format("array_set_add_all(\"__acc\", \"%s\")", name),
            null, null,
            maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null,
            macroTable
        )
    );
  } else {
    return Aggregation.create(
        new ExpressionLambdaAggregatorFactory(
            name, ImmutableSet.of(fieldName), null, initialvalue, null, true, false, false,
            StringUtils.format("array_concat(\"__acc\", \"%s\")", fieldName),
            StringUtils.format("array_concat(\"__acc\", \"%s\")", name),
            null, null,
            maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null,
            macroTable
        )
    );
  }
}
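In this snippet the Project parameter is only consulted through Expressions.fromFieldAccess, which resolves each aggregate argument index through the projection beneath the Aggregate when one is present. The optional second argument has to be a planner-time literal because its value is baked into the aggregator spec; a minimal sketch of that guard (an illustrative helper, not Druid code):

import javax.annotation.Nullable;
import org.apache.calcite.rex.RexLiteral;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlKind;

final class LiteralArgSketch {
  // Returns the literal's int value, or null when the argument is not a constant,
  // in which case the aggregator translation gives up and returns null as well.
  @Nullable
  static Integer literalIntOrNull(RexNode argument) {
    if (!argument.isA(SqlKind.LITERAL)) {
      return null;
    }
    return ((Number) RexLiteral.value(argument)).intValue();
  }
}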
Use of org.apache.calcite.rel.core.Project in project hazelcast by hazelcast.
The class ProjectIntoScanLogicalRule, method onMatch:
@Override
public void onMatch(RelOptRuleCall call) {
  Project project = call.rel(0);
  FullScanLogicalRel scan = call.rel(1);
  HazelcastTable originalTable = OptUtils.extractHazelcastTable(scan);
  List<RexNode> newProjects = inlineExpressions(originalTable.getProjects(), project.getProjects());
  HazelcastRelOptTable convertedTable = OptUtils.createRelTable(
      (HazelcastRelOptTable) scan.getTable(),
      originalTable.withProject(newProjects, project.getRowType()),
      scan.getCluster().getTypeFactory()
  );
  FullScanLogicalRel rel = new FullScanLogicalRel(
      scan.getCluster(), OptUtils.toLogicalConvention(scan.getTraitSet()), convertedTable,
      scan.eventTimePolicyProvider(), scan.watermarkedColumnIndex()
  );
  call.transformTo(rel);
}
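For context, what makes call.rel(0) a Project and call.rel(1) the scan is the rule's operand tree, declared when the rule is constructed. Below is a hedged sketch of that wiring using Calcite's classic operand API; the generic TableScan stands in for Hazelcast's FullScanLogicalRel, and the actual rule's configuration may differ.

import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.rel.core.Project;
import org.apache.calcite.rel.core.TableScan;

final class ProjectIntoScanSketch extends RelOptRule {
  ProjectIntoScanSketch() {
    // Match a Project whose only input is a scan: rel(0) is the Project, rel(1) the scan.
    super(operand(Project.class, operand(TableScan.class, none())), "ProjectIntoScanSketch");
  }

  @Override
  public void onMatch(RelOptRuleCall call) {
    final Project project = call.rel(0);
    final TableScan scan = call.rel(1);
    // A real rule would call call.transformTo(...) with a scan that carries the inlined
    // projection, as ProjectIntoScanLogicalRule.onMatch does above.
  }
}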