Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project hazelcast by hazelcast.
The class ProjectIntoScanLogicalRule, method onMatch:
@Override
public void onMatch(RelOptRuleCall call) {
    Project project = call.rel(0);
    FullScanLogicalRel scan = call.rel(1);
    // Inline the Project's expressions over the table's existing projections.
    HazelcastTable originalTable = OptUtils.extractHazelcastTable(scan);
    List<RexNode> newProjects = inlineExpressions(originalTable.getProjects(), project.getProjects());
    HazelcastRelOptTable convertedTable = OptUtils.createRelTable(
            (HazelcastRelOptTable) scan.getTable(),
            originalTable.withProject(newProjects, project.getRowType()),
            scan.getCluster().getTypeFactory());
    // Replace Project + scan with a single scan over the projected table.
    FullScanLogicalRel rel = new FullScanLogicalRel(scan.getCluster(),
            OptUtils.toLogicalConvention(scan.getTraitSet()), convertedTable,
            scan.eventTimePolicyProvider(), scan.watermarkedColumnIndex());
    call.transformTo(rel);
}
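For orientation, here is a hypothetical sketch of how such a rule's operand tree could be declared so that call.rel(0) resolves to the Project and call.rel(1) to the FullScanLogicalRel. This is not the actual Hazelcast source; plain Calcite package names are used instead of the vendored prefix in the heading, and FullScanLogicalRel is assumed to be on the classpath.

import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.rel.core.Project;

// Hypothetical sketch, not the Hazelcast implementation: the operand tree below is
// what makes call.rel(0) the Project and call.rel(1) the scan inside onMatch.
final class ProjectIntoScanLogicalRuleSketch extends RelOptRule {

    ProjectIntoScanLogicalRuleSketch() {
        // Match a Project whose single input is a FullScanLogicalRel with no further children.
        super(operand(Project.class, operand(FullScanLogicalRel.class, none())),
                ProjectIntoScanLogicalRuleSketch.class.getSimpleName());
    }

    @Override
    public void onMatch(RelOptRuleCall call) {
        // Body as shown in the snippet above.
    }
}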
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project druid by druid-io.
The class QuantileSqlAggregator, method toDruidAggregation:
@Nullable
@Override
public Aggregation toDruidAggregation(
    final PlannerContext plannerContext,
    final RowSignature rowSignature,
    final VirtualColumnRegistry virtualColumnRegistry,
    final RexBuilder rexBuilder,
    final String name,
    final AggregateCall aggregateCall,
    final Project project,
    final List<Aggregation> existingAggregations,
    final boolean finalizeAggregations
)
{
  final DruidExpression input = Aggregations.toDruidExpressionForNumericAggregator(
      plannerContext,
      rowSignature,
      Expressions.fromFieldAccess(rowSignature, project, aggregateCall.getArgList().get(0))
  );
  if (input == null) {
    return null;
  }

  final AggregatorFactory aggregatorFactory;
  final String histogramName = StringUtils.format("%s:agg", name);
  final RexNode probabilityArg = Expressions.fromFieldAccess(
      rowSignature, project, aggregateCall.getArgList().get(1)
  );

  if (!probabilityArg.isA(SqlKind.LITERAL)) {
    // Probability must be a literal in order to plan.
    return null;
  }

  final float probability = ((Number) RexLiteral.value(probabilityArg)).floatValue();

  final int resolution;
  if (aggregateCall.getArgList().size() >= 3) {
    final RexNode resolutionArg = Expressions.fromFieldAccess(
        rowSignature, project, aggregateCall.getArgList().get(2)
    );
    if (!resolutionArg.isA(SqlKind.LITERAL)) {
      // Resolution must be a literal in order to plan.
      return null;
    }
    resolution = ((Number) RexLiteral.value(resolutionArg)).intValue();
  } else {
    resolution = ApproximateHistogram.DEFAULT_HISTOGRAM_SIZE;
  }

  final int numBuckets = ApproximateHistogram.DEFAULT_BUCKET_SIZE;
  final float lowerLimit = Float.NEGATIVE_INFINITY;
  final float upperLimit = Float.POSITIVE_INFINITY;

  // Look for existing matching aggregatorFactory.
  for (final Aggregation existing : existingAggregations) {
    for (AggregatorFactory factory : existing.getAggregatorFactories()) {
      if (factory instanceof ApproximateHistogramAggregatorFactory) {
        final ApproximateHistogramAggregatorFactory theFactory =
            (ApproximateHistogramAggregatorFactory) factory;

        // Check input for equivalence.
        final boolean inputMatches;
        final DruidExpression virtualInput =
            virtualColumnRegistry.findVirtualColumnExpressions(theFactory.requiredFields())
                                 .stream()
                                 .findFirst()
                                 .orElse(null);

        if (virtualInput == null) {
          inputMatches = input.isDirectColumnAccess()
                         && input.getDirectColumn().equals(theFactory.getFieldName());
        } else {
          inputMatches = virtualInput.equals(input);
        }

        final boolean matches = inputMatches
                                && theFactory.getResolution() == resolution
                                && theFactory.getNumBuckets() == numBuckets
                                && theFactory.getLowerLimit() == lowerLimit
                                && theFactory.getUpperLimit() == upperLimit;

        if (matches) {
          // Found existing one. Use this.
          return Aggregation.create(
              ImmutableList.of(),
              new QuantilePostAggregator(name, factory.getName(), probability)
          );
        }
      }
    }
  }

  // No existing match found. Create a new one.
  if (input.isDirectColumnAccess()) {
    if (rowSignature.getColumnType(input.getDirectColumn())
                    .map(type -> type.is(ValueType.COMPLEX))
                    .orElse(false)) {
      aggregatorFactory = new ApproximateHistogramFoldingAggregatorFactory(
          histogramName, input.getDirectColumn(), resolution, numBuckets, lowerLimit, upperLimit, false
      );
    } else {
      aggregatorFactory = new ApproximateHistogramAggregatorFactory(
          histogramName, input.getDirectColumn(), resolution, numBuckets, lowerLimit, upperLimit, false
      );
    }
  } else {
    final String virtualColumnName =
        virtualColumnRegistry.getOrCreateVirtualColumnForExpression(input, ColumnType.FLOAT);
    aggregatorFactory = new ApproximateHistogramAggregatorFactory(
        histogramName, virtualColumnName, resolution, numBuckets, lowerLimit, upperLimit, false
    );
  }

  return Aggregation.create(
      ImmutableList.of(aggregatorFactory),
      new QuantilePostAggregator(name, histogramName, probability)
  );
}
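As a rough illustration of what this planning step produces, the hedged sketch below assembles the same aggregator/post-aggregator pair by hand for a hypothetical quantile call over a plain numeric column "m1" at probability 0.95. The column name and the output name "a0" are placeholders, and only the constructors and constants already used in the snippet above are called.

// Hypothetical values: "a0" and "m1" are made up, not taken from the Druid source.
final String name = "a0";
final String histogramName = StringUtils.format("%s:agg", name); // "a0:agg"

final AggregatorFactory factory = new ApproximateHistogramAggregatorFactory(
    histogramName,
    "m1",                                         // direct column access case
    ApproximateHistogram.DEFAULT_HISTOGRAM_SIZE,  // default resolution (no third argument)
    ApproximateHistogram.DEFAULT_BUCKET_SIZE,
    Float.NEGATIVE_INFINITY,
    Float.POSITIVE_INFINITY,
    false
);

final Aggregation expected = Aggregation.create(
    ImmutableList.of(factory),
    new QuantilePostAggregator(name, histogramName, 0.95f)
);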
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project hive by apache.
The class PlanModifierForASTConv, method validSortChild:
private static boolean validSortChild(HiveSortLimit sortNode) {
    boolean validChild = true;
    RelNode child = sortNode.getInput();
    if (!(child instanceof Project)
        && !(HiveCalciteUtil.pureLimitRelNode(sortNode) && HiveCalciteUtil.pureOrderRelNode(child))) {
      validChild = false;
    }
    return validChild;
}
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project hive by apache.
The class PlanModifierForASTConv, method replaceEmptyGroupAggr:
private static void replaceEmptyGroupAggr(final RelNode rel, RelNode parent) {
    // If this function is called, the parent should only include constants.
    List<RexNode> exps = parent instanceof Project ? ((Project) parent).getProjects() : Collections.emptyList();
    for (RexNode rexNode : exps) {
      if (!rexNode.accept(new HiveCalciteUtil.ConstantFinder())) {
        throw new RuntimeException("We expect " + parent.toString() + " to contain only constants. However, "
            + rexNode.toString() + " is " + rexNode.getKind());
      }
    }
    HiveAggregate oldAggRel = (HiveAggregate) rel;
    RelDataTypeFactory typeFactory = oldAggRel.getCluster().getTypeFactory();
    RelDataType longType = TypeConverter.convert(TypeInfoFactory.longTypeInfo, typeFactory);
    RelDataType intType = TypeConverter.convert(TypeInfoFactory.intTypeInfo, typeFactory);
    // Create the dummy aggregation.
    SqlAggFunction countFn = SqlFunctionConverter.getCalciteAggFn("count", false, ImmutableList.of(intType), longType);
    // TODO: Using 0 might be wrong; might need to walk down to find the
    // proper index of a dummy.
    List<Integer> argList = ImmutableList.of(0);
    AggregateCall dummyCall = new AggregateCall(countFn, false, argList, longType, null);
    Aggregate newAggRel = oldAggRel.copy(oldAggRel.getTraitSet(), oldAggRel.getInput(),
        oldAggRel.indicator, oldAggRel.getGroupSet(), oldAggRel.getGroupSets(), ImmutableList.of(dummyCall));
    RelNode select = introduceDerivedTable(newAggRel);
    parent.replaceInput(0, select);
}
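A hedged, hypothetical caller-side check pins down when this rewrite is meant to apply: an aggregate with no group keys and no aggregate calls, so the parent Project can only produce constants. The helper name below is invented; only standard Calcite Aggregate accessors are used.

import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.Aggregate;

// Hypothetical helper, not the Hive source: true when "rel" is the empty-group,
// empty-call aggregate that replaceEmptyGroupAggr is designed to rewrite.
static boolean isEmptyGroupAggr(RelNode rel) {
    if (!(rel instanceof Aggregate)) {
      return false;
    }
    Aggregate agg = (Aggregate) rel;
    return agg.getGroupCount() == 0 && agg.getAggCallList().isEmpty();
}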
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project hive by apache.
The class PlanModifierUtil, method fixTopOBSchema:
protected static void fixTopOBSchema(final RelNode rootRel, Pair<RelNode, RelNode> topSelparentPair,
      List<FieldSchema> resultSchema, boolean replaceProject) throws CalciteSemanticException {
    if (!(topSelparentPair.getKey() instanceof Sort)
        || !HiveCalciteUtil.orderRelNode(topSelparentPair.getKey())) {
      return;
    }
    HiveSortLimit obRel = (HiveSortLimit) topSelparentPair.getKey();
    Project obChild = (Project) topSelparentPair.getValue();
    if (obChild.getRowType().getFieldCount() <= resultSchema.size()) {
      return;
    }
    RelDataType rt = obChild.getRowType();
    @SuppressWarnings({ "unchecked", "rawtypes" })
    Set<Integer> collationInputRefs = new HashSet(RelCollations.ordinals(obRel.getCollation()));
    ImmutableMap.Builder<Integer, RexNode> inputRefToCallMapBldr = ImmutableMap.builder();
    for (int i = resultSchema.size(); i < rt.getFieldCount(); i++) {
      if (collationInputRefs.contains(i)) {
        RexNode obyExpr = obChild.getProjects().get(i);
        if (obyExpr instanceof RexCall) {
          LOG.debug("Old RexCall : " + obyExpr);
          obyExpr = adjustOBSchema((RexCall) obyExpr, obChild, resultSchema);
          LOG.debug("New RexCall : " + obyExpr);
        }
        inputRefToCallMapBldr.put(i, obyExpr);
      }
    }
    ImmutableMap<Integer, RexNode> inputRefToCallMap = inputRefToCallMapBldr.build();
    if ((obChild.getRowType().getFieldCount() - inputRefToCallMap.size()) != resultSchema.size()) {
      LOG.error(generateInvalidSchemaMessage(obChild, resultSchema, inputRefToCallMap.size()));
      throw new CalciteSemanticException("Result Schema didn't match Optimized Op Tree Schema");
    }
    if (replaceProject) {
      // This removes order-by only expressions from the projections.
      HiveProject replacementProjectRel = HiveProject.create(obChild.getInput(),
          obChild.getProjects().subList(0, resultSchema.size()),
          obChild.getRowType().getFieldNames().subList(0, resultSchema.size()));
      obRel.replaceInput(0, replacementProjectRel);
    }
    obRel.setInputRefToCallMap(inputRefToCallMap);
}
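To make the order-by-only bookkeeping concrete, the sketch below isolates the collation logic from the loop above into a small standalone helper. The class and method names are invented, not part of Hive; it assumes only the Calcite RelCollations API that the snippet already uses.

import java.util.HashSet;
import java.util.Set;
import org.apache.calcite.rel.RelCollation;
import org.apache.calcite.rel.RelCollations;

// Hypothetical helper, not the Hive source: given the sort collation, the result-schema
// width, and the Project width, return the projected indexes that exist only to feed the
// sort, i.e. the ones fixTopOBSchema records in inputRefToCallMap.
final class OrderByOnlyRefs {
    static Set<Integer> compute(RelCollation collation, int resultSchemaSize, int projectFieldCount) {
        Set<Integer> refs = new HashSet<>(RelCollations.ordinals(collation));
        refs.removeIf(i -> i < resultSchemaSize || i >= projectFieldCount);
        return refs;
    }
}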