Use of org.apache.druid.sql.calcite.planner.PlannerContext in project druid by druid-io.
From the class ExternalTableScanRuleTest, method testMatchesWhenExternalScanUnsupported:
@Test
public void testMatchesWhenExternalScanUnsupported() throws ValidationException
{
  // The actual query isn't important for this test.
  final PlannerContext plannerContext = PlannerContext.create(
      "DUMMY",
      CalciteTests.createOperatorTable(),
      CalciteTests.createExprMacroTable(),
      CalciteTests.getJsonMapper(),
      new PlannerConfig(),
      new DruidSchemaCatalog(
          EasyMock.createMock(SchemaPlus.class),
          ImmutableMap.of(
              "druid", new NamedDruidSchema(EasyMock.createMock(DruidSchema.class), "druid"),
              NamedViewSchema.NAME, new NamedViewSchema(EasyMock.createMock(ViewSchema.class))
          )
      ),
      ImmutableMap.of()
  );
  plannerContext.setQueryMaker(
      CalciteTests.createMockQueryMakerFactory(
          EasyMock.createMock(QuerySegmentWalker.class),
          EasyMock.createMock(QueryRunnerFactoryConglomerate.class)
      ).buildForSelect(EasyMock.createMock(RelRoot.class), plannerContext)
  );

  ExternalTableScanRule rule = new ExternalTableScanRule(plannerContext);
  rule.matches(EasyMock.createMock(RelOptRuleCall.class));

  // "suported" (sic): the typo is part of the asserted message itself.
  Assert.assertEquals(
      "SQL query requires scanning external datasources that is not suported.",
      plannerContext.getPlanningError()
  );
}
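A minimal sketch of the same pattern, assuming (as the test name implies, though it is not asserted above) that matches() returns false and records a planning error when external scans are unsupported:

  ExternalTableScanRule rule = new ExternalTableScanRule(plannerContext);
  // A declined rule reports a human-readable reason instead of throwing.
  Assert.assertFalse(rule.matches(EasyMock.createMock(RelOptRuleCall.class)));
  Assert.assertNotNull(plannerContext.getPlanningError());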
Use of org.apache.druid.sql.calcite.planner.PlannerContext in project druid by druid-io.
From the class DruidJoinRuleTest, method setup:
@Before
public void setup()
{
  PlannerContext plannerContext = Mockito.mock(PlannerContext.class);
  druidJoinRule = DruidJoinRule.instance(plannerContext);
}
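The mock stubs nothing, so any PlannerContext accessor the rule calls returns Mockito's defaults (null, 0, false). A hypothetical extension, not part of the original test, that pins down the session time zone consulted via plannerContext.getTimeZone() in the operator conversions later on this page (assumes the joda-time DateTimeZone return type):

  // Hypothetical stub: make getTimeZone() deterministic for the test.
  Mockito.when(plannerContext.getTimeZone()).thenReturn(DateTimeZone.UTC);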
Use of org.apache.druid.sql.calcite.planner.PlannerContext in project druid by druid-io.
From the class ArrayConcatSqlAggregator, method toDruidAggregation:
@Nullable
@Override
public Aggregation toDruidAggregation(
    PlannerContext plannerContext,
    RowSignature rowSignature,
    VirtualColumnRegistry virtualColumnRegistry,
    RexBuilder rexBuilder,
    String name,
    AggregateCall aggregateCall,
    Project project,
    List<Aggregation> existingAggregations,
    boolean finalizeAggregations
)
{
  final List<RexNode> arguments = aggregateCall
      .getArgList()
      .stream()
      .map(i -> Expressions.fromFieldAccess(rowSignature, project, i))
      .collect(Collectors.toList());

  Integer maxSizeBytes = null;
  if (arguments.size() > 1) {
    RexNode maxBytes = arguments.get(1);
    if (!maxBytes.isA(SqlKind.LITERAL)) {
      // maxBytes must be a literal in order to plan.
      return null;
    }
    maxSizeBytes = ((Number) RexLiteral.value(maxBytes)).intValue();
  }

  final DruidExpression arg = Expressions.toDruidExpression(plannerContext, rowSignature, arguments.get(0));
  if (arg == null) {
    // Guard: the argument could not be translated to a native expression.
    return null;
  }
  final ExprMacroTable macroTable = plannerContext.getExprMacroTable();

  final String fieldName;
  final ColumnType druidType = Calcites.getValueTypeForRelDataTypeFull(aggregateCall.getType());
  if (druidType == null || !druidType.isArray()) {
    // The aggregation result must be an array type.
    return null;
  }

  // Initial accumulator: an empty array of the output type, e.g. ARRAY<STRING>[].
  final String initialValue = ExpressionType.fromColumnTypeStrict(druidType).asTypeString() + "[]";

  if (arg.isDirectColumnAccess()) {
    fieldName = arg.getDirectColumn();
  } else {
    VirtualColumn vc = virtualColumnRegistry.getOrCreateVirtualColumnForExpression(plannerContext, arg, druidType);
    fieldName = vc.getOutputName();
  }

  if (aggregateCall.isDistinct()) {
    // DISTINCT: fold with array_set_add_all so duplicates collapse into a set.
    return Aggregation.create(
        new ExpressionLambdaAggregatorFactory(
            name,
            ImmutableSet.of(fieldName),
            null,
            initialValue,
            null,
            true,
            false,
            false,
            StringUtils.format("array_set_add_all(\"__acc\", \"%s\")", fieldName),
            StringUtils.format("array_set_add_all(\"__acc\", \"%s\")", name),
            null,
            null,
            maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null,
            macroTable
        )
    );
  } else {
    // Non-DISTINCT: fold with array_concat, preserving duplicates.
    return Aggregation.create(
        new ExpressionLambdaAggregatorFactory(
            name,
            ImmutableSet.of(fieldName),
            null,
            initialValue,
            null,
            true,
            false,
            false,
            StringUtils.format("array_concat(\"__acc\", \"%s\")", fieldName),
            StringUtils.format("array_concat(\"__acc\", \"%s\")", name),
            null,
            null,
            maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null,
            macroTable
        )
    );
  }
}
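For a concrete sense of the lambda expressions built above, a small illustration with a hypothetical input column "tags" and output name "a0" (both names are assumptions, not from the source):

  // Per-row fold: append the row's array to the accumulator.
  String fold = StringUtils.format("array_concat(\"__acc\", \"%s\")", "tags");
  // -> array_concat("__acc", "tags")
  // Combine: merge partial accumulators produced on different segments.
  String combine = StringUtils.format("array_concat(\"__acc\", \"%s\")", "a0");
  // -> array_concat("__acc", "a0")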
Use of org.apache.druid.sql.calcite.planner.PlannerContext in project druid by druid-io.
From the class TimeArithmeticOperatorConversion, method toDruidExpression:
@Override
public DruidExpression toDruidExpression(
    final PlannerContext plannerContext,
    final RowSignature rowSignature,
    final RexNode rexNode
)
{
  final RexCall call = (RexCall) rexNode;
  final List<RexNode> operands = call.getOperands();
  if (operands.size() != 2) {
    throw new IAE("Expected 2 args, got %s", operands.size());
  }

  final RexNode leftRexNode = operands.get(0);
  final RexNode rightRexNode = operands.get(1);

  final DruidExpression leftExpr = Expressions.toDruidExpression(plannerContext, rowSignature, leftRexNode);
  final DruidExpression rightExpr = Expressions.toDruidExpression(plannerContext, rowSignature, rightRexNode);
  if (leftExpr == null || rightExpr == null) {
    return null;
  }

  final ColumnType outputType = Calcites.getColumnTypeForRelDataType(rexNode.getType());

  if (rightRexNode.getType().getFamily() == SqlTypeFamily.INTERVAL_YEAR_MONTH) {
    // Period is a value in months. Shift by whole months in the session time zone.
    return DruidExpression.ofExpression(
        outputType,
        DruidExpression.functionCall("timestamp_shift"),
        ImmutableList.of(
            leftExpr,
            rightExpr.map(
                simpleExtraction -> null,
                expression -> rightRexNode.isA(SqlKind.LITERAL)
                              ? StringUtils.format("'P%sM'", RexLiteral.value(rightRexNode))
                              : StringUtils.format("concat('P', %s, 'M')", expression)
            ),
            DruidExpression.ofLiteral(ColumnType.LONG, DruidExpression.numberLiteral(direction > 0 ? 1 : -1)),
            DruidExpression.ofStringLiteral(plannerContext.getTimeZone().getID())
        )
    );
  } else if (rightRexNode.getType().getFamily() == SqlTypeFamily.INTERVAL_DAY_TIME) {
    // Period is a value in milliseconds. Time zone can be ignored for fixed-length intervals.
    return DruidExpression.ofExpression(
        outputType,
        (args) -> StringUtils.format(
            "(%s %s %s)",
            args.get(0).getExpression(),
            direction > 0 ? "+" : "-",
            args.get(1).getExpression()
        ),
        ImmutableList.of(leftExpr, rightExpr)
    );
  } else if ((leftRexNode.getType().getFamily() == SqlTypeFamily.TIMESTAMP
              || leftRexNode.getType().getFamily() == SqlTypeFamily.DATE)
             && (rightRexNode.getType().getFamily() == SqlTypeFamily.TIMESTAMP
                 || rightRexNode.getType().getFamily() == SqlTypeFamily.DATE)) {
    // Calcite represents both TIMESTAMP - INTERVAL and TIMESTAMPDIFF (TIMESTAMP - TIMESTAMP)
    // with a MINUS_DATE operator, so we must tell which case we're in by checking the type of
    // the second argument.
    Preconditions.checkState(direction < 0, "Time arithmetic requires direction < 0");
    if (call.getType().getFamily() == SqlTypeFamily.INTERVAL_YEAR_MONTH) {
      return DruidExpression.ofExpression(
          outputType,
          DruidExpression.functionCall("subtract_months"),
          ImmutableList.of(
              leftExpr,
              rightExpr,
              DruidExpression.ofStringLiteral(plannerContext.getTimeZone().getID())
          )
      );
    } else {
      return DruidExpression.ofExpression(
          outputType,
          (args) -> StringUtils.format("(%s - %s)", args.get(0).getExpression(), args.get(1).getExpression()),
          ImmutableList.of(leftExpr, rightExpr)
      );
    }
  } else {
    // Shouldn't happen if subclasses are behaving.
    throw new ISE("Got unexpected period type family[%s]", rightRexNode.getType().getFamily());
  }
}
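As a rough sketch of the INTERVAL_YEAR_MONTH branch's output (hypothetical inputs: column "__time", a literal interval of 3 months, direction > 0, UTC session time zone; none of these appear in the source):

  // Assumed result, not generated by running the code:
  //   timestamp_shift("__time", 'P3M', 1, 'UTC')
  String period = StringUtils.format("'P%sM'", 3);  // literal month count -> 'P3M'
  // Non-literal intervals instead build the period string at query time:
  //   concat('P', <expr>, 'M')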
Use of org.apache.druid.sql.calcite.planner.PlannerContext in project druid by druid-io.
From the class QuantileSqlAggregator, method toDruidAggregation:
@Nullable
@Override
public Aggregation toDruidAggregation(
    final PlannerContext plannerContext,
    final RowSignature rowSignature,
    final VirtualColumnRegistry virtualColumnRegistry,
    final RexBuilder rexBuilder,
    final String name,
    final AggregateCall aggregateCall,
    final Project project,
    final List<Aggregation> existingAggregations,
    final boolean finalizeAggregations
)
{
  final DruidExpression input = Aggregations.toDruidExpressionForNumericAggregator(
      plannerContext,
      rowSignature,
      Expressions.fromFieldAccess(rowSignature, project, aggregateCall.getArgList().get(0))
  );
  if (input == null) {
    return null;
  }

  final AggregatorFactory aggregatorFactory;
  final String histogramName = StringUtils.format("%s:agg", name);

  final RexNode probabilityArg = Expressions.fromFieldAccess(rowSignature, project, aggregateCall.getArgList().get(1));
  if (!probabilityArg.isA(SqlKind.LITERAL)) {
    // Probability must be a literal in order to plan.
    return null;
  }
  final float probability = ((Number) RexLiteral.value(probabilityArg)).floatValue();

  final int resolution;
  if (aggregateCall.getArgList().size() >= 3) {
    final RexNode resolutionArg = Expressions.fromFieldAccess(rowSignature, project, aggregateCall.getArgList().get(2));
    if (!resolutionArg.isA(SqlKind.LITERAL)) {
      // Resolution must be a literal in order to plan.
      return null;
    }
    resolution = ((Number) RexLiteral.value(resolutionArg)).intValue();
  } else {
    resolution = ApproximateHistogram.DEFAULT_HISTOGRAM_SIZE;
  }

  final int numBuckets = ApproximateHistogram.DEFAULT_BUCKET_SIZE;
  final float lowerLimit = Float.NEGATIVE_INFINITY;
  final float upperLimit = Float.POSITIVE_INFINITY;

  // Look for an existing matching aggregatorFactory, so one histogram can serve
  // multiple quantile calls over the same input.
  for (final Aggregation existing : existingAggregations) {
    for (AggregatorFactory factory : existing.getAggregatorFactories()) {
      if (factory instanceof ApproximateHistogramAggregatorFactory) {
        final ApproximateHistogramAggregatorFactory theFactory = (ApproximateHistogramAggregatorFactory) factory;

        // Check input for equivalence.
        final boolean inputMatches;
        final DruidExpression virtualInput =
            virtualColumnRegistry.findVirtualColumnExpressions(theFactory.requiredFields())
                                 .stream()
                                 .findFirst()
                                 .orElse(null);
        if (virtualInput == null) {
          inputMatches = input.isDirectColumnAccess()
                         && input.getDirectColumn().equals(theFactory.getFieldName());
        } else {
          inputMatches = virtualInput.equals(input);
        }

        final boolean matches = inputMatches
                                && theFactory.getResolution() == resolution
                                && theFactory.getNumBuckets() == numBuckets
                                && theFactory.getLowerLimit() == lowerLimit
                                && theFactory.getUpperLimit() == upperLimit;
        if (matches) {
          // Found an existing one. Reuse it; only a new post-aggregator is needed.
          return Aggregation.create(
              ImmutableList.of(),
              new QuantilePostAggregator(name, factory.getName(), probability)
          );
        }
      }
    }
  }

  // No existing match found. Create a new one.
  if (input.isDirectColumnAccess()) {
    if (rowSignature.getColumnType(input.getDirectColumn()).map(type -> type.is(ValueType.COMPLEX)).orElse(false)) {
      // Input is a pre-built histogram column: fold histograms together.
      aggregatorFactory = new ApproximateHistogramFoldingAggregatorFactory(
          histogramName, input.getDirectColumn(), resolution, numBuckets, lowerLimit, upperLimit, false
      );
    } else {
      aggregatorFactory = new ApproximateHistogramAggregatorFactory(
          histogramName, input.getDirectColumn(), resolution, numBuckets, lowerLimit, upperLimit, false
      );
    }
  } else {
    final String virtualColumnName = virtualColumnRegistry.getOrCreateVirtualColumnForExpression(input, ColumnType.FLOAT);
    aggregatorFactory = new ApproximateHistogramAggregatorFactory(
        histogramName, virtualColumnName, resolution, numBuckets, lowerLimit, upperLimit, false
    );
  }

  return Aggregation.create(
      ImmutableList.of(aggregatorFactory),
      new QuantilePostAggregator(name, histogramName, probability)
  );
}
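A hedged sketch of what the fall-through path produces, for a hypothetical query taking the 0.95 quantile of a numeric column "latency" with output name "p95" (all names and values are assumptions):

  // Histogram aggregator feeding a quantile post-aggregator; "p95:agg" mirrors
  // the "%s:agg" naming convention used above.
  AggregatorFactory histogram = new ApproximateHistogramAggregatorFactory(
      "p95:agg",
      "latency",
      ApproximateHistogram.DEFAULT_HISTOGRAM_SIZE,  // resolution
      ApproximateHistogram.DEFAULT_BUCKET_SIZE,     // numBuckets
      Float.NEGATIVE_INFINITY,                      // lowerLimit
      Float.POSITIVE_INFINITY,                      // upperLimit
      false
  );
  PostAggregator quantile = new QuantilePostAggregator("p95", "p95:agg", 0.95f);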