use of org.apache.druid.segment.column.RowSignature in project druid by druid-io.
the class CalciteInsertDmlTest method testInsertWithPartitionedBy.
@Test
public void testInsertWithPartitionedBy() {
  // Test correctness of the query when only PARTITIONED BY clause is present
  RowSignature targetRowSignature = RowSignature.builder()
                                                .add("__time", ColumnType.LONG)
                                                .add("floor_m1", ColumnType.FLOAT)
                                                .add("dim1", ColumnType.STRING)
                                                .build();
  testInsertQuery()
      .sql("INSERT INTO druid.dst SELECT __time, FLOOR(m1) as floor_m1, dim1 FROM foo PARTITIONED BY TIME_FLOOR(__time, 'PT1H')")
      .expectTarget("dst", targetRowSignature)
      .expectResources(dataSourceRead("foo"), dataSourceWrite("dst"))
      .expectQuery(
          newScanQueryBuilder()
              .dataSource("foo")
              .intervals(querySegmentSpec(Filtration.eternity()))
              .columns("__time", "dim1", "v0")
              .virtualColumns(expressionVirtualColumn("v0", "floor(\"m1\")", ColumnType.FLOAT))
              .context(queryContextWithGranularity(Granularities.HOUR))
              .build()
      )
      .verify();
}
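For orientation, here is a minimal standalone sketch (not taken from the test above) of building and inspecting a target RowSignature with the builder API used in that test; the class and main method are illustrative scaffolding only.

import java.util.Optional;

import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.RowSignature;

public class RowSignatureBuilderSketch {
  public static void main(String[] args) {
    // Build a signature matching the INSERT target above: __time, floor_m1, dim1.
    RowSignature target = RowSignature.builder()
                                      .add("__time", ColumnType.LONG)
                                      .add("floor_m1", ColumnType.FLOAT)
                                      .add("dim1", ColumnType.STRING)
                                      .build();

    // Column order is preserved and types can be looked up by name.
    System.out.println(target.getColumnNames());              // [__time, floor_m1, dim1]
    Optional<ColumnType> type = target.getColumnType("floor_m1");
    System.out.println(type.orElse(null));                    // the type registered for floor_m1 (FLOAT)
  }
}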
use of org.apache.druid.segment.column.RowSignature in project druid by druid-io.
the class DruidJoinQueryRel method toDruidQuery.
@Override
public DruidQuery toDruidQuery(final boolean finalizeAggregations) {
  final DruidRel<?> leftDruidRel = (DruidRel<?>) left;
  final DruidQuery leftQuery = Preconditions.checkNotNull(leftDruidRel.toDruidQuery(false), "leftQuery");
  final RowSignature leftSignature = leftQuery.getOutputRowSignature();
  final DataSource leftDataSource;
  final DruidRel<?> rightDruidRel = (DruidRel<?>) right;
  final DruidQuery rightQuery = Preconditions.checkNotNull(rightDruidRel.toDruidQuery(false), "rightQuery");
  final RowSignature rightSignature = rightQuery.getOutputRowSignature();
  final DataSource rightDataSource;
  if (computeLeftRequiresSubquery(leftDruidRel)) {
    leftDataSource = new QueryDataSource(leftQuery.getQuery());
    if (leftFilter != null) {
      throw new ISE("Filter on left table is supposed to be null if left child is a query source");
    }
  } else {
    leftDataSource = leftQuery.getDataSource();
  }
  if (computeRightRequiresSubquery(rightDruidRel)) {
    rightDataSource = new QueryDataSource(rightQuery.getQuery());
  } else {
    rightDataSource = rightQuery.getDataSource();
  }
  final Pair<String, RowSignature> prefixSignaturePair = computeJoinRowSignature(leftSignature, rightSignature);
  VirtualColumnRegistry virtualColumnRegistry = VirtualColumnRegistry.create(
      prefixSignaturePair.rhs,
      getPlannerContext().getExprMacroTable()
  );
  getPlannerContext().setJoinExpressionVirtualColumnRegistry(virtualColumnRegistry);
  // Generate the condition for this join as a Druid expression.
  final DruidExpression condition = Expressions.toDruidExpression(
      getPlannerContext(),
      prefixSignaturePair.rhs,
      joinRel.getCondition()
  );
  // Unset the registry to avoid any VC registry leaks in case there are multiple Druid queries for the SQL.
  // This should be fixed soon with changes to the SqlOperatorConversion interface and the Expressions bridge class.
  getPlannerContext().setJoinExpressionVirtualColumnRegistry(null);
  // The null check below is defensive and also quiets static code analysis.
  if (condition == null) {
    throw new CannotBuildQueryException(joinRel, joinRel.getCondition());
  }
  return partialQuery.build(
      JoinDataSource.create(
          leftDataSource,
          rightDataSource,
          prefixSignaturePair.lhs,
          condition.getExpression(),
          toDruidJoinType(joinRel.getJoinType()),
          getDimFilter(getPlannerContext(), leftSignature, leftFilter),
          getPlannerContext().getExprMacroTable()
      ),
      prefixSignaturePair.rhs,
      getPlannerContext(),
      getCluster().getRexBuilder(),
      finalizeAggregations,
      virtualColumnRegistry
  );
}
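To make the prefixed join signature easier to picture, here is a hypothetical, simplified sketch of the kind of combined signature computeJoinRowSignature produces. The joinSignature helper and the example columns are illustrative assumptions, not the actual Druid method, which additionally picks a prefix that cannot collide with any left-side column name.

import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.RowSignature;

public class JoinSignatureSketch {
  // Illustrative only: left columns keep their names, right columns get a prefix so the sides cannot collide.
  static RowSignature joinSignature(RowSignature left, RowSignature right, String rightPrefix) {
    RowSignature.Builder builder = RowSignature.builder();
    for (String column : left.getColumnNames()) {
      builder.add(column, left.getColumnType(column).orElse(null));
    }
    for (String column : right.getColumnNames()) {
      builder.add(rightPrefix + column, right.getColumnType(column).orElse(null));
    }
    return builder.build();
  }

  public static void main(String[] args) {
    RowSignature left = RowSignature.builder()
                                    .add("__time", ColumnType.LONG)
                                    .add("dim1", ColumnType.STRING)
                                    .build();
    RowSignature right = RowSignature.builder()
                                     .add("k", ColumnType.STRING)
                                     .add("v", ColumnType.LONG)
                                     .build();
    // A prefix such as "j0." keeps the right-side columns distinct from the left-side ones.
    System.out.println(joinSignature(left, right, "j0.").getColumnNames()); // [__time, dim1, j0.k, j0.v]
  }
}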
use of org.apache.druid.segment.column.RowSignature in project druid by druid-io.
the class ExternalTableMacro method apply.
@Override
public TranslatableTable apply(final List<Object> arguments) {
  try {
    final InputSource inputSource = jsonMapper.readValue((String) arguments.get(0), InputSource.class);
    final InputFormat inputFormat = jsonMapper.readValue((String) arguments.get(1), InputFormat.class);
    final RowSignature signature = jsonMapper.readValue((String) arguments.get(2), RowSignature.class);
    return new DruidTable(new ExternalDataSource(inputSource, inputFormat, signature), signature, jsonMapper, false, false);
  } catch (JsonProcessingException e) {
    throw new RuntimeException(e);
  }
}
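As a minimal sketch of the JSON round trip the signature argument goes through, assuming Druid's DefaultObjectMapper and that RowSignature serializes cleanly with it; the column names here are made up.

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.jackson.DefaultObjectMapper;
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.RowSignature;

public class ExternalSignatureSketch {
  public static void main(String[] args) throws Exception {
    // Druid's own mapper; a bare ObjectMapper may be missing modules that Druid types rely on.
    ObjectMapper jsonMapper = new DefaultObjectMapper();

    RowSignature signature = RowSignature.builder()
                                         .add("ts", ColumnType.LONG)
                                         .add("page", ColumnType.STRING)
                                         .build();

    // Serialize and parse the signature again, mirroring how the macro reads its third argument.
    String json = jsonMapper.writeValueAsString(signature);
    RowSignature parsed = jsonMapper.readValue(json, RowSignature.class);
    System.out.println(parsed.equals(signature)); // expected: true
  }
}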
use of org.apache.druid.segment.column.RowSignature in project druid by druid-io.
the class TimeFloorOperatorConversion method toTimestampFloorOrCeilArgs.
/**
* Function that converts SQL TIME_FLOOR or TIME_CEIL args to Druid expression "timestamp_floor" or "timestamp_ceil"
* args. This function is necessary mainly because the handling of origin and timezone must take the SQL context
* timezone into account. It also helps with SQL FLOOR and CEIL by handling TimeUnitRange args.
*/
@Nullable
public static List<DruidExpression> toTimestampFloorOrCeilArgs(
    final PlannerContext plannerContext,
    final RowSignature rowSignature,
    final List<RexNode> operands
) {
  final List<DruidExpression> functionArgs = new ArrayList<>();

  // Timestamp
  functionArgs.add(Expressions.toDruidExpression(plannerContext, rowSignature, operands.get(0)));

  // Period
  final RexNode periodOperand = operands.get(1);
  if (periodOperand.isA(SqlKind.LITERAL) && RexLiteral.value(periodOperand) instanceof TimeUnitRange) {
    // TimeUnitRange literals are used by FLOOR(t TO unit) and CEIL(t TO unit)
    final Period period = TimeUnits.toPeriod((TimeUnitRange) RexLiteral.value(periodOperand));
    if (period == null) {
      // Unrecognized time unit, bail out.
      return null;
    }
    functionArgs.add(DruidExpression.ofStringLiteral(period.toString()));
  } else {
    // Other literal types are used by TIME_FLOOR and TIME_CEIL
    functionArgs.add(Expressions.toDruidExpression(plannerContext, rowSignature, periodOperand));
  }

  // Origin
  functionArgs.add(
      OperatorConversions.getOperandWithDefault(
          operands,
          2,
          operand -> {
            if (operand.isA(SqlKind.LITERAL)) {
              return DruidExpression.ofLiteral(
                  Calcites.getColumnTypeForRelDataType(operand.getType()),
                  DruidExpression.numberLiteral(
                      Calcites.calciteDateTimeLiteralToJoda(operand, plannerContext.getTimeZone()).getMillis()
                  )
              );
            } else {
              return Expressions.toDruidExpression(plannerContext, rowSignature, operand);
            }
          },
          DruidExpression.ofLiteral(null, DruidExpression.nullLiteral())
      )
  );

  // Time zone
  functionArgs.add(
      OperatorConversions.getOperandWithDefault(
          operands,
          3,
          operand -> Expressions.toDruidExpression(plannerContext, rowSignature, operand),
          DruidExpression.ofStringLiteral(plannerContext.getTimeZone().getID())
      )
  );

  return functionArgs.stream().noneMatch(Objects::isNull) ? functionArgs : null;
}
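A small sketch of the period mapping used by the FLOOR(t TO unit) branch above; the import locations for TimeUnits and TimeUnitRange are assumptions inferred from the surrounding code, and the scaffolding class is illustrative only.

import org.apache.calcite.avatica.util.TimeUnitRange;
import org.apache.druid.sql.calcite.expression.TimeUnits;
import org.joda.time.Period;

public class TimeUnitPeriodSketch {
  public static void main(String[] args) {
    // FLOOR(__time TO HOUR) arrives as a TimeUnitRange literal; TimeUnits maps it to an ISO-8601 period.
    Period period = TimeUnits.toPeriod(TimeUnitRange.HOUR);
    System.out.println(period); // expected: PT1H

    // Units without a period mapping return null, which is why toTimestampFloorOrCeilArgs bails out
    // with a null result in that case.
  }
}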
use of org.apache.druid.segment.column.RowSignature in project druid by druid-io.
the class DruidSchemaTest method testSegmentMetadataFallbackType.
@Test
public void testSegmentMetadataFallbackType() {
  RowSignature signature = DruidSchema.analysisToRowSignature(
      new SegmentAnalysis(
          "id",
          ImmutableList.of(Intervals.utc(1L, 2L)),
          ImmutableMap.of(
              "a",
              new ColumnAnalysis(null, ColumnType.STRING.asTypeString(), false, true, 1234, 26, "a", "z", null),
              "count",
              new ColumnAnalysis(null, ColumnType.LONG.asTypeString(), false, true, 1234, 26, "a", "z", null)
          ),
          1234,
          100,
          null,
          null,
          null,
          null
      )
  );
  Assert.assertEquals(
      RowSignature.builder().add("a", ColumnType.STRING).add("count", ColumnType.LONG).build(),
      signature
  );
}
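A follow-up sketch of the equality that the assertEquals above relies on: two RowSignatures compare equal only when their column names, types, and order all match. The class below is illustrative scaffolding only.

import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.RowSignature;

public class SignatureEqualitySketch {
  public static void main(String[] args) {
    RowSignature expected = RowSignature.builder()
                                        .add("a", ColumnType.STRING)
                                        .add("count", ColumnType.LONG)
                                        .build();

    RowSignature sameColumns = RowSignature.builder()
                                           .add("a", ColumnType.STRING)
                                           .add("count", ColumnType.LONG)
                                           .build();
    System.out.println(expected.equals(sameColumns)); // true

    // Column order is part of the signature, so a reordered signature is not equal.
    RowSignature reordered = RowSignature.builder()
                                         .add("count", ColumnType.LONG)
                                         .add("a", ColumnType.STRING)
                                         .build();
    System.out.println(expected.equals(reordered)); // false
  }
}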