Example use of org.apache.druid.segment.column.RowSignature in the druid-io/druid project: class CalciteInsertDmlTest, method testInsertWithPartitionedByAndClusteredBy.
@Test
public void testInsertWithPartitionedByAndClusteredBy() {
  // Verifies that an INSERT carrying both a PARTITIONED BY and a CLUSTERED BY
  // clause plans to the expected scan query with the expected ordering.
  final RowSignature targetRowSignature = RowSignature.builder()
      .add("__time", ColumnType.LONG)
      .add("floor_m1", ColumnType.FLOAT)
      .add("dim1", ColumnType.STRING)
      .build();

  testInsertQuery()
      .sql("INSERT INTO druid.dst SELECT __time, FLOOR(m1) as floor_m1, dim1 FROM foo PARTITIONED BY DAY CLUSTERED BY 2, dim1")
      .expectTarget("dst", targetRowSignature)
      .expectResources(dataSourceRead("foo"), dataSourceWrite("dst"))
      .expectQuery(
          newScanQueryBuilder()
              .dataSource("foo")
              .intervals(querySegmentSpec(Filtration.eternity()))
              .columns("__time", "dim1", "v0")
              .virtualColumns(expressionVirtualColumn("v0", "floor(\"m1\")", ColumnType.FLOAT))
              // CLUSTERED BY 2, dim1: ordinal 2 resolves to the floor_m1 projection ("v0").
              .orderBy(
                  ImmutableList.of(
                      new ScanQuery.OrderBy("v0", ScanQuery.Order.ASCENDING),
                      new ScanQuery.OrderBy("dim1", ScanQuery.Order.ASCENDING)
                  )
              )
              .context(queryContextWithGranularity(Granularities.DAY))
              .build()
      )
      .verify();
}
Example use of org.apache.druid.segment.column.RowSignature in the druid-io/druid project: class CalciteInsertDmlTest, method testPartitionedBySupportedClauses.
@Test
public void testPartitionedBySupportedClauses() {
  // Checks that every supported PARTITIONED BY argument plans with the
  // corresponding segment granularity in the query context.
  final RowSignature targetRowSignature = RowSignature.builder()
      .add("__time", ColumnType.LONG)
      .add("dim1", ColumnType.STRING)
      .build();

  // PARTITIONED BY argument text -> granularity it should resolve to.
  final Map<String, Granularity> partitionedByToGranularity = ImmutableMap.<String, Granularity>builder()
      .put("HOUR", Granularities.HOUR)
      .put("DAY", Granularities.DAY)
      .put("MONTH", Granularities.MONTH)
      .put("YEAR", Granularities.YEAR)
      .put("ALL", Granularities.ALL)
      .put("ALL TIME", Granularities.ALL)
      .put("FLOOR(__time TO QUARTER)", Granularities.QUARTER)
      .put("TIME_FLOOR(__time, 'PT1H')", Granularities.HOUR)
      .build();

  for (Map.Entry<String, Granularity> entry : partitionedByToGranularity.entrySet()) {
    Map<String, Object> queryContext = null;
    try {
      queryContext = ImmutableMap.of(
          DruidSqlInsert.SQL_INSERT_SEGMENT_GRANULARITY,
          queryJsonMapper.writeValueAsString(entry.getValue())
      );
    } catch (JsonProcessingException e) {
      // Serializing a Granularity should never fail; surface it loudly if it does.
      Assert.fail(e.getMessage());
    }

    testInsertQuery()
        .sql(StringUtils.format("INSERT INTO druid.dst SELECT __time, dim1 FROM foo PARTITIONED BY %s", entry.getKey()))
        .expectTarget("dst", targetRowSignature)
        .expectResources(dataSourceRead("foo"), dataSourceWrite("dst"))
        .expectQuery(
            newScanQueryBuilder()
                .dataSource("foo")
                .intervals(querySegmentSpec(Filtration.eternity()))
                .columns("__time", "dim1")
                .context(queryContext)
                .build()
        )
        .verify();

    // Reset so the next iteration is forced to run its own verify().
    didTest = false;
  }

  // All variants verified; mark the test as having run.
  didTest = true;
}
Example use of org.apache.druid.segment.column.RowSignature in the druid-io/druid project: class ObjectWriter, method writeHeader.
/**
 * Writes the header object for a result stream: one field per column, where each
 * field's value is either null (no type info requested) or a nested object holding
 * the requested Druid and/or SQL type names.
 */
static void writeHeader(
    final JsonGenerator jsonGenerator,
    final RelDataType rowType,
    final boolean includeTypes,
    final boolean includeSqlTypes
) throws IOException
{
  final RowSignature signature = RowSignatures.fromRelDataType(rowType.getFieldNames(), rowType);
  final boolean wantTypeInfo = includeTypes || includeSqlTypes;

  jsonGenerator.writeStartObject();

  for (int column = 0; column < signature.size(); column++) {
    jsonGenerator.writeFieldName(signature.getColumnName(column));

    if (wantTypeInfo) {
      jsonGenerator.writeStartObject();
      if (includeTypes) {
        // Druid native type; may be absent for some columns, in which case null is written.
        jsonGenerator.writeStringField(
            ObjectWriter.TYPE_HEADER_NAME,
            signature.getColumnType(column).map(TypeSignature::asTypeString).orElse(null)
        );
      }
      if (includeSqlTypes) {
        jsonGenerator.writeStringField(
            ObjectWriter.SQL_TYPE_HEADER_NAME,
            rowType.getFieldList().get(column).getType().getSqlTypeName().getName()
        );
      }
      jsonGenerator.writeEndObject();
    } else {
      // Keep the field present but typeless so the header shape is stable.
      jsonGenerator.writeNull();
    }
  }

  jsonGenerator.writeEndObject();
}
Example use of org.apache.druid.segment.column.RowSignature in the druid-io/druid project: class TimeseriesQueryRunnerTest, method testTimeseriesWithTimestampResultFieldContextForArrayResponse.
@Test
public void testTimeseriesWithTimestampResultFieldContextForArrayResponse() {
  // Verifies that when CTX_TIMESTAMP_RESULT_FIELD is set, the array-form results
  // carry the timestamp twice (__time plus the named field) and the remaining
  // aggregator/post-aggregator columns line up with the result signature.
  Granularity gran = Granularities.DAY;
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(gran)
      .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .aggregators(
          QueryRunnerTestHelper.ROWS_COUNT,
          QueryRunnerTestHelper.INDEX_DOUBLE_SUM,
          QueryRunnerTestHelper.QUALITY_UNIQUES
      )
      .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
      .descending(descending)
      .context(makeContext(
          ImmutableMap.of(
              TimeseriesQuery.CTX_TIMESTAMP_RESULT_FIELD, TIMESTAMP_RESULT_FIELD_NAME,
              TimeseriesQuery.SKIP_EMPTY_BUCKETS, true
          )
      ))
      .build();
  Assert.assertEquals(TIMESTAMP_RESULT_FIELD_NAME, query.getTimestampResultField());

  QueryToolChest<Result<TimeseriesResultValue>, TimeseriesQuery> toolChest = new TimeseriesQueryQueryToolChest();

  // Signature: __time, the timestamp result field, then the three aggregators
  // and the post-aggregator, in declaration order.
  RowSignature rowSignature = toolChest.resultArraySignature(query);
  Assert.assertNotNull(rowSignature);
  List<String> columnNames = rowSignature.getColumnNames();
  Assert.assertNotNull(columnNames);
  Assert.assertEquals(6, columnNames.size());
  Assert.assertEquals("__time", columnNames.get(0));
  Assert.assertEquals(TIMESTAMP_RESULT_FIELD_NAME, columnNames.get(1));
  Assert.assertEquals("rows", columnNames.get(2));
  Assert.assertEquals("index", columnNames.get(3));
  Assert.assertEquals("uniques", columnNames.get(4));
  Assert.assertEquals("addRowsIndexConstant", columnNames.get(5));

  Sequence<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query));
  Sequence<Object[]> resultsAsArrays = toolChest.resultsAsArrays(query, results);
  Assert.assertNotNull(resultsAsArrays);

  final String[] expectedIndex = descending
      ? QueryRunnerTestHelper.EXPECTED_FULL_ON_INDEX_VALUES_DESC
      : QueryRunnerTestHelper.EXPECTED_FULL_ON_INDEX_VALUES;
  // SKIP_EMPTY_BUCKETS is set, so zero-index days are absent from the results.
  final String[] expectedIndexToUse = Arrays.stream(expectedIndex)
      .filter(eachIndex -> !"0.0".equals(eachIndex))
      .toArray(String[]::new);
  final Long expectedLast = descending
      ? QueryRunnerTestHelper.EARLIEST.getMillis()
      : QueryRunnerTestHelper.LAST.getMillis();

  int count = 0;
  Object[] lastResult = null;
  for (Object[] result : resultsAsArrays.toList()) {
    Long current = (Long) result[0];
    Assert.assertFalse(
        StringUtils.format("Timestamp[%s] > expectedLast[%s]", current, expectedLast),
        descending ? current < expectedLast : current > expectedLast
    );
    // The timestamp result field must mirror __time.
    Assert.assertEquals((Long) result[1], current, 0);
    Assert.assertEquals(
        QueryRunnerTestHelper.SKIPPED_DAY.getMillis() == current ? (Long) 0L : (Long) 13L,
        result[2]
    );

    if (QueryRunnerTestHelper.SKIPPED_DAY.getMillis() != current) {
      Assert.assertEquals(
          Doubles.tryParse(expectedIndexToUse[count]).doubleValue(),
          (Double) result[3],
          (Double) result[3] * 1e-6
      );
      Assert.assertEquals((Double) result[4], 9.0d, 0.02);
      // Double.parseDouble replaces the deprecated new Double(String) constructor.
      Assert.assertEquals(
          Double.parseDouble(expectedIndexToUse[count]) + 13L + 1L,
          (Double) result[5],
          (Double) result[5] * 1e-6
      );
    } else {
      if (NullHandling.replaceWithDefault()) {
        Assert.assertEquals(0.0D, (Double) result[3], (Double) result[3] * 1e-6);
        Assert.assertEquals(0.0D, (Double) result[4], 0.02);
        Assert.assertEquals(
            Double.parseDouble(expectedIndexToUse[count]) + 1L,
            (Double) result[5],
            (Double) result[5] * 1e-6
        );
      } else {
        // SQL-compatible null handling: aggregating no rows yields null, not 0.
        Assert.assertNull(result[3]);
        Assert.assertEquals((Double) result[4], 0.0, 0.02);
        Assert.assertNull(result[5]);
      }
    }

    lastResult = result;
    ++count;
  }
  Assert.assertEquals(expectedLast, lastResult[0]);
}
Example use of org.apache.druid.segment.column.RowSignature in the druid-io/druid project: class DruidQuery, method computeGrouping.
/**
 * Builds the {@link Grouping} for the query's aggregate stage: dimensions,
 * subtotals, aggregations, and the HAVING filter, projected through the
 * aggregate-project if one is present.
 */
@Nonnull
private static Grouping computeGrouping(
    final PartialDruidQuery partialQuery,
    final PlannerContext plannerContext,
    final RowSignature rowSignature,
    final VirtualColumnRegistry virtualColumnRegistry,
    final RexBuilder rexBuilder,
    final boolean finalizeAggregations
)
{
  final Aggregate aggregate = Preconditions.checkNotNull(partialQuery.getAggregate(), "aggregate");
  final Project aggregateProject = partialQuery.getAggregateProject();

  // Dimensions are computed before aggregations; both receive the virtual column
  // registry, so the original evaluation order is preserved deliberately.
  final List<DimensionExpression> dimensions =
      computeDimensions(partialQuery, plannerContext, rowSignature, virtualColumnRegistry);
  final Subtotals subtotals = computeSubtotals(partialQuery, rowSignature);
  final List<Aggregation> aggregations = computeAggregations(
      partialQuery,
      plannerContext,
      rowSignature,
      virtualColumnRegistry,
      rexBuilder,
      finalizeAggregations
  );

  // Output columns are the dimension names followed by the aggregation names,
  // matching the field order of the aggregate's row type.
  final List<String> outputNames = ImmutableList.copyOf(
      Iterators.concat(
          dimensions.stream().map(DimensionExpression::getOutputName).iterator(),
          aggregations.stream().map(Aggregation::getOutputName).iterator()
      )
  );
  final RowSignature aggregateRowSignature =
      RowSignatures.fromRelDataType(outputNames, aggregate.getRowType());
  final DimFilter havingFilter =
      computeHavingFilter(partialQuery, plannerContext, aggregateRowSignature);
  final Grouping grouping =
      Grouping.create(dimensions, subtotals, aggregations, havingFilter, aggregateRowSignature);

  return aggregateProject == null ? grouping : grouping.applyProject(plannerContext, aggregateProject);
}
Aggregations