Example usage of org.apache.druid.segment.column.RowSignature in the druid-io/druid project.
From the class DefaultLimitSpec, method makeComparator:
/**
 * Builds an {@link Ordering} over {@link ResultRow} implementing this limit spec's ORDER BY
 * columns, optionally compounded with an ordering on the row timestamp.
 *
 * @param rowSignature   signature of the result rows; used to resolve column positions
 * @param hasTimestamp   whether row position 0 holds a timestamp that should participate in ordering
 * @param dimensions     grouping dimensions, keyed below by output name
 * @param aggs           aggregators, keyed below by name
 * @param postAggs       post-aggregators, keyed below by name
 * @param sortByDimsFirst when true, the ORDER BY columns take precedence over the timestamp
 *
 * @return the combined ordering; {@code Ordering.allEqual()} if there is nothing to sort by
 *
 * @throws ISE if an ORDER BY column is not present in the row signature or in any of the maps
 */
private Ordering<ResultRow> makeComparator(RowSignature rowSignature, boolean hasTimestamp, List<DimensionSpec> dimensions, List<AggregatorFactory> aggs, List<PostAggregator> postAggs, boolean sortByDimsFirst) {
  final Ordering<ResultRow> timeOrdering;
  if (hasTimestamp) {
    // When present, the timestamp occupies row position 0 (see getLong(0) below).
    timeOrdering = new Ordering<ResultRow>() {
      @Override
      public int compare(ResultRow left, ResultRow right) {
        return Longs.compare(left.getLong(0), right.getLong(0));
      }
    };
  } else {
    timeOrdering = null;
  }

  // Index each kind of output column by name so ORDER BY columns resolve in O(1).
  Map<String, DimensionSpec> dimensionsMap = new HashMap<>();
  for (DimensionSpec spec : dimensions) {
    dimensionsMap.put(spec.getOutputName(), spec);
  }
  Map<String, AggregatorFactory> aggregatorsMap = new HashMap<>();
  for (final AggregatorFactory agg : aggs) {
    aggregatorsMap.put(agg.getName(), agg);
  }
  Map<String, PostAggregator> postAggregatorsMap = new HashMap<>();
  for (PostAggregator postAgg : postAggs) {
    postAggregatorsMap.put(postAgg.getName(), postAgg);
  }

  Ordering<ResultRow> ordering = null;
  for (OrderByColumnSpec columnSpec : columns) {
    String columnName = columnSpec.getDimension();
    Ordering<ResultRow> nextOrdering = null;
    final int columnIndex = rowSignature.indexOf(columnName);
    if (columnIndex >= 0) {
      // On a name collision, post-aggregators win over aggregators, which win over dimensions.
      if (postAggregatorsMap.containsKey(columnName)) {
        // noinspection unchecked
        nextOrdering = metricOrdering(columnIndex, postAggregatorsMap.get(columnName).getComparator());
      } else if (aggregatorsMap.containsKey(columnName)) {
        // noinspection unchecked
        nextOrdering = metricOrdering(columnIndex, aggregatorsMap.get(columnName).getComparator());
      } else {
        // Fixed: previously this re-scanned the dimensions list with a stream after a successful
        // containsKey check, guarding against an impossible empty Optional. Use the map directly.
        final DimensionSpec dimensionSpec = dimensionsMap.get(columnName);
        if (dimensionSpec != null) {
          nextOrdering = dimensionOrdering(columnIndex, dimensionSpec.getOutputType(), columnSpec.getDimensionComparator());
        }
      }
    }
    if (nextOrdering == null) {
      throw new ISE("Unknown column in order clause[%s]", columnSpec);
    }
    if (columnSpec.getDirection() == OrderByColumnSpec.Direction.DESCENDING) {
      nextOrdering = nextOrdering.reverse();
    }
    // Compound successive ORDER BY columns: earlier columns take precedence.
    ordering = ordering == null ? nextOrdering : ordering.compound(nextOrdering);
  }

  if (ordering == null) {
    ordering = timeOrdering;
  } else if (timeOrdering != null) {
    // sortByDimsFirst controls whether the ORDER BY columns or the timestamp dominate.
    ordering = sortByDimsFirst ? ordering.compound(timeOrdering) : timeOrdering.compound(ordering);
  }
  // noinspection unchecked
  return ordering != null ? ordering : (Ordering) Ordering.allEqual();
}
Example usage of org.apache.druid.segment.column.RowSignature in the druid-io/druid project.
From the class FieldAccessPostAggregatorTest, method testGetTypeBeforeDecorateNil:
@Test
public void testGetTypeBeforeDecorateNil() {
  // With an empty signature the referenced column is unknown, so the reported type is null.
  final RowSignature emptySignature = RowSignature.builder().build();
  final FieldAccessPostAggregator postAgg = new FieldAccessPostAggregator("name", "column");
  Assert.assertNull(postAgg.getType(emptySignature));
}
Example usage of org.apache.druid.segment.column.RowSignature in the druid-io/druid project.
From the class BaseFilterTest, method selectColumnValuesMatchingFilterUsingRowBasedColumnSelectorFactory:
/**
 * Runs the given filter through a row-based column selector factory and returns the value of
 * {@code selectColumn} from every row that matches.
 */
private List<String> selectColumnValuesMatchingFilterUsingRowBasedColumnSelectorFactory(final DimFilter filter, final String selectColumn) {
  // Build a signature covering every dimension and metric the adapter exposes.
  final RowSignature.Builder signatureBuilder = RowSignature.builder();
  for (String column : Iterables.concat(adapter.getAvailableDimensions(), adapter.getAvailableMetrics())) {
    signatureBuilder.add(column, adapter.getColumnCapabilities(column).toColumnType());
  }

  // The matcher reads the "current" row through this settable supplier.
  final SettableSupplier<InputRow> currentRow = new SettableSupplier<>();
  final ValueMatcher matcher = makeFilter(filter).makeMatcher(
      VIRTUAL_COLUMNS.wrap(
          RowBasedColumnSelectorFactory.create(RowAdapters.standardRow(), currentRow::get, signatureBuilder.build(), false)
      )
  );

  // Feed the rows through one at a time, collecting the selected column from matches.
  final List<String> matchingValues = new ArrayList<>();
  for (InputRow row : rows) {
    currentRow.set(row);
    if (matcher.matches()) {
      matchingValues.add((String) row.getRaw(selectColumn));
    }
  }
  return matchingValues;
}
Example usage of org.apache.druid.segment.column.RowSignature in the druid-io/druid project.
From the class CalciteInsertDmlTest, method testInsertWithPartitionedByAndLimitOffset:
@Test
public void testInsertWithPartitionedByAndLimitOffset() {
  // Expected output signature of the INSERT target.
  final RowSignature targetRowSignature = RowSignature.builder()
      .add("__time", ColumnType.LONG)
      .add("floor_m1", ColumnType.FLOAT)
      .add("dim1", ColumnType.STRING)
      .build();

  // An INSERT with LIMIT/OFFSET should plan to a scan query carrying the same limit and offset.
  testInsertQuery()
      .sql("INSERT INTO druid.dst SELECT __time, FLOOR(m1) as floor_m1, dim1 FROM foo LIMIT 10 OFFSET 20 PARTITIONED BY DAY")
      .expectTarget("dst", targetRowSignature)
      .expectResources(dataSourceRead("foo"), dataSourceWrite("dst"))
      .expectQuery(
          newScanQueryBuilder()
              .dataSource("foo")
              .intervals(querySegmentSpec(Filtration.eternity()))
              .columns("__time", "dim1", "v0")
              .virtualColumns(expressionVirtualColumn("v0", "floor(\"m1\")", ColumnType.FLOAT))
              .limit(10)
              .offset(20)
              .context(queryContextWithGranularity(Granularities.DAY))
              .build()
      )
      .verify();
}
Example usage of org.apache.druid.segment.column.RowSignature in the druid-io/druid project.
From the class CalciteInsertDmlTest, method testInsertWithClusteredBy:
@Test
public void testInsertWithClusteredBy() {
  // Test correctness of the query when only CLUSTERED BY clause is present
  final RowSignature targetRowSignature = RowSignature.builder()
      .add("__time", ColumnType.LONG)
      .add("floor_m1", ColumnType.FLOAT)
      .add("dim1", ColumnType.STRING)
      .add("EXPR$3", ColumnType.DOUBLE)
      .build();

  // CLUSTERED BY columns should plan to an ORDER BY on the underlying scan query,
  // resolving ordinal (2), plain (dim1 DESC), and expression (CEIL(m2)) references.
  testInsertQuery()
      .sql("INSERT INTO druid.dst " + "SELECT __time, FLOOR(m1) as floor_m1, dim1, CEIL(m2) FROM foo " + "PARTITIONED BY FLOOR(__time TO DAY) CLUSTERED BY 2, dim1 DESC, CEIL(m2)")
      .expectTarget("dst", targetRowSignature)
      .expectResources(dataSourceRead("foo"), dataSourceWrite("dst"))
      .expectQuery(
          newScanQueryBuilder()
              .dataSource("foo")
              .intervals(querySegmentSpec(Filtration.eternity()))
              .columns("__time", "dim1", "v0", "v1")
              .virtualColumns(
                  expressionVirtualColumn("v0", "floor(\"m1\")", ColumnType.FLOAT),
                  expressionVirtualColumn("v1", "ceil(\"m2\")", ColumnType.DOUBLE)
              )
              .orderBy(
                  ImmutableList.of(
                      new ScanQuery.OrderBy("v0", ScanQuery.Order.ASCENDING),
                      new ScanQuery.OrderBy("dim1", ScanQuery.Order.DESCENDING),
                      new ScanQuery.OrderBy("v1", ScanQuery.Order.ASCENDING)
                  )
              )
              .context(queryContextWithGranularity(Granularities.DAY))
              .build()
      )
      .verify();
}
Aggregations