Use of org.apache.druid.sql.calcite.filtration.Filtration in project druid by druid-io.
Class DruidQueryTest, method test_filtration_joinDataSource_intervalInBaseTableFilter_left.
@Test
public void test_filtration_joinDataSource_intervalInBaseTableFilter_left()
{
  DataSource dataSource = join(JoinType.LEFT, filterWithInterval);
  DataSource expectedDataSource = join(JoinType.LEFT, selectorFilter);
  Pair<DataSource, Filtration> pair = DruidQuery.getFiltration(
      dataSource,
      otherFilter,
      VirtualColumnRegistry.create(RowSignature.empty(), TestExprMacroTable.INSTANCE)
  );
  verify(pair, expectedDataSource, otherFilter, Intervals.utc(100, 200));
}
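The fixtures (filterWithInterval, selectorFilter, otherFilter) and the join(...) helper are defined elsewhere in DruidQueryTest and are not shown on this page. A plausible reconstruction of the filters, assuming the standard Druid filter classes (constructor arities may differ across Druid versions); the bound on __time is what the test expects to be hoisted into Intervals.utc(100, 200):

// Hypothetical reconstruction of the test fixtures; names and constructor
// signatures are assumptions, not copied from DruidQueryTest.
private final DimFilter selectorFilter = new SelectorDimFilter("column", "value", null);
private final DimFilter otherFilter = new SelectorDimFilter("someColumn", "someValue", null);
private final DimFilter filterWithInterval = new AndDimFilter(
    ImmutableList.of(
        selectorFilter,
        // [100, 200) in epoch millis on __time; expected to be extracted
        // into the query interval, leaving selectorFilter behind.
        new BoundDimFilter("__time", "100", "200", false, true, null, null, StringComparators.NUMERIC)
    )
);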
Use of org.apache.druid.sql.calcite.filtration.Filtration in project druid by druid-io.
Class DruidQuery, method toScanQuery.
/**
 * Return this query as a Scan query, or null if this query is not compatible with Scan.
 *
 * @return query or null
 */
@Nullable
private ScanQuery toScanQuery(final QueryFeatureInspector queryFeatureInspector)
{
  if (grouping != null) {
    // Scan cannot GROUP BY.
    return null;
  }

  if (outputRowSignature.size() == 0) {
    // Should never do a scan query without any columns that we're interested in. This is probably a planner bug.
    throw new ISE("Cannot convert to Scan query without any columns.");
  }

  final Pair<DataSource, Filtration> dataSourceFiltrationPair =
      getFiltration(dataSource, filter, virtualColumnRegistry);
  final DataSource newDataSource = dataSourceFiltrationPair.lhs;
  final Filtration filtration = dataSourceFiltrationPair.rhs;

  final List<ScanQuery.OrderBy> orderByColumns;
  long scanOffset = 0L;
  long scanLimit = 0L;

  if (sorting != null) {
    scanOffset = sorting.getOffsetLimit().getOffset();

    if (sorting.getOffsetLimit().hasLimit()) {
      final long limit = sorting.getOffsetLimit().getLimit();

      if (limit == 0) {
        // Can't handle zero limit (the Scan query engine would treat it as unlimited).
        return null;
      }

      scanLimit = limit;
    }

    orderByColumns = sorting.getOrderBys().stream().map(
        orderBy -> new ScanQuery.OrderBy(
            orderBy.getDimension(),
            orderBy.getDirection() == OrderByColumnSpec.Direction.DESCENDING
            ? ScanQuery.Order.DESCENDING
            : ScanQuery.Order.ASCENDING
        )
    ).collect(Collectors.toList());
  } else {
    orderByColumns = Collections.emptyList();
  }

  if (!queryFeatureInspector.feature(QueryFeature.SCAN_CAN_ORDER_BY_NON_TIME)
      && (orderByColumns.size() > 1
          || orderByColumns.stream().anyMatch(orderBy -> !orderBy.getColumnName().equals(ColumnHolder.TIME_COLUMN_NAME)))) {
    // Cannot handle this ordering.
    // Scan cannot ORDER BY non-time columns.
    plannerContext.setPlanningError("SQL query requires order by non-time column %s that is not supported.", orderByColumns);
    return null;
  }

  // Compute the list of columns to select, sorted and deduped.
  final SortedSet<String> scanColumns = new TreeSet<>(outputRowSignature.getColumnNames());
  orderByColumns.forEach(column -> scanColumns.add(column.getColumnName()));

  return new ScanQuery(
      newDataSource,
      filtration.getQuerySegmentSpec(),
      getVirtualColumns(true),
      ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST,
      0,
      scanOffset,
      scanLimit,
      null,
      orderByColumns,
      filtration.getDimFilter(),
      ImmutableList.copyOf(scanColumns),
      false,
      ImmutableSortedMap.copyOf(plannerContext.getQueryContext())
  );
}
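For orientation, the getFiltration(...) call at the top of this method boils down to splitting the filter into time intervals plus a residual dimension filter. A minimal sketch of the non-join path, assuming the Filtration API of this era of the codebase (Filtration.create(...).optimize(...) and the getters used above); verify the exact names against your Druid version:

// Sketch only: how a filter is split. Bounds on __time become the query's
// interval spec; everything else stays behind as a dim filter.
Filtration filtration = Filtration.create(filter)
    .optimize(virtualColumnRegistry.getFullRowSignature());
QuerySegmentSpec intervals = filtration.getQuerySegmentSpec(); // lifted __time bounds
DimFilter residual = filtration.getDimFilter();                // remaining filter, possibly null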
Use of org.apache.druid.sql.calcite.filtration.Filtration in project druid by druid-io.
Class DruidQuery, method toTimeseriesQuery.
/**
 * Return this query as a Timeseries query, or null if this query is not compatible with Timeseries.
 *
 * @return query or null
 */
@Nullable
private TimeseriesQuery toTimeseriesQuery(final QueryFeatureInspector queryFeatureInspector)
{
  if (!queryFeatureInspector.feature(QueryFeature.CAN_RUN_TIMESERIES)
      || grouping == null
      || grouping.getSubtotals().hasEffect(grouping.getDimensionSpecs())
      || grouping.getHavingFilter() != null) {
    return null;
  }

  if (sorting != null && sorting.getOffsetLimit().hasOffset()) {
    // Timeseries cannot handle offsets.
    return null;
  }

  final Granularity queryGranularity;
  final boolean descending;
  int timeseriesLimit = 0;
  final Map<String, Object> theContext = new HashMap<>();

  if (grouping.getDimensions().isEmpty()) {
    queryGranularity = Granularities.ALL;
    descending = false;
  } else if (grouping.getDimensions().size() == 1) {
    final DimensionExpression dimensionExpression = Iterables.getOnlyElement(grouping.getDimensions());
    queryGranularity = Expressions.toQueryGranularity(
        dimensionExpression.getDruidExpression(),
        plannerContext.getExprMacroTable()
    );

    if (queryGranularity == null) {
      // Timeseries only applies if the single dimension is granular __time.
      return null;
    }

    theContext.put(
        TimeseriesQuery.CTX_TIMESTAMP_RESULT_FIELD,
        Iterables.getOnlyElement(grouping.getDimensions()).toDimensionSpec().getOutputName()
    );

    if (sorting != null) {
      if (sorting.getOffsetLimit().hasLimit()) {
        final long limit = sorting.getOffsetLimit().getLimit();

        if (limit == 0) {
          // Can't handle zero limit (the Timeseries query engine would treat it as unlimited).
          return null;
        }

        timeseriesLimit = Ints.checkedCast(limit);
      }

      switch (sorting.getTimeSortKind(dimensionExpression.getOutputName())) {
        case UNORDERED:
        case TIME_ASCENDING:
          descending = false;
          break;
        case TIME_DESCENDING:
          descending = true;
          break;
        default:
          // Sorting on a metric, maybe. Timeseries cannot handle.
          return null;
      }
    } else {
      // No limitSpec.
      descending = false;
    }
  } else {
    // More than one dimension, timeseries cannot handle.
    return null;
  }
  // Skip empty buckets when the granularity is finer than ALL, or when this query was originally a
  // groupBy whose grouping dimensions were dropped in Grouping#applyProject.
  if (!Granularities.ALL.equals(queryGranularity) || grouping.hasGroupingDimensionsDropped()) {
    theContext.put(TimeseriesQuery.SKIP_EMPTY_BUCKETS, true);
  }
  theContext.putAll(plannerContext.getQueryContext());

  final Pair<DataSource, Filtration> dataSourceFiltrationPair =
      getFiltration(dataSource, filter, virtualColumnRegistry);
  final DataSource newDataSource = dataSourceFiltrationPair.lhs;
  final Filtration filtration = dataSourceFiltrationPair.rhs;

  final List<PostAggregator> postAggregators = new ArrayList<>(grouping.getPostAggregators());
  if (sorting != null && sorting.getProjection() != null) {
    postAggregators.addAll(sorting.getProjection().getPostAggregators());
  }

  return new TimeseriesQuery(
      newDataSource,
      filtration.getQuerySegmentSpec(),
      descending,
      getVirtualColumns(false),
      filtration.getDimFilter(),
      queryGranularity,
      grouping.getAggregatorFactories(),
      postAggregators,
      timeseriesLimit,
      ImmutableSortedMap.copyOf(theContext)
  );
}
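As a concrete illustration (not taken from the source above), a SQL query like SELECT FLOOR(__time TO DAY), COUNT(*) FROM "test" GROUP BY 1 passes the checks in this method: a single dimension that is a granular __time expression, no having filter, no offset. The native query it maps to can be sketched with the standard Druids builder; the data source name and the aggregator name "cnt" are arbitrary choices here:

// Minimal sketch of the equivalent native Timeseries query; builder methods
// from org.apache.druid.query.Druids, names chosen for illustration.
TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
    .dataSource("test")
    .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.ETERNITY)))
    .granularity(Granularities.DAY)
    .aggregators(new CountAggregatorFactory("cnt"))
    .build();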
Use of org.apache.druid.sql.calcite.filtration.Filtration in project druid by druid-io.
Class DruidQueryTest, method test_filtration_intervalInQueryFilter.
@Test
public void test_filtration_intervalInQueryFilter()
{
  DataSource dataSource = new TableDataSource("test");
  Pair<DataSource, Filtration> pair = DruidQuery.getFiltration(
      dataSource,
      filterWithInterval,
      VirtualColumnRegistry.create(RowSignature.empty(), TestExprMacroTable.INSTANCE)
  );
  verify(pair, dataSource, selectorFilter, Intervals.utc(100, 200));
}
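Intervals.utc builds a Joda-Time Interval from UTC epoch millis, so the assertion above expects the __time bound inside filterWithInterval to be rewritten into that interval, leaving only selectorFilter behind:

// Epoch millis, UTC chronology.
Interval expected = Intervals.utc(100, 200);
// expected.toString() -> "1970-01-01T00:00:00.100Z/1970-01-01T00:00:00.200Z"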
Use of org.apache.druid.sql.calcite.filtration.Filtration in project druid by druid-io.
Class DruidQueryTest, method test_filtration_noJoinAndInterval.
@Test
public void test_filtration_noJoinAndInterval()
{
  DataSource dataSource = new TableDataSource("test");
  Pair<DataSource, Filtration> pair = DruidQuery.getFiltration(
      dataSource,
      selectorFilter,
      VirtualColumnRegistry.create(RowSignature.empty(), TestExprMacroTable.INSTANCE)
  );
  verify(pair, dataSource, selectorFilter, Intervals.ETERNITY);
}
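The verify(...) helper shared by these tests is also defined elsewhere in DruidQueryTest. A plausible shape, assuming Filtration exposes getDimFilter() and getIntervals() as used in the methods above; treat this as a sketch, not the actual helper:

// Hypothetical helper: asserts the rewritten data source, the residual
// filter, and the extracted interval(s). Uses org.junit.Assert.
private void verify(
    Pair<DataSource, Filtration> pair,
    DataSource expectedDataSource,
    DimFilter expectedFilter,
    Interval expectedInterval
)
{
  Assert.assertEquals(expectedDataSource, pair.lhs);
  Assert.assertEquals(expectedFilter, pair.rhs.getDimFilter());
  Assert.assertEquals(Collections.singletonList(expectedInterval), pair.rhs.getIntervals());
}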