Example use of org.apache.druid.query.scan.ScanQuery in the project druid by druid-io,
taken from the class DruidQuery, method toScanQuery.
/**
 * Translate this DruidQuery into a native Scan query, when such a translation exists.
 *
 * @param queryFeatureInspector used to check whether the engine supports ordering on non-time columns
 * @return the equivalent Scan query, or null if this query cannot be expressed as a Scan
 */
@Nullable
private ScanQuery toScanQuery(final QueryFeatureInspector queryFeatureInspector) {
  // Scan has no GROUP BY support at all.
  if (grouping != null) {
    return null;
  }

  if (outputRowSignature.size() == 0) {
    // A scan that projects zero columns should never be planned; treat it as a planner bug.
    throw new ISE("Cannot convert to Scan query without any columns.");
  }

  final Pair<DataSource, Filtration> filtrationPair =
      getFiltration(dataSource, filter, virtualColumnRegistry);
  final DataSource scanDataSource = filtrationPair.lhs;
  final Filtration scanFiltration = filtrationPair.rhs;

  long offset = 0L;
  long limit = 0L;
  final List<ScanQuery.OrderBy> orderBys;

  if (sorting == null) {
    orderBys = Collections.emptyList();
  } else {
    offset = sorting.getOffsetLimit().getOffset();

    if (sorting.getOffsetLimit().hasLimit()) {
      final long sortingLimit = sorting.getOffsetLimit().getLimit();
      if (sortingLimit == 0) {
        // The Scan engine treats a limit of zero as "unlimited", so LIMIT 0 is not expressible.
        return null;
      }
      limit = sortingLimit;
    }

    orderBys = sorting.getOrderBys()
                      .stream()
                      .map(spec -> {
                        final ScanQuery.Order direction =
                            spec.getDirection() == OrderByColumnSpec.Direction.DESCENDING
                            ? ScanQuery.Order.DESCENDING
                            : ScanQuery.Order.ASCENDING;
                        return new ScanQuery.OrderBy(spec.getDimension(), direction);
                      })
                      .collect(Collectors.toList());
  }

  if (!queryFeatureInspector.feature(QueryFeature.SCAN_CAN_ORDER_BY_NON_TIME)) {
    // Without that feature, the only acceptable ordering is a single ORDER BY on __time.
    final boolean ordersByNonTime =
        orderBys.size() > 1
        || orderBys.stream().anyMatch(o -> !o.getColumnName().equals(ColumnHolder.TIME_COLUMN_NAME));
    if (ordersByNonTime) {
      plannerContext.setPlanningError("SQL query requires order by non-time column %s that is not supported.", orderBys);
      return null;
    }
  }

  // Select the output columns plus any ordering columns, sorted and deduplicated.
  final SortedSet<String> scanColumns = new TreeSet<>(outputRowSignature.getColumnNames());
  for (final ScanQuery.OrderBy orderBy : orderBys) {
    scanColumns.add(orderBy.getColumnName());
  }

  return new ScanQuery(
      scanDataSource,
      scanFiltration.getQuerySegmentSpec(),
      getVirtualColumns(true),
      ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST,
      0,
      offset,
      limit,
      null,
      orderBys,
      scanFiltration.getDimFilter(),
      ImmutableList.copyOf(scanColumns),
      false,
      ImmutableSortedMap.copyOf(plannerContext.getQueryContext())
  );
}
Example use of org.apache.druid.query.scan.ScanQuery in the project druid by druid-io,
taken from the class UnifiedIndexerAppenderatorsManagerTest, method test_getBundle_unknownDataSource.
@Test
public void test_getBundle_unknownDataSource() {
  // A Scan query over a datasource the manager has never seen must be rejected.
  final ScanQuery scanQuery =
      Druids.newScanQueryBuilder()
            .dataSource("unknown")
            .intervals(new MultipleIntervalSegmentSpec(Intervals.ONLY_ETERNITY))
            .build();

  expectedException.expect(IllegalArgumentException.class);
  expectedException.expectMessage("Could not find segment walker for datasource");

  manager.getBundle(scanQuery);
}
Example use of org.apache.druid.query.scan.ScanQuery in the project druid by druid-io,
taken from the class CalciteJoinQueryTest, method testLeftJoinSubqueryWithNullKeyFilter.
@Test
@Parameters(source = QueryContextForJoinProvider.class)
public void testLeftJoinSubqueryWithNullKeyFilter(Map<String, Object> queryContext) throws Exception {
  // Cannot vectorize due to 'concat' expression.
  cannotVectorize();

  // Shared right-hand side: SELECT concat(k, '') AS d0 FROM lookup.lookyloo GROUP BY 1.
  final QueryDataSource lookupSubquery = new QueryDataSource(
      GroupByQuery.builder()
                  .setDataSource(new LookupDataSource("lookyloo"))
                  .setInterval(querySegmentSpec(Filtration.eternity()))
                  .setGranularity(Granularities.ALL)
                  .setVirtualColumns(expressionVirtualColumn("v0", "concat(\"k\",'')", ColumnType.STRING))
                  .setDimensions(new DefaultDimensionSpec("v0", "d0"))
                  .build()
  );

  // In SQL-compatible null handling, the IS NOT NULL filter lets the planner use an INNER join.
  final ScanQuery nullCompatibleModePlan =
      newScanQueryBuilder()
          .dataSource(
              join(
                  new TableDataSource(CalciteTests.DATASOURCE1),
                  lookupSubquery,
                  "j0.",
                  equalsCondition(makeColumnExpression("dim1"), makeColumnExpression("j0.d0")),
                  JoinType.INNER
              )
          )
          .intervals(querySegmentSpec(Filtration.eternity()))
          .columns("dim1", "j0.d0")
          .context(queryContext)
          .build();

  // In default-value mode the join stays LEFT and the null check becomes an explicit filter.
  final ScanQuery nonNullCompatibleModePlan =
      newScanQueryBuilder()
          .dataSource(
              join(
                  new TableDataSource(CalciteTests.DATASOURCE1),
                  lookupSubquery,
                  "j0.",
                  equalsCondition(makeColumnExpression("dim1"), makeColumnExpression("j0.d0")),
                  JoinType.LEFT
              )
          )
          .intervals(querySegmentSpec(Filtration.eternity()))
          .columns("dim1", "j0.d0")
          .filters(new NotDimFilter(new SelectorDimFilter("j0.d0", null, null)))
          .context(queryContext)
          .build();

  final boolean isJoinFilterRewriteEnabled =
      queryContext.getOrDefault(JOIN_FILTER_REWRITE_ENABLE_KEY, true).toString().equals("true");

  testQuery(
      "SELECT dim1, l1.k\n"
      + "FROM foo\n"
      + "LEFT JOIN (select k || '' as k from lookup.lookyloo group by 1) l1 ON foo.dim1 = l1.k\n"
      + "WHERE l1.k IS NOT NULL\n",
      queryContext,
      ImmutableList.of(NullHandling.sqlCompatible() ? nullCompatibleModePlan : nonNullCompatibleModePlan),
      NullHandling.sqlCompatible() || !isJoinFilterRewriteEnabled
      ? ImmutableList.of(new Object[]{"abc", "abc"})
      : ImmutableList.of(
          // These results are incorrect. TODO: fix when the JoinFilterAnalyzer bug is fixed.
          new Object[]{"10.1", ""},
          new Object[]{"2", ""},
          new Object[]{"1", ""},
          new Object[]{"def", ""},
          new Object[]{"abc", "abc"}
      )
  );
}
Aggregations