Use of org.apache.druid.query.QueryPlus in project druid by druid-io.
The class MaterializedViewQueryQueryToolChest, method preMergeQueryDecoration:
@Override
public QueryRunner preMergeQueryDecoration(final QueryRunner runner) {
  return new QueryRunner() {
    @Override
    public Sequence run(QueryPlus queryPlus, ResponseContext responseContext) {
      // Unwrap the materialized-view query to the real underlying query,
      // then delegate to that query type's own tool chest.
      Query realQuery = getRealQuery(queryPlus.getQuery());
      QueryToolChest realQueryToolChest = warehouse.getToolChest(realQuery);
      QueryRunner realQueryRunner = realQueryToolChest.preMergeQueryDecoration(
          new MaterializedViewQueryRunner(runner, optimizer)
      );
      return realQueryRunner.run(queryPlus.withQuery(realQuery), responseContext);
    }
  };
}
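The QueryPlus handling is the part worth noting: the runner never mutates the incoming QueryPlus, it derives a new one with withQuery(realQuery) and hands that downstream. A minimal sketch of the same delegating-runner shape outside the materialized-view code (the class name RewritingQueryRunner and the rewrite step are illustrative, not part of Druid):

import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.context.ResponseContext;

// Illustrative decorator: derives a new QueryPlus via withQuery() and
// delegates to a downstream runner, mirroring preMergeQueryDecoration above.
public class RewritingQueryRunner<T> implements QueryRunner<T> {
  private final QueryRunner<T> delegate;

  public RewritingQueryRunner(QueryRunner<T> delegate) {
    this.delegate = delegate;
  }

  @Override
  public Sequence<T> run(QueryPlus<T> queryPlus, ResponseContext responseContext) {
    // QueryPlus is immutable; withQuery() returns a new instance carrying the
    // rewritten query while preserving the rest of the context.
    Query<T> rewritten = rewrite(queryPlus.getQuery());
    return delegate.run(queryPlus.withQuery(rewritten), responseContext);
  }

  private Query<T> rewrite(Query<T> query) {
    // Placeholder: a real decorator would transform the query here.
    return query;
  }
}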
Use of org.apache.druid.query.QueryPlus in project druid by druid-io.
The class SegmentMetadataQueryRunnerFactory, method createRunner:
@Override
public QueryRunner<SegmentAnalysis> createRunner(final Segment segment) {
  return new QueryRunner<SegmentAnalysis>() {
    @Override
    public Sequence<SegmentAnalysis> run(QueryPlus<SegmentAnalysis> inQ, ResponseContext responseContext) {
      // Finalize the analysis types against the tool chest config before doing any work.
      SegmentMetadataQuery updatedQuery =
          ((SegmentMetadataQuery) inQ.getQuery()).withFinalizedAnalysisTypes(toolChest.getConfig());
      final SegmentAnalyzer analyzer = new SegmentAnalyzer(updatedQuery.getAnalysisTypes());
      final Map<String, ColumnAnalysis> analyzedColumns = analyzer.analyze(segment);
      final long numRows = analyzer.numRows(segment);
      long totalSize = 0;
      if (analyzer.analyzingSize()) {
        // Initialize with the size of the whitespace, 1 byte per row per column
        totalSize = analyzedColumns.size() * numRows;
      }
      Map<String, ColumnAnalysis> columns = new TreeMap<>();
      ColumnIncluderator includerator = updatedQuery.getToInclude();
      for (Map.Entry<String, ColumnAnalysis> entry : analyzedColumns.entrySet()) {
        final String columnName = entry.getKey();
        final ColumnAnalysis column = entry.getValue();
        if (!column.isError()) {
          totalSize += column.getSize();
        }
        if (includerator.include(columnName)) {
          columns.put(columnName, column);
        }
      }
      List<Interval> retIntervals =
          updatedQuery.analyzingInterval() ? Collections.singletonList(segment.getDataInterval()) : null;
      final Map<String, AggregatorFactory> aggregators;
      Metadata metadata = null;
      if (updatedQuery.hasAggregators()) {
        metadata = segment.asStorageAdapter().getMetadata();
        if (metadata != null && metadata.getAggregators() != null) {
          aggregators = new HashMap<>();
          for (AggregatorFactory aggregator : metadata.getAggregators()) {
            aggregators.put(aggregator.getName(), aggregator);
          }
        } else {
          aggregators = null;
        }
      } else {
        aggregators = null;
      }
      final TimestampSpec timestampSpec;
      if (updatedQuery.hasTimestampSpec()) {
        if (metadata == null) {
          metadata = segment.asStorageAdapter().getMetadata();
        }
        timestampSpec = metadata != null ? metadata.getTimestampSpec() : null;
      } else {
        timestampSpec = null;
      }
      final Granularity queryGranularity;
      if (updatedQuery.hasQueryGranularity()) {
        if (metadata == null) {
          metadata = segment.asStorageAdapter().getMetadata();
        }
        queryGranularity = metadata != null ? metadata.getQueryGranularity() : null;
      } else {
        queryGranularity = null;
      }
      Boolean rollup = null;
      if (updatedQuery.hasRollup()) {
        if (metadata == null) {
          metadata = segment.asStorageAdapter().getMetadata();
        }
        rollup = metadata != null ? metadata.isRollup() : null;
        if (rollup == null) {
          // this segment was built before the no-rollup feature existed, so it was built with rollup
          rollup = Boolean.TRUE;
        }
      }
      return Sequences.simple(
          Collections.singletonList(
              new SegmentAnalysis(
                  segment.getId().toString(),
                  retIntervals,
                  columns,
                  totalSize,
                  numRows,
                  aggregators,
                  timestampSpec,
                  queryGranularity,
                  rollup
              )
          )
      );
    }
  };
}
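Note how the runner recovers the concrete query type: QueryPlus is parameterized only on the result type (SegmentAnalysis here), so getQuery() returns a Query<SegmentAnalysis> that must be cast to SegmentMetadataQuery before query-specific accessors such as getAnalysisTypes() are available. A stripped-down sketch of just that unwrap step (the class name UnwrapSketch is illustrative; it returns an empty sequence instead of analyzing anything):

import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.context.ResponseContext;
import org.apache.druid.query.metadata.metadata.SegmentAnalysis;
import org.apache.druid.query.metadata.metadata.SegmentMetadataQuery;

// Illustrative skeleton: QueryPlus<T> is typed on the result (SegmentAnalysis),
// so the concrete query class is recovered by casting getQuery().
public class UnwrapSketch implements QueryRunner<SegmentAnalysis> {
  @Override
  public Sequence<SegmentAnalysis> run(QueryPlus<SegmentAnalysis> queryPlus, ResponseContext responseContext) {
    SegmentMetadataQuery query = (SegmentMetadataQuery) queryPlus.getQuery();
    // A real runner would analyze the segment here, as createRunner() does above.
    return Sequences.empty();
  }
}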
Use of org.apache.druid.query.QueryPlus in project druid by druid-io.
The class SegmentMetadataQueryRunnerFactory, method mergeRunners:
@Override
public QueryRunner<SegmentAnalysis> mergeRunners(
    QueryProcessingPool queryProcessingPool,
    Iterable<QueryRunner<SegmentAnalysis>> queryRunners
) {
  return new ConcatQueryRunner<SegmentAnalysis>(
      Sequences.map(
          Sequences.simple(queryRunners),
          new Function<QueryRunner<SegmentAnalysis>, QueryRunner<SegmentAnalysis>>() {
            @Override
            public QueryRunner<SegmentAnalysis> apply(final QueryRunner<SegmentAnalysis> input) {
              return new QueryRunner<SegmentAnalysis>() {
                @Override
                public Sequence<SegmentAnalysis> run(
                    final QueryPlus<SegmentAnalysis> queryPlus,
                    final ResponseContext responseContext
                ) {
                  final Query<SegmentAnalysis> query = queryPlus.getQuery();
                  final int priority = QueryContexts.getPriority(query);
                  // Take a thread-safe copy before handing the QueryPlus to a pool thread.
                  final QueryPlus<SegmentAnalysis> threadSafeQueryPlus = queryPlus.withoutThreadUnsafeState();
                  ListenableFuture<Sequence<SegmentAnalysis>> future = queryProcessingPool.submitRunnerTask(
                      new AbstractPrioritizedQueryRunnerCallable<Sequence<SegmentAnalysis>, SegmentAnalysis>(priority, input) {
                        @Override
                        public Sequence<SegmentAnalysis> call() {
                          return Sequences.simple(input.run(threadSafeQueryPlus, responseContext).toList());
                        }
                      }
                  );
                  try {
                    queryWatcher.registerQueryFuture(query, future);
                    if (QueryContexts.hasTimeout(query)) {
                      return future.get(QueryContexts.getTimeout(query), TimeUnit.MILLISECONDS);
                    } else {
                      return future.get();
                    }
                  } catch (InterruptedException e) {
                    log.warn(e, "Query interrupted, cancelling pending results, query id [%s]", query.getId());
                    future.cancel(true);
                    throw new QueryInterruptedException(e);
                  } catch (CancellationException e) {
                    throw new QueryInterruptedException(e);
                  } catch (TimeoutException e) {
                    log.info("Query timeout, cancelling pending results for query id [%s]", query.getId());
                    future.cancel(true);
                    throw new QueryTimeoutException(StringUtils.nonStrictFormat("Query [%s] timed out", query.getId()));
                  } catch (ExecutionException e) {
                    throw new RuntimeException(e);
                  }
                }
              };
            }
          }
      )
  );
}
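The QueryPlus-specific detail here is withoutThreadUnsafeState(): the results are materialized on a processing-pool thread, so the runner first takes a copy of the QueryPlus with any per-thread state stripped. A rough sketch of the same hand-off using a plain ExecutorService instead of Druid's QueryProcessingPool (the helper class and method names are assumptions for illustration):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.context.ResponseContext;

public class AsyncRunSketch {
  // Illustrative: materialize a runner's results on another thread. The
  // QueryPlus may carry thread-local state, so a thread-safe copy is taken
  // before it crosses the thread boundary.
  static <T> Future<Sequence<T>> submit(
      ExecutorService exec,
      QueryRunner<T> runner,
      QueryPlus<T> queryPlus,
      ResponseContext responseContext
  ) {
    final QueryPlus<T> threadSafeQueryPlus = queryPlus.withoutThreadUnsafeState();
    Callable<Sequence<T>> task = () -> runner.run(threadSafeQueryPlus, responseContext);
    return exec.submit(task);
  }
}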
Use of org.apache.druid.query.QueryPlus in project druid by druid-io.
The class GroupByQueryRunnerTest, method testMergeResults:
@Test
public void testMergeResults() {
  GroupByQuery.Builder builder = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setInterval("2011-04-02/2011-04-04")
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
      .setGranularity(new PeriodGranularity(new Period("P1M"), null, null));
  final GroupByQuery fullQuery = builder.build();
  final GroupByQuery allGranQuery = builder.copy().setGranularity(Granularities.ALL).build();
  QueryRunner mergedRunner = factory.getToolchest().mergeResults(new QueryRunner<ResultRow>() {
    @Override
    public Sequence<ResultRow> run(QueryPlus<ResultRow> queryPlus, ResponseContext responseContext) {
      // simulate two daily segments
      final QueryPlus queryPlus1 = queryPlus.withQuery(
          queryPlus.getQuery().withQuerySegmentSpec(
              new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-04-02/2011-04-03")))
          )
      );
      final QueryPlus queryPlus2 = queryPlus.withQuery(
          queryPlus.getQuery().withQuerySegmentSpec(
              new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-04-03/2011-04-04")))
          )
      );
      return new MergeSequence(
          queryPlus.getQuery().getResultOrdering(),
          Sequences.simple(Arrays.asList(runner.run(queryPlus1, responseContext), runner.run(queryPlus2, responseContext)))
      );
    }
  });
  List<ResultRow> expectedResults = Arrays.asList(
      makeRow(fullQuery, "2011-04-01", "alias", "automotive", "rows", 2L, "idx", 269L),
      makeRow(fullQuery, "2011-04-01", "alias", "business", "rows", 2L, "idx", 217L),
      makeRow(fullQuery, "2011-04-01", "alias", "entertainment", "rows", 2L, "idx", 319L),
      makeRow(fullQuery, "2011-04-01", "alias", "health", "rows", 2L, "idx", 216L),
      makeRow(fullQuery, "2011-04-01", "alias", "mezzanine", "rows", 6L, "idx", 4420L),
      makeRow(fullQuery, "2011-04-01", "alias", "news", "rows", 2L, "idx", 221L),
      makeRow(fullQuery, "2011-04-01", "alias", "premium", "rows", 6L, "idx", 4416L),
      makeRow(fullQuery, "2011-04-01", "alias", "technology", "rows", 2L, "idx", 177L),
      makeRow(fullQuery, "2011-04-01", "alias", "travel", "rows", 2L, "idx", 243L)
  );
  ResponseContext context = ResponseContext.createEmpty();
  TestHelper.assertExpectedObjects(expectedResults, mergedRunner.run(QueryPlus.wrap(fullQuery)), "merged");
  List<ResultRow> allGranExpectedResults = Arrays.asList(
      makeRow(allGranQuery, "2011-04-02", "alias", "automotive", "rows", 2L, "idx", 269L),
      makeRow(allGranQuery, "2011-04-02", "alias", "business", "rows", 2L, "idx", 217L),
      makeRow(allGranQuery, "2011-04-02", "alias", "entertainment", "rows", 2L, "idx", 319L),
      makeRow(allGranQuery, "2011-04-02", "alias", "health", "rows", 2L, "idx", 216L),
      makeRow(allGranQuery, "2011-04-02", "alias", "mezzanine", "rows", 6L, "idx", 4420L),
      makeRow(allGranQuery, "2011-04-02", "alias", "news", "rows", 2L, "idx", 221L),
      makeRow(allGranQuery, "2011-04-02", "alias", "premium", "rows", 6L, "idx", 4416L),
      makeRow(allGranQuery, "2011-04-02", "alias", "technology", "rows", 2L, "idx", 177L),
      makeRow(allGranQuery, "2011-04-02", "alias", "travel", "rows", 2L, "idx", 243L)
  );
  TestHelper.assertExpectedObjects(allGranExpectedResults, mergedRunner.run(QueryPlus.wrap(allGranQuery)), "merged");
}
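At the top of the stack there is no QueryPlus yet, so the test builds one with QueryPlus.wrap(query), as both assertions above do. A minimal helper sketch of that entry point (the class and method names are placeholders):

import java.util.List;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.context.ResponseContext;

public class RunToListSketch {
  // Illustrative entry point: wrap a bare Query in a QueryPlus, run it with a
  // fresh ResponseContext, and materialize the lazy Sequence into a List.
  static <T> List<T> runToList(QueryRunner<T> runner, Query<T> query) {
    Sequence<T> results = runner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
    return results.toList();
  }
}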
Use of org.apache.druid.query.QueryPlus in project druid by druid-io.
The class GroupByLimitPushDownInsufficientBufferTest, method testPartialLimitPushDownMergeForceAggs:
@Test
public void testPartialLimitPushDownMergeForceAggs() {
  // one segment's results use limit push down; the other's don't because of insufficient buffer capacity
  QueryToolChest<ResultRow, GroupByQuery> toolChest = groupByFactory.getToolchest();
  QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(groupByFactory.mergeRunners(executorService, getRunner1())),
      (QueryToolChest) toolChest
  );
  QueryRunner<ResultRow> theRunner2 = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(tooSmallGroupByFactory.mergeRunners(executorService, getRunner2())),
      (QueryToolChest) toolChest
  );
  QueryRunner<ResultRow> theRunner3 = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(new QueryRunner<ResultRow>() {
        @Override
        public Sequence<ResultRow> run(QueryPlus<ResultRow> queryPlus, ResponseContext responseContext) {
          return Sequences
              .simple(ImmutableList.of(theRunner.run(queryPlus, responseContext), theRunner2.run(queryPlus, responseContext)))
              .flatMerge(Function.identity(), queryPlus.getQuery().getResultOrdering());
        }
      }),
      (QueryToolChest) toolChest
  );
  QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.utc(0, 1000000)));
  GroupByQuery query = GroupByQuery.builder()
      .setDataSource("blah")
      .setQuerySegmentSpec(intervalSpec)
      .setDimensions(new DefaultDimensionSpec("dimA", null))
      .setAggregatorSpecs(new LongSumAggregatorFactory("metA", "metA"))
      .setLimitSpec(
          new DefaultLimitSpec(
              Collections.singletonList(new OrderByColumnSpec("metA", OrderByColumnSpec.Direction.DESCENDING, StringComparators.NUMERIC)),
              3
          )
      )
      .setGranularity(Granularities.ALL)
      .setContext(ImmutableMap.of(GroupByQueryConfig.CTX_KEY_FORCE_LIMIT_PUSH_DOWN, true))
      .build();
  Sequence<ResultRow> queryResult = theRunner3.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
  List<ResultRow> results = queryResult.toList();
  ResultRow expectedRow0 = GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", "dimA", "zortaxx", "metA", 999L);
  ResultRow expectedRow1 = GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", "dimA", "foo", "metA", 200L);
  ResultRow expectedRow2 = GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", "dimA", "mango", "metA", 190L);
  Assert.assertEquals(3, results.size());
  Assert.assertEquals(expectedRow0, results.get(0));
  Assert.assertEquals(expectedRow1, results.get(1));
  Assert.assertEquals(expectedRow2, results.get(2));
}
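The anonymous runner inside theRunner3 is a reusable idiom: run the same QueryPlus against several runners and merge the resulting sequences with flatMerge, ordered by the query's own result ordering. A generic sketch of that fan-out (the class name CombiningRunner is illustrative, not part of Druid):

import com.google.common.collect.ImmutableList;
import java.util.function.Function;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.context.ResponseContext;

// Illustrative fan-out runner: runs the same QueryPlus against two delegates
// and merges the two ordered sequences by the query's result ordering.
public class CombiningRunner<T> implements QueryRunner<T> {
  private final QueryRunner<T> left;
  private final QueryRunner<T> right;

  public CombiningRunner(QueryRunner<T> left, QueryRunner<T> right) {
    this.left = left;
    this.right = right;
  }

  @Override
  public Sequence<T> run(QueryPlus<T> queryPlus, ResponseContext responseContext) {
    return Sequences
        .simple(ImmutableList.of(left.run(queryPlus, responseContext), right.run(queryPlus, responseContext)))
        .flatMerge(Function.identity(), queryPlus.getQuery().getResultOrdering());
  }
}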