Use of org.apache.druid.query.QueryRunner in project druid by druid-io.
From the class GroupByLimitPushDownMultiNodeMergeTest, method getRunner2:
private List<QueryRunner<ResultRow>> getRunner2(int qIndexNumber)
{
  List<QueryRunner<ResultRow>> runners = new ArrayList<>();
  QueryableIndex index2 = groupByIndices.get(qIndexNumber);
  QueryRunner<ResultRow> tooSmallRunner = makeQueryRunner(
      groupByFactory2,
      SegmentId.dummy(index2.toString()),
      new QueryableIndexSegment(index2, SegmentId.dummy(index2.toString()))
  );
  runners.add(groupByFactory2.getToolchest().preMergeQueryDecoration(tooSmallRunner));
  return runners;
}
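The pattern above wraps each per-segment runner with the toolchest's preMergeQueryDecoration before handing it to a merger. As a minimal sketch of the QueryRunner contract itself (the class name and the empty result are illustrative, not from the test): a runner takes a QueryPlus and a ResponseContext and returns a lazy Sequence of results.

import java.util.Collections;

import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.context.ResponseContext;
import org.apache.druid.query.groupby.ResultRow;

// Hypothetical stand-in: satisfies the QueryRunner contract by returning
// an empty Sequence, which is enough to wire up toolchest decorations.
public class EmptyResultRunner implements QueryRunner<ResultRow>
{
  @Override
  public Sequence<ResultRow> run(QueryPlus<ResultRow> queryPlus, ResponseContext responseContext)
  {
    return Sequences.simple(Collections.emptyList());
  }
}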
Use of org.apache.druid.query.QueryRunner in project druid by druid-io.
From the class GroupByQueryRunnerTest, method testMergeResultsWithLimitPushDown:
@Test
public void testMergeResultsWithLimitPushDown()
{
  if (!config.getDefaultStrategy().equals(GroupByStrategySelector.STRATEGY_V2)) {
    return;
  }
  GroupByQuery.Builder builder = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setInterval("2011-04-02/2011-04-04")
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
      .setLimitSpec(
          new DefaultLimitSpec(
              Collections.singletonList(new OrderByColumnSpec("alias", OrderByColumnSpec.Direction.DESCENDING)),
              5
          )
      )
      .overrideContext(ImmutableMap.of(GroupByQueryConfig.CTX_KEY_FORCE_LIMIT_PUSH_DOWN, true))
      .setGranularity(Granularities.ALL);
  final GroupByQuery allGranQuery = builder.build();
  QueryRunner mergedRunner = factory.getToolchest().mergeResults(
      new QueryRunner<ResultRow>()
      {
        @Override
        public Sequence<ResultRow> run(QueryPlus<ResultRow> queryPlus, ResponseContext responseContext)
        {
          // simulate two daily segments
          final QueryPlus<ResultRow> queryPlus1 = queryPlus.withQuery(
              queryPlus.getQuery().withQuerySegmentSpec(
                  new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-04-02/2011-04-03")))
              )
          );
          final QueryPlus<ResultRow> queryPlus2 = queryPlus.withQuery(
              queryPlus.getQuery().withQuerySegmentSpec(
                  new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-04-03/2011-04-04")))
              )
          );
          return factory.getToolchest().mergeResults(
              (queryPlus3, responseContext1) -> new MergeSequence<>(
                  queryPlus3.getQuery().getResultOrdering(),
                  Sequences.simple(Arrays.asList(
                      runner.run(queryPlus1, responseContext1),
                      runner.run(queryPlus2, responseContext1)
                  ))
              )
          ).run(queryPlus, responseContext);
        }
      }
  );
  Map<String, Object> context = new HashMap<>();
  List<ResultRow> allGranExpectedResults = Arrays.asList(
      makeRow(allGranQuery, "2011-04-02", "alias", "travel", "rows", 2L, "idx", 243L),
      makeRow(allGranQuery, "2011-04-02", "alias", "technology", "rows", 2L, "idx", 177L),
      makeRow(allGranQuery, "2011-04-02", "alias", "premium", "rows", 6L, "idx", 4416L),
      makeRow(allGranQuery, "2011-04-02", "alias", "news", "rows", 2L, "idx", 221L),
      makeRow(allGranQuery, "2011-04-02", "alias", "mezzanine", "rows", 6L, "idx", 4420L)
  );
  TestHelper.assertExpectedObjects(allGranExpectedResults, mergedRunner.run(QueryPlus.wrap(allGranQuery)), "merged");
}
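The interesting parts of this test are the forceLimitPushDown context flag and the nested mergeResults calls simulating a Broker merging two per-day runs. A hedged sketch of just the query construction, assuming the standard GroupByQuery builder (the data source name and the single count aggregator are illustrative, not from the test):

import java.util.Collections;

import com.google.common.collect.ImmutableMap;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.groupby.GroupByQuery;
import org.apache.druid.query.groupby.GroupByQueryConfig;
import org.apache.druid.query.groupby.orderby.DefaultLimitSpec;
import org.apache.druid.query.groupby.orderby.OrderByColumnSpec;

public class LimitPushDownQuerySketch
{
  // Builds a group-by query whose ORDER BY ... LIMIT is pushed down
  // to the per-segment scans via the query context.
  static GroupByQuery build()
  {
    return GroupByQuery.builder()
        .setDataSource("testing")
        .setInterval("2011-04-02/2011-04-04")
        .setDimensions(new DefaultDimensionSpec("quality", "alias"))
        .setAggregatorSpecs(new CountAggregatorFactory("rows"))
        .setLimitSpec(
            new DefaultLimitSpec(
                Collections.singletonList(new OrderByColumnSpec("alias", OrderByColumnSpec.Direction.DESCENDING)),
                5
            )
        )
        .setGranularity(Granularities.ALL)
        .overrideContext(ImmutableMap.of(GroupByQueryConfig.CTX_KEY_FORCE_LIMIT_PUSH_DOWN, true))
        .build();
  }
}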
Use of org.apache.druid.query.QueryRunner in project druid by druid-io.
From the class DataSourceMetadataQueryTest, method testMaxIngestedEventTime:
@Test
public void testMaxIngestedEventTime() throws Exception
{
  final IncrementalIndex rtIndex = new OnheapIncrementalIndex.Builder()
      .setSimpleTestingIndexSchema(new CountAggregatorFactory("count"))
      .setMaxRowCount(1000)
      .build();
  final QueryRunner runner = QueryRunnerTestHelper.makeQueryRunner(
      new DataSourceMetadataQueryRunnerFactory(
          new DataSourceQueryQueryToolChest(DefaultGenericQueryMetricsFactory.instance()),
          QueryRunnerTestHelper.NOOP_QUERYWATCHER
      ),
      new IncrementalIndexSegment(rtIndex, SegmentId.dummy("test")),
      null
  );
  DateTime timestamp = DateTimes.nowUtc();
  rtIndex.add(new MapBasedInputRow(timestamp.getMillis(), ImmutableList.of("dim1"), ImmutableMap.of("dim1", "x")));
  DataSourceMetadataQuery dataSourceMetadataQuery = Druids.newDataSourceMetadataQueryBuilder()
      .dataSource("testing")
      .build();
  ResponseContext context = ConcurrentResponseContext.createEmpty();
  context.initializeMissingSegments();
  Iterable<Result<DataSourceMetadataResultValue>> results =
      runner.run(QueryPlus.wrap(dataSourceMetadataQuery), context).toList();
  DataSourceMetadataResultValue val = results.iterator().next().getValue();
  DateTime maxIngestedEventTime = val.getMaxIngestedEventTime();
  Assert.assertEquals(timestamp, maxIngestedEventTime);
}
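The run-and-collect step at the end is the same for every query type: wrap the query in a QueryPlus, run it against a ResponseContext, and materialize the lazy Sequence. A generic helper along these lines (the class and method names are illustrative) captures the pattern:

import java.util.List;

import org.apache.druid.query.Query;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.context.ResponseContext;

public class QueryRunners
{
  // Runs a query through a runner with a fresh ResponseContext and
  // materializes the lazy result Sequence into a List.
  static <T> List<T> runToList(QueryRunner<T> runner, Query<T> query)
  {
    return runner.run(QueryPlus.wrap(query), ResponseContext.createEmpty()).toList();
  }
}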
Use of org.apache.druid.query.QueryRunner in project druid by druid-io.
From the class ScanQueryResultOrderingTest, method assertResultsEquals:
private void assertResultsEquals(final ScanQuery query, final List<Integer> expectedResults)
{
  final List<List<Pair<SegmentId, QueryRunner<ScanResultValue>>>> serverRunners = new ArrayList<>();
  for (int i = 0; i <= segmentToServerMap.stream().max(Comparator.naturalOrder()).orElse(0); i++) {
    serverRunners.add(new ArrayList<>());
  }
  for (int segmentNumber = 0; segmentNumber < segmentToServerMap.size(); segmentNumber++) {
    final SegmentId segmentId = SEGMENTS.get(segmentNumber).getId();
    final int serverNumber = segmentToServerMap.get(segmentNumber);
    serverRunners.get(serverNumber).add(Pair.of(segmentId, segmentRunners.get(segmentNumber)));
  }
  // Simulates what the Historical servers would do.
  final List<QueryRunner<ScanResultValue>> mergedServerRunners = serverRunners
      .stream()
      .filter(runners -> !runners.isEmpty())
      .map(
          runners -> queryRunnerFactory.getToolchest().mergeResults(
              new QueryRunner<ScanResultValue>()
              {
                @Override
                public Sequence<ScanResultValue> run(
                    final QueryPlus<ScanResultValue> queryPlus,
                    final ResponseContext responseContext
                )
                {
                  return queryRunnerFactory
                      .mergeRunners(
                          Execs.directExecutor(),
                          runners.stream().map(p -> p.rhs).collect(Collectors.toList())
                      )
                      .run(
                          queryPlus.withQuery(
                              queryPlus.getQuery().withQuerySegmentSpec(
                                  new MultipleSpecificSegmentSpec(
                                      runners.stream().map(p -> p.lhs.toDescriptor()).collect(Collectors.toList())
                                  )
                              )
                          ),
                          responseContext
                      );
                }
              }
          )
      )
      .collect(Collectors.toList());
  // Simulates what the Broker would do.
  final QueryRunner<ScanResultValue> brokerRunner = queryRunnerFactory.getToolchest().mergeResults(
      (queryPlus, responseContext) -> {
        final List<Sequence<ScanResultValue>> sequences = mergedServerRunners
            .stream()
            .map(runner -> runner.run(queryPlus.withoutThreadUnsafeState()))
            .collect(Collectors.toList());
        return new MergeSequence<>(queryPlus.getQuery().getResultOrdering(), Sequences.simple(sequences));
      }
  );
  // Finally: run the query.
  final List<Integer> results = runQuery(
      (ScanQuery) Druids.ScanQueryBuilder.copy(query)
          .limit(limit)
          .batchSize(batchSize)
          .build()
          .withOverriddenContext(ImmutableMap.of(ScanQueryConfig.CTX_KEY_MAX_ROWS_QUEUED_FOR_ORDERING, maxRowsQueuedForOrdering)),
      brokerRunner
  );
  Assert.assertEquals(
      expectedResults.stream().limit(limit == 0 ? Long.MAX_VALUE : limit).collect(Collectors.toList()),
      results
  );
}
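Both merge layers here rest on MergeSequence, which performs an ordered k-way merge of already-sorted inputs. A self-contained sketch using plain integers (the values and class name are illustrative, standing in for per-server result sequences):

import java.util.Arrays;
import java.util.List;

import com.google.common.collect.Ordering;
import org.apache.druid.java.util.common.guava.MergeSequence;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;

public class MergeSequenceSketch
{
  public static void main(String[] args)
  {
    // Two pre-sorted inputs, standing in for two servers' ordered results.
    Sequence<Integer> server1 = Sequences.simple(Arrays.asList(1, 3, 5));
    Sequence<Integer> server2 = Sequences.simple(Arrays.asList(2, 4, 6));
    List<Integer> merged = new MergeSequence<>(
        Ordering.natural(),
        Sequences.simple(Arrays.asList(server1, server2))
    ).toList();
    System.out.println(merged); // prints [1, 2, 3, 4, 5, 6]
  }
}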
Use of org.apache.druid.query.QueryRunner in project druid by druid-io.
From the class SpecificSegmentQueryRunnerTest, method testRetry:
@Test
public void testRetry() throws Exception
{
  final ObjectMapper mapper = new DefaultObjectMapper();
  SegmentDescriptor descriptor = new SegmentDescriptor(Intervals.of("2012-01-01T00:00:00Z/P1D"), "version", 0);
  final SpecificSegmentQueryRunner queryRunner = new SpecificSegmentQueryRunner(
      new QueryRunner()
      {
        @Override
        public Sequence run(QueryPlus queryPlus, ResponseContext responseContext)
        {
          return new Sequence()
          {
            @Override
            public Object accumulate(Object initValue, Accumulator accumulator)
            {
              throw new SegmentMissingException("FAILSAUCE");
            }

            @Override
            public Yielder<Object> toYielder(Object initValue, YieldingAccumulator accumulator)
            {
              throw new SegmentMissingException("FAILSAUCE");
            }
          };
        }
      },
      new SpecificSegmentSpec(descriptor)
  );
  // from accumulate
  ResponseContext responseContext = ResponseContext.createEmpty();
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource("foo")
      .granularity(Granularities.ALL)
      .intervals(ImmutableList.of(Intervals.of("2012-01-01T00:00:00Z/P1D")))
      .aggregators(ImmutableList.of(new CountAggregatorFactory("rows")))
      .build();
  Sequence results = queryRunner.run(QueryPlus.wrap(query), responseContext);
  results.toList();
  validate(mapper, descriptor, responseContext);
  // from toYielder
  responseContext = ResponseContext.createEmpty();
  results = queryRunner.run(QueryPlus.wrap(query), responseContext);
  results.toYielder(
      null,
      new YieldingAccumulator()
      {
        final List lists = new ArrayList<>();

        @Override
        public Object accumulate(Object accumulated, Object in)
        {
          lists.add(in);
          return in;
        }
      }
  );
  validate(mapper, descriptor, responseContext);
}
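What validate() presumably asserts is that SpecificSegmentQueryRunner caught the SegmentMissingException and recorded the segment's descriptor in the ResponseContext instead of propagating the error. A hedged sketch of such a check, assuming this Druid version's ResponseContext exposes a getMissingSegments() accessor alongside the initializeMissingSegments() seen in an earlier example (the class and method names are illustrative):

import java.util.Collections;
import java.util.List;

import org.apache.druid.query.SegmentDescriptor;
import org.apache.druid.query.context.ResponseContext;
import org.junit.Assert;

public class MissingSegmentsCheck
{
  // Asserts that exactly the expected descriptor was reported missing.
  // getMissingSegments() is an assumption about this Druid version's API.
  static void assertMissing(ResponseContext responseContext, SegmentDescriptor expected)
  {
    final List<SegmentDescriptor> missing = responseContext.getMissingSegments();
    Assert.assertEquals(Collections.singletonList(expected), missing);
  }
}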