Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io: class TimeBoundaryQueryRunnerTest, method testTimeBoundaryMin.
@Test
@SuppressWarnings("unchecked")
public void testTimeBoundaryMin() {
  TimeBoundaryQuery timeBoundaryQuery = Druids.newTimeBoundaryQueryBuilder()
      .dataSource("testing")
      .bound(TimeBoundaryQuery.MIN_TIME)
      .build();
  ResponseContext context = ConcurrentResponseContext.createEmpty();
  context.initializeMissingSegments();
  Iterable<Result<TimeBoundaryResultValue>> results = runner.run(QueryPlus.wrap(timeBoundaryQuery), context).toList();
  TimeBoundaryResultValue val = results.iterator().next().getValue();
  DateTime minTime = val.getMinTime();
  DateTime maxTime = val.getMaxTime();
  // A MIN_TIME-bound query populates only the min side of the boundary.
  Assert.assertEquals(DateTimes.of("2011-01-12T00:00:00.000Z"), minTime);
  Assert.assertNull(maxTime);
}
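For contrast, a minimal sketch of the symmetric MAX_TIME case, reusing the same runner, context, and fixtures as the test above. The concrete boundary timestamp depends on the test dataset, so this sketch (the method name testTimeBoundaryMaxSketch is hypothetical) only asserts which side of the boundary is populated.

@Test
@SuppressWarnings("unchecked")
public void testTimeBoundaryMaxSketch() {
  TimeBoundaryQuery timeBoundaryQuery = Druids.newTimeBoundaryQueryBuilder()
      .dataSource("testing")
      .bound(TimeBoundaryQuery.MAX_TIME)
      .build();
  ResponseContext context = ConcurrentResponseContext.createEmpty();
  context.initializeMissingSegments();
  Iterable<Result<TimeBoundaryResultValue>> results = runner.run(QueryPlus.wrap(timeBoundaryQuery), context).toList();
  TimeBoundaryResultValue val = results.iterator().next().getValue();
  // A MAX_TIME-bound query populates only the max side of the boundary.
  Assert.assertNull(val.getMinTime());
  Assert.assertNotNull(val.getMaxTime());
}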
Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io: class SearchQueryRunnerTest, method testSearchWithCardinality.
@Test
public void testSearchWithCardinality() {
  final SearchQuery searchQuery = Druids.newSearchQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.ALL_GRAN)
      .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .query("a")
      .build();
  // Run the query as two consecutive sub-interval runs and let the tool chest
  // merge the per-hit counts across the two result sequences.
  QueryRunner<Result<SearchResultValue>> mergedRunner = TOOL_CHEST.mergeResults(new QueryRunner<Result<SearchResultValue>>() {
    @Override
    public Sequence<Result<SearchResultValue>> run(QueryPlus<Result<SearchResultValue>> queryPlus, ResponseContext responseContext) {
      final QueryPlus<Result<SearchResultValue>> queryPlus1 = queryPlus.withQuery(
          queryPlus.getQuery().withQuerySegmentSpec(
              new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-01-12/2011-02-28")))));
      final QueryPlus<Result<SearchResultValue>> queryPlus2 = queryPlus.withQuery(
          queryPlus.getQuery().withQuerySegmentSpec(
              new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-03-01/2011-04-15")))));
      // Both sub-runs share the same ResponseContext.
      return Sequences.concat(runner.run(queryPlus1, responseContext), runner.run(queryPlus2, responseContext));
    }
  });
  List<SearchHit> expectedHits = new ArrayList<>();
  expectedHits.add(new SearchHit(QueryRunnerTestHelper.QUALITY_DIMENSION, "automotive", 91));
  expectedHits.add(new SearchHit(QueryRunnerTestHelper.QUALITY_DIMENSION, "mezzanine", 273));
  expectedHits.add(new SearchHit(QueryRunnerTestHelper.QUALITY_DIMENSION, "travel", 91));
  expectedHits.add(new SearchHit(QueryRunnerTestHelper.QUALITY_DIMENSION, "health", 91));
  expectedHits.add(new SearchHit(QueryRunnerTestHelper.QUALITY_DIMENSION, "entertainment", 91));
  expectedHits.add(new SearchHit(QueryRunnerTestHelper.MARKET_DIMENSION, "total_market", 182));
  expectedHits.add(new SearchHit(QueryRunnerTestHelper.PLACEMENTISH_DIMENSION, "a", 91));
  expectedHits.add(new SearchHit(QueryRunnerTestHelper.PARTIAL_NULL_DIMENSION, "value", 182));
  checkSearchQuery(searchQuery, mergedRunner, expectedHits);
}
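Since QueryRunner has a single abstract run method, the anonymous class above can also be written as a lambda. A minimal sketch under that assumption (the variable name splitRunner is hypothetical; behavior is the same as the anonymous class):

QueryRunner<Result<SearchResultValue>> splitRunner = TOOL_CHEST.mergeResults(
    (queryPlus, responseContext) -> {
      // Same split as above: two consecutive sub-intervals, one shared ResponseContext.
      QueryPlus<Result<SearchResultValue>> first = queryPlus.withQuery(
          queryPlus.getQuery().withQuerySegmentSpec(
              new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-01-12/2011-02-28")))));
      QueryPlus<Result<SearchResultValue>> second = queryPlus.withQuery(
          queryPlus.getQuery().withQuerySegmentSpec(
              new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-03-01/2011-04-15")))));
      return Sequences.concat(runner.run(first, responseContext), runner.run(second, responseContext));
    });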
Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io: class SpecificSegmentQueryRunnerTest, method testRetry2.
@SuppressWarnings("unchecked")
@Test
public void testRetry2() throws Exception {
  final ObjectMapper mapper = new DefaultObjectMapper();
  SegmentDescriptor descriptor = new SegmentDescriptor(Intervals.of("2012-01-01T00:00:00Z/P1D"), "version", 0);
  TimeseriesResultBuilder builder = new TimeseriesResultBuilder(DateTimes.of("2012-01-01T00:00:00Z"));
  CountAggregator rows = new CountAggregator();
  rows.aggregate();
  builder.addMetric("rows", rows.get());
  final Result<TimeseriesResultValue> value = builder.build();
  final SpecificSegmentQueryRunner queryRunner = new SpecificSegmentQueryRunner(
      new QueryRunner() {
        @Override
        public Sequence run(QueryPlus queryPlus, ResponseContext responseContext) {
          // Emit one result, then throw once the sequence has been consumed,
          // simulating a segment that goes missing partway through a query.
          return Sequences.withEffect(
              Sequences.simple(Collections.singletonList(value)),
              new Runnable() {
                @Override
                public void run() {
                  throw new SegmentMissingException("FAILSAUCE");
                }
              },
              Execs.directExecutor());
        }
      },
      new SpecificSegmentSpec(descriptor));
  final ResponseContext responseContext = ResponseContext.createEmpty();
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource("foo")
      .granularity(Granularities.ALL)
      .intervals(ImmutableList.of(Intervals.of("2012-01-01T00:00:00Z/P1D")))
      .aggregators(ImmutableList.of(new CountAggregatorFactory("rows")))
      .build();
  Sequence results = queryRunner.run(QueryPlus.wrap(query), responseContext);
  List<Result<TimeseriesResultValue>> res = results.toList();
  Assert.assertEquals(1, res.size());
  Result<TimeseriesResultValue> theVal = res.get(0);
  Assert.assertEquals(1L, (long) theVal.getValue().getLongMetric("rows"));
  validate(mapper, descriptor, responseContext);
}
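The validate helper is not shown in this snippet. A plausible sketch of what it checks, assuming this Druid version records missing segments under ResponseContext.Key.MISSING_SEGMENTS (the key name and the helper body are assumptions, not verbatim test code): after the failure, the segment descriptor should appear in the response context, and it should survive a Jackson round trip, since the context travels back to the broker as JSON.

// Assumed sketch of the validate helper, not verbatim test code.
private void validate(ObjectMapper mapper, SegmentDescriptor descriptor, ResponseContext responseContext) throws IOException {
  List<SegmentDescriptor> missingSegments = (List<SegmentDescriptor>) responseContext.get(ResponseContext.Key.MISSING_SEGMENTS);
  Assert.assertTrue(missingSegments.contains(descriptor));
  // The descriptor must round-trip through JSON intact.
  SegmentDescriptor deserialized = mapper.readValue(mapper.writeValueAsString(descriptor), SegmentDescriptor.class);
  Assert.assertEquals(descriptor, deserialized);
}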
Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io: class TimeSeriesUnionQueryRunnerTest, method testUnionResultMerging.
@Test
public void testUnionResultMerging() {
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(new UnionDataSource(Lists.newArrayList(new TableDataSource("ds1"), new TableDataSource("ds2"))))
      .granularity(QueryRunnerTestHelper.DAY_GRAN)
      .intervals(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .aggregators(Arrays.asList(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index")))
      .descending(descending)
      .build();
  QueryToolChest toolChest = new TimeseriesQueryQueryToolChest();
  final List<Result<TimeseriesResultValue>> ds1 = Lists.newArrayList(
      new Result<>(DateTimes.of("2011-04-02"), new TimeseriesResultValue(ImmutableMap.of("rows", 1L, "idx", 2L))),
      new Result<>(DateTimes.of("2011-04-03"), new TimeseriesResultValue(ImmutableMap.of("rows", 3L, "idx", 4L))));
  final List<Result<TimeseriesResultValue>> ds2 = Lists.newArrayList(
      new Result<>(DateTimes.of("2011-04-01"), new TimeseriesResultValue(ImmutableMap.of("rows", 5L, "idx", 6L))),
      new Result<>(DateTimes.of("2011-04-02"), new TimeseriesResultValue(ImmutableMap.of("rows", 7L, "idx", 8L))),
      new Result<>(DateTimes.of("2011-04-04"), new TimeseriesResultValue(ImmutableMap.of("rows", 9L, "idx", 10L))));
  QueryRunner mergingrunner = toolChest.mergeResults(
      new UnionQueryRunner<>(
          new QueryRunner<Result<TimeseriesResultValue>>() {
            @Override
            public Sequence<Result<TimeseriesResultValue>> run(QueryPlus<Result<TimeseriesResultValue>> queryPlus, ResponseContext responseContext) {
              // Serve canned results per table, honoring the query's sort direction.
              if (queryPlus.getQuery().getDataSource().equals(new TableDataSource("ds1"))) {
                return Sequences.simple(descending ? Lists.reverse(ds1) : ds1);
              } else {
                return Sequences.simple(descending ? Lists.reverse(ds2) : ds2);
              }
            }
          }));
  // 2011-04-02 appears in both tables, so the merge sums its aggregators:
  // rows = 1 + 7 = 8, idx = 2 + 8 = 10.
  List<Result<TimeseriesResultValue>> expectedResults = Arrays.asList(
      new Result<>(DateTimes.of("2011-04-01"), new TimeseriesResultValue(ImmutableMap.of("rows", 5L, "idx", 6L))),
      new Result<>(DateTimes.of("2011-04-02"), new TimeseriesResultValue(ImmutableMap.of("rows", 8L, "idx", 10L))),
      new Result<>(DateTimes.of("2011-04-03"), new TimeseriesResultValue(ImmutableMap.of("rows", 3L, "idx", 4L))),
      new Result<>(DateTimes.of("2011-04-04"), new TimeseriesResultValue(ImmutableMap.of("rows", 9L, "idx", 10L))));
  Iterable<Result<TimeseriesResultValue>> results = mergingrunner.run(QueryPlus.wrap(query)).toList();
  assertExpectedResults(expectedResults, results);
}
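assertExpectedResults is a shared helper in Druid's test support rather than part of this method. A minimal sketch of an equivalent ordered comparison (the name assertResultsInOrder is hypothetical; the real helper also accounts for descending runs):

private static <T> void assertResultsInOrder(Iterable<Result<T>> expected, Iterable<Result<T>> actual) {
  Iterator<Result<T>> expectedIter = expected.iterator();
  Iterator<Result<T>> actualIter = actual.iterator();
  while (expectedIter.hasNext() && actualIter.hasNext()) {
    // Result.equals compares both the timestamp and the aggregated values.
    Assert.assertEquals(expectedIter.next(), actualIter.next());
  }
  Assert.assertFalse("got more results than expected", actualIter.hasNext());
  Assert.assertFalse("got fewer results than expected", expectedIter.hasNext());
}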
Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io: class UnionQueryRunner, method run.
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final ResponseContext responseContext) {
  Query<T> query = queryPlus.getQuery();
  final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
  if (analysis.isConcreteTableBased() && analysis.getBaseUnionDataSource().isPresent()) {
    // Union of tables.
    final UnionDataSource unionDataSource = analysis.getBaseUnionDataSource().get();
    if (unionDataSource.getDataSources().isEmpty()) {
      // Shouldn't happen, because UnionDataSource doesn't allow empty unions.
      throw new ISE("Unexpectedly received empty union");
    } else if (unionDataSource.getDataSources().size() == 1) {
      // Single table. Run as a normal query.
      return baseRunner.run(
          queryPlus.withQuery(Queries.withBaseDataSource(query, Iterables.getOnlyElement(unionDataSource.getDataSources()))),
          responseContext);
    } else {
      // Split up the tables, tag each branch with its own subquery id, and
      // merge the per-table sequences by the query's result ordering.
      return new MergeSequence<>(
          query.getResultOrdering(),
          Sequences.simple(
              Lists.transform(
                  IntStream.range(0, unionDataSource.getDataSources().size())
                           .mapToObj(i -> new Pair<>(unionDataSource.getDataSources().get(i), i + 1))
                           .collect(Collectors.toList()),
                  (Function<Pair<TableDataSource, Integer>, Sequence<T>>) singleSourceWithIndex ->
                      baseRunner.run(
                          queryPlus.withQuery(
                              Queries.withBaseDataSource(query, singleSourceWithIndex.lhs)
                                     .withSubQueryId(generateSubqueryId(
                                         query.getSubQueryId(),
                                         singleSourceWithIndex.lhs.getName(),
                                         singleSourceWithIndex.rhs))),
                          responseContext))));
    }
  } else {
    // Not a union of tables. Do nothing special.
    return baseRunner.run(queryPlus, responseContext);
  }
}
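generateSubqueryId is referenced above but not shown. A plausible sketch, assuming a branch id composes the table name with its one-based index and prepends any existing subquery id (the exact format is an assumption); distinct ids let per-branch metrics be told apart once the union fans out:

// Assumed sketch: yields ids like "ds1.1", or "parent.ds1.1" when the
// incoming query already carries a subquery id.
private String generateSubqueryId(String parentSubqueryId, String dataSourceName, int dataSourceIndex) {
  final String branchId = dataSourceName + "." + dataSourceIndex;
  return (parentSubqueryId == null || parentSubqueryId.isEmpty()) ? branchId : parentSubqueryId + "." + branchId;
}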