
Example 51 with ResponseContext

Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io.

From the class TimeBoundaryQueryRunnerTest, method testTimeBoundaryMin.

@Test
@SuppressWarnings("unchecked")
public void testTimeBoundaryMin() {
    // Build a time-boundary query bounded to MIN_TIME only.
    TimeBoundaryQuery timeBoundaryQuery = Druids.newTimeBoundaryQueryBuilder()
                                                .dataSource("testing")
                                                .bound(TimeBoundaryQuery.MIN_TIME)
                                                .build();
    ResponseContext context = ConcurrentResponseContext.createEmpty();
    context.initializeMissingSegments();
    Iterable<Result<TimeBoundaryResultValue>> results = runner.run(QueryPlus.wrap(timeBoundaryQuery), context).toList();
    TimeBoundaryResultValue val = results.iterator().next().getValue();
    DateTime minTime = val.getMinTime();
    DateTime maxTime = val.getMaxTime();
    Assert.assertEquals(DateTimes.of("2011-01-12T00:00:00.000Z"), minTime);
    // Only MIN_TIME was requested, so the max side of the boundary is absent.
    Assert.assertNull(maxTime);
}
Also used: ResponseContext(org.apache.druid.query.context.ResponseContext) ConcurrentResponseContext(org.apache.druid.query.context.ConcurrentResponseContext) DateTime(org.joda.time.DateTime) Result(org.apache.druid.query.Result) Test(org.junit.Test)
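
The test initializes the missing-segments entry up front; a minimal follow-up sketch of how that entry could be inspected after the run, assuming ResponseContext exposes a getMissingSegments() accessor to pair with initializeMissingSegments() (verify against your Druid version):

// Hypothetical follow-up check, not part of the original test.
// getMissingSegments() is an assumed accessor; imports assumed:
// java.util.List and org.apache.druid.query.SegmentDescriptor.
List<SegmentDescriptor> missingSegments = context.getMissingSegments();
Assert.assertTrue(missingSegments == null || missingSegments.isEmpty());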

Example 52 with ResponseContext

Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io.

From the class SearchQueryRunnerTest, method testSearchWithCardinality.

@Test
public void testSearchWithCardinality() {
    final SearchQuery searchQuery = Druids.newSearchQueryBuilder()
                                          .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
                                          .granularity(QueryRunnerTestHelper.ALL_GRAN)
                                          .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
                                          .query("a")
                                          .build();
    // Run the query over two sub-intervals and concatenate the partial results,
    // so mergeResults has to combine the per-hit counts across both runs.
    QueryRunner mergedRunner = TOOL_CHEST.mergeResults(new QueryRunner<Result<SearchResultValue>>() {

        @Override
        public Sequence<Result<SearchResultValue>> run(QueryPlus<Result<SearchResultValue>> queryPlus, ResponseContext responseContext) {
            final QueryPlus<Result<SearchResultValue>> queryPlus1 = queryPlus.withQuery(
                queryPlus.getQuery().withQuerySegmentSpec(
                    new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-01-12/2011-02-28")))));
            final QueryPlus<Result<SearchResultValue>> queryPlus2 = queryPlus.withQuery(
                queryPlus.getQuery().withQuerySegmentSpec(
                    new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-03-01/2011-04-15")))));
            return Sequences.concat(runner.run(queryPlus1, responseContext), runner.run(queryPlus2, responseContext));
        }
    });
    List<SearchHit> expectedHits = new ArrayList<>();
    expectedHits.add(new SearchHit(QueryRunnerTestHelper.QUALITY_DIMENSION, "automotive", 91));
    expectedHits.add(new SearchHit(QueryRunnerTestHelper.QUALITY_DIMENSION, "mezzanine", 273));
    expectedHits.add(new SearchHit(QueryRunnerTestHelper.QUALITY_DIMENSION, "travel", 91));
    expectedHits.add(new SearchHit(QueryRunnerTestHelper.QUALITY_DIMENSION, "health", 91));
    expectedHits.add(new SearchHit(QueryRunnerTestHelper.QUALITY_DIMENSION, "entertainment", 91));
    expectedHits.add(new SearchHit(QueryRunnerTestHelper.MARKET_DIMENSION, "total_market", 182));
    expectedHits.add(new SearchHit(QueryRunnerTestHelper.PLACEMENTISH_DIMENSION, "a", 91));
    expectedHits.add(new SearchHit(QueryRunnerTestHelper.PARTIAL_NULL_DIMENSION, "value", 182));
    checkSearchQuery(searchQuery, mergedRunner, expectedHits);
}
Also used: ArrayList(java.util.ArrayList) MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec) Sequence(org.apache.druid.java.util.common.guava.Sequence) QueryRunner(org.apache.druid.query.QueryRunner) Result(org.apache.druid.query.Result) ResponseContext(org.apache.druid.query.context.ResponseContext) QueryPlus(org.apache.druid.query.QueryPlus) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
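
QueryRunner declares a single abstract run method, so the anonymous class above can plausibly be collapsed into a lambda; a sketch under that assumption (generics may need adjustment to compile against a specific Druid version):

// Same split-and-concat runner as above, written as a lambda. Assumes
// QueryRunner can serve as a functional-interface target in this version.
QueryRunner<Result<SearchResultValue>> mergedRunner = TOOL_CHEST.mergeResults(
    (queryPlus, responseContext) -> Sequences.concat(
        runner.run(queryPlus.withQuery(queryPlus.getQuery().withQuerySegmentSpec(
            new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-01-12/2011-02-28"))))), responseContext),
        runner.run(queryPlus.withQuery(queryPlus.getQuery().withQuerySegmentSpec(
            new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-03-01/2011-04-15"))))), responseContext)));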

Example 53 with ResponseContext

Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io.

From the class SpecificSegmentQueryRunnerTest, method testRetry2.

@SuppressWarnings("unchecked")
@Test
public void testRetry2() throws Exception {
    final ObjectMapper mapper = new DefaultObjectMapper();
    SegmentDescriptor descriptor = new SegmentDescriptor(Intervals.of("2012-01-01T00:00:00Z/P1D"), "version", 0);
    TimeseriesResultBuilder builder = new TimeseriesResultBuilder(DateTimes.of("2012-01-01T00:00:00Z"));
    CountAggregator rows = new CountAggregator();
    rows.aggregate();
    builder.addMetric("rows", rows.get());
    final Result<TimeseriesResultValue> value = builder.build();
    // Wrap a runner that yields one valid row and then throws SegmentMissingException
    // once the sequence is consumed, simulating a segment that disappears mid-read.
    final SpecificSegmentQueryRunner queryRunner = new SpecificSegmentQueryRunner(new QueryRunner() {

        @Override
        public Sequence run(QueryPlus queryPlus, ResponseContext responseContext) {
            return Sequences.withEffect(Sequences.simple(Collections.singletonList(value)), new Runnable() {

                @Override
                public void run() {
                    throw new SegmentMissingException("FAILSAUCE");
                }
            }, Execs.directExecutor());
        }
    }, new SpecificSegmentSpec(descriptor));
    final ResponseContext responseContext = ResponseContext.createEmpty();
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                  .dataSource("foo")
                                  .granularity(Granularities.ALL)
                                  .intervals(ImmutableList.of(Intervals.of("2012-01-01T00:00:00Z/P1D")))
                                  .aggregators(ImmutableList.of(new CountAggregatorFactory("rows")))
                                  .build();
    Sequence results = queryRunner.run(QueryPlus.wrap(query), responseContext);
    List<Result<TimeseriesResultValue>> res = results.toList();
    Assert.assertEquals(1, res.size());
    Result<TimeseriesResultValue> theVal = res.get(0);
    Assert.assertTrue(1L == theVal.getValue().getLongMetric("rows"));
    // validate(...) is a helper of this test class, not shown here; see the sketch after this example.
    validate(mapper, descriptor, responseContext);
}
Also used: TimeseriesResultValue(org.apache.druid.query.timeseries.TimeseriesResultValue) TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) TimeseriesResultBuilder(org.apache.druid.query.timeseries.TimeseriesResultBuilder) SegmentMissingException(org.apache.druid.segment.SegmentMissingException) Sequence(org.apache.druid.java.util.common.guava.Sequence) QueryRunner(org.apache.druid.query.QueryRunner) Result(org.apache.druid.query.Result) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) SegmentDescriptor(org.apache.druid.query.SegmentDescriptor) CountAggregator(org.apache.druid.query.aggregation.CountAggregator) ResponseContext(org.apache.druid.query.context.ResponseContext) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) QueryPlus(org.apache.druid.query.QueryPlus) Test(org.junit.Test)
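
The validate helper is defined elsewhere in SpecificSegmentQueryRunnerTest; a plausible reconstruction of what it asserts, assuming the failed segment is recorded under a missing-segments entry in the ResponseContext (the accessor name and the JSON round trip are assumptions, not the verbatim Druid code):

// Hypothetical sketch of the test's validate(...) helper.
private void validate(ObjectMapper mapper, SegmentDescriptor descriptor, ResponseContext responseContext) throws Exception {
    // The failed segment should have been recorded by SpecificSegmentQueryRunner;
    // getMissingSegments() is an assumed accessor.
    Assert.assertEquals(Collections.singletonList(descriptor), responseContext.getMissingSegments());
    // The descriptor should survive a JSON round trip, which is why the mapper is passed in.
    SegmentDescriptor roundTripped = mapper.readValue(mapper.writeValueAsString(descriptor), SegmentDescriptor.class);
    Assert.assertEquals(descriptor, roundTripped);
}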

Example 54 with ResponseContext

Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io.

From the class TimeSeriesUnionQueryRunnerTest, method testUnionResultMerging.

@Test
public void testUnionResultMerging() {
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                  .dataSource(new UnionDataSource(Lists.newArrayList(new TableDataSource("ds1"), new TableDataSource("ds2"))))
                                  .granularity(QueryRunnerTestHelper.DAY_GRAN)
                                  .intervals(QueryRunnerTestHelper.FIRST_TO_THIRD)
                                  .aggregators(Arrays.asList(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index")))
                                  .descending(descending)
                                  .build();
    QueryToolChest toolChest = new TimeseriesQueryQueryToolChest();
    final List<Result<TimeseriesResultValue>> ds1 = Lists.newArrayList(
        new Result<>(DateTimes.of("2011-04-02"), new TimeseriesResultValue(ImmutableMap.of("rows", 1L, "idx", 2L))),
        new Result<>(DateTimes.of("2011-04-03"), new TimeseriesResultValue(ImmutableMap.of("rows", 3L, "idx", 4L)))
    );
    final List<Result<TimeseriesResultValue>> ds2 = Lists.newArrayList(
        new Result<>(DateTimes.of("2011-04-01"), new TimeseriesResultValue(ImmutableMap.of("rows", 5L, "idx", 6L))),
        new Result<>(DateTimes.of("2011-04-02"), new TimeseriesResultValue(ImmutableMap.of("rows", 7L, "idx", 8L))),
        new Result<>(DateTimes.of("2011-04-04"), new TimeseriesResultValue(ImmutableMap.of("rows", 9L, "idx", 10L)))
    );
    // Route each per-table subquery of the union to its canned result list.
    QueryRunner mergingrunner = toolChest.mergeResults(new UnionQueryRunner<>(new QueryRunner<Result<TimeseriesResultValue>>() {

        @Override
        public Sequence<Result<TimeseriesResultValue>> run(QueryPlus<Result<TimeseriesResultValue>> queryPlus, ResponseContext responseContext) {
            if (queryPlus.getQuery().getDataSource().equals(new TableDataSource("ds1"))) {
                return Sequences.simple(descending ? Lists.reverse(ds1) : ds1);
            } else {
                return Sequences.simple(descending ? Lists.reverse(ds2) : ds2);
            }
        }
    }));
    List<Result<TimeseriesResultValue>> expectedResults = Arrays.asList(
        new Result<>(DateTimes.of("2011-04-01"), new TimeseriesResultValue(ImmutableMap.of("rows", 5L, "idx", 6L))),
        new Result<>(DateTimes.of("2011-04-02"), new TimeseriesResultValue(ImmutableMap.of("rows", 8L, "idx", 10L))),
        new Result<>(DateTimes.of("2011-04-03"), new TimeseriesResultValue(ImmutableMap.of("rows", 3L, "idx", 4L))),
        new Result<>(DateTimes.of("2011-04-04"), new TimeseriesResultValue(ImmutableMap.of("rows", 9L, "idx", 10L)))
    );
    Iterable<Result<TimeseriesResultValue>> results = mergingrunner.run(QueryPlus.wrap(query)).toList();
    assertExpectedResults(expectedResults, results);
}
Also used: LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) QueryToolChest(org.apache.druid.query.QueryToolChest) UnionDataSource(org.apache.druid.query.UnionDataSource) UnionQueryRunner(org.apache.druid.query.UnionQueryRunner) QueryRunner(org.apache.druid.query.QueryRunner) Result(org.apache.druid.query.Result) TableDataSource(org.apache.druid.query.TableDataSource) ResponseContext(org.apache.druid.query.context.ResponseContext) QueryPlus(org.apache.druid.query.QueryPlus) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
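
Note the merge arithmetic for 2011-04-02: ds1 contributes (rows 1, idx 2) and ds2 contributes (rows 7, idx 8), so the merged result carries (rows 8, idx 10); the other three dates appear in only one of the two tables and pass through unchanged.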

Example 55 with ResponseContext

Use of org.apache.druid.query.context.ResponseContext in project druid by druid-io.

From the class UnionQueryRunner, method run.

@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final ResponseContext responseContext) {
    Query<T> query = queryPlus.getQuery();
    final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
    if (analysis.isConcreteTableBased() && analysis.getBaseUnionDataSource().isPresent()) {
        // Union of tables.
        final UnionDataSource unionDataSource = analysis.getBaseUnionDataSource().get();
        if (unionDataSource.getDataSources().isEmpty()) {
            // Shouldn't happen, because UnionDataSource doesn't allow empty unions.
            throw new ISE("Unexpectedly received empty union");
        } else if (unionDataSource.getDataSources().size() == 1) {
            // Single table. Run as a normal query.
            return baseRunner.run(queryPlus.withQuery(Queries.withBaseDataSource(query, Iterables.getOnlyElement(unionDataSource.getDataSources()))), responseContext);
        } else {
            // Split up the tables and merge their results.
            return new MergeSequence<>(
                query.getResultOrdering(),
                Sequences.simple(
                    Lists.transform(
                        IntStream.range(0, unionDataSource.getDataSources().size())
                                 .mapToObj(i -> new Pair<>(unionDataSource.getDataSources().get(i), i + 1))
                                 .collect(Collectors.toList()),
                        (Function<Pair<TableDataSource, Integer>, Sequence<T>>) singleSourceWithIndex ->
                            baseRunner.run(
                                queryPlus.withQuery(
                                    Queries.withBaseDataSource(query, singleSourceWithIndex.lhs)
                                           .withSubQueryId(generateSubqueryId(
                                               query.getSubQueryId(),
                                               singleSourceWithIndex.lhs.getName(),
                                               singleSourceWithIndex.rhs))),
                                responseContext)
                    )
                )
            );
        }
    } else {
        // Not a union of tables. Do nothing special.
        return baseRunner.run(queryPlus, responseContext);
    }
}
Also used: IntStream(java.util.stream.IntStream) Sequence(org.apache.druid.java.util.common.guava.Sequence) DataSourceAnalysis(org.apache.druid.query.planning.DataSourceAnalysis) Iterables(com.google.common.collect.Iterables) StringUtils(org.apache.commons.lang.StringUtils) Function(com.google.common.base.Function) MergeSequence(org.apache.druid.java.util.common.guava.MergeSequence) ResponseContext(org.apache.druid.query.context.ResponseContext) ISE(org.apache.druid.java.util.common.ISE) Collectors(java.util.stream.Collectors) Pair(org.apache.druid.java.util.common.Pair) Lists(com.google.common.collect.Lists) Sequences(org.apache.druid.java.util.common.guava.Sequences)
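
generateSubqueryId is a private helper of UnionQueryRunner that is not reproduced above; a plausible sketch of its contract, consistent with the StringUtils import in the list above (the body below is an assumption, not the verbatim implementation):

// Hypothetical sketch: produce ids like "ds1.1" at the top level, or
// "parent.ds1.1" when the incoming query already carries a subquery id.
private String generateSubqueryId(String parentSubqueryId, String dataSourceName, int dataSourceIndex) {
    final String childId = dataSourceName + "." + dataSourceIndex;
    return StringUtils.isEmpty(parentSubqueryId) ? childId : parentSubqueryId + "." + childId;
}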

Aggregations

ResponseContext (org.apache.druid.query.context.ResponseContext): 65
Test (org.junit.Test): 44
QueryRunner (org.apache.druid.query.QueryRunner): 39
QueryPlus (org.apache.druid.query.QueryPlus): 35
Sequence (org.apache.druid.java.util.common.guava.Sequence): 30
MultipleIntervalSegmentSpec (org.apache.druid.query.spec.MultipleIntervalSegmentSpec): 26
FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner): 23
DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec): 22
Interval (org.joda.time.Interval): 19
LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory): 16
ArrayList (java.util.ArrayList): 14
MergeSequence (org.apache.druid.java.util.common.guava.MergeSequence): 14
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest): 14
Result (org.apache.druid.query.Result): 12
DefaultLimitSpec (org.apache.druid.query.groupby.orderby.DefaultLimitSpec): 12
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory): 11
List (java.util.List): 10
ChainedExecutionQueryRunner (org.apache.druid.query.ChainedExecutionQueryRunner): 10
SegmentDescriptor (org.apache.druid.query.SegmentDescriptor): 10
OrderByColumnSpec (org.apache.druid.query.groupby.orderby.OrderByColumnSpec): 10