
Example 31 with QueryRunner

Use of org.apache.druid.query.QueryRunner in project druid by druid-io.

From the class CachingClusteredClientTest, the method testTimeseriesMergingOutOfOrderPartitions:

@Test
public void testTimeseriesMergingOutOfOrderPartitions() {
    final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
        .dataSource(DATA_SOURCE)
        .intervals(SEG_SPEC)
        .filters(DIM_FILTER)
        .granularity(GRANULARITY)
        .aggregators(AGGS)
        .postAggregators(POST_AGGS)
        .context(CONTEXT);
    QueryRunner runner = new FinalizeResultsQueryRunner(getDefaultQueryRunner(), new TimeseriesQueryQueryToolChest());
    testQueryCaching(
        runner,
        builder.randomQueryId().build(),
        Intervals.of("2011-01-05/2011-01-10"),
        makeTimeResults(
            DateTimes.of("2011-01-05T02"), 80, 100,
            DateTimes.of("2011-01-06T02"), 420, 520,
            DateTimes.of("2011-01-07T02"), 12, 2194,
            DateTimes.of("2011-01-08T02"), 59, 201,
            DateTimes.of("2011-01-09T02"), 181, 52
        ),
        Intervals.of("2011-01-05/2011-01-10"),
        makeTimeResults(
            DateTimes.of("2011-01-05T00"), 85, 102,
            DateTimes.of("2011-01-06T00"), 412, 521,
            DateTimes.of("2011-01-07T00"), 122, 21894,
            DateTimes.of("2011-01-08T00"), 5, 20,
            DateTimes.of("2011-01-09T00"), 18, 521
        )
    );
    TimeseriesQuery query = builder
        .intervals("2011-01-05/2011-01-10")
        .aggregators(RENAMED_AGGS)
        .postAggregators(RENAMED_POST_AGGS)
        .randomQueryId()
        .build();
    TestHelper.assertExpectedResults(
        makeRenamedTimeResults(
            DateTimes.of("2011-01-05T00"), 85, 102,
            DateTimes.of("2011-01-05T02"), 80, 100,
            DateTimes.of("2011-01-06T00"), 412, 521,
            DateTimes.of("2011-01-06T02"), 420, 520,
            DateTimes.of("2011-01-07T00"), 122, 21894,
            DateTimes.of("2011-01-07T02"), 12, 2194,
            DateTimes.of("2011-01-08T00"), 5, 20,
            DateTimes.of("2011-01-08T02"), 59, 201,
            DateTimes.of("2011-01-09T00"), 18, 521,
            DateTimes.of("2011-01-09T02"), 181, 52
        ),
        runner.run(QueryPlus.wrap(query))
    );
}
Also used : FinalizeResultsQueryRunner(org.apache.druid.query.FinalizeResultsQueryRunner) TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) Druids(org.apache.druid.query.Druids) TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest) QueryRunner(org.apache.druid.query.QueryRunner) Test(org.junit.Test)
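
All of these timeseries examples share one execution pattern: wrap a base runner in FinalizeResultsQueryRunner so the toolchest finalizes aggregators and applies post-aggregators, then run the query via QueryPlus.wrap and consume the lazy Sequence. Below is a minimal sketch of just that flow, assuming the caller supplies a base runner and a built TimeseriesQuery (FinalizingRunnerSketch and runFinalized are illustrative names, not part of Druid):

import java.util.List;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.query.FinalizeResultsQueryRunner;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.Result;
import org.apache.druid.query.timeseries.TimeseriesQuery;
import org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest;
import org.apache.druid.query.timeseries.TimeseriesResultValue;

class FinalizingRunnerSketch {

    // Raw types mirror the tests above; the wrapper finalizes aggregators and
    // applies post-aggregators before results reach the caller.
    @SuppressWarnings({"unchecked", "rawtypes"})
    static List<Result<TimeseriesResultValue>> runFinalized(QueryRunner baseRunner, TimeseriesQuery query) {
        QueryRunner runner = new FinalizeResultsQueryRunner(baseRunner, new TimeseriesQueryQueryToolChest());
        Sequence<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query));
        // Sequences are lazy; toList() is what actually drives the query.
        return results.toList();
    }
}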

Example 32 with QueryRunner

Use of org.apache.druid.query.QueryRunner in project druid by druid-io.

From the class CachingClusteredClientPerfTest, the method testGetQueryRunnerForSegments_singleIntervalLargeSegments:

@Test(timeout = 10_000)
public void testGetQueryRunnerForSegments_singleIntervalLargeSegments() {
    final int segmentCount = 30_000;
    final Interval interval = Intervals.of("2021-02-13/2021-02-14");
    final List<SegmentDescriptor> segmentDescriptors = new ArrayList<>(segmentCount);
    final List<DataSegment> dataSegments = new ArrayList<>(segmentCount);
    final VersionedIntervalTimeline<String, ServerSelector> timeline = new VersionedIntervalTimeline<>(Ordering.natural());
    final DruidServer server = new DruidServer("server", "localhost:9000", null, Long.MAX_VALUE, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_PRIORITY);
    for (int ii = 0; ii < segmentCount; ii++) {
        segmentDescriptors.add(new SegmentDescriptor(interval, "1", ii));
        DataSegment segment = makeDataSegment("test", interval, "1", ii);
        dataSegments.add(segment);
    }
    timeline.addAll(Iterators.transform(dataSegments.iterator(), segment -> {
        ServerSelector ss = new ServerSelector(segment, new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
        ss.addServerAndUpdateSegment(new QueryableDruidServer(server, new MockQueryRunner()), segment);
        return new VersionedIntervalTimeline.PartitionChunkEntry<>(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(ss));
    }));
    TimelineServerView serverView = Mockito.mock(TimelineServerView.class);
    QueryScheduler queryScheduler = Mockito.mock(QueryScheduler.class);
    // mock scheduler to return same sequence as argument
    Mockito.when(queryScheduler.run(any(), any())).thenAnswer(i -> i.getArgument(1));
    Mockito.when(queryScheduler.prioritizeAndLaneQuery(any(), any())).thenAnswer(i -> ((QueryPlus) i.getArgument(0)).getQuery());
    Mockito.doReturn(Optional.of(timeline)).when(serverView).getTimeline(any());
    Mockito.doReturn(new MockQueryRunner()).when(serverView).getQueryRunner(any());
    CachingClusteredClient cachingClusteredClient = new CachingClusteredClient(
        new MockQueryToolChestWareHouse(),
        serverView,
        MapCache.create(1024),
        TestHelper.makeJsonMapper(),
        Mockito.mock(CachePopulator.class),
        new CacheConfig(),
        Mockito.mock(DruidHttpClientConfig.class),
        Mockito.mock(DruidProcessingConfig.class),
        ForkJoinPool.commonPool(),
        queryScheduler,
        NoopJoinableFactory.INSTANCE,
        new NoopServiceEmitter()
    );
    Query<SegmentDescriptor> fakeQuery = makeFakeQuery(interval);
    QueryRunner<SegmentDescriptor> queryRunner = cachingClusteredClient.getQueryRunnerForSegments(fakeQuery, segmentDescriptors);
    Sequence<SegmentDescriptor> sequence = queryRunner.run(QueryPlus.wrap(fakeQuery));
    Assert.assertEquals(segmentDescriptors, sequence.toList());
}
Also used : QueryPlus(org.apache.druid.query.QueryPlus) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) QueryRunner(org.apache.druid.query.QueryRunner) QueryToolChestWarehouse(org.apache.druid.query.QueryToolChestWarehouse) NoopJoinableFactory(org.apache.druid.segment.join.NoopJoinableFactory) Sequence(org.apache.druid.java.util.common.guava.Sequence) QueryScheduler(org.apache.druid.server.QueryScheduler) ImmutableMap(com.google.common.collect.ImmutableMap) DataSource(org.apache.druid.query.DataSource) CacheConfig(org.apache.druid.client.cache.CacheConfig) DruidProcessingConfig(org.apache.druid.query.DruidProcessingConfig) QuerySegmentSpec(org.apache.druid.query.spec.QuerySegmentSpec) DruidHttpClientConfig(org.apache.druid.guice.http.DruidHttpClientConfig) List(java.util.List) DimFilter(org.apache.druid.query.filter.DimFilter) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) DataSegment(org.apache.druid.timeline.DataSegment) ServerManagerTest(org.apache.druid.server.coordination.ServerManagerTest) Optional(java.util.Optional) QueryableDruidServer(org.apache.druid.client.selector.QueryableDruidServer) MapCache(org.apache.druid.client.cache.MapCache) ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) HighestPriorityTierSelectorStrategy(org.apache.druid.client.selector.HighestPriorityTierSelectorStrategy) Intervals(org.apache.druid.java.util.common.Intervals) TestSequence(org.apache.druid.java.util.common.guava.TestSequence) BaseQuery(org.apache.druid.query.BaseQuery) Iterators(com.google.common.collect.Iterators) ArrayList(java.util.ArrayList) MultipleSpecificSegmentSpec(org.apache.druid.query.spec.MultipleSpecificSegmentSpec) ServerSelector(org.apache.druid.client.selector.ServerSelector) Interval(org.joda.time.Interval) Query(org.apache.druid.query.Query) MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec) CachePopulator(org.apache.druid.client.cache.CachePopulator) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ResponseContext(org.apache.druid.query.context.ResponseContext) QueryToolChest(org.apache.druid.query.QueryToolChest) Test(org.junit.Test) TableDataSource(org.apache.druid.query.TableDataSource) Mockito(org.mockito.Mockito) TestHelper(org.apache.druid.segment.TestHelper) RandomServerSelectorStrategy(org.apache.druid.client.selector.RandomServerSelectorStrategy) Ordering(com.google.common.collect.Ordering) ForkJoinPool(java.util.concurrent.ForkJoinPool) SegmentDescriptor(org.apache.druid.query.SegmentDescriptor) Assert(org.junit.Assert) Collections(java.util.Collections)
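
MockQueryRunner is referenced in this test but its body is not shown in the excerpt. A hypothetical stand-in that would satisfy the final assertion is a runner that simply echoes back the SegmentDescriptors the rewritten query targets (the class name and the BaseQuery cast below are assumptions, not the real implementation):

import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;
import org.apache.druid.query.BaseQuery;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.SegmentDescriptor;
import org.apache.druid.query.context.ResponseContext;
import org.apache.druid.query.spec.MultipleSpecificSegmentSpec;

class EchoSegmentsQueryRunner implements QueryRunner<SegmentDescriptor> {

    @Override
    public Sequence<SegmentDescriptor> run(QueryPlus<SegmentDescriptor> queryPlus, ResponseContext responseContext) {
        // CachingClusteredClient rewrites the per-server query with a
        // MultipleSpecificSegmentSpec naming the exact segments to read;
        // echo those descriptors back as the result rows.
        MultipleSpecificSegmentSpec spec =
            (MultipleSpecificSegmentSpec) ((BaseQuery<SegmentDescriptor>) queryPlus.getQuery()).getQuerySegmentSpec();
        return Sequences.simple(spec.getDescriptors());
    }
}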

Example 33 with QueryRunner

Use of org.apache.druid.query.QueryRunner in project druid by druid-io.

From the class CachingClusteredClientTest, the method testOutOfOrderBackgroundCachePopulation:

@Test
public void testOutOfOrderBackgroundCachePopulation() {
    // The purpose of this special executor is to randomize the execution order of
    // submitted tasks. Since we don't know up front how many tasks will be submitted,
    // a special DrainTask is used to trigger the actual execution when we are ready
    // to shuffle the order.
    abstract class DrainTask implements Runnable {
    }
    final ForwardingListeningExecutorService randomizingExecutorService = new ForwardingListeningExecutorService() {

        final ConcurrentLinkedDeque<Pair<SettableFuture, Object>> taskQueue = new ConcurrentLinkedDeque<>();

        final ListeningExecutorService delegate = MoreExecutors.listeningDecorator(
            // Run queued tasks on the calling thread so that all background cache
            // population tasks are complete before moving on to the next query run.
            Execs.directExecutor());

        @Override
        protected ListeningExecutorService delegate() {
            return delegate;
        }

        private <T> ListenableFuture<T> maybeSubmitTask(Object task, boolean wait) {
            if (wait) {
                SettableFuture<T> future = SettableFuture.create();
                taskQueue.addFirst(Pair.of(future, task));
                return future;
            } else {
                List<Pair<SettableFuture, Object>> tasks = Lists.newArrayList(taskQueue.iterator());
                Collections.shuffle(tasks, new Random(0));
                for (final Pair<SettableFuture, Object> pair : tasks) {
                    ListenableFuture future = pair.rhs instanceof Callable ? delegate.submit((Callable) pair.rhs) : delegate.submit((Runnable) pair.rhs);
                    Futures.addCallback(future, new FutureCallback() {

                        @Override
                        public void onSuccess(@Nullable Object result) {
                            pair.lhs.set(result);
                        }

                        @Override
                        public void onFailure(Throwable t) {
                            pair.lhs.setException(t);
                        }
                    });
                }
            }
            return task instanceof Callable ? delegate.submit((Callable) task) : (ListenableFuture<T>) delegate.submit((Runnable) task);
        }

        @SuppressWarnings("ParameterPackage")
        @Override
        public <T> ListenableFuture<T> submit(Callable<T> task) {
            return maybeSubmitTask(task, true);
        }

        @Override
        public ListenableFuture<?> submit(Runnable task) {
            if (task instanceof DrainTask) {
                return maybeSubmitTask(task, false);
            } else {
                return maybeSubmitTask(task, true);
            }
        }
    };
    client = makeClient(new BackgroundCachePopulator(randomizingExecutorService, JSON_MAPPER, new CachePopulatorStats(), -1));
    // callback to be run every time a query run is complete, to ensure all background
    // caching tasks are executed, and cache is populated before we move onto the next query
    queryCompletedCallback = new Runnable() {

        @Override
        public void run() {
            try {
                randomizingExecutorService.submit(new DrainTask() {

                    @Override
                    public void run() {
                    // no-op
                    }
                }).get();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    };
    final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
        .dataSource(DATA_SOURCE)
        .intervals(SEG_SPEC)
        .filters(DIM_FILTER)
        .granularity(GRANULARITY)
        .aggregators(AGGS)
        .postAggregators(POST_AGGS)
        .context(CONTEXT)
        .randomQueryId();
    QueryRunner runner = new FinalizeResultsQueryRunner(getDefaultQueryRunner(), new TimeseriesQueryQueryToolChest());
    testQueryCaching(
        runner,
        builder.build(),
        Intervals.of("2011-01-05/2011-01-10"),
        makeTimeResults(
            DateTimes.of("2011-01-05"), 85, 102,
            DateTimes.of("2011-01-06"), 412, 521,
            DateTimes.of("2011-01-07"), 122, 21894,
            DateTimes.of("2011-01-08"), 5, 20,
            DateTimes.of("2011-01-09"), 18, 521
        ),
        Intervals.of("2011-01-10/2011-01-13"),
        makeTimeResults(
            DateTimes.of("2011-01-10"), 85, 102,
            DateTimes.of("2011-01-11"), 412, 521,
            DateTimes.of("2011-01-12"), 122, 21894
        )
    );
}
Also used : SettableFuture(com.google.common.util.concurrent.SettableFuture) ForwardingListeningExecutorService(com.google.common.util.concurrent.ForwardingListeningExecutorService) TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest) Callable(java.util.concurrent.Callable) Random(java.util.Random) CachePopulatorStats(org.apache.druid.client.cache.CachePopulatorStats) Druids(org.apache.druid.query.Druids) FutureCallback(com.google.common.util.concurrent.FutureCallback) Pair(org.apache.druid.java.util.common.Pair) BackgroundCachePopulator(org.apache.druid.client.cache.BackgroundCachePopulator) IOException(java.io.IOException) FinalizeResultsQueryRunner(org.apache.druid.query.FinalizeResultsQueryRunner) QueryRunner(org.apache.druid.query.QueryRunner) ConcurrentLinkedDeque(java.util.concurrent.ConcurrentLinkedDeque) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Test(org.junit.Test)
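
The DrainTask trick generalizes beyond this test: on a queue-backed executor, submitting a no-op marker task and blocking on its future guarantees that everything enqueued before it has finished. A self-contained sketch of that barrier idea, without the shuffling logic (class and variable names are illustrative):

import java.util.concurrent.Executors;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

class DrainBarrierSketch {

    public static void main(String[] args) throws Exception {
        ListeningExecutorService exec =
            MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        exec.submit(() -> System.out.println("background cache-population task"));
        // The marker task runs only after every task queued before it on this
        // single-threaded executor, so get() acts as a drain barrier.
        ListenableFuture<?> drain = exec.submit(() -> { });
        drain.get();
        exec.shutdown();
    }
}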

Example 34 with QueryRunner

Use of org.apache.druid.query.QueryRunner in project druid by druid-io.

From the class CachingClusteredClientTest, the method testNoSegmentPruningForHashPartitionedSegments:

private void testNoSegmentPruningForHashPartitionedSegments(boolean enableSegmentPruning, @Nullable HashPartitionFunction partitionFunction, boolean useEmptyPartitionDimensions) {
    DimFilter filter = new AndDimFilter(
        new SelectorDimFilter("dim1", "a", null),
        new BoundDimFilter("dim2", "e", "zzz", true, true, false, null, StringComparators.LEXICOGRAPHIC),
        // Equivalent filter of dim3 below is InDimFilter("dim3", Arrays.asList("c"), null)
        new AndDimFilter(
            new InDimFilter("dim3", Arrays.asList("a", "c", "e", "g"), null),
            new BoundDimFilter("dim3", "aaa", "ddd", false, false, false, null, StringComparators.LEXICOGRAPHIC)
        )
    );
    final Map<String, Object> context = new HashMap<>(CONTEXT);
    context.put(QueryContexts.SECONDARY_PARTITION_PRUNING_KEY, enableSegmentPruning);
    final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
        .dataSource(DATA_SOURCE)
        .filters(filter)
        .granularity(GRANULARITY)
        .intervals(SEG_SPEC)
        .intervals("2011-01-05/2011-01-10")
        .aggregators(RENAMED_AGGS)
        .postAggregators(RENAMED_POST_AGGS)
        .context(context)
        .randomQueryId();
    TimeseriesQuery query = builder.build();
    QueryRunner runner = new FinalizeResultsQueryRunner(getDefaultQueryRunner(), new TimeseriesQueryQueryToolChest());
    final Interval interval1 = Intervals.of("2011-01-06/2011-01-07");
    final Interval interval2 = Intervals.of("2011-01-07/2011-01-08");
    final Interval interval3 = Intervals.of("2011-01-08/2011-01-09");
    final DruidServer lastServer = servers[random.nextInt(servers.length)];
    List<String> partitionDimensions = useEmptyPartitionDimensions ? ImmutableList.of() : ImmutableList.of("dim1");
    final int numPartitions1 = 6;
    for (int i = 0; i < numPartitions1; i++) {
        ServerSelector selector = makeMockHashBasedSelector(lastServer, partitionDimensions, partitionFunction, i, numPartitions1);
        timeline.add(interval1, "v", new NumberedPartitionChunk<>(i, numPartitions1, selector));
    }
    partitionDimensions = useEmptyPartitionDimensions ? ImmutableList.of() : ImmutableList.of("dim2");
    final int numPartitions2 = 3;
    for (int i = 0; i < numPartitions2; i++) {
        ServerSelector selector = makeMockHashBasedSelector(lastServer, partitionDimensions, partitionFunction, i, numPartitions2);
        timeline.add(interval2, "v", new NumberedPartitionChunk<>(i, numPartitions2, selector));
    }
    partitionDimensions = useEmptyPartitionDimensions ? ImmutableList.of() : ImmutableList.of("dim1", "dim3");
    final int numPartitions3 = 4;
    for (int i = 0; i < numPartitions3; i++) {
        ServerSelector selector = makeMockHashBasedSelector(lastServer, partitionDimensions, partitionFunction, i, numPartitions3);
        timeline.add(interval3, "v", new NumberedPartitionChunk<>(i, numPartitions3, selector));
    }
    final Capture<QueryPlus> capture = Capture.newInstance();
    final Capture<ResponseContext> contextCap = Capture.newInstance();
    QueryRunner mockRunner = EasyMock.createNiceMock(QueryRunner.class);
    EasyMock.expect(mockRunner.run(EasyMock.capture(capture), EasyMock.capture(contextCap))).andReturn(Sequences.empty()).anyTimes();
    EasyMock.expect(serverView.getQueryRunner(lastServer)).andReturn(mockRunner).anyTimes();
    EasyMock.replay(serverView);
    EasyMock.replay(mockRunner);
    // Expected to read all segments
    Set<SegmentDescriptor> expectedDescriptors = new HashSet<>();
    IntStream.range(0, numPartitions1).forEach(i -> expectedDescriptors.add(new SegmentDescriptor(interval1, "v", i)));
    IntStream.range(0, numPartitions2).forEach(i -> expectedDescriptors.add(new SegmentDescriptor(interval2, "v", i)));
    IntStream.range(0, numPartitions3).forEach(i -> expectedDescriptors.add(new SegmentDescriptor(interval3, "v", i)));
    runner.run(QueryPlus.wrap(query)).toList();
    QuerySegmentSpec querySegmentSpec = ((TimeseriesQuery) capture.getValue().getQuery()).getQuerySegmentSpec();
    Assert.assertSame(MultipleSpecificSegmentSpec.class, querySegmentSpec.getClass());
    final Set<SegmentDescriptor> actualDescriptors = new HashSet<>(((MultipleSpecificSegmentSpec) querySegmentSpec).getDescriptors());
    Assert.assertEquals(expectedDescriptors, actualDescriptors);
}
Also used : BoundDimFilter(org.apache.druid.query.filter.BoundDimFilter) HashMap(java.util.HashMap) TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest) ServerSelector(org.apache.druid.client.selector.ServerSelector) SelectorDimFilter(org.apache.druid.query.filter.SelectorDimFilter) SegmentDescriptor(org.apache.druid.query.SegmentDescriptor) Druids(org.apache.druid.query.Druids) ResponseContext(org.apache.druid.query.context.ResponseContext) InDimFilter(org.apache.druid.query.filter.InDimFilter) QuerySegmentSpec(org.apache.druid.query.spec.QuerySegmentSpec) QueryPlus(org.apache.druid.query.QueryPlus) HashSet(java.util.HashSet) AndDimFilter(org.apache.druid.query.filter.AndDimFilter) TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) QueryableDruidServer(org.apache.druid.client.selector.QueryableDruidServer) FinalizeResultsQueryRunner(org.apache.druid.query.FinalizeResultsQueryRunner) QueryRunner(org.apache.druid.query.QueryRunner) DimFilter(org.apache.druid.query.filter.DimFilter) OrDimFilter(org.apache.druid.query.filter.OrDimFilter) Interval(org.joda.time.Interval)
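
The context key this test toggles is the per-query switch for secondary partition pruning; when it is false, the broker fans out to every partition chunk of each matching interval instead of pruning by the hash spec. A small sketch of building such a context (the helper class is illustrative, and to the best of my knowledge the flag defaults to enabled in recent Druid versions):

import java.util.HashMap;
import java.util.Map;
import org.apache.druid.query.QueryContexts;

class PruningContextSketch {

    static Map<String, Object> contextWithPruning(boolean enablePruning) {
        // Assumed to map to the "secondaryPartitionPruning" context key.
        Map<String, Object> context = new HashMap<>();
        context.put(QueryContexts.SECONDARY_PARTITION_PRUNING_KEY, enablePruning);
        return context;
    }
}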

Example 35 with QueryRunner

Use of org.apache.druid.query.QueryRunner in project druid by druid-io.

From the class TestHttpClient, the method go:

@Override
public <Intermediate, Final> ListenableFuture<Final> go(Request request, HttpResponseHandler<Intermediate, Final> handler, Duration readTimeout) {
    try {
        final Query query = objectMapper.readValue(request.getContent().array(), Query.class);
        final QueryRunner queryRunner = servers.get(request.getUrl()).getQueryRunner();
        if (queryRunner == null) {
            throw new ISE("Can't find queryRunner for url[%s]", request.getUrl());
        }
        final ResponseContext responseContext = ResponseContext.createEmpty();
        final Sequence sequence = queryRunner.run(QueryPlus.wrap(query), responseContext);
        final byte[] serializedContent;
        try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
            objectMapper.writeValue(baos, sequence);
            serializedContent = baos.toByteArray();
        }
        final ResponseContext.SerializationResult serializationResult = responseContext.serializeWith(objectMapper, RESPONSE_CTX_HEADER_LEN_LIMIT);
        final HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
        response.headers().add(QueryResource.HEADER_RESPONSE_CONTEXT, serializationResult.getResult());
        response.setContent(HeapChannelBufferFactory.getInstance().getBuffer(serializedContent, 0, serializedContent.length));
        final ClientResponse<Intermediate> intermClientResponse = handler.handleResponse(response, NOOP_TRAFFIC_COP);
        final ClientResponse<Final> finalClientResponse = handler.done(intermClientResponse);
        return Futures.immediateFuture(finalClientResponse.getObj());
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Also used : Query(org.apache.druid.query.Query) DefaultHttpResponse(org.jboss.netty.handler.codec.http.DefaultHttpResponse) HttpResponse(org.jboss.netty.handler.codec.http.HttpResponse) Sequence(org.apache.druid.java.util.common.guava.Sequence) ByteArrayOutputStream(java.io.ByteArrayOutputStream) IOException(java.io.IOException) QueryRunner(org.apache.druid.query.QueryRunner) ReportTimelineMissingSegmentQueryRunner(org.apache.druid.query.ReportTimelineMissingSegmentQueryRunner) ResponseContext(org.apache.druid.query.context.ResponseContext) ISE(org.apache.druid.java.util.common.ISE)
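
The interesting wrinkle in go() is the response-context header round-trip: serializeWith truncates the serialized context to a length budget so it fits in an HTTP header, and the receiving side deserializes it back. A sketch of just that round-trip (the 7 KiB budget is an arbitrary stand-in for RESPONSE_CTX_HEADER_LEN_LIMIT, which is not shown above, and ResponseContext.deserialize is the counterpart used by Druid's client code, to the best of my knowledge):

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.query.context.ResponseContext;

class ResponseContextRoundTripSketch {

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        ResponseContext ctx = ResponseContext.createEmpty();
        // Serialize under a header-length budget; oversized contexts get truncated.
        ResponseContext.SerializationResult result = ctx.serializeWith(mapper, 7 * 1024);
        // A client reading the response header reverses the step.
        ResponseContext restored = ResponseContext.deserialize(result.getResult(), mapper);
        System.out.println("header value: " + result.getResult());
    }
}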

Aggregations

QueryRunner (org.apache.druid.query.QueryRunner): 106
FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner): 76
Test (org.junit.Test): 69
ResponseContext (org.apache.druid.query.context.ResponseContext): 38
QueryPlus (org.apache.druid.query.QueryPlus): 36
ArrayList (java.util.ArrayList): 34
Result (org.apache.druid.query.Result): 33
LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory): 30
QueryToolChest (org.apache.druid.query.QueryToolChest): 28
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest): 28
DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec): 25
Sequence (org.apache.druid.java.util.common.guava.Sequence): 24
MultipleIntervalSegmentSpec (org.apache.druid.query.spec.MultipleIntervalSegmentSpec): 23
TimeseriesQuery (org.apache.druid.query.timeseries.TimeseriesQuery): 22
TimeseriesQueryQueryToolChest (org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest): 20
Query (org.apache.druid.query.Query): 18
TimeseriesResultValue (org.apache.druid.query.timeseries.TimeseriesResultValue): 17
BySegmentQueryRunner (org.apache.druid.query.BySegmentQueryRunner): 16
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory): 16
QueryableIndexSegment (org.apache.druid.segment.QueryableIndexSegment): 16