
Example 1 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

From the class ServerManagerTest, the setUp method:

@Before
public void setUp() {
    EmittingLogger.registerEmitter(new NoopServiceEmitter());
    queryWaitLatch = new CountDownLatch(1);
    queryWaitYieldLatch = new CountDownLatch(1);
    queryNotifyLatch = new CountDownLatch(1);
    factory = new MyQueryRunnerFactory(queryWaitLatch, queryWaitYieldLatch, queryNotifyLatch);
    serverManagerExec = Executors.newFixedThreadPool(2);
    segmentManager = new SegmentManager(new SegmentLoader() {

        @Override
        public ReferenceCountingSegment getSegment(final DataSegment segment, boolean lazy, SegmentLazyLoadFailCallback loadFailCallback) {
            // Return a stub segment built from the load spec; nothing is pulled from deep storage.
            return ReferenceCountingSegment.wrapSegment(
                new SegmentForTesting(
                    MapUtils.getString(segment.getLoadSpec(), "version"),
                    (Interval) segment.getLoadSpec().get("interval")
                ),
                segment.getShardSpec()
            );
        }

        @Override
        public void cleanup(DataSegment segment) {
        }
    });
    serverManager = new ServerManager(new QueryRunnerFactoryConglomerate() {

        @Override
        public <T, QueryType extends Query<T>> QueryRunnerFactory<T, QueryType> findFactory(QueryType query) {
            if (query instanceof SearchQuery) {
                return (QueryRunnerFactory) factory;
            } else {
                return null;
            }
        }
    },
            new NoopServiceEmitter(),
            new ForwardingQueryProcessingPool(serverManagerExec),
            new ForegroundCachePopulator(new DefaultObjectMapper(), new CachePopulatorStats(), -1),
            new DefaultObjectMapper(),
            new LocalCacheProvider().get(),
            new CacheConfig(),
            segmentManager,
            NoopJoinableFactory.INSTANCE,
            new ServerConfig());
    loadQueryable("test", "1", Intervals.of("P1d/2011-04-01"));
    loadQueryable("test", "1", Intervals.of("P1d/2011-04-02"));
    loadQueryable("test", "2", Intervals.of("P1d/2011-04-02"));
    loadQueryable("test", "1", Intervals.of("P1d/2011-04-03"));
    loadQueryable("test", "1", Intervals.of("P1d/2011-04-04"));
    loadQueryable("test", "1", Intervals.of("P1d/2011-04-05"));
    loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T01"));
    loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T02"));
    loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T03"));
    loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T05"));
    loadQueryable("test", "2", Intervals.of("PT1h/2011-04-04T06"));
    loadQueryable("test2", "1", Intervals.of("P1d/2011-04-01"));
    loadQueryable("test2", "1", Intervals.of("P1d/2011-04-02"));
}
Also used : SearchQuery(org.apache.druid.query.search.SearchQuery) SegmentManager(org.apache.druid.server.SegmentManager) BaseQuery(org.apache.druid.query.BaseQuery) Query(org.apache.druid.query.Query) SearchQuery(org.apache.druid.query.search.SearchQuery) ForwardingQueryProcessingPool(org.apache.druid.query.ForwardingQueryProcessingPool) SegmentLazyLoadFailCallback(org.apache.druid.segment.SegmentLazyLoadFailCallback) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) SegmentLoader(org.apache.druid.segment.loading.SegmentLoader) ServerConfig(org.apache.druid.server.initialization.ServerConfig) QueryRunnerFactoryConglomerate(org.apache.druid.query.QueryRunnerFactoryConglomerate) QueryRunnerFactory(org.apache.druid.query.QueryRunnerFactory) CachePopulatorStats(org.apache.druid.client.cache.CachePopulatorStats) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) LocalCacheProvider(org.apache.druid.client.cache.LocalCacheProvider) ForegroundCachePopulator(org.apache.druid.client.cache.ForegroundCachePopulator) CacheConfig(org.apache.druid.client.cache.CacheConfig) Interval(org.joda.time.Interval) Before(org.junit.Before)
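
The anonymous QueryRunnerFactoryConglomerate above hands out the test factory only for SearchQuery. A minimal sketch of the equivalent map-backed wiring, assuming the same factory field and the DefaultQueryRunnerFactoryConglomerate shown in Example 4 below (ImmutableMap and DefaultQueryRunnerFactoryConglomerate would need importing):

// Hedged sketch, not the test's code: map-backed lookup equivalent to the
// anonymous conglomerate passed to ServerManager in setUp().
QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
    ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>of(SearchQuery.class, factory));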

Example 2 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

From the class ServerManager, the buildAndDecorateQueryRunner method:

private <T> QueryRunner<T> buildAndDecorateQueryRunner(final QueryRunnerFactory<T, Query<T>> factory, final QueryToolChest<T, Query<T>> toolChest, final SegmentReference segment, final Optional<byte[]> cacheKeyPrefix, final SegmentDescriptor segmentDescriptor, final AtomicLong cpuTimeAccumulator) {
    final SpecificSegmentSpec segmentSpec = new SpecificSegmentSpec(segmentDescriptor);
    final SegmentId segmentId = segment.getId();
    final Interval segmentInterval = segment.getDataInterval();
    // If the segment is closed after this line, ReferenceCountingSegmentQueryRunner will handle and do the right thing.
    if (segmentId == null || segmentInterval == null) {
        return new ReportTimelineMissingSegmentQueryRunner<>(segmentDescriptor);
    }
    String segmentIdString = segmentId.toString();
    // Innermost: acquire the segment reference and report per-segment query time.
    MetricsEmittingQueryRunner<T> metricsEmittingQueryRunnerInner = new MetricsEmittingQueryRunner<>(
        emitter, toolChest, new ReferenceCountingSegmentQueryRunner<>(factory, segment, segmentDescriptor),
        QueryMetrics::reportSegmentTime, queryMetrics -> queryMetrics.segment(segmentIdString));
    StorageAdapter storageAdapter = segment.asStorageAdapter();
    long segmentMaxTime = storageAdapter.getMaxTime().getMillis();
    long segmentMinTime = storageAdapter.getMinTime().getMillis();
    Interval actualDataInterval = Intervals.utc(segmentMinTime, segmentMaxTime + 1);
    // Wrap with per-segment cache reads and writes.
    CachingQueryRunner<T> cachingQueryRunner = new CachingQueryRunner<>(
        segmentIdString, cacheKeyPrefix, segmentDescriptor, actualDataInterval,
        objectMapper, cache, toolChest, metricsEmittingQueryRunnerInner, cachePopulator, cacheConfig);
    BySegmentQueryRunner<T> bySegmentQueryRunner = new BySegmentQueryRunner<>(segmentId, segmentInterval.getStart(), cachingQueryRunner);
    // Outer metrics runner reports segment-and-cache time, with wait time measured from now.
    MetricsEmittingQueryRunner<T> metricsEmittingQueryRunnerOuter = new MetricsEmittingQueryRunner<>(
        emitter, toolChest, bySegmentQueryRunner,
        QueryMetrics::reportSegmentAndCacheTime, queryMetrics -> queryMetrics.segment(segmentIdString)).withWaitMeasuredFromNow();
    SpecificSegmentQueryRunner<T> specificSegmentQueryRunner = new SpecificSegmentQueryRunner<>(metricsEmittingQueryRunnerOuter, segmentSpec);
    PerSegmentOptimizingQueryRunner<T> perSegmentOptimizingQueryRunner = new PerSegmentOptimizingQueryRunner<>(
        specificSegmentQueryRunner, new PerSegmentQueryOptimizationContext(segmentDescriptor));
    // Outermost: verify query context settings and accumulate CPU time.
    return new SetAndVerifyContextQueryRunner<>(
        serverConfig,
        CPUTimeMetricQueryRunner.safeBuild(perSegmentOptimizingQueryRunner, toolChest, emitter, cpuTimeAccumulator, false));
}
Also used : SegmentManager(org.apache.druid.server.SegmentManager) Inject(com.google.inject.Inject) Smile(org.apache.druid.guice.annotations.Smile) QueryProcessingPool(org.apache.druid.query.QueryProcessingPool) StorageAdapter(org.apache.druid.segment.StorageAdapter) NoopQueryRunner(org.apache.druid.query.NoopQueryRunner) SegmentReference(org.apache.druid.segment.SegmentReference) SpecificSegmentQueryRunner(org.apache.druid.query.spec.SpecificSegmentQueryRunner) QueryRunner(org.apache.druid.query.QueryRunner) FinalizeResultsQueryRunner(org.apache.druid.query.FinalizeResultsQueryRunner) ReportTimelineMissingSegmentQueryRunner(org.apache.druid.query.ReportTimelineMissingSegmentQueryRunner) CacheConfig(org.apache.druid.client.cache.CacheConfig) StringUtils(org.apache.druid.java.util.common.StringUtils) JoinableFactoryWrapper(org.apache.druid.segment.join.JoinableFactoryWrapper) ISE(org.apache.druid.java.util.common.ISE) SpecificSegmentSpec(org.apache.druid.query.spec.SpecificSegmentSpec) BySegmentQueryRunner(org.apache.druid.query.BySegmentQueryRunner) SetAndVerifyContextQueryRunner(org.apache.druid.server.SetAndVerifyContextQueryRunner) QueryDataSource(org.apache.druid.query.QueryDataSource) PerSegmentQueryOptimizationContext(org.apache.druid.query.PerSegmentQueryOptimizationContext) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) Optional(java.util.Optional) PerSegmentOptimizingQueryRunner(org.apache.druid.query.PerSegmentOptimizingQueryRunner) FunctionalIterable(org.apache.druid.java.util.common.guava.FunctionalIterable) SegmentId(org.apache.druid.timeline.SegmentId) DataSourceAnalysis(org.apache.druid.query.planning.DataSourceAnalysis) Intervals(org.apache.druid.java.util.common.Intervals) QueryMetrics(org.apache.druid.query.QueryMetrics) CachingQueryRunner(org.apache.druid.client.CachingQueryRunner) JoinableFactory(org.apache.druid.segment.join.JoinableFactory) Function(java.util.function.Function) PartitionChunk(org.apache.druid.timeline.partition.PartitionChunk) Interval(org.joda.time.Interval) Lists(com.google.common.collect.Lists) MetricsEmittingQueryRunner(org.apache.druid.query.MetricsEmittingQueryRunner) Query(org.apache.druid.query.Query) CachePopulator(org.apache.druid.client.cache.CachePopulator) QuerySegmentWalker(org.apache.druid.query.QuerySegmentWalker) EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ServerConfig(org.apache.druid.server.initialization.ServerConfig) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) QueryRunnerFactoryConglomerate(org.apache.druid.query.QueryRunnerFactoryConglomerate) QueryToolChest(org.apache.druid.query.QueryToolChest) ReferenceCountingSegment(org.apache.druid.segment.ReferenceCountingSegment) AtomicLong(java.util.concurrent.atomic.AtomicLong) ReferenceCountingSegmentQueryRunner(org.apache.druid.query.ReferenceCountingSegmentQueryRunner) QueryRunnerFactory(org.apache.druid.query.QueryRunnerFactory) SegmentDescriptor(org.apache.druid.query.SegmentDescriptor) Cache(org.apache.druid.client.cache.Cache) Filters(org.apache.druid.segment.filter.Filters) Collections(java.util.Collections) CPUTimeMetricQueryRunner(org.apache.druid.query.CPUTimeMetricQueryRunner) QueryUnsupportedException(org.apache.druid.query.QueryUnsupportedException) SegmentId(org.apache.druid.timeline.SegmentId) StorageAdapter(org.apache.druid.segment.StorageAdapter) BySegmentQueryRunner(org.apache.druid.query.BySegmentQueryRunner) 
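
ServerManager exposes the decorated runner chain above through the QuerySegmentWalker interface. A minimal sketch of the caller side, assuming a ServerManager and a SearchQuery built elsewhere (the descriptor values and the helper name are illustrative, not Druid code; imports come from org.apache.druid.query, org.apache.druid.query.search and org.apache.druid.query.context):

// Hedged sketch: getQueryRunnerForSegments() walks the timeline and wraps each
// requested segment with the chain built in buildAndDecorateQueryRunner().
static Sequence<Result<SearchResultValue>> runOneSegment(ServerManager serverManager, SearchQuery query) {
    SegmentDescriptor descriptor = new SegmentDescriptor(Intervals.of("2011-04-01/2011-04-02"), "1", 0);
    QueryRunner<Result<SearchResultValue>> runner =
        serverManager.getQueryRunnerForSegments(query, Collections.singletonList(descriptor));
    return runner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
}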

Example 3 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

From the class DumpSegmentTest, the testExecuteQuery method:

@Test
public void testExecuteQuery() {
    Injector injector = Mockito.mock(Injector.class);
    QueryRunnerFactoryConglomerate conglomerate = Mockito.mock(QueryRunnerFactoryConglomerate.class);
    QueryRunnerFactory factory = Mockito.mock(QueryRunnerFactory.class, Mockito.RETURNS_DEEP_STUBS);
    QueryRunner runner = Mockito.mock(QueryRunner.class);
    QueryRunner mergeRunner = Mockito.mock(QueryRunner.class);
    Query query = Mockito.mock(Query.class);
    Sequence expected = Sequences.simple(Collections.singletonList(123));
    Mockito.when(injector.getInstance(QueryRunnerFactoryConglomerate.class)).thenReturn(conglomerate);
    Mockito.when(conglomerate.findFactory(ArgumentMatchers.any())).thenReturn(factory);
    Mockito.when(factory.createRunner(ArgumentMatchers.any())).thenReturn(runner);
    Mockito.when(factory.getToolchest().mergeResults(factory.mergeRunners(DirectQueryProcessingPool.INSTANCE, ImmutableList.of(runner)))).thenReturn(mergeRunner);
    Mockito.when(mergeRunner.run(ArgumentMatchers.any(), ArgumentMatchers.any())).thenReturn(expected);
    Sequence actual = DumpSegment.executeQuery(injector, null, query);
    Assert.assertSame(expected, actual);
}
Also used : QueryRunnerFactoryConglomerate(org.apache.druid.query.QueryRunnerFactoryConglomerate) QueryRunnerFactory(org.apache.druid.query.QueryRunnerFactory) Query(org.apache.druid.query.Query) Injector(com.google.inject.Injector) Sequence(org.apache.druid.java.util.common.guava.Sequence) QueryRunner(org.apache.druid.query.QueryRunner) Test(org.junit.Test)
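
The mocked calls above mirror the real path in DumpSegment.executeQuery: resolve the factory from the conglomerate, create a runner for one segment, then merge through the factory's toolchest. A minimal standalone sketch of that pattern (the class and method names here are illustrative, not Druid's):

import com.google.common.collect.ImmutableList;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.query.DirectQueryProcessingPool;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryPlus;
import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
import org.apache.druid.query.QueryRunnerFactoryConglomerate;
import org.apache.druid.query.context.ResponseContext;
import org.apache.druid.segment.Segment;

public final class SingleSegmentQueryExample {

    // Resolve the factory for the query type, wrap one segment in a runner,
    // merge through the factory's toolchest, and run the query.
    public static <T> Sequence<T> run(QueryRunnerFactoryConglomerate conglomerate, Query<T> query, Segment segment) {
        final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
        final QueryRunner<T> perSegment = factory.createRunner(segment);
        final QueryRunner<T> merged = factory.getToolchest().mergeResults(
            factory.mergeRunners(DirectQueryProcessingPool.INSTANCE, ImmutableList.of(perSegment)));
        return merged.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
    }
}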

Example 4 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

From the class CachingClusteredClientBenchmark, the setup method:

@Setup(Level.Trial)
public void setup() {
    final String schemaName = "basic";
    parallelCombine = parallelism > 0;
    GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
    Map<DataSegment, QueryableIndex> queryableIndexes = Maps.newHashMapWithExpectedSize(numServers);
    for (int i = 0; i < numServers; i++) {
        final DataSegment dataSegment = DataSegment.builder()
            .dataSource(DATA_SOURCE)
            .interval(schemaInfo.getDataInterval())
            .version("1")
            .shardSpec(new LinearShardSpec(i))
            .size(0)
            .build();
        final SegmentGenerator segmentGenerator = closer.register(new SegmentGenerator());
        LOG.info("Starting benchmark setup using cacheDir[%s], rows[%,d].", segmentGenerator.getCacheDir(), rowsPerSegment);
        final QueryableIndex index = segmentGenerator.generate(dataSegment, schemaInfo, Granularities.NONE, rowsPerSegment);
        queryableIndexes.put(dataSegment, index);
    }
    final DruidProcessingConfig processingConfig = new DruidProcessingConfig() {

        @Override
        public String getFormatString() {
            return null;
        }

        @Override
        public int intermediateComputeSizeBytes() {
            return PROCESSING_BUFFER_SIZE;
        }

        @Override
        public int getNumMergeBuffers() {
            return 1;
        }

        @Override
        public int getNumThreads() {
            return numProcessingThreads;
        }

        @Override
        public boolean useParallelMergePool() {
            return true;
        }
    };
    conglomerate = new DefaultQueryRunnerFactoryConglomerate(
        ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>builder()
            .put(
                TimeseriesQuery.class,
                new TimeseriesQueryRunnerFactory(
                    new TimeseriesQueryQueryToolChest(),
                    new TimeseriesQueryEngine(),
                    QueryRunnerTestHelper.NOOP_QUERYWATCHER))
            .put(
                TopNQuery.class,
                new TopNQueryRunnerFactory(
                    new StupidPool<>("TopNQueryRunnerFactory-bufferPool", () -> ByteBuffer.allocate(PROCESSING_BUFFER_SIZE)),
                    new TopNQueryQueryToolChest(new TopNQueryConfig()),
                    QueryRunnerTestHelper.NOOP_QUERYWATCHER))
            .put(
                GroupByQuery.class,
                makeGroupByQueryRunnerFactory(
                    GroupByQueryRunnerTest.DEFAULT_MAPPER,
                    new GroupByQueryConfig() {
                        @Override
                        public String getDefaultStrategy() {
                            return GroupByStrategySelector.STRATEGY_V2;
                        }
                    },
                    processingConfig))
            .build());
    toolChestWarehouse = new QueryToolChestWarehouse() {

        @Override
        public <T, QueryType extends Query<T>> QueryToolChest<T, QueryType> getToolChest(final QueryType query) {
            return conglomerate.findFactory(query).getToolchest();
        }
    };
    SimpleServerView serverView = new SimpleServerView();
    int serverSuffix = 1;
    for (Entry<DataSegment, QueryableIndex> entry : queryableIndexes.entrySet()) {
        serverView.addServer(createServer(serverSuffix++), entry.getKey(), entry.getValue());
    }
    processingPool = Execs.multiThreaded(processingConfig.getNumThreads(), "caching-clustered-client-benchmark");
    forkJoinPool = new ForkJoinPool((int) Math.ceil(Runtime.getRuntime().availableProcessors() * 0.75), ForkJoinPool.defaultForkJoinWorkerThreadFactory, null, true);
    cachingClusteredClient = new CachingClusteredClient(
        toolChestWarehouse,
        serverView,
        MapCache.create(0),
        JSON_MAPPER,
        new ForegroundCachePopulator(JSON_MAPPER, new CachePopulatorStats(), 0),
        new CacheConfig(),
        new DruidHttpClientConfig(),
        processingConfig,
        forkJoinPool,
        QueryStackTests.DEFAULT_NOOP_SCHEDULER,
        new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
        new NoopServiceEmitter());
}
Also used : TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) TopNQuery(org.apache.druid.query.topn.TopNQuery) Query(org.apache.druid.query.Query) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest) TopNQueryQueryToolChest(org.apache.druid.query.topn.TopNQueryQueryToolChest) GroupByQueryQueryToolChest(org.apache.druid.query.groupby.GroupByQueryQueryToolChest) TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest) QueryToolChest(org.apache.druid.query.QueryToolChest) DataSegment(org.apache.druid.timeline.DataSegment) DruidHttpClientConfig(org.apache.druid.guice.http.DruidHttpClientConfig) SegmentGenerator(org.apache.druid.segment.generator.SegmentGenerator) TimeseriesQueryEngine(org.apache.druid.query.timeseries.TimeseriesQueryEngine) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) CachePopulatorStats(org.apache.druid.client.cache.CachePopulatorStats) TopNQueryRunnerFactory(org.apache.druid.query.topn.TopNQueryRunnerFactory) TopNQueryQueryToolChest(org.apache.druid.query.topn.TopNQueryQueryToolChest) QueryToolChestWarehouse(org.apache.druid.query.QueryToolChestWarehouse) CacheConfig(org.apache.druid.client.cache.CacheConfig) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) CachingClusteredClient(org.apache.druid.client.CachingClusteredClient) TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) GeneratorSchemaInfo(org.apache.druid.segment.generator.GeneratorSchemaInfo) DefaultQueryRunnerFactoryConglomerate(org.apache.druid.query.DefaultQueryRunnerFactoryConglomerate) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) TimeseriesQueryRunnerFactory(org.apache.druid.query.timeseries.TimeseriesQueryRunnerFactory) TopNQueryRunnerFactory(org.apache.druid.query.topn.TopNQueryRunnerFactory) QueryRunnerFactory(org.apache.druid.query.QueryRunnerFactory) GroupByQueryRunnerFactory(org.apache.druid.query.groupby.GroupByQueryRunnerFactory) TimeseriesQueryRunnerFactory(org.apache.druid.query.timeseries.TimeseriesQueryRunnerFactory) TopNQueryConfig(org.apache.druid.query.topn.TopNQueryConfig) QueryableIndex(org.apache.druid.segment.QueryableIndex) StupidPool(org.apache.druid.collections.StupidPool) DruidProcessingConfig(org.apache.druid.query.DruidProcessingConfig) ForegroundCachePopulator(org.apache.druid.client.cache.ForegroundCachePopulator) ForkJoinPool(java.util.concurrent.ForkJoinPool) Setup(org.openjdk.jmh.annotations.Setup)
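
Once wired, cachingClusteredClient is a QuerySegmentWalker, so the benchmark's query methods reduce to something like the following hedged sketch. The query shape is illustrative only; Druids, CountAggregatorFactory, Result, TimeseriesResultValue, Collections and ResponseContext are assumed extra imports:

// Hedged sketch, not the benchmark's code: run an illustrative timeseries
// query through the CachingClusteredClient built in setup().
TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
    .dataSource(DATA_SOURCE)
    .intervals("2000/2030")
    .granularity(Granularities.ALL)
    .aggregators(Collections.singletonList(new CountAggregatorFactory("rows")))
    .build();
Sequence<Result<TimeseriesResultValue>> results =
    QueryPlus.wrap(query).run(cachingClusteredClient, ResponseContext.createEmpty());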

Example 5 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

From the class TopNQueryQueryToolChestTest, the testMinTopNThreshold method:

@Test
public void testMinTopNThreshold() {
    TopNQueryConfig config = new TopNQueryConfig();
    final TopNQueryQueryToolChest chest = new TopNQueryQueryToolChest(config);
    try (CloseableStupidPool<ByteBuffer> pool = TestQueryRunners.createDefaultNonBlockingPool()) {
        QueryRunnerFactory factory = new TopNQueryRunnerFactory(pool, chest, QueryRunnerTestHelper.NOOP_QUERYWATCHER);
        QueryRunner<Result<TopNResultValue>> runner = QueryRunnerTestHelper.makeQueryRunner(factory, new IncrementalIndexSegment(TestIndex.getIncrementalTestIndex(), SEGMENT_ID), null);
        Map<String, Object> context = new HashMap<>();
        context.put("minTopNThreshold", 500);
        TopNQueryBuilder builder = new TopNQueryBuilder()
            .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
            .granularity(QueryRunnerTestHelper.ALL_GRAN)
            .dimension(QueryRunnerTestHelper.PLACEMENTISH_DIMENSION)
            .metric(QueryRunnerTestHelper.INDEX_METRIC)
            .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
            .aggregators(QueryRunnerTestHelper.COMMON_DOUBLE_AGGREGATORS);
        TopNQuery query1 = builder.threshold(10).context(null).build();
        MockQueryRunner mockRunner = new MockQueryRunner(runner);
        new TopNQueryQueryToolChest.ThresholdAdjustingQueryRunner(mockRunner, config).run(QueryPlus.wrap(query1));
        Assert.assertEquals(1000, mockRunner.query.getThreshold());
        TopNQuery query2 = builder.threshold(10).context(context).build();
        new TopNQueryQueryToolChest.ThresholdAdjustingQueryRunner(mockRunner, config).run(QueryPlus.wrap(query2));
        Assert.assertEquals(500, mockRunner.query.getThreshold());
        TopNQuery query3 = builder.threshold(2000).context(context).build();
        new TopNQueryQueryToolChest.ThresholdAdjustingQueryRunner(mockRunner, config).run(QueryPlus.wrap(query3));
        Assert.assertEquals(2000, mockRunner.query.getThreshold());
    }
}
Also used : IncrementalIndexSegment(org.apache.druid.segment.IncrementalIndexSegment) HashMap(java.util.HashMap) SerializablePairLongString(org.apache.druid.query.aggregation.SerializablePairLongString) ByteBuffer(java.nio.ByteBuffer) Result(org.apache.druid.query.Result) QueryRunnerFactory(org.apache.druid.query.QueryRunnerFactory) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
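
The 1000 asserted for query1 is simply TopNQueryConfig's built-in default: with no "minTopNThreshold" in the query context, ThresholdAdjustingQueryRunner falls back to the config value. A hedged one-liner to make that explicit (getMinTopNThreshold() is the config getter):

// Hedged sketch: where the 1000 in the first assertion comes from.
Assert.assertEquals(1000, new TopNQueryConfig().getMinTopNThreshold());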

Aggregations

QueryRunnerFactory (org.apache.druid.query.QueryRunnerFactory): 14
Query (org.apache.druid.query.Query): 7
QueryRunner (org.apache.druid.query.QueryRunner): 6
QueryRunnerFactoryConglomerate (org.apache.druid.query.QueryRunnerFactoryConglomerate): 6
IncrementalIndexSegment (org.apache.druid.segment.IncrementalIndexSegment): 6
Interval (org.joda.time.Interval): 6
ArrayList (java.util.ArrayList): 5
FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner): 5
QueryToolChest (org.apache.druid.query.QueryToolChest): 5
Result (org.apache.druid.query.Result): 5
Test (org.junit.Test): 5
AtomicLong (java.util.concurrent.atomic.AtomicLong): 4
Function (java.util.function.Function): 4
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory): 4
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest): 4
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 3
Optional (java.util.Optional): 3
CacheConfig (org.apache.druid.client.cache.CacheConfig): 3
SegmentDescriptor (org.apache.druid.query.SegmentDescriptor): 3
AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory): 3