Search in sources:

Example 6 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

The class SearchQueryRunnerTest, method testSearchWithNullValueInDimension.

@Test
public void testSearchWithNullValueInDimension() throws Exception {
    IncrementalIndex index = new OnheapIncrementalIndex.Builder()
        .setIndexSchema(
            new IncrementalIndexSchema.Builder()
                .withMinTimestamp(DateTimes.of("2011-01-12T00:00:00.000Z").getMillis())
                .build())
        .setMaxRowCount(10)
        .build();
    index.add(new MapBasedInputRow(1481871600000L, Arrays.asList("name", "host"), ImmutableMap.of("name", "name1", "host", "host")));
    index.add(new MapBasedInputRow(1481871670000L, Arrays.asList("name", "table"), ImmutableMap.of("name", "name2", "table", "table")));
    SearchQuery searchQuery = Druids.newSearchQueryBuilder()
        .dimensions(new DefaultDimensionSpec("table", "table"))
        .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
        .granularity(QueryRunnerTestHelper.ALL_GRAN)
        .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
        .context(ImmutableMap.of("searchStrategy", "cursorOnly"))
        .build();
    QueryRunnerFactory factory = new SearchQueryRunnerFactory(SELECTOR, TOOL_CHEST, QueryRunnerTestHelper.NOOP_QUERYWATCHER);
    QueryRunner runner = factory.createRunner(new QueryableIndexSegment(TestIndex.persistRealtimeAndLoadMMapped(index), SegmentId.dummy("asdf")));
    List<SearchHit> expectedHits = new ArrayList<>();
    expectedHits.add(new SearchHit("table", "table", 1));
    expectedHits.add(new SearchHit("table", NullHandling.defaultStringValue(), 1));
    checkSearchQuery(searchQuery, runner, expectedHits);
}
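
The second expected hit exists because the first row never supplies the "table" dimension, so the cursor-only search strategy sees a null value for it. Below is a small self-contained sketch of that effect; the timestamp and dimension names are copied from the test above, and the empty-list result for a missing dimension is our reading of MapBasedInputRow, not something this page states.

import java.util.Arrays;

import com.google.common.collect.ImmutableMap;
import org.apache.druid.data.input.MapBasedInputRow;

public class MissingDimensionSketch {
    public static void main(String[] args) {
        // Same shape as the first row in the test: no "table" key at all.
        MapBasedInputRow row = new MapBasedInputRow(
                1481871600000L,
                Arrays.asList("name", "host"),
                ImmutableMap.of("name", "name1", "host", "host"));
        // A dimension the row lacks comes back empty; the search query
        // surfaces that as NullHandling.defaultStringValue().
        System.out.println(row.getDimension("table"));
    }
}
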
Also used: QueryableIndexSegment(org.apache.druid.segment.QueryableIndexSegment) IncrementalIndex(org.apache.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(org.apache.druid.segment.incremental.OnheapIncrementalIndex) ArrayList(java.util.ArrayList) DefaultDimensionSpec(org.apache.druid.query.dimension.DefaultDimensionSpec) QueryRunner(org.apache.druid.query.QueryRunner) QueryRunnerFactory(org.apache.druid.query.QueryRunnerFactory) MapBasedInputRow(org.apache.druid.data.input.MapBasedInputRow) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)

Example 7 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

The class TopNQueryRunnerBenchmark, method setUp.

@BeforeClass
public static void setUp() {
    QueryRunnerFactory factory = new TopNQueryRunnerFactory(new StupidPool<ByteBuffer>("TopNQueryRunnerFactory-directBufferPool", new Supplier<ByteBuffer>() {

        @Override
        public ByteBuffer get() {
            // Instead of causing a circular dependency, we simply mimic its behavior
            return ByteBuffer.allocateDirect(2000);
        }
    }), new TopNQueryQueryToolChest(new TopNQueryConfig()), QueryRunnerTestHelper.NOOP_QUERYWATCHER);
    TEST_CASE_MAP.put(
        TestCases.rtIndex,
        QueryRunnerTestHelper.makeQueryRunner(
            factory, new IncrementalIndexSegment(TestIndex.getIncrementalTestIndex(), SEGMENT_ID), null));
    TEST_CASE_MAP.put(
        TestCases.mMappedTestIndex,
        QueryRunnerTestHelper.makeQueryRunner(
            factory, new QueryableIndexSegment(TestIndex.getMMappedTestIndex(), SEGMENT_ID), null));
    TEST_CASE_MAP.put(
        TestCases.mergedRealtimeIndex,
        QueryRunnerTestHelper.makeQueryRunner(
            factory, new QueryableIndexSegment(TestIndex.mergedRealtimeIndex(), SEGMENT_ID), null));
// Thread.sleep(10000);
}
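
Once setUp has populated TEST_CASE_MAP, a benchmark round amounts to building a TopN query and draining the Sequence the chosen runner returns. A hedged sketch of such a round follows; the TopNQueryBuilder chain uses the same builder style as the examples above, while MARKET_DIMENSION, INDEX_METRIC, and COMMON_DOUBLE_AGGREGATORS are assumed QueryRunnerTestHelper constants rather than names taken from this snippet.

TopNQuery query = new TopNQueryBuilder()
        .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
        .granularity(QueryRunnerTestHelper.ALL_GRAN)
        .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
        .dimension(QueryRunnerTestHelper.MARKET_DIMENSION)            // assumed helper constant
        .metric(QueryRunnerTestHelper.INDEX_METRIC)                   // assumed helper constant
        .threshold(4)
        .aggregators(QueryRunnerTestHelper.COMMON_DOUBLE_AGGREGATORS) // assumed helper constant
        .build();
// Run against the realtime-index case registered in setUp().
List<Result<TopNResultValue>> results =
        TEST_CASE_MAP.get(TestCases.rtIndex).run(QueryPlus.wrap(query)).toList();
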
Also used: QueryableIndexSegment(org.apache.druid.segment.QueryableIndexSegment) QueryRunnerFactory(org.apache.druid.query.QueryRunnerFactory) IncrementalIndexSegment(org.apache.druid.segment.IncrementalIndexSegment) Supplier(com.google.common.base.Supplier) ByteBuffer(java.nio.ByteBuffer) BeforeClass(org.junit.BeforeClass)

Example 8 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

The class TimeseriesQueryRunnerBonusTest, method runTimeseriesCount.

private List<Result<TimeseriesResultValue>> runTimeseriesCount(IncrementalIndex index) {
    final QueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(new TimeseriesQueryQueryToolChest(), new TimeseriesQueryEngine(), QueryRunnerTestHelper.NOOP_QUERYWATCHER);
    final QueryRunner<Result<TimeseriesResultValue>> runner = makeQueryRunner(factory, new IncrementalIndexSegment(index, SegmentId.dummy("ds")));
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
        .dataSource("xxx")
        .granularity(Granularities.ALL)
        .intervals(ImmutableList.of(Intervals.of("2012-01-01T00:00:00Z/P1D")))
        .aggregators(new CountAggregatorFactory("rows"))
        .descending(descending)
        .build();
    return runner.run(QueryPlus.wrap(query)).toList();
}
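
A hedged sketch of how a caller might exercise runTimeseriesCount: build a one-row on-heap index the same way Example 6 does, then confirm the count query sees exactly one row. The sibling test in this class does essentially this row by row; the assertions here are illustrative, not copied from it.

IncrementalIndex oneRowIndex = new OnheapIncrementalIndex.Builder()
        .setIndexSchema(
            new IncrementalIndexSchema.Builder()
                .withMinTimestamp(DateTimes.of("2012-01-01T00:00:00Z").getMillis())
                .build())
        .setMaxRowCount(1000)
        .build();
oneRowIndex.add(new MapBasedInputRow(
        DateTimes.of("2012-01-01T00:00:00Z").getMillis(),
        ImmutableList.of("dim1"),
        ImmutableMap.of("dim1", "x")));
List<Result<TimeseriesResultValue>> results = runTimeseriesCount(oneRowIndex);
Assert.assertEquals("result size", 1, results.size());
Assert.assertEquals("result count", 1L,
        (long) results.get(0).getValue().getLongMetric("rows"));
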
Also used: QueryRunnerFactory(org.apache.druid.query.QueryRunnerFactory) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) IncrementalIndexSegment(org.apache.druid.segment.IncrementalIndexSegment) Result(org.apache.druid.query.Result)

Example 9 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

The class OnheapIncrementalIndexBenchmark, method testConcurrentAddRead.

@Ignore
@Test
@BenchmarkOptions(callgc = true, clock = Clock.REAL_TIME, warmupRounds = 10, benchmarkRounds = 20)
public void testConcurrentAddRead() throws InterruptedException, ExecutionException, NoSuchMethodException, IllegalAccessException, InvocationTargetException, InstantiationException {
    final int taskCount = 30;
    final int concurrentThreads = 3;
    final int elementsPerThread = 1 << 15;
    final IncrementalIndex incrementalIndex = this.incrementalIndex
        .getConstructor(
            IncrementalIndexSchema.class,
            boolean.class, boolean.class, boolean.class, boolean.class,
            int.class)
        .newInstance(
            new IncrementalIndexSchema.Builder().withMetrics(factories).build(),
            true, true, false, true,
            elementsPerThread * taskCount);
    final ArrayList<AggregatorFactory> queryAggregatorFactories = new ArrayList<>(DIMENSION_COUNT + 1);
    queryAggregatorFactories.add(new CountAggregatorFactory("rows"));
    for (int i = 0; i < DIMENSION_COUNT; ++i) {
        queryAggregatorFactories.add(new LongSumAggregatorFactory(StringUtils.format("sumResult%s", i), StringUtils.format("sumResult%s", i)));
        queryAggregatorFactories.add(new DoubleSumAggregatorFactory(StringUtils.format("doubleSumResult%s", i), StringUtils.format("doubleSumResult%s", i)));
    }
    final ListeningExecutorService indexExecutor = MoreExecutors.listeningDecorator(
        Executors.newFixedThreadPool(
            concurrentThreads,
            new ThreadFactoryBuilder()
                .setDaemon(false)
                .setNameFormat("index-executor-%d")
                .setPriority(Thread.MIN_PRIORITY)
                .build()));
    final ListeningExecutorService queryExecutor = MoreExecutors.listeningDecorator(
        Executors.newFixedThreadPool(
            concurrentThreads,
            new ThreadFactoryBuilder()
                .setDaemon(false)
                .setNameFormat("query-executor-%d")
                .build()));
    final long timestamp = System.currentTimeMillis();
    final Interval queryInterval = Intervals.of("1900-01-01T00:00:00Z/2900-01-01T00:00:00Z");
    final List<ListenableFuture<?>> indexFutures = new ArrayList<>();
    final List<ListenableFuture<?>> queryFutures = new ArrayList<>();
    final Segment incrementalIndexSegment = new IncrementalIndexSegment(incrementalIndex, null);
    final QueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(
        new TimeseriesQueryQueryToolChest(),
        new TimeseriesQueryEngine(),
        QueryRunnerTestHelper.NOOP_QUERYWATCHER);
    final AtomicInteger currentlyRunning = new AtomicInteger(0);
    final AtomicBoolean concurrentlyRan = new AtomicBoolean(false);
    final AtomicBoolean someoneRan = new AtomicBoolean(false);
    for (int j = 0; j < taskCount; j++) {
        indexFutures.add(indexExecutor.submit(new Runnable() {

            @Override
            public void run() {
                currentlyRunning.incrementAndGet();
                try {
                    for (int i = 0; i < elementsPerThread; i++) {
                        incrementalIndex.add(getLongRow(timestamp + i, 1, DIMENSION_COUNT));
                    }
                } catch (IndexSizeExceededException e) {
                    throw new RuntimeException(e);
                }
                currentlyRunning.decrementAndGet();
                someoneRan.set(true);
            }
        }));
        queryFutures.add(queryExecutor.submit(new Runnable() {

            @Override
            public void run() {
                QueryRunner<Result<TimeseriesResultValue>> runner = new FinalizeResultsQueryRunner<>(
                    factory.createRunner(incrementalIndexSegment),
                    factory.getToolchest());
                TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                    .dataSource("xxx")
                    .granularity(Granularities.ALL)
                    .intervals(ImmutableList.of(queryInterval))
                    .aggregators(queryAggregatorFactories)
                    .build();
                List<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();
                for (Result<TimeseriesResultValue> result : results) {
                    if (someoneRan.get()) {
                        Assert.assertTrue(result.getValue().getDoubleMetric("doubleSumResult0") > 0);
                    }
                }
                if (currentlyRunning.get() > 0) {
                    concurrentlyRan.set(true);
                }
            }
        }));
    }
    List<ListenableFuture<?>> allFutures = new ArrayList<>(queryFutures.size() + indexFutures.size());
    allFutures.addAll(queryFutures);
    allFutures.addAll(indexFutures);
    Futures.allAsList(allFutures).get();
    // Assert.assertTrue("Did not hit concurrency, please try again", concurrentlyRan.get());
    queryExecutor.shutdown();
    indexExecutor.shutdown();
    QueryRunner<Result<TimeseriesResultValue>> runner = new FinalizeResultsQueryRunner<>(
        factory.createRunner(incrementalIndexSegment),
        factory.getToolchest());
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
        .dataSource("xxx")
        .granularity(Granularities.ALL)
        .intervals(ImmutableList.of(queryInterval))
        .aggregators(queryAggregatorFactories)
        .build();
    List<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();
    final int expectedVal = elementsPerThread * taskCount;
    for (Result<TimeseriesResultValue> result : results) {
        Assert.assertEquals(elementsPerThread, result.getValue().getLongMetric("rows").intValue());
        for (int i = 0; i < DIMENSION_COUNT; ++i) {
            Assert.assertEquals(StringUtils.format("Failed long sum on dimension %d", i), expectedVal, result.getValue().getLongMetric(StringUtils.format("sumResult%s", i)).intValue());
            Assert.assertEquals(StringUtils.format("Failed double sum on dimension %d", i), expectedVal, result.getValue().getDoubleMetric(StringUtils.format("doubleSumResult%s", i)).intValue());
        }
    }
}
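
The benchmark relies on a getLongRow helper that is not shown on this page. A plausible reconstruction, inferred from the assertions above: every task writes the same (timestamp, dimensions) combinations with a per-metric contribution of rowID = 1, so rollup keeps the row count at elementsPerThread while the long and double sums climb to elementsPerThread * taskCount. The real implementation in OnheapIncrementalIndexBenchmark may differ in details.

// Assumed shape of the getLongRow helper referenced in the indexing loop.
private static MapBasedInputRow getLongRow(long timestamp, int rowID, int dimensionCount) {
    List<String> dimensionList = new ArrayList<>(dimensionCount);
    ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
    for (int i = 0; i < dimensionCount; i++) {
        String dimName = StringUtils.format("Dim_%d", i);
        dimensionList.add(dimName);
        // Identical values across tasks let the index roll rows up,
        // which is why "rows" stays at elementsPerThread.
        builder.put(dimName, (long) rowID);
    }
    return new MapBasedInputRow(timestamp, dimensionList, builder.build());
}
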
Also used: TimeseriesResultValue(org.apache.druid.query.timeseries.TimeseriesResultValue) IncrementalIndexSegment(org.apache.druid.segment.IncrementalIndexSegment) ArrayList(java.util.ArrayList) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) TimeseriesQueryQueryToolChest(org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest) Segment(org.apache.druid.segment.Segment) Result(org.apache.druid.query.Result) TimeseriesQueryEngine(org.apache.druid.query.timeseries.TimeseriesQueryEngine) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) TimeseriesQueryRunnerFactory(org.apache.druid.query.timeseries.TimeseriesQueryRunnerFactory) QueryRunnerFactory(org.apache.druid.query.QueryRunnerFactory) FinalizeResultsQueryRunner(org.apache.druid.query.FinalizeResultsQueryRunner) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Interval(org.joda.time.Interval) Ignore(org.junit.Ignore) Test(org.junit.Test) BenchmarkOptions(com.carrotsearch.junitbenchmarks.BenchmarkOptions)

Example 10 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

The class SinkQuerySegmentWalker, method getQueryRunnerForSegments.

@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(final Query<T> query, final Iterable<SegmentDescriptor> specs) {
    // We only handle one particular dataSource. Make sure that's what we have, then ignore from here on out.
    final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
    // Sanity check: make sure the query is based on the table we're meant to handle.
    if (!analysis.getBaseTableDataSource().filter(ds -> dataSource.equals(ds.getName())).isPresent()) {
        throw new ISE("Cannot handle datasource: %s", analysis.getDataSource());
    }
    final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
    if (factory == null) {
        throw new ISE("Unknown query type[%s].", query.getClass());
    }
    final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
    final boolean skipIncrementalSegment = query.getContextValue(CONTEXT_SKIP_INCREMENTAL_SEGMENT, false);
    final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);
    // Make sure this query type can handle the subquery, if present.
    if (analysis.isQuery() && !toolChest.canPerformSubquery(((QueryDataSource) analysis.getDataSource()).getQuery())) {
        throw new ISE("Cannot handle subquery: %s", analysis.getDataSource());
    }
    // segmentMapFn maps each base Segment into a joined Segment if necessary.
    final Function<SegmentReference, SegmentReference> segmentMapFn = joinableFactoryWrapper.createSegmentMapFn(
        analysis.getJoinBaseTableFilter().map(Filters::toFilter).orElse(null),
        analysis.getPreJoinableClauses(),
        cpuTimeAccumulator,
        analysis.getBaseQuery().orElse(query));
    // We compute the join cache key here itself so it doesn't need to be re-computed for every segment
    final Optional<byte[]> cacheKeyPrefix = analysis.isJoin()
        ? joinableFactoryWrapper.computeJoinDataSourceCacheKey(analysis)
        : Optional.of(StringUtils.EMPTY_BYTES);
    Iterable<QueryRunner<T>> perSegmentRunners = Iterables.transform(specs, descriptor -> {
        final PartitionChunk<Sink> chunk = sinkTimeline.findChunk(
            descriptor.getInterval(),
            descriptor.getVersion(),
            descriptor.getPartitionNumber());
        if (chunk == null) {
            return new ReportTimelineMissingSegmentQueryRunner<>(descriptor);
        }
        final Sink theSink = chunk.getObject();
        final SegmentId sinkSegmentId = theSink.getSegment().getId();
        Iterable<QueryRunner<T>> perHydrantRunners = new SinkQueryRunners<>(Iterables.transform(theSink, hydrant -> {
            // Hydrant might swap at any point, but if it's swapped at the start
            // then we know it's *definitely* swapped.
            final boolean hydrantDefinitelySwapped = hydrant.hasSwapped();
            if (skipIncrementalSegment && !hydrantDefinitelySwapped) {
                return new Pair<>(hydrant.getSegmentDataInterval(), new NoopQueryRunner<>());
            }
            // Prevent the underlying segment from swapping while it's being iterated
            final Optional<Pair<SegmentReference, Closeable>> maybeSegmentAndCloseable = hydrant.getSegmentForQuery(segmentMapFn);
            // if optional isn't present, we failed to acquire reference to the segment or any joinables
            if (!maybeSegmentAndCloseable.isPresent()) {
                return new Pair<>(hydrant.getSegmentDataInterval(), new ReportTimelineMissingSegmentQueryRunner<>(descriptor));
            }
            final Pair<SegmentReference, Closeable> segmentAndCloseable = maybeSegmentAndCloseable.get();
            try {
                QueryRunner<T> runner = factory.createRunner(segmentAndCloseable.lhs);
                // 1) Only use caching if data is immutable
                // 2) Hydrants are not the same between replicas, make sure cache is local
                if (hydrantDefinitelySwapped && cache.isLocal()) {
                    StorageAdapter storageAdapter = segmentAndCloseable.lhs.asStorageAdapter();
                    long segmentMinTime = storageAdapter.getMinTime().getMillis();
                    long segmentMaxTime = storageAdapter.getMaxTime().getMillis();
                    Interval actualDataInterval = Intervals.utc(segmentMinTime, segmentMaxTime + 1);
                    runner = new CachingQueryRunner<>(
                        makeHydrantCacheIdentifier(hydrant),
                        cacheKeyPrefix,
                        descriptor,
                        actualDataInterval,
                        objectMapper,
                        cache,
                        toolChest,
                        runner,
                        // Always populate in foreground regardless of config
                        new ForegroundCachePopulator(objectMapper, cachePopulatorStats, cacheConfig.getMaxEntrySize()),
                        cacheConfig);
                }
                // Make it always use Closeable to decrement()
                runner = QueryRunnerHelper.makeClosingQueryRunner(runner, segmentAndCloseable.rhs);
                return new Pair<>(segmentAndCloseable.lhs.getDataInterval(), runner);
            } catch (Throwable e) {
                throw CloseableUtils.closeAndWrapInCatch(e, segmentAndCloseable.rhs);
            }
        }));
        return new SpecificSegmentQueryRunner<>(
            withPerSinkMetrics(
                new BySegmentQueryRunner<>(
                    sinkSegmentId,
                    descriptor.getInterval().getStart(),
                    factory.mergeRunners(DirectQueryProcessingPool.INSTANCE, perHydrantRunners)),
                toolChest,
                sinkSegmentId,
                cpuTimeAccumulator),
            new SpecificSegmentSpec(descriptor));
    });
    final QueryRunner<T> mergedRunner = toolChest.mergeResults(factory.mergeRunners(queryProcessingPool, perSegmentRunners));
    return CPUTimeMetricQueryRunner.safeBuild(
        new FinalizeResultsQueryRunner<>(mergedRunner, toolChest),
        toolChest,
        emitter,
        cpuTimeAccumulator,
        true);
}
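
From the caller's side the contract is small: hand the walker the query plus the segment descriptors to scan, then run the returned runner. A hedged sketch of a direct invocation follows; walker and query are assumed to be in scope, and the interval, version, and partition number are purely illustrative (real callers normally reach this method through the query lifecycle rather than directly).

QueryRunner<Result<TimeseriesResultValue>> runner = walker.getQueryRunnerForSegments(
        query,
        ImmutableList.of(
            new SegmentDescriptor(Intervals.of("2020-01-01/2020-01-02"), "v1", 0)));
List<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();
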
Also used: DirectQueryProcessingPool(org.apache.druid.query.DirectQueryProcessingPool) QueryRunnerHelper(org.apache.druid.query.QueryRunnerHelper) QueryProcessingPool(org.apache.druid.query.QueryProcessingPool) ForegroundCachePopulator(org.apache.druid.client.cache.ForegroundCachePopulator) StorageAdapter(org.apache.druid.segment.StorageAdapter) Pair(org.apache.druid.java.util.common.Pair) NoopQueryRunner(org.apache.druid.query.NoopQueryRunner) SegmentReference(org.apache.druid.segment.SegmentReference) SpecificSegmentQueryRunner(org.apache.druid.query.spec.SpecificSegmentQueryRunner) QueryRunner(org.apache.druid.query.QueryRunner) FinalizeResultsQueryRunner(org.apache.druid.query.FinalizeResultsQueryRunner) ReportTimelineMissingSegmentQueryRunner(org.apache.druid.query.ReportTimelineMissingSegmentQueryRunner) CacheConfig(org.apache.druid.client.cache.CacheConfig) StringUtils(org.apache.druid.java.util.common.StringUtils) JoinableFactoryWrapper(org.apache.druid.segment.join.JoinableFactoryWrapper) ISE(org.apache.druid.java.util.common.ISE) SpecificSegmentSpec(org.apache.druid.query.spec.SpecificSegmentSpec) BySegmentQueryRunner(org.apache.druid.query.BySegmentQueryRunner) SinkQueryRunners(org.apache.druid.query.SinkQueryRunners) QueryDataSource(org.apache.druid.query.QueryDataSource) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) Optional(java.util.Optional) FunctionalIterable(org.apache.druid.java.util.common.guava.FunctionalIterable) SegmentId(org.apache.druid.timeline.SegmentId) DataSourceAnalysis(org.apache.druid.query.planning.DataSourceAnalysis) Iterables(com.google.common.collect.Iterables) Intervals(org.apache.druid.java.util.common.Intervals) QueryMetrics(org.apache.druid.query.QueryMetrics) CachingQueryRunner(org.apache.druid.client.CachingQueryRunner) JoinableFactory(org.apache.druid.segment.join.JoinableFactory) Function(java.util.function.Function) PartitionChunk(org.apache.druid.timeline.partition.PartitionChunk) Interval(org.joda.time.Interval) MetricsEmittingQueryRunner(org.apache.druid.query.MetricsEmittingQueryRunner) Query(org.apache.druid.query.Query) CachePopulatorStats(org.apache.druid.client.cache.CachePopulatorStats) Sink(org.apache.druid.segment.realtime.plumber.Sink) QuerySegmentWalker(org.apache.druid.query.QuerySegmentWalker) EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) QueryRunnerFactoryConglomerate(org.apache.druid.query.QueryRunnerFactoryConglomerate) QueryToolChest(org.apache.druid.query.QueryToolChest) FireHydrant(org.apache.druid.segment.realtime.FireHydrant) AtomicLong(java.util.concurrent.atomic.AtomicLong) QueryRunnerFactory(org.apache.druid.query.QueryRunnerFactory) Closeable(java.io.Closeable) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) SegmentDescriptor(org.apache.druid.query.SegmentDescriptor) Cache(org.apache.druid.client.cache.Cache) Filters(org.apache.druid.segment.filter.Filters) CloseableUtils(org.apache.druid.utils.CloseableUtils) CPUTimeMetricQueryRunner(org.apache.druid.query.CPUTimeMetricQueryRunner)

Aggregations

QueryRunnerFactory (org.apache.druid.query.QueryRunnerFactory): 14
Query (org.apache.druid.query.Query): 7
QueryRunner (org.apache.druid.query.QueryRunner): 6
QueryRunnerFactoryConglomerate (org.apache.druid.query.QueryRunnerFactoryConglomerate): 6
IncrementalIndexSegment (org.apache.druid.segment.IncrementalIndexSegment): 6
Interval (org.joda.time.Interval): 6
ArrayList (java.util.ArrayList): 5
FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner): 5
QueryToolChest (org.apache.druid.query.QueryToolChest): 5
Result (org.apache.druid.query.Result): 5
Test (org.junit.Test): 5
AtomicLong (java.util.concurrent.atomic.AtomicLong): 4
Function (java.util.function.Function): 4
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory): 4
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest): 4
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 3
Optional (java.util.Optional): 3
CacheConfig (org.apache.druid.client.cache.CacheConfig): 3
SegmentDescriptor (org.apache.druid.query.SegmentDescriptor): 3
AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory): 3