
Example 11 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

The class ServerManagerForQueryErrorTest, method buildQueryRunnerForSegment.

@Override
protected <T> QueryRunner<T> buildQueryRunnerForSegment(Query<T> query, SegmentDescriptor descriptor, QueryRunnerFactory<T, Query<T>> factory, QueryToolChest<T, Query<T>> toolChest, VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline, Function<SegmentReference, SegmentReference> segmentMapFn, AtomicLong cpuTimeAccumulator, Optional<byte[]> cacheKeyPrefix) {
    if (query.getContextBoolean(QUERY_RETRY_TEST_CONTEXT_KEY, false)) {
        final MutableBoolean isIgnoreSegment = new MutableBoolean(false);
        queryToIgnoredSegments.compute(query.getMostSpecificId(), (queryId, ignoredSegments) -> {
            if (ignoredSegments == null) {
                ignoredSegments = new HashSet<>();
            }
            if (ignoredSegments.size() < MAX_NUM_FALSE_MISSING_SEGMENTS_REPORTS) {
                ignoredSegments.add(descriptor);
                isIgnoreSegment.setTrue();
            }
            return ignoredSegments;
        });
        if (isIgnoreSegment.isTrue()) {
            LOG.info("Pretending I don't have segment[%s]", descriptor);
            return new ReportTimelineMissingSegmentQueryRunner<>(descriptor);
        }
    } else if (query.getContextBoolean(QUERY_TIMEOUT_TEST_CONTEXT_KEY, false)) {
        return (queryPlus, responseContext) -> new Sequence<T>() {

            @Override
            public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator) {
                throw new QueryTimeoutException("query timeout test");
            }

            @Override
            public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator) {
                throw new QueryTimeoutException("query timeout test");
            }
        };
    } else if (query.getContextBoolean(QUERY_CAPACITY_EXCEEDED_TEST_CONTEXT_KEY, false)) {
        return (queryPlus, responseContext) -> new Sequence<T>() {

            @Override
            public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator) {
                throw QueryCapacityExceededException.withErrorMessageAndResolvedHost("query capacity exceeded test");
            }

            @Override
            public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator) {
                throw QueryCapacityExceededException.withErrorMessageAndResolvedHost("query capacity exceeded test");
            }
        };
    } else if (query.getContextBoolean(QUERY_UNSUPPORTED_TEST_CONTEXT_KEY, false)) {
        return (queryPlus, responseContext) -> new Sequence<T>() {

            @Override
            public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator) {
                throw new QueryUnsupportedException("query unsupported test");
            }

            @Override
            public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator) {
                throw new QueryUnsupportedException("query unsupported test");
            }
        };
    } else if (query.getContextBoolean(RESOURCE_LIMIT_EXCEEDED_TEST_CONTEXT_KEY, false)) {
        return (queryPlus, responseContext) -> new Sequence<T>() {

            @Override
            public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator) {
                throw new ResourceLimitExceededException("resource limit exceeded test");
            }

            @Override
            public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator) {
                throw new ResourceLimitExceededException("resource limit exceeded test");
            }
        };
    } else if (query.getContextBoolean(QUERY_FAILURE_TEST_CONTEXT_KEY, false)) {
        return (queryPlus, responseContext) -> new Sequence<T>() {

            @Override
            public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator) {
                throw new RuntimeException("query failure test");
            }

            @Override
            public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator) {
                throw new RuntimeException("query failure test");
            }
        };
    }
    return super.buildQueryRunnerForSegment(query, descriptor, factory, toolChest, timeline, segmentMapFn, cpuTimeAccumulator, cacheKeyPrefix);
}
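
A note on how these branches are exercised: each failure path above is selected by a boolean query-context flag on the incoming query. The following is a minimal sketch, not taken from the Druid source, of how a test might build a query that triggers the missing-segment branch; it assumes the QUERY_RETRY_TEST_CONTEXT_KEY constant is accessible from the test, and the dataSource, interval, and aggregator are placeholders.

// Sketch only: carry the retry-test flag in the query context so that
// buildQueryRunnerForSegment returns a ReportTimelineMissingSegmentQueryRunner.
// Accessibility of QUERY_RETRY_TEST_CONTEXT_KEY is an assumption.
// Assumed imports: com.google.common.collect.ImmutableList / ImmutableMap,
// org.apache.druid.query.Druids, org.apache.druid.java.util.common.Intervals.
TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
    .dataSource("xxx")
    .intervals(ImmutableList.of(Intervals.of("2000/2030")))
    .granularity(Granularities.ALL)
    .aggregators(ImmutableList.of(new CountAggregatorFactory("rows")))
    .context(ImmutableMap.of(ServerManagerForQueryErrorTest.QUERY_RETRY_TEST_CONTEXT_KEY, true))
    .build();
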
Also used: Logger (org.apache.druid.java.util.common.logger.Logger), SegmentManager (org.apache.druid.server.SegmentManager), Inject (com.google.inject.Inject), Smile (org.apache.druid.guice.annotations.Smile), QueryProcessingPool (org.apache.druid.query.QueryProcessingPool), JoinableFactory (org.apache.druid.segment.join.JoinableFactory), Function (java.util.function.Function), QueryCapacityExceededException (org.apache.druid.query.QueryCapacityExceededException), HashSet (java.util.HashSet), SegmentReference (org.apache.druid.segment.SegmentReference), Query (org.apache.druid.query.Query), QueryRunner (org.apache.druid.query.QueryRunner), CachePopulator (org.apache.druid.client.cache.CachePopulator), Yielder (org.apache.druid.java.util.common.guava.Yielder), Sequence (org.apache.druid.java.util.common.guava.Sequence), YieldingAccumulator (org.apache.druid.java.util.common.guava.YieldingAccumulator), VersionedIntervalTimeline (org.apache.druid.timeline.VersionedIntervalTimeline), ServerConfig (org.apache.druid.server.initialization.ServerConfig), ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper), ReportTimelineMissingSegmentQueryRunner (org.apache.druid.query.ReportTimelineMissingSegmentQueryRunner), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), CacheConfig (org.apache.druid.client.cache.CacheConfig), QueryRunnerFactoryConglomerate (org.apache.druid.query.QueryRunnerFactoryConglomerate), QueryToolChest (org.apache.druid.query.QueryToolChest), Set (java.util.Set), ReferenceCountingSegment (org.apache.druid.segment.ReferenceCountingSegment), AtomicLong (java.util.concurrent.atomic.AtomicLong), QueryTimeoutException (org.apache.druid.query.QueryTimeoutException), ServiceEmitter (org.apache.druid.java.util.emitter.service.ServiceEmitter), QueryRunnerFactory (org.apache.druid.query.QueryRunnerFactory), ResourceLimitExceededException (org.apache.druid.query.ResourceLimitExceededException), Optional (java.util.Optional), MutableBoolean (org.apache.commons.lang3.mutable.MutableBoolean), SegmentDescriptor (org.apache.druid.query.SegmentDescriptor), Cache (org.apache.druid.client.cache.Cache), Accumulator (org.apache.druid.java.util.common.guava.Accumulator), QueryUnsupportedException (org.apache.druid.query.QueryUnsupportedException)

Example 12 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

The class IncrementalIndexTest, method testConcurrentAddRead.

@Test(timeout = 60_000L)
public void testConcurrentAddRead() throws InterruptedException, ExecutionException {
    final int dimensionCount = 5;
    final ArrayList<AggregatorFactory> ingestAggregatorFactories = new ArrayList<>(dimensionCount + 1);
    ingestAggregatorFactories.add(new CountAggregatorFactory("rows"));
    for (int i = 0; i < dimensionCount; ++i) {
        ingestAggregatorFactories.add(new LongSumAggregatorFactory(StringUtils.format("sumResult%s", i), StringUtils.format("Dim_%s", i)));
        ingestAggregatorFactories.add(new DoubleSumAggregatorFactory(StringUtils.format("doubleSumResult%s", i), StringUtils.format("Dim_%s", i)));
    }
    final ArrayList<AggregatorFactory> queryAggregatorFactories = new ArrayList<>(dimensionCount + 1);
    queryAggregatorFactories.add(new CountAggregatorFactory("rows"));
    for (int i = 0; i < dimensionCount; ++i) {
        queryAggregatorFactories.add(new LongSumAggregatorFactory(StringUtils.format("sumResult%s", i), StringUtils.format("sumResult%s", i)));
        queryAggregatorFactories.add(new DoubleSumAggregatorFactory(StringUtils.format("doubleSumResult%s", i), StringUtils.format("doubleSumResult%s", i)));
    }
    final IncrementalIndex index = indexCreator.createIndex((Object) ingestAggregatorFactories.toArray(new AggregatorFactory[0]));
    final int concurrentThreads = 2;
    final int elementsPerThread = 10_000;
    final ListeningExecutorService indexExecutor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrentThreads, new ThreadFactoryBuilder().setDaemon(false).setNameFormat("index-executor-%d").setPriority(Thread.MIN_PRIORITY).build()));
    final ListeningExecutorService queryExecutor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrentThreads, new ThreadFactoryBuilder().setDaemon(false).setNameFormat("query-executor-%d").build()));
    final long timestamp = System.currentTimeMillis();
    final Interval queryInterval = Intervals.of("1900-01-01T00:00:00Z/2900-01-01T00:00:00Z");
    final List<ListenableFuture<?>> indexFutures = Lists.newArrayListWithExpectedSize(concurrentThreads);
    final List<ListenableFuture<?>> queryFutures = Lists.newArrayListWithExpectedSize(concurrentThreads);
    final Segment incrementalIndexSegment = new IncrementalIndexSegment(index, null);
    final QueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(new TimeseriesQueryQueryToolChest(), new TimeseriesQueryEngine(), QueryRunnerTestHelper.NOOP_QUERYWATCHER);
    final AtomicInteger currentlyRunning = new AtomicInteger(0);
    final AtomicInteger concurrentlyRan = new AtomicInteger(0);
    final AtomicInteger someoneRan = new AtomicInteger(0);
    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch readyLatch = new CountDownLatch(concurrentThreads * 2);
    final AtomicInteger queriesAccumulated = new AtomicInteger(0);
    for (int j = 0; j < concurrentThreads; j++) {
        indexFutures.add(indexExecutor.submit(new Runnable() {

            @Override
            public void run() {
                readyLatch.countDown();
                try {
                    startLatch.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
                currentlyRunning.incrementAndGet();
                try {
                    for (int i = 0; i < elementsPerThread; i++) {
                        index.add(getLongRow(timestamp + i, dimensionCount));
                        someoneRan.incrementAndGet();
                    }
                } catch (IndexSizeExceededException e) {
                    throw new RuntimeException(e);
                }
                currentlyRunning.decrementAndGet();
            }
        }));
        final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder().dataSource("xxx").granularity(Granularities.ALL).intervals(ImmutableList.of(queryInterval)).aggregators(queryAggregatorFactories).build();
        queryFutures.add(queryExecutor.submit(new Runnable() {

            @Override
            public void run() {
                readyLatch.countDown();
                try {
                    startLatch.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
                while (concurrentlyRan.get() == 0) {
                    QueryRunner<Result<TimeseriesResultValue>> runner = new FinalizeResultsQueryRunner<Result<TimeseriesResultValue>>(factory.createRunner(incrementalIndexSegment), factory.getToolchest());
                    Sequence<Result<TimeseriesResultValue>> sequence = runner.run(QueryPlus.wrap(query));
                    Double[] results = sequence.accumulate(new Double[0], new Accumulator<Double[], Result<TimeseriesResultValue>>() {

                        @Override
                        public Double[] accumulate(Double[] accumulated, Result<TimeseriesResultValue> in) {
                            if (currentlyRunning.get() > 0) {
                                concurrentlyRan.incrementAndGet();
                            }
                            queriesAccumulated.incrementAndGet();
                            return Lists.asList(in.getValue().getDoubleMetric("doubleSumResult0"), accumulated).toArray(new Double[0]);
                        }
                    });
                    for (Double result : results) {
                        final Integer maxValueExpected = someoneRan.get() + concurrentThreads;
                        if (maxValueExpected > 0) {
                            // Eventually consistent, but should be somewhere in that range
                            // Actual result is validated after all writes are guaranteed done.
                            Assert.assertTrue(StringUtils.format("%d >= %g >= 0 violated", maxValueExpected, result), result >= 0 && result <= maxValueExpected);
                        }
                    }
                }
            }
        }));
    }
    readyLatch.await();
    startLatch.countDown();
    List<ListenableFuture<?>> allFutures = new ArrayList<>(queryFutures.size() + indexFutures.size());
    allFutures.addAll(queryFutures);
    allFutures.addAll(indexFutures);
    Futures.allAsList(allFutures).get();
    Assert.assertTrue("Queries ran too fast", queriesAccumualted.get() > 0);
    Assert.assertTrue("Did not hit concurrency, please try again", concurrentlyRan.get() > 0);
    queryExecutor.shutdown();
    indexExecutor.shutdown();
    QueryRunner<Result<TimeseriesResultValue>> runner = new FinalizeResultsQueryRunner<Result<TimeseriesResultValue>>(factory.createRunner(incrementalIndexSegment), factory.getToolchest());
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder().dataSource("xxx").granularity(Granularities.ALL).intervals(ImmutableList.of(queryInterval)).aggregators(queryAggregatorFactories).build();
    List<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();
    boolean isRollup = index.isRollup();
    for (Result<TimeseriesResultValue> result : results) {
        Assert.assertEquals(elementsPerThread * (isRollup ? 1 : concurrentThreads), result.getValue().getLongMetric("rows").intValue());
        for (int i = 0; i < dimensionCount; ++i) {
            Assert.assertEquals(StringUtils.format("Failed long sum on dimension %d", i), elementsPerThread * concurrentThreads, result.getValue().getLongMetric(StringUtils.format("sumResult%s", i)).intValue());
            Assert.assertEquals(StringUtils.format("Failed double sum on dimension %d", i), elementsPerThread * concurrentThreads, result.getValue().getDoubleMetric(StringUtils.format("doubleSumResult%s", i)).intValue());
        }
    }
}
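
The test above relies on a getLongRow helper that is defined elsewhere in IncrementalIndexTest and is not shown here. Purely as a hedged reconstruction of its likely shape (the names and the constant value 1 are assumptions, chosen so that the long and double sums line up with the assertions in the test), it might look like this:

// Hypothetical sketch of the row builder: one row per call, with dimensions
// Dim_0..Dim_{n-1} each carrying the value 1 so that sumResultN and
// doubleSumResultN end up equal to the number of ingested rows.
// Assumed imports: org.apache.druid.data.input.MapBasedInputRow, java.util.*.
private static MapBasedInputRow getLongRow(long timestamp, int dimensionCount)
{
    List<String> dimensionList = new ArrayList<>(dimensionCount);
    Map<String, Object> event = new HashMap<>();
    for (int i = 0; i < dimensionCount; i++) {
        String dimName = StringUtils.format("Dim_%d", i);
        dimensionList.add(dimName);
        event.put(dimName, 1L);
    }
    return new MapBasedInputRow(timestamp, dimensionList, event);
}
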
Also used: TimeseriesResultValue (org.apache.druid.query.timeseries.TimeseriesResultValue), IncrementalIndexSegment (org.apache.druid.segment.IncrementalIndexSegment), ArrayList (java.util.ArrayList), LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory), TimeseriesQueryQueryToolChest (org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest), Segment (org.apache.druid.segment.Segment), Result (org.apache.druid.query.Result), TimeseriesQueryEngine (org.apache.druid.query.timeseries.TimeseriesQueryEngine), ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder), DoubleSumAggregatorFactory (org.apache.druid.query.aggregation.DoubleSumAggregatorFactory), TimeseriesQuery (org.apache.druid.query.timeseries.TimeseriesQuery), IncrementalIndex (org.apache.druid.segment.incremental.IncrementalIndex), OnheapIncrementalIndex (org.apache.druid.segment.incremental.OnheapIncrementalIndex), AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory), FilteredAggregatorFactory (org.apache.druid.query.aggregation.FilteredAggregatorFactory), CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory), CountDownLatch (java.util.concurrent.CountDownLatch), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), TimeseriesQueryRunnerFactory (org.apache.druid.query.timeseries.TimeseriesQueryRunnerFactory), QueryRunnerFactory (org.apache.druid.query.QueryRunnerFactory), FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner), ListenableFuture (com.google.common.util.concurrent.ListenableFuture), ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService), Interval (org.joda.time.Interval), IndexSizeExceededException (org.apache.druid.segment.incremental.IndexSizeExceededException), InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest), Test (org.junit.Test)

Example 13 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

The class IncrementalIndexTest, method testSingleThreadedIndexingAndQuery.

@Test
public void testSingleThreadedIndexingAndQuery() throws Exception {
    final int dimensionCount = 5;
    final ArrayList<AggregatorFactory> ingestAggregatorFactories = new ArrayList<>();
    ingestAggregatorFactories.add(new CountAggregatorFactory("rows"));
    for (int i = 0; i < dimensionCount; ++i) {
        ingestAggregatorFactories.add(new LongSumAggregatorFactory(StringUtils.format("sumResult%s", i), StringUtils.format("Dim_%s", i)));
        ingestAggregatorFactories.add(new DoubleSumAggregatorFactory(StringUtils.format("doubleSumResult%s", i), StringUtils.format("Dim_%s", i)));
    }
    final IncrementalIndex index = indexCreator.createIndex((Object) ingestAggregatorFactories.toArray(new AggregatorFactory[0]));
    final long timestamp = System.currentTimeMillis();
    final int rows = 50;
    // ingest the same data twice so that some rollup/merging happens
    for (int i = 0; i < rows; i++) {
        index.add(getLongRow(timestamp + i, dimensionCount));
    }
    for (int i = 0; i < rows; i++) {
        index.add(getLongRow(timestamp + i, dimensionCount));
    }
    // run a timeseries query on the index and verify results
    final ArrayList<AggregatorFactory> queryAggregatorFactories = new ArrayList<>();
    queryAggregatorFactories.add(new CountAggregatorFactory("rows"));
    for (int i = 0; i < dimensionCount; ++i) {
        queryAggregatorFactories.add(new LongSumAggregatorFactory(StringUtils.format("sumResult%s", i), StringUtils.format("sumResult%s", i)));
        queryAggregatorFactories.add(new DoubleSumAggregatorFactory(StringUtils.format("doubleSumResult%s", i), StringUtils.format("doubleSumResult%s", i)));
    }
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder().dataSource("xxx").granularity(Granularities.ALL).intervals(ImmutableList.of(Intervals.of("2000/2030"))).aggregators(queryAggregatorFactories).build();
    final Segment incrementalIndexSegment = new IncrementalIndexSegment(index, null);
    final QueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(new TimeseriesQueryQueryToolChest(), new TimeseriesQueryEngine(), QueryRunnerTestHelper.NOOP_QUERYWATCHER);
    final QueryRunner<Result<TimeseriesResultValue>> runner = new FinalizeResultsQueryRunner<Result<TimeseriesResultValue>>(factory.createRunner(incrementalIndexSegment), factory.getToolchest());
    List<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();
    Result<TimeseriesResultValue> result = Iterables.getOnlyElement(results);
    boolean isRollup = index.isRollup();
    Assert.assertEquals(rows * (isRollup ? 1 : 2), result.getValue().getLongMetric("rows").intValue());
    for (int i = 0; i < dimensionCount; ++i) {
        Assert.assertEquals("Failed long sum on dimension " + i, 2 * rows, result.getValue().getLongMetric("sumResult" + i).intValue());
        Assert.assertEquals("Failed double sum on dimension " + i, 2 * rows, result.getValue().getDoubleMetric("doubleSumResult" + i).intValue());
    }
}
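
Beyond createRunner, the QueryRunnerFactory interface also exposes mergeRunners for combining several per-segment runners into one. The following is a rough sketch under the same setup as the example above, reusing its factory, query, and incrementalIndexSegment; DirectQueryProcessingPool is assumed to be available in this Druid version (any QueryProcessingPool implementation would do), and querying the same segment twice is purely illustrative.

// Sketch: merge two per-segment runners, then decorate with mergeResults and
// FinalizeResultsQueryRunner, as the examples above do for a single segment.
QueryRunner<Result<TimeseriesResultValue>> merged = new FinalizeResultsQueryRunner<Result<TimeseriesResultValue>>(
    factory.getToolchest().mergeResults(
        factory.mergeRunners(
            DirectQueryProcessingPool.INSTANCE, // assumed helper; any QueryProcessingPool works
            ImmutableList.of(
                factory.createRunner(incrementalIndexSegment),
                factory.createRunner(incrementalIndexSegment)
            )
        )
    ),
    factory.getToolchest()
);
List<Result<TimeseriesResultValue>> mergedResults = merged.run(QueryPlus.wrap(query)).toList();
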
Also used: TimeseriesResultValue (org.apache.druid.query.timeseries.TimeseriesResultValue), DoubleSumAggregatorFactory (org.apache.druid.query.aggregation.DoubleSumAggregatorFactory), TimeseriesQuery (org.apache.druid.query.timeseries.TimeseriesQuery), IncrementalIndex (org.apache.druid.segment.incremental.IncrementalIndex), OnheapIncrementalIndex (org.apache.druid.segment.incremental.OnheapIncrementalIndex), IncrementalIndexSegment (org.apache.druid.segment.IncrementalIndexSegment), ArrayList (java.util.ArrayList), LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory), TimeseriesQueryQueryToolChest (org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest), AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory), FilteredAggregatorFactory (org.apache.druid.query.aggregation.FilteredAggregatorFactory), CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory), Segment (org.apache.druid.segment.Segment), Result (org.apache.druid.query.Result), TimeseriesQueryEngine (org.apache.druid.query.timeseries.TimeseriesQueryEngine), TimeseriesQueryRunnerFactory (org.apache.druid.query.timeseries.TimeseriesQueryRunnerFactory), QueryRunnerFactory (org.apache.druid.query.QueryRunnerFactory), FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner), InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest), Test (org.junit.Test)

Example 14 with QueryRunnerFactory

Use of org.apache.druid.query.QueryRunnerFactory in project druid by druid-io.

The class TestClusterQuerySegmentWalker, method getQueryRunnerForSegments.

@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(final Query<T> query, final Iterable<SegmentDescriptor> specs) {
    final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
    if (factory == null) {
        throw new ISE("Unknown query type[%s].", query.getClass());
    }
    final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
    if (!analysis.isConcreteTableBased()) {
        throw new ISE("Cannot handle datasource: %s", query.getDataSource());
    }
    final String dataSourceName = ((TableDataSource) analysis.getBaseDataSource()).getName();
    final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
    // Make sure this query type can handle the subquery, if present.
    if (analysis.isQuery() && !toolChest.canPerformSubquery(((QueryDataSource) analysis.getDataSource()).getQuery())) {
        throw new ISE("Cannot handle subquery: %s", analysis.getDataSource());
    }
    final Function<SegmentReference, SegmentReference> segmentMapFn = joinableFactoryWrapper.createSegmentMapFn(analysis.getJoinBaseTableFilter().map(Filters::toFilter).orElse(null), analysis.getPreJoinableClauses(), new AtomicLong(), analysis.getBaseQuery().orElse(query));
    final QueryRunner<T> baseRunner = new FinalizeResultsQueryRunner<>(toolChest.postMergeQueryDecoration(toolChest.mergeResults(toolChest.preMergeQueryDecoration(makeTableRunner(toolChest, factory, getSegmentsForTable(dataSourceName, specs), segmentMapFn)))), toolChest);
    // return a runner that actually serves the queries, routing through the scheduler when one is configured
    return (theQuery, responseContext) -> {
        responseContext.initializeRemainingResponses();
        responseContext.addRemainingResponse(theQuery.getQuery().getMostSpecificId(), 0);
        if (scheduler != null) {
            Set<SegmentServerSelector> segments = new HashSet<>();
            specs.forEach(spec -> segments.add(new SegmentServerSelector(spec)));
            return scheduler.run(scheduler.prioritizeAndLaneQuery(theQuery, segments), new LazySequence<>(() -> baseRunner.run(theQuery.withQuery(Queries.withSpecificSegments(theQuery.getQuery(), ImmutableList.copyOf(specs))), responseContext)));
        } else {
            return baseRunner.run(theQuery.withQuery(Queries.withSpecificSegments(theQuery.getQuery(), ImmutableList.copyOf(specs))), responseContext);
        }
    };
}
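
For completeness, a brief usage sketch of the walker above; the walker instance, the table-backed query, and the segment descriptors here are hypothetical placeholders, not taken from the Druid tests.

// Sketch: ask the walker for a runner over two explicit segment descriptors of the
// same interval and version, then drain the results. "walker" is assumed to be a
// fully wired TestClusterQuerySegmentWalker and "query" a timeseries query over a table.
List<SegmentDescriptor> specs = ImmutableList.of(
    new SegmentDescriptor(Intervals.of("2000/2001"), "v1", 0),
    new SegmentDescriptor(Intervals.of("2000/2001"), "v1", 1)
);
QueryRunner<Result<TimeseriesResultValue>> runner = walker.getQueryRunnerForSegments(query, specs);
List<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();
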
Also used: DataSourceAnalysis (org.apache.druid.query.planning.DataSourceAnalysis), JoinableFactory (org.apache.druid.segment.join.JoinableFactory), Function (java.util.function.Function), NoopQueryRunner (org.apache.druid.query.NoopQueryRunner), ArrayList (java.util.ArrayList), HashSet (java.util.HashSet), SegmentReference (org.apache.druid.segment.SegmentReference), PartitionChunk (org.apache.druid.timeline.partition.PartitionChunk), Interval (org.joda.time.Interval), Lists (com.google.common.collect.Lists), ImmutableList (com.google.common.collect.ImmutableList), Query (org.apache.druid.query.Query), SpecificSegmentQueryRunner (org.apache.druid.query.spec.SpecificSegmentQueryRunner), Map (java.util.Map), SegmentServerSelector (org.apache.druid.client.SegmentServerSelector), QueryRunner (org.apache.druid.query.QueryRunner), QuerySegmentWalker (org.apache.druid.query.QuerySegmentWalker), Nullable (javax.annotation.Nullable), FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner), LazySequence (org.apache.druid.java.util.common.guava.LazySequence), Execs (org.apache.druid.java.util.common.concurrent.Execs), VersionedIntervalTimeline (org.apache.druid.timeline.VersionedIntervalTimeline), QueryRunnerFactoryConglomerate (org.apache.druid.query.QueryRunnerFactoryConglomerate), QueryToolChest (org.apache.druid.query.QueryToolChest), JoinableFactoryWrapper (org.apache.druid.segment.join.JoinableFactoryWrapper), TimelineObjectHolder (org.apache.druid.timeline.TimelineObjectHolder), Set (java.util.Set), ISE (org.apache.druid.java.util.common.ISE), SpecificSegmentSpec (org.apache.druid.query.spec.SpecificSegmentSpec), ReferenceCountingSegment (org.apache.druid.segment.ReferenceCountingSegment), TableDataSource (org.apache.druid.query.TableDataSource), Queries (org.apache.druid.query.Queries), AtomicLong (java.util.concurrent.atomic.AtomicLong), QueryDataSource (org.apache.druid.query.QueryDataSource), List (java.util.List), ReferenceCountingSegmentQueryRunner (org.apache.druid.query.ReferenceCountingSegmentQueryRunner), QueryRunnerFactory (org.apache.druid.query.QueryRunnerFactory), Preconditions (com.google.common.base.Preconditions), FunctionalIterable (org.apache.druid.java.util.common.guava.FunctionalIterable), SegmentDescriptor (org.apache.druid.query.SegmentDescriptor), Filters (org.apache.druid.segment.filter.Filters), Collections (java.util.Collections)

Aggregations

QueryRunnerFactory (org.apache.druid.query.QueryRunnerFactory): 14 uses
Query (org.apache.druid.query.Query): 7 uses
QueryRunner (org.apache.druid.query.QueryRunner): 6 uses
QueryRunnerFactoryConglomerate (org.apache.druid.query.QueryRunnerFactoryConglomerate): 6 uses
IncrementalIndexSegment (org.apache.druid.segment.IncrementalIndexSegment): 6 uses
Interval (org.joda.time.Interval): 6 uses
ArrayList (java.util.ArrayList): 5 uses
FinalizeResultsQueryRunner (org.apache.druid.query.FinalizeResultsQueryRunner): 5 uses
QueryToolChest (org.apache.druid.query.QueryToolChest): 5 uses
Result (org.apache.druid.query.Result): 5 uses
Test (org.junit.Test): 5 uses
AtomicLong (java.util.concurrent.atomic.AtomicLong): 4 uses
Function (java.util.function.Function): 4 uses
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory): 4 uses
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest): 4 uses
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 3 uses
Optional (java.util.Optional): 3 uses
CacheConfig (org.apache.druid.client.cache.CacheConfig): 3 uses
SegmentDescriptor (org.apache.druid.query.SegmentDescriptor): 3 uses
AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory): 3 uses