Example 11 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project hive by apache.

The class DruidStorageHandlerUtils, method getTimelineForIntervalWithHandle.

private static VersionedIntervalTimeline<String, DataSegment> getTimelineForIntervalWithHandle(
        final Handle handle, final String dataSource, final Interval interval,
        final MetadataStorageTablesConfig dbTables) throws IOException {
    // Select payloads of all used segments that overlap the given interval: a segment overlaps
    // when it starts no later than the interval's end and ends no earlier than the interval's start.
    Query<Map<String, Object>> sql = handle
            .createQuery(String.format(
                    "SELECT payload FROM %s WHERE used = true AND dataSource = ? AND start <= ? AND \"end\" >= ?",
                    dbTables.getSegmentsTable()))
            .bind(0, dataSource)
            .bind(1, interval.getEnd().toString())
            .bind(2, interval.getStart().toString());
    final VersionedIntervalTimeline<String, DataSegment> timeline = new VersionedIntervalTimeline<>(Ordering.natural());
    // Deserialize each payload and add the segment to the timeline under its version.
    try (ResultIterator<byte[]> dbSegments = sql.map(ByteArrayMapper.FIRST).iterator()) {
        while (dbSegments.hasNext()) {
            final byte[] payload = dbSegments.next();
            DataSegment segment = JSON_MAPPER.readValue(payload, DataSegment.class);
            timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
        }
    }
    return timeline;
}
Also used : VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) GenericUDFToString(org.apache.hadoop.hive.ql.udf.generic.GenericUDFToString) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) DataSegment(org.apache.druid.timeline.DataSegment)
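The timeline returned above can then be asked which segments are visible for an interval. A minimal follow-up sketch, assuming the handle and dbTables from the example are in scope (the dataSource name and interval are illustrative):

// Resolve the visible (non-overshadowed) segments for an interval from a timeline built as above.
VersionedIntervalTimeline<String, DataSegment> timeline =
        getTimelineForIntervalWithHandle(handle, "wikipedia", Intervals.of("2020-01-01/2020-02-01"), dbTables);
for (TimelineObjectHolder<String, DataSegment> holder : timeline.lookup(Intervals.of("2020-01-01/2020-02-01"))) {
    for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
        // Each chunk carries one DataSegment of the winning version for its time slice.
        System.out.println(chunk.getObject().getId());
    }
}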

Example 12 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

The class AbstractITBatchIndexTest, method submitTaskAndWait.

protected void submitTaskAndWait(String taskSpec, String dataSourceName, boolean waitForNewVersion, boolean waitForSegmentsToLoad, Pair<Boolean, Boolean> segmentAvailabilityConfirmationPair) {
    final List<DataSegment> oldVersions = waitForNewVersion ? coordinator.getAvailableSegments(dataSourceName) : null;
    long startSubTaskCount = -1;
    final boolean assertRunsSubTasks = taskSpec.contains("index_parallel");
    if (assertRunsSubTasks) {
        startSubTaskCount = countCompleteSubTasks(dataSourceName, !taskSpec.contains("dynamic"));
    }
    final String taskID = indexer.submitTask(taskSpec);
    LOG.info("TaskID for loading index task %s", taskID);
    indexer.waitUntilTaskCompletes(taskID);
    if (assertRunsSubTasks) {
        final boolean perfectRollup = !taskSpec.contains("dynamic");
        final long newSubTasks = countCompleteSubTasks(dataSourceName, perfectRollup) - startSubTaskCount;
        Assert.assertTrue(newSubTasks > 0, StringUtils.format("The supervisor task[%s] didn't create any sub tasks. Was it executed in the parallel mode?", taskID));
    }
    if (segmentAvailabilityConfirmationPair.lhs != null && segmentAvailabilityConfirmationPair.lhs) {
        TaskReport reportRaw = indexer.getTaskReport(taskID).get("ingestionStatsAndErrors");
        IngestionStatsAndErrorsTaskReport report = (IngestionStatsAndErrorsTaskReport) reportRaw;
        IngestionStatsAndErrorsTaskReportData reportData = (IngestionStatsAndErrorsTaskReportData) report.getPayload();
        // Confirm that the task waited longer than 0 ms for segments to become available.
        Assert.assertTrue(reportData.getSegmentAvailabilityWaitTimeMs() > 0);
        // Make sure that the result of waiting for segments to load matches the expected result
        if (segmentAvailabilityConfirmationPair.rhs != null) {
            Assert.assertEquals(Boolean.valueOf(reportData.isSegmentAvailabilityConfirmed()), segmentAvailabilityConfirmationPair.rhs);
        }
    }
    // Wait until a new version of segments that overshadows the original segments has loaded.
    if (waitForNewVersion) {
        ITRetryUtil.retryUntilTrue(() -> {
            final VersionedIntervalTimeline<String, DataSegment> timeline = VersionedIntervalTimeline.forSegments(coordinator.getAvailableSegments(dataSourceName));
            final List<TimelineObjectHolder<String, DataSegment>> holders = timeline.lookup(Intervals.ETERNITY);
            return FluentIterable
                    .from(holders)
                    .transformAndConcat(TimelineObjectHolder::getObject)
                    .anyMatch(chunk -> FluentIterable.from(oldVersions)
                            .anyMatch(oldSegment -> chunk.getObject().overshadows(oldSegment)));
        }, "See a new version");
    }
    if (waitForSegmentsToLoad) {
        ITRetryUtil.retryUntilTrue(() -> coordinator.areSegmentsLoaded(dataSourceName), "Segment Load");
    }
}
Also used : TaskReport(org.apache.druid.indexing.common.TaskReport) Logger(org.apache.druid.java.util.common.logger.Logger) Intervals(org.apache.druid.java.util.common.Intervals) ClientInfoResourceTestClient(org.apache.druid.testing.clients.ClientInfoResourceTestClient) Inject(com.google.inject.Inject) Function(java.util.function.Function) PartialDimensionDistributionTask(org.apache.druid.indexing.common.task.batch.parallel.PartialDimensionDistributionTask) PartialRangeSegmentGenerateTask(org.apache.druid.indexing.common.task.batch.parallel.PartialRangeSegmentGenerateTask) Pair(org.apache.druid.java.util.common.Pair) ArrayList(java.util.ArrayList) Assert(org.testng.Assert) FluentIterable(com.google.common.collect.FluentIterable) PartialDimensionCardinalityTask(org.apache.druid.indexing.common.task.batch.parallel.PartialDimensionCardinalityTask) SecondaryPartitionType(org.apache.druid.indexer.partitions.SecondaryPartitionType) IntegrationTestingConfig(org.apache.druid.testing.IntegrationTestingConfig) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ITRetryUtil(org.apache.druid.testing.utils.ITRetryUtil) IngestionStatsAndErrorsTaskReportData(org.apache.druid.indexing.common.IngestionStatsAndErrorsTaskReportData) StringUtils(org.apache.druid.java.util.common.StringUtils) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) PartialHashSegmentGenerateTask(org.apache.druid.indexing.common.task.batch.parallel.PartialHashSegmentGenerateTask) ISE(org.apache.druid.java.util.common.ISE) IOException(java.io.IOException) StandardCharsets(java.nio.charset.StandardCharsets) IOUtils(org.apache.commons.io.IOUtils) List(java.util.List) SinglePhaseSubTask(org.apache.druid.indexing.common.task.batch.parallel.SinglePhaseSubTask) IngestionStatsAndErrorsTaskReport(org.apache.druid.indexing.common.IngestionStatsAndErrorsTaskReport) PartialGenericSegmentMergeTask(org.apache.druid.indexing.common.task.batch.parallel.PartialGenericSegmentMergeTask) DataSegment(org.apache.druid.timeline.DataSegment) SqlTestQueryHelper(org.apache.druid.testing.utils.SqlTestQueryHelper) InputStream(java.io.InputStream)
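The new-version check above is a reusable idiom: rebuild a timeline from the currently available segments and ask whether any visible chunk overshadows a segment captured before the task ran. A standalone sketch of the same check, with the segment lists assumed to come from the coordinator client:

// Returns true once any chunk of the current timeline overshadows one of the old segments.
static boolean hasNewVersion(List<DataSegment> oldVersions, List<DataSegment> currentSegments) {
    final VersionedIntervalTimeline<String, DataSegment> timeline =
            VersionedIntervalTimeline.forSegments(currentSegments);
    for (TimelineObjectHolder<String, DataSegment> holder : timeline.lookup(Intervals.ETERNITY)) {
        for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
            for (DataSegment oldSegment : oldVersions) {
                if (chunk.getObject().overshadows(oldSegment)) {
                    return true;
                }
            }
        }
    }
    return false;
}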

Example 13 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

The class ServerManagerForQueryErrorTest, method buildQueryRunnerForSegment.

@Override
protected <T> QueryRunner<T> buildQueryRunnerForSegment(
        Query<T> query,
        SegmentDescriptor descriptor,
        QueryRunnerFactory<T, Query<T>> factory,
        QueryToolChest<T, Query<T>> toolChest,
        VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline,
        Function<SegmentReference, SegmentReference> segmentMapFn,
        AtomicLong cpuTimeAccumulator,
        Optional<byte[]> cacheKeyPrefix) {
    if (query.getContextBoolean(QUERY_RETRY_TEST_CONTEXT_KEY, false)) {
        final MutableBoolean isIgnoreSegment = new MutableBoolean(false);
        // Report this segment as missing for up to MAX_NUM_FALSE_MISSING_SEGMENTS_REPORTS
        // occurrences per query id, so the caller's retry path gets exercised.
        queryToIgnoredSegments.compute(query.getMostSpecificId(), (queryId, ignoredSegments) -> {
            if (ignoredSegments == null) {
                ignoredSegments = new HashSet<>();
            }
            if (ignoredSegments.size() < MAX_NUM_FALSE_MISSING_SEGMENTS_REPORTS) {
                ignoredSegments.add(descriptor);
                isIgnoreSegment.setTrue();
            }
            return ignoredSegments;
        });
        if (isIgnoreSegment.isTrue()) {
            LOG.info("Pretending I don't have segment[%s]", descriptor);
            return new ReportTimelineMissingSegmentQueryRunner<>(descriptor);
        }
    } else if (query.getContextBoolean(QUERY_TIMEOUT_TEST_CONTEXT_KEY, false)) {
        return (queryPlus, responseContext) -> new Sequence<T>() {

            @Override
            public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator) {
                throw new QueryTimeoutException("query timeout test");
            }

            @Override
            public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator) {
                throw new QueryTimeoutException("query timeout test");
            }
        };
    } else if (query.getContextBoolean(QUERY_CAPACITY_EXCEEDED_TEST_CONTEXT_KEY, false)) {
        return (queryPlus, responseContext) -> new Sequence<T>() {

            @Override
            public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator) {
                throw QueryCapacityExceededException.withErrorMessageAndResolvedHost("query capacity exceeded test");
            }

            @Override
            public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator) {
                throw QueryCapacityExceededException.withErrorMessageAndResolvedHost("query capacity exceeded test");
            }
        };
    } else if (query.getContextBoolean(QUERY_UNSUPPORTED_TEST_CONTEXT_KEY, false)) {
        return (queryPlus, responseContext) -> new Sequence<T>() {

            @Override
            public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator) {
                throw new QueryUnsupportedException("query unsupported test");
            }

            @Override
            public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator) {
                throw new QueryUnsupportedException("query unsupported test");
            }
        };
    } else if (query.getContextBoolean(RESOURCE_LIMIT_EXCEEDED_TEST_CONTEXT_KEY, false)) {
        return (queryPlus, responseContext) -> new Sequence<T>() {

            @Override
            public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator) {
                throw new ResourceLimitExceededException("resource limit exceeded test");
            }

            @Override
            public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator) {
                throw new ResourceLimitExceededException("resource limit exceeded test");
            }
        };
    } else if (query.getContextBoolean(QUERY_FAILURE_TEST_CONTEXT_KEY, false)) {
        return (queryPlus, responseContext) -> new Sequence<T>() {

            @Override
            public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, T> accumulator) {
                throw new RuntimeException("query failure test");
            }

            @Override
            public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, T> accumulator) {
                throw new RuntimeException("query failure test");
            }
        };
    }
    return super.buildQueryRunnerForSegment(query, descriptor, factory, toolChest, timeline, segmentMapFn, cpuTimeAccumulator, cacheKeyPrefix);
}
Also used : Logger(org.apache.druid.java.util.common.logger.Logger) SegmentManager(org.apache.druid.server.SegmentManager) Inject(com.google.inject.Inject) Smile(org.apache.druid.guice.annotations.Smile) QueryProcessingPool(org.apache.druid.query.QueryProcessingPool) JoinableFactory(org.apache.druid.segment.join.JoinableFactory) Function(java.util.function.Function) QueryCapacityExceededException(org.apache.druid.query.QueryCapacityExceededException) HashSet(java.util.HashSet) SegmentReference(org.apache.druid.segment.SegmentReference) Query(org.apache.druid.query.Query) QueryRunner(org.apache.druid.query.QueryRunner) CachePopulator(org.apache.druid.client.cache.CachePopulator) Yielder(org.apache.druid.java.util.common.guava.Yielder) Sequence(org.apache.druid.java.util.common.guava.Sequence) YieldingAccumulator(org.apache.druid.java.util.common.guava.YieldingAccumulator) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ServerConfig(org.apache.druid.server.initialization.ServerConfig) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) ReportTimelineMissingSegmentQueryRunner(org.apache.druid.query.ReportTimelineMissingSegmentQueryRunner) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) CacheConfig(org.apache.druid.client.cache.CacheConfig) QueryRunnerFactoryConglomerate(org.apache.druid.query.QueryRunnerFactoryConglomerate) QueryToolChest(org.apache.druid.query.QueryToolChest) Set(java.util.Set) ReferenceCountingSegment(org.apache.druid.segment.ReferenceCountingSegment) AtomicLong(java.util.concurrent.atomic.AtomicLong) QueryTimeoutException(org.apache.druid.query.QueryTimeoutException) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) QueryRunnerFactory(org.apache.druid.query.QueryRunnerFactory) ResourceLimitExceededException(org.apache.druid.query.ResourceLimitExceededException) Optional(java.util.Optional) MutableBoolean(org.apache.commons.lang3.mutable.MutableBoolean) SegmentDescriptor(org.apache.druid.query.SegmentDescriptor) Cache(org.apache.druid.client.cache.Cache) Accumulator(org.apache.druid.java.util.common.guava.Accumulator) QueryUnsupportedException(org.apache.druid.query.QueryUnsupportedException)
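Each branch above keys off a boolean flag in the query context, so a test can opt a single query into a specific failure mode. A minimal sketch, assuming the test's context-key constant is accessible from the caller:

// Opt a query into the simulated-timeout branch; getContextBoolean in the method above
// reads exactly this flag. withOverriddenContext returns a copy of the query.
Query<T> failingQuery = query.withOverriddenContext(
        ImmutableMap.of(ServerManagerForQueryErrorTest.QUERY_TIMEOUT_TEST_CONTEXT_KEY, true));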

Example 14 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

The class NewestSegmentFirstIterator, method findInitialSearchInterval.

/**
 * Returns the initial search intervals, derived from
 * {@code (timeline.first().start, timeline.last().end - skipOffset)} with the skip intervals removed.
 *
 * @param timeline                     timeline of a dataSource
 * @param skipOffset                   period at the end of the timeline to skip
 * @param configuredSegmentGranularity segment granularity configured for the dataSource
 * @param skipIntervals                intervals to skip
 *
 * @return intervals to search; empty if none are found
 */
private static List<Interval> findInitialSearchInterval(VersionedIntervalTimeline<String, DataSegment> timeline, Period skipOffset, Granularity configuredSegmentGranularity, @Nullable List<Interval> skipIntervals) {
    Preconditions.checkArgument(timeline != null && !timeline.isEmpty(), "timeline should not be null or empty");
    Preconditions.checkNotNull(skipOffset, "skipOffset");
    final TimelineObjectHolder<String, DataSegment> first = Preconditions.checkNotNull(timeline.first(), "first");
    final TimelineObjectHolder<String, DataSegment> last = Preconditions.checkNotNull(timeline.last(), "last");
    final List<Interval> fullSkipIntervals = sortAndAddSkipIntervalFromLatest(last.getInterval().getEnd(), skipOffset, configuredSegmentGranularity, skipIntervals);
    final Interval totalInterval = new Interval(first.getInterval().getStart(), last.getInterval().getEnd());
    final List<Interval> filteredInterval = filterSkipIntervals(totalInterval, fullSkipIntervals);
    final List<Interval> searchIntervals = new ArrayList<>();
    for (Interval lookupInterval : filteredInterval) {
        final List<DataSegment> segments = timeline
                .findNonOvershadowedObjectsInInterval(lookupInterval, Partitions.ONLY_COMPLETE)
                .stream()
                .filter(segment -> lookupInterval.contains(segment.getInterval()))
                .collect(Collectors.toList());
        if (segments.isEmpty()) {
            continue;
        }
        // The search interval is the tightest interval covering all matching segments.
        DateTime searchStart = segments.stream()
                .map(segment -> segment.getId().getIntervalStart())
                .min(Comparator.naturalOrder())
                .orElseThrow(AssertionError::new);
        DateTime searchEnd = segments.stream()
                .map(segment -> segment.getId().getIntervalEnd())
                .max(Comparator.naturalOrder())
                .orElseThrow(AssertionError::new);
        searchIntervals.add(new Interval(searchStart, searchEnd));
    }
    return searchIntervals;
}
Also used : Arrays(java.util.Arrays) Comparators(org.apache.druid.java.util.common.guava.Comparators) NumberedPartitionChunk(org.apache.druid.timeline.partition.NumberedPartitionChunk) Partitions(org.apache.druid.timeline.Partitions) PriorityQueue(java.util.PriorityQueue) IndexSpec(org.apache.druid.segment.IndexSpec) CompactionState(org.apache.druid.timeline.CompactionState) DataSourceCompactionConfig(org.apache.druid.server.coordinator.DataSourceCompactionConfig) JodaUtils(org.apache.druid.java.util.common.JodaUtils) Map(java.util.Map) DynamicPartitionsSpec(org.apache.druid.indexer.partitions.DynamicPartitionsSpec) DateTimes(org.apache.druid.java.util.common.DateTimes) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) ClientCompactionTaskQueryTuningConfig(org.apache.druid.client.indexing.ClientCompactionTaskQueryTuningConfig) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) List(java.util.List) DimensionSchema(org.apache.druid.data.input.impl.DimensionSchema) DimFilter(org.apache.druid.query.filter.DimFilter) DataSegment(org.apache.druid.timeline.DataSegment) Logger(org.apache.druid.java.util.common.logger.Logger) Streams(org.apache.druid.utils.Streams) Granularity(org.apache.druid.java.util.common.granularity.Granularity) Intervals(org.apache.druid.java.util.common.Intervals) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) PartitionChunk(org.apache.druid.timeline.partition.PartitionChunk) Interval(org.joda.time.Interval) Lists(com.google.common.collect.Lists) NoSuchElementException(java.util.NoSuchElementException) PartitionsSpec(org.apache.druid.indexer.partitions.PartitionsSpec) Nullable(javax.annotation.Nullable) ClientCompactionTaskTransformSpec(org.apache.druid.client.indexing.ClientCompactionTaskTransformSpec) Period(org.joda.time.Period) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) Iterator(java.util.Iterator) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DateTime(org.joda.time.DateTime) CompactionStatistics(org.apache.druid.server.coordinator.CompactionStatistics) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) DimensionsSpec(org.apache.druid.data.input.impl.DimensionsSpec) Maps(com.google.common.collect.Maps) ClientCompactionTaskGranularitySpec(org.apache.druid.client.indexing.ClientCompactionTaskGranularitySpec) SegmentUtils(org.apache.druid.segment.SegmentUtils) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Comparator(java.util.Comparator) Collections(java.util.Collections) ArrayUtils(org.apache.commons.lang.ArrayUtils)
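The skipOffset arithmetic trims the most recent data out of the search window before compaction considers it. A worked sketch of just that step, using Joda-Time types as in the example (all values are illustrative):

// The searchable window ends skipOffset before the latest segment's end.
DateTime latestEnd = DateTimes.of("2020-02-01T00:00:00Z");  // timeline.last().getInterval().getEnd()
Period skipOffset = new Period("P1D");                      // skip the most recent day
Interval searchable = new Interval(DateTimes.of("2020-01-01T00:00:00Z"), latestEnd.minus(skipOffset));
// searchable covers 2020-01-01/2020-01-31; the last day is left alone.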

Example 15 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

The class SimpleServerView, method addSegmentToServer.

private void addSegmentToServer(DruidServer server, DataSegment segment) {
    // One ServerSelector per segment id; it tracks every server that hosts the segment.
    final ServerSelector selector = selectors.computeIfAbsent(
            segment.getId().toString(),
            k -> new ServerSelector(segment, tierSelectorStrategy));
    selector.addServerAndUpdateSegment(servers.get(server), segment);
    // Register the selector in the per-dataSource timeline under the segment's version.
    timelines.computeIfAbsent(segment.getDataSource(), k -> new VersionedIntervalTimeline<>(Ordering.natural()))
             .add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(selector));
}
Also used : DataSourceAnalysis(org.apache.druid.query.planning.DataSourceAnalysis) HighestPriorityTierSelectorStrategy(org.apache.druid.client.selector.HighestPriorityTierSelectorStrategy) HttpClient(org.apache.druid.java.util.http.client.HttpClient) HashMap(java.util.HashMap) ServerSelector(org.apache.druid.client.selector.ServerSelector) QueryWatcher(org.apache.druid.query.QueryWatcher) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) TierSelectorStrategy(org.apache.druid.client.selector.TierSelectorStrategy) QueryRunner(org.apache.druid.query.QueryRunner) QueryToolChestWarehouse(org.apache.druid.query.QueryToolChestWarehouse) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) TimelineLookup(org.apache.druid.timeline.TimelineLookup) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) Executor(java.util.concurrent.Executor) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) ISE(org.apache.druid.java.util.common.ISE) TableDataSource(org.apache.druid.query.TableDataSource) List(java.util.List) RandomServerSelectorStrategy(org.apache.druid.client.selector.RandomServerSelectorStrategy) Ordering(com.google.common.collect.Ordering) DataSegment(org.apache.druid.timeline.DataSegment) Optional(java.util.Optional) Preconditions(com.google.common.base.Preconditions) QueryableDruidServer(org.apache.druid.client.selector.QueryableDruidServer) Collections(java.util.Collections)
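Reading the structure back is symmetric: look up the per-dataSource timeline and walk its chunks, each of which now holds a ServerSelector instead of a raw segment. A minimal sketch, with the dataSource name and interval as illustrative values:

// Find which ServerSelectors serve a dataSource over an interval, using the
// timelines map populated above. Field names match the example's class.
VersionedIntervalTimeline<String, ServerSelector> timeline = timelines.get("wikipedia");
if (timeline != null) {
    for (TimelineObjectHolder<String, ServerSelector> holder : timeline.lookup(Intervals.of("2020-01-01/2020-02-01"))) {
        for (PartitionChunk<ServerSelector> chunk : holder.getObject()) {
            ServerSelector selector = chunk.getObject();
            // selector exposes the candidate servers for this segment's time slice.
        }
    }
}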

Aggregations

VersionedIntervalTimeline (org.apache.druid.timeline.VersionedIntervalTimeline): 19 usages
DataSegment (org.apache.druid.timeline.DataSegment): 10 usages
ArrayList (java.util.ArrayList): 9 usages
Interval (org.joda.time.Interval): 9 usages
List (java.util.List): 8 usages
Map (java.util.Map): 8 usages
ReferenceCountingSegment (org.apache.druid.segment.ReferenceCountingSegment): 7 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 6 usages
Intervals (org.apache.druid.java.util.common.Intervals): 6 usages
Logger (org.apache.druid.java.util.common.logger.Logger): 6 usages
Test (org.junit.Test): 6 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 5 usages
Collections (java.util.Collections): 5 usages
HashMap (java.util.HashMap): 5 usages
Optional (java.util.Optional): 5 usages
ISE (org.apache.druid.java.util.common.ISE): 5 usages
QueryRunner (org.apache.druid.query.QueryRunner): 5 usages
SegmentDescriptor (org.apache.druid.query.SegmentDescriptor): 5 usages
TableDataSource (org.apache.druid.query.TableDataSource): 5 usages
DataSourceAnalysis (org.apache.druid.query.planning.DataSourceAnalysis): 5 usages