Example 1 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

In the class CompactSegments, the method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    LOG.info("Compact segments");
    final CoordinatorCompactionConfig dynamicConfig = params.getCoordinatorCompactionConfig();
    final CoordinatorStats stats = new CoordinatorStats();
    List<DataSourceCompactionConfig> compactionConfigList = dynamicConfig.getCompactionConfigs();
    if (dynamicConfig.getMaxCompactionTaskSlots() > 0) {
        Map<String, VersionedIntervalTimeline<String, DataSegment>> dataSources = params.getUsedSegmentsTimelinesPerDataSource();
        if (compactionConfigList != null && !compactionConfigList.isEmpty()) {
            Map<String, DataSourceCompactionConfig> compactionConfigs = compactionConfigList.stream().collect(Collectors.toMap(DataSourceCompactionConfig::getDataSource, Function.identity()));
            final List<TaskStatusPlus> compactionTasks = filterNonCompactionTasks(indexingServiceClient.getActiveTasks());
            // dataSource -> list of intervals for which compaction will be skipped in this run
            final Map<String, List<Interval>> intervalsToSkipCompaction = new HashMap<>();
            int numEstimatedNonCompleteCompactionTasks = 0;
            for (TaskStatusPlus status : compactionTasks) {
                final TaskPayloadResponse response = indexingServiceClient.getTaskPayload(status.getId());
                if (response == null) {
                    throw new ISE("Got a null paylord from overlord for task[%s]", status.getId());
                }
                if (COMPACTION_TASK_TYPE.equals(response.getPayload().getType())) {
                    final ClientCompactionTaskQuery compactionTaskQuery = (ClientCompactionTaskQuery) response.getPayload();
                    DataSourceCompactionConfig dataSourceCompactionConfig = compactionConfigs.get(status.getDataSource());
                    if (dataSourceCompactionConfig != null && dataSourceCompactionConfig.getGranularitySpec() != null) {
                        Granularity configuredSegmentGranularity = dataSourceCompactionConfig.getGranularitySpec().getSegmentGranularity();
                        if (configuredSegmentGranularity != null && compactionTaskQuery.getGranularitySpec() != null && !configuredSegmentGranularity.equals(compactionTaskQuery.getGranularitySpec().getSegmentGranularity())) {
                            // We will cancel active compaction task if segmentGranularity changes and we will need to
                            // re-compact the interval
                            LOG.info("Canceled task[%s] as task segmentGranularity is [%s] but compaction config " + "segmentGranularity is [%s]", status.getId(), compactionTaskQuery.getGranularitySpec().getSegmentGranularity(), configuredSegmentGranularity);
                            indexingServiceClient.cancelTask(status.getId());
                            continue;
                        }
                    }
                    // Skip interval as the current active compaction task is good
                    final Interval interval = compactionTaskQuery.getIoConfig().getInputSpec().getInterval();
                    intervalsToSkipCompaction.computeIfAbsent(status.getDataSource(), k -> new ArrayList<>()).add(interval);
                    // Since we keep the current active compaction task running, we count the active task slots
                    numEstimatedNonCompleteCompactionTasks += findMaxNumTaskSlotsUsedByOneCompactionTask(compactionTaskQuery.getTuningConfig());
                } else {
                    throw new ISE("task[%s] is not a compactionTask", status.getId());
                }
            }
            // Skip all the intervals locked by higher priority tasks for each datasource
            // This must be done after the invalid compaction tasks are cancelled
            // in the loop above so that their intervals are not considered locked
            getLockedIntervalsToSkip(compactionConfigList).forEach((dataSource, intervals) -> intervalsToSkipCompaction.computeIfAbsent(dataSource, ds -> new ArrayList<>()).addAll(intervals));
            final CompactionSegmentIterator iterator = policy.reset(compactionConfigs, dataSources, intervalsToSkipCompaction);
            int totalCapacity;
            if (dynamicConfig.isUseAutoScaleSlots()) {
                try {
                    totalCapacity = indexingServiceClient.getTotalWorkerCapacityWithAutoScale();
                } catch (Exception e) {
                    LOG.warn("Failed to get total worker capacity with auto scale slots. Falling back to current capacity count");
                    totalCapacity = indexingServiceClient.getTotalWorkerCapacity();
                }
            } else {
                totalCapacity = indexingServiceClient.getTotalWorkerCapacity();
            }
            final int compactionTaskCapacity = (int) Math.min(totalCapacity * dynamicConfig.getCompactionTaskSlotRatio(), dynamicConfig.getMaxCompactionTaskSlots());
            final int numAvailableCompactionTaskSlots;
            if (numEstimatedNonCompleteCompactionTasks > 0) {
                numAvailableCompactionTaskSlots = Math.max(0, compactionTaskCapacity - numEstimatedNonCompleteCompactionTasks);
            } else {
                // compactionTaskCapacity might be 0 if totalWorkerCapacity is low.
                // This guarantees that at least one slot is available if
                // compaction is enabled and numEstimatedNonCompleteCompactionTasks is 0.
                numAvailableCompactionTaskSlots = Math.max(1, compactionTaskCapacity);
            }
            LOG.info("Found [%d] available task slots for compaction out of [%d] max compaction task capacity", numAvailableCompactionTaskSlots, compactionTaskCapacity);
            stats.addToGlobalStat(AVAILABLE_COMPACTION_TASK_SLOT, numAvailableCompactionTaskSlots);
            stats.addToGlobalStat(MAX_COMPACTION_TASK_SLOT, compactionTaskCapacity);
            final Map<String, AutoCompactionSnapshot.Builder> currentRunAutoCompactionSnapshotBuilders = new HashMap<>();
            if (numAvailableCompactionTaskSlots > 0) {
                stats.accumulate(doRun(compactionConfigs, currentRunAutoCompactionSnapshotBuilders, numAvailableCompactionTaskSlots, iterator));
            } else {
                stats.accumulate(makeStats(currentRunAutoCompactionSnapshotBuilders, 0, iterator));
            }
        } else {
            LOG.info("compactionConfig is empty. Skip.");
            autoCompactionSnapshotPerDataSource.set(new HashMap<>());
        }
    } else {
        LOG.info("maxCompactionTaskSlots was set to 0. Skip compaction");
        autoCompactionSnapshotPerDataSource.set(new HashMap<>());
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used : Logger(org.apache.druid.java.util.common.logger.Logger) Granularity(org.apache.druid.java.util.common.granularity.Granularity) Inject(com.google.inject.Inject) ClientCompactionTaskDimensionsSpec(org.apache.druid.client.indexing.ClientCompactionTaskDimensionsSpec) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) IndexingServiceClient(org.apache.druid.client.indexing.IndexingServiceClient) HashMap(java.util.HashMap) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) AutoCompactionSnapshot(org.apache.druid.server.coordinator.AutoCompactionSnapshot) DataSourceCompactionConfig(org.apache.druid.server.coordinator.DataSourceCompactionConfig) ArrayList(java.util.ArrayList) TaskPayloadResponse(org.apache.druid.client.indexing.TaskPayloadResponse) Interval(org.joda.time.Interval) DruidCoordinatorConfig(org.apache.druid.server.coordinator.DruidCoordinatorConfig) Map(java.util.Map) IAE(org.apache.druid.java.util.common.IAE) DimensionRangePartitionsSpec(org.apache.druid.indexer.partitions.DimensionRangePartitionsSpec) Nullable(javax.annotation.Nullable) ClientCompactionTaskTransformSpec(org.apache.druid.client.indexing.ClientCompactionTaskTransformSpec) JacksonInject(com.fasterxml.jackson.annotation.JacksonInject) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) ClientCompactionTaskQueryTuningConfig(org.apache.druid.client.indexing.ClientCompactionTaskQueryTuningConfig) CompactionStatistics(org.apache.druid.server.coordinator.CompactionStatistics) CoordinatorCompactionConfig(org.apache.druid.server.coordinator.CoordinatorCompactionConfig) ISE(org.apache.druid.java.util.common.ISE) Collectors(java.util.stream.Collectors) ClientCompactionTaskQuery(org.apache.druid.client.indexing.ClientCompactionTaskQuery) TaskStatusPlus(org.apache.druid.indexer.TaskStatusPlus) List(java.util.List) ClientCompactionTaskGranularitySpec(org.apache.druid.client.indexing.ClientCompactionTaskGranularitySpec) GranularityType(org.apache.druid.java.util.common.granularity.GranularityType) JsonCreator(com.fasterxml.jackson.annotation.JsonCreator) DataSegment(org.apache.druid.timeline.DataSegment) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
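
The slot accounting in run is worth a worked instance. Below is a minimal sketch of the same arithmetic with purely illustrative numbers (none of these are Druid defaults):

// Hypothetical inputs: 10 worker slots, a compaction slot ratio of 0.1,
// and a maxCompactionTaskSlots cap of 100.
static int availableCompactionSlots() {
    int totalCapacity = 10;
    double compactionTaskSlotRatio = 0.1;
    int maxCompactionTaskSlots = 100;
    int compactionTaskCapacity = (int) Math.min(totalCapacity * compactionTaskSlotRatio, maxCompactionTaskSlots);
    // compactionTaskCapacity == 1 here: min(10 * 0.1, 100) truncated to int
    int numEstimatedNonCompleteCompactionTasks = 0;
    // With no compaction tasks currently running, at least one slot is always
    // granted, even when the ratio rounds the capacity down to 0.
    return numEstimatedNonCompleteCompactionTasks > 0
           ? Math.max(0, compactionTaskCapacity - numEstimatedNonCompleteCompactionTasks)
           : Math.max(1, compactionTaskCapacity);
}

This returns 1 for the numbers above, so the duty would hand exactly one task slot to doRun.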

Example 2 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

In the class MarkAsUnusedOvershadowedSegments, the method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    // Mark as unused overshadowed segments only if we've had enough time to make sure we aren't flapping with old data.
    if (!params.coordinatorIsLeadingEnoughTimeToMarkAsUnusedOvershadowedSegements()) {
        return params;
    }
    CoordinatorStats stats = new CoordinatorStats();
    DruidCluster cluster = params.getDruidCluster();
    Map<String, VersionedIntervalTimeline<String, DataSegment>> timelines = new HashMap<>();
    for (SortedSet<ServerHolder> serverHolders : cluster.getSortedHistoricalsByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            addSegmentsFromServer(serverHolder, timelines);
        }
    }
    for (ServerHolder serverHolder : cluster.getBrokers()) {
        addSegmentsFromServer(serverHolder, timelines);
    }
    // Mark all segments as unused in db that are overshadowed by served segments
    for (DataSegment dataSegment : params.getUsedSegments()) {
        VersionedIntervalTimeline<String, DataSegment> timeline = timelines.get(dataSegment.getDataSource());
        if (timeline != null && timeline.isOvershadowed(dataSegment.getInterval(), dataSegment.getVersion(), dataSegment)) {
            coordinator.markSegmentAsUnused(dataSegment);
            stats.addToGlobalStat("overShadowedCount", 1);
        }
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used : CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) HashMap(java.util.HashMap) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment)
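
The isOvershadowed call is the heart of this duty: a segment is overshadowed when segments with a higher version fully cover its interval. Here is a minimal, self-contained sketch of the same check, assuming a hypothetical "wiki" datasource and reusing the DataSegment constructor shape from Example 3 below:

import com.google.common.collect.ImmutableList;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.VersionedIntervalTimeline;
import org.apache.druid.timeline.partition.NumberedShardSpec;

static boolean oldVersionIsOvershadowed() {
    // Two segments over the same day; version "v2" sorts after "v1" and so wins.
    DataSegment v1 = new DataSegment("wiki", Intervals.of("2021-02-13/2021-02-14"), "v1", null, ImmutableList.of(), ImmutableList.of(), new NumberedShardSpec(0, 1), null, 0, 100);
    DataSegment v2 = new DataSegment("wiki", Intervals.of("2021-02-13/2021-02-14"), "v2", null, ImmutableList.of(), ImmutableList.of(), new NumberedShardSpec(0, 1), null, 0, 100);
    VersionedIntervalTimeline<String, DataSegment> timeline = VersionedIntervalTimeline.forSegments(ImmutableList.of(v1, v2));
    // true: v1's interval is fully covered by the newer v2, so the duty above
    // would mark v1 as unused.
    return timeline.isOvershadowed(v1.getInterval(), v1.getVersion(), v1);
}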

Example 3 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

In the class NewestSegmentFirstPolicyTest, the method createTimeline:

private static VersionedIntervalTimeline<String, DataSegment> createTimeline(SegmentGenerateSpec... specs) {
    List<DataSegment> segments = new ArrayList<>();
    final String version = DateTimes.nowUtc().toString();
    final List<SegmentGenerateSpec> orderedSpecs = Arrays.asList(specs);
    orderedSpecs.sort(Comparator.comparing(s -> s.totalInterval, Comparators.intervalsByStartThenEnd().reversed()));
    for (SegmentGenerateSpec spec : orderedSpecs) {
        Interval remainingInterval = spec.totalInterval;
        while (!Intervals.isEmpty(remainingInterval)) {
            final Interval segmentInterval;
            if (remainingInterval.toDuration().isLongerThan(spec.segmentPeriod.toStandardDuration())) {
                segmentInterval = new Interval(spec.segmentPeriod, remainingInterval.getEnd());
            } else {
                segmentInterval = remainingInterval;
            }
            for (int i = 0; i < spec.numSegmentsPerShard; i++) {
                final ShardSpec shardSpec = new NumberedShardSpec(i, spec.numSegmentsPerShard);
                final DataSegment segment = new DataSegment(DATA_SOURCE, segmentInterval, spec.version == null ? version : spec.version, null, ImmutableList.of(), ImmutableList.of(), shardSpec, spec.lastCompactionState, 0, spec.segmentSize);
                segments.add(segment);
            }
            remainingInterval = SegmentCompactionUtil.removeIntervalFromEnd(remainingInterval, segmentInterval);
        }
    }
    return VersionedIntervalTimeline.forSegments(segments);
}
Also used : DateTimeZone(org.joda.time.DateTimeZone) Arrays(java.util.Arrays) Comparators(org.apache.druid.java.util.common.guava.Comparators) Partitions(org.apache.druid.timeline.Partitions) UserCompactionTaskDimensionsConfig(org.apache.druid.server.coordinator.UserCompactionTaskDimensionsConfig) IndexSpec(org.apache.druid.segment.IndexSpec) CompactionState(org.apache.druid.timeline.CompactionState) DataSourceCompactionConfig(org.apache.druid.server.coordinator.DataSourceCompactionConfig) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) SelectorDimFilter(org.apache.druid.query.filter.SelectorDimFilter) PeriodGranularity(org.apache.druid.java.util.common.granularity.PeriodGranularity) Map(java.util.Map) Assertions(org.assertj.core.api.Assertions) TypeReference(com.fasterxml.jackson.core.type.TypeReference) ConciseBitmapSerdeFactory(org.apache.druid.segment.data.ConciseBitmapSerdeFactory) DateTimes(org.apache.druid.java.util.common.DateTimes) ShardSpec(org.apache.druid.timeline.partition.ShardSpec) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) TimeZone(java.util.TimeZone) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) ClientCompactionTaskQueryTuningConfig(org.apache.druid.client.indexing.ClientCompactionTaskQueryTuningConfig) TestExprMacroTable(org.apache.druid.query.expression.TestExprMacroTable) Collectors(java.util.stream.Collectors) UserCompactionTaskTransformConfig(org.apache.druid.server.coordinator.UserCompactionTaskTransformConfig) ExprMacroTable(org.apache.druid.math.expr.ExprMacroTable) List(java.util.List) DataSegment(org.apache.druid.timeline.DataSegment) TransformSpec(org.apache.druid.segment.transform.TransformSpec) Iterables(com.google.common.collect.Iterables) InjectableValues(com.fasterxml.jackson.databind.InjectableValues) Intervals(org.apache.druid.java.util.common.Intervals) ArrayList(java.util.ArrayList) Interval(org.joda.time.Interval) ImmutableList(com.google.common.collect.ImmutableList) UserCompactionTaskGranularityConfig(org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig) PartitionsSpec(org.apache.druid.indexer.partitions.PartitionsSpec) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) Period(org.joda.time.Period) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DimensionsSpec(org.apache.druid.data.input.impl.DimensionsSpec) Test(org.junit.Test) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) Granularities(org.apache.druid.java.util.common.granularity.Granularities) NullHandling(org.apache.druid.common.config.NullHandling) Preconditions(com.google.common.base.Preconditions) Assert(org.junit.Assert) Comparator(java.util.Comparator) Collections(java.util.Collections)
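
One subtlety in the helper above: the Joda-Time constructor new Interval(period, end) anchors the period to the end instant, so each iteration carves a segmentPeriod-sized piece off the tail of the remaining interval. A tiny illustration with hypothetical dates:

import org.apache.druid.java.util.common.Intervals;
import org.joda.time.Interval;
import org.joda.time.Period;

// Interval(Period, ReadableInstant) ends at the given instant.
Interval remaining = Intervals.of("2021-01-01/2021-01-04");
Interval lastDay = new Interval(Period.days(1), remaining.getEnd());
// lastDay covers 2021-01-03/2021-01-04, the final day of `remaining`.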

Example 4 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

In the class TimeBoundaryQueryRunnerTest, the method getCustomRunner:

private QueryRunner getCustomRunner() throws IOException {
    CharSource v_0112 = CharSource.wrap(StringUtils.join(V_0112, "\n"));
    CharSource v_0113 = CharSource.wrap(StringUtils.join(V_0113, "\n"));
    IncrementalIndex index0 = TestIndex.loadIncrementalIndex(newIndex("2011-01-12T00:00:00.000Z"), v_0112);
    IncrementalIndex index1 = TestIndex.loadIncrementalIndex(newIndex("2011-01-14T00:00:00.000Z"), v_0113);
    segment0 = new IncrementalIndexSegment(index0, makeIdentifier(index0, "v1"));
    segment1 = new IncrementalIndexSegment(index1, makeIdentifier(index1, "v1"));
    VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline = new VersionedIntervalTimeline<>(StringComparators.LEXICOGRAPHIC);
    timeline.add(index0.getInterval(), "v1", new SingleElementPartitionChunk<>(ReferenceCountingSegment.wrapRootGenerationSegment(segment0)));
    timeline.add(index1.getInterval(), "v1", new SingleElementPartitionChunk<>(ReferenceCountingSegment.wrapRootGenerationSegment(segment1)));
    return QueryRunnerTestHelper.makeFilteringQueryRunner(timeline, FACTORY);
}
Also used : ReferenceCountingSegment(org.apache.druid.segment.ReferenceCountingSegment) CharSource(com.google.common.io.CharSource) IncrementalIndex(org.apache.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(org.apache.druid.segment.incremental.OnheapIncrementalIndex) IncrementalIndexSegment(org.apache.druid.segment.IncrementalIndexSegment) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline)
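
Once chunks are registered with add, the timeline can be queried: lookup returns the visible (highest-version, non-overshadowed) chunks for an interval. A minimal sketch of the add-then-lookup pattern, under the same assumptions as the sketch after Example 2 (hypothetical segments v1 and v2 over one day):

import java.util.List;
import com.google.common.collect.Ordering;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.TimelineObjectHolder;
import org.apache.druid.timeline.VersionedIntervalTimeline;

static List<TimelineObjectHolder<String, DataSegment>> visibleChunks(DataSegment v1, DataSegment v2) {
    VersionedIntervalTimeline<String, DataSegment> timeline = new VersionedIntervalTimeline<>(Ordering.natural());
    timeline.add(v1.getInterval(), v1.getVersion(), v1.getShardSpec().createChunk(v1));
    timeline.add(v2.getInterval(), v2.getVersion(), v2.getShardSpec().createChunk(v2));
    // Only chunks of the newest version covering the interval are returned;
    // v1 is hidden by v2 here.
    return timeline.lookup(Intervals.of("2021-02-13/2021-02-14"));
}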

Example 5 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

In the class CachingClusteredClientPerfTest, the method testGetQueryRunnerForSegments_singleIntervalLargeSegments:

@Test(timeout = 10_000)
public void testGetQueryRunnerForSegments_singleIntervalLargeSegments() {
    final int segmentCount = 30_000;
    final Interval interval = Intervals.of("2021-02-13/2021-02-14");
    final List<SegmentDescriptor> segmentDescriptors = new ArrayList<>(segmentCount);
    final List<DataSegment> dataSegments = new ArrayList<>(segmentCount);
    final VersionedIntervalTimeline<String, ServerSelector> timeline = new VersionedIntervalTimeline<>(Ordering.natural());
    final DruidServer server = new DruidServer("server", "localhost:9000", null, Long.MAX_VALUE, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_PRIORITY);
    for (int ii = 0; ii < segmentCount; ii++) {
        segmentDescriptors.add(new SegmentDescriptor(interval, "1", ii));
        DataSegment segment = makeDataSegment("test", interval, "1", ii);
        dataSegments.add(segment);
    }
    timeline.addAll(Iterators.transform(dataSegments.iterator(), segment -> {
        ServerSelector ss = new ServerSelector(segment, new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
        ss.addServerAndUpdateSegment(new QueryableDruidServer(server, new MockQueryRunner()), segment);
        return new VersionedIntervalTimeline.PartitionChunkEntry<>(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(ss));
    }));
    TimelineServerView serverView = Mockito.mock(TimelineServerView.class);
    QueryScheduler queryScheduler = Mockito.mock(QueryScheduler.class);
    // mock scheduler to return same sequence as argument
    Mockito.when(queryScheduler.run(any(), any())).thenAnswer(i -> i.getArgument(1));
    Mockito.when(queryScheduler.prioritizeAndLaneQuery(any(), any())).thenAnswer(i -> ((QueryPlus) i.getArgument(0)).getQuery());
    Mockito.doReturn(Optional.of(timeline)).when(serverView).getTimeline(any());
    Mockito.doReturn(new MockQueryRunner()).when(serverView).getQueryRunner(any());
    CachingClusteredClient cachingClusteredClient = new CachingClusteredClient(new MockQueryToolChestWareHouse(), serverView, MapCache.create(1024), TestHelper.makeJsonMapper(), Mockito.mock(CachePopulator.class), new CacheConfig(), Mockito.mock(DruidHttpClientConfig.class), Mockito.mock(DruidProcessingConfig.class), ForkJoinPool.commonPool(), queryScheduler, NoopJoinableFactory.INSTANCE, new NoopServiceEmitter());
    Query<SegmentDescriptor> fakeQuery = makeFakeQuery(interval);
    QueryRunner<SegmentDescriptor> queryRunner = cachingClusteredClient.getQueryRunnerForSegments(fakeQuery, segmentDescriptors);
    Sequence<SegmentDescriptor> sequence = queryRunner.run(QueryPlus.wrap(fakeQuery));
    Assert.assertEquals(segmentDescriptors, sequence.toList());
}
Also used : QueryPlus(org.apache.druid.query.QueryPlus) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) QueryRunner(org.apache.druid.query.QueryRunner) QueryToolChestWarehouse(org.apache.druid.query.QueryToolChestWarehouse) NoopJoinableFactory(org.apache.druid.segment.join.NoopJoinableFactory) Sequence(org.apache.druid.java.util.common.guava.Sequence) QueryScheduler(org.apache.druid.server.QueryScheduler) ImmutableMap(com.google.common.collect.ImmutableMap) DataSource(org.apache.druid.query.DataSource) CacheConfig(org.apache.druid.client.cache.CacheConfig) DruidProcessingConfig(org.apache.druid.query.DruidProcessingConfig) QuerySegmentSpec(org.apache.druid.query.spec.QuerySegmentSpec) DruidHttpClientConfig(org.apache.druid.guice.http.DruidHttpClientConfig) List(java.util.List) DimFilter(org.apache.druid.query.filter.DimFilter) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) DataSegment(org.apache.druid.timeline.DataSegment) ServerManagerTest(org.apache.druid.server.coordination.ServerManagerTest) Optional(java.util.Optional) QueryableDruidServer(org.apache.druid.client.selector.QueryableDruidServer) MapCache(org.apache.druid.client.cache.MapCache) ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) HighestPriorityTierSelectorStrategy(org.apache.druid.client.selector.HighestPriorityTierSelectorStrategy) Intervals(org.apache.druid.java.util.common.Intervals) TestSequence(org.apache.druid.java.util.common.guava.TestSequence) BaseQuery(org.apache.druid.query.BaseQuery) Iterators(com.google.common.collect.Iterators) ArrayList(java.util.ArrayList) MultipleSpecificSegmentSpec(org.apache.druid.query.spec.MultipleSpecificSegmentSpec) ServerSelector(org.apache.druid.client.selector.ServerSelector) Interval(org.joda.time.Interval) Query(org.apache.druid.query.Query) MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec) CachePopulator(org.apache.druid.client.cache.CachePopulator) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ResponseContext(org.apache.druid.query.context.ResponseContext) QueryToolChest(org.apache.druid.query.QueryToolChest) Test(org.junit.Test) TableDataSource(org.apache.druid.query.TableDataSource) Mockito(org.mockito.Mockito) TestHelper(org.apache.druid.segment.TestHelper) RandomServerSelectorStrategy(org.apache.druid.client.selector.RandomServerSelectorStrategy) Ordering(com.google.common.collect.Ordering) ForkJoinPool(java.util.concurrent.ForkJoinPool) SegmentDescriptor(org.apache.druid.query.SegmentDescriptor) Assert(org.junit.Assert) Collections(java.util.Collections)

Aggregations

VersionedIntervalTimeline (org.apache.druid.timeline.VersionedIntervalTimeline): 19 usages
DataSegment (org.apache.druid.timeline.DataSegment): 10 usages
ArrayList (java.util.ArrayList): 9 usages
Interval (org.joda.time.Interval): 9 usages
List (java.util.List): 8 usages
Map (java.util.Map): 8 usages
ReferenceCountingSegment (org.apache.druid.segment.ReferenceCountingSegment): 7 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 6 usages
Intervals (org.apache.druid.java.util.common.Intervals): 6 usages
Logger (org.apache.druid.java.util.common.logger.Logger): 6 usages
Test (org.junit.Test): 6 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 5 usages
Collections (java.util.Collections): 5 usages
HashMap (java.util.HashMap): 5 usages
Optional (java.util.Optional): 5 usages
ISE (org.apache.druid.java.util.common.ISE): 5 usages
QueryRunner (org.apache.druid.query.QueryRunner): 5 usages
SegmentDescriptor (org.apache.druid.query.SegmentDescriptor): 5 usages
TableDataSource (org.apache.druid.query.TableDataSource): 5 usages
DataSourceAnalysis (org.apache.druid.query.planning.DataSourceAnalysis): 5 usages