
Example 41 with SegmentId

Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

The class ScanQueryResultOrderingTest, method assertResultsEquals.

private void assertResultsEquals(final ScanQuery query, final List<Integer> expectedResults) {
    final List<List<Pair<SegmentId, QueryRunner<ScanResultValue>>>> serverRunners = new ArrayList<>();
    for (int i = 0; i <= segmentToServerMap.stream().max(Comparator.naturalOrder()).orElse(0); i++) {
        serverRunners.add(new ArrayList<>());
    }
    for (int segmentNumber = 0; segmentNumber < segmentToServerMap.size(); segmentNumber++) {
        final SegmentId segmentId = SEGMENTS.get(segmentNumber).getId();
        final int serverNumber = segmentToServerMap.get(segmentNumber);
        serverRunners.get(serverNumber).add(Pair.of(segmentId, segmentRunners.get(segmentNumber)));
    }
    // Simulates what the Historical servers would do.
    final List<QueryRunner<ScanResultValue>> mergedServerRunners = serverRunners
        .stream()
        .filter(runners -> !runners.isEmpty())
        .map(runners -> queryRunnerFactory.getToolchest().mergeResults(new QueryRunner<ScanResultValue>() {

            @Override
            public Sequence<ScanResultValue> run(final QueryPlus<ScanResultValue> queryPlus, final ResponseContext responseContext) {
                return queryRunnerFactory
                    .mergeRunners(Execs.directExecutor(), runners.stream().map(p -> p.rhs).collect(Collectors.toList()))
                    .run(
                        queryPlus.withQuery(queryPlus.getQuery().withQuerySegmentSpec(
                            new MultipleSpecificSegmentSpec(runners.stream().map(p -> p.lhs.toDescriptor()).collect(Collectors.toList()))
                        )),
                        responseContext
                    );
            }
        }))
        .collect(Collectors.toList());
    // Simulates what the Broker would do.
    final QueryRunner<ScanResultValue> brokerRunner = queryRunnerFactory.getToolchest().mergeResults((queryPlus, responseContext) -> {
        final List<Sequence<ScanResultValue>> sequences = mergedServerRunners.stream().map(runner -> runner.run(queryPlus.withoutThreadUnsafeState())).collect(Collectors.toList());
        return new MergeSequence<>(queryPlus.getQuery().getResultOrdering(), Sequences.simple(sequences));
    });
    // Finally: run the query.
    final List<Integer> results = runQuery(
        (ScanQuery) Druids.ScanQueryBuilder.copy(query)
            .limit(limit)
            .batchSize(batchSize)
            .build()
            .withOverriddenContext(ImmutableMap.of(ScanQueryConfig.CTX_KEY_MAX_ROWS_QUEUED_FOR_ORDERING, maxRowsQueuedForOrdering)),
        brokerRunner
    );
    Assert.assertEquals(expectedResults.stream().limit(limit == 0 ? Long.MAX_VALUE : limit).collect(Collectors.toList()), results);
}
Also used : IntStream(java.util.stream.IntStream) QueryPlus(org.apache.druid.query.QueryPlus) Intervals(org.apache.druid.java.util.common.Intervals) RowBasedSegment(org.apache.druid.segment.RowBasedSegment) RunWith(org.junit.runner.RunWith) TreeSet(java.util.TreeSet) Pair(org.apache.druid.java.util.common.Pair) ArrayList(java.util.ArrayList) MultipleSpecificSegmentSpec(org.apache.druid.query.spec.MultipleSpecificSegmentSpec) Druids(org.apache.druid.query.Druids) RowAdapter(org.apache.druid.segment.RowAdapter) ColumnHolder(org.apache.druid.segment.column.ColumnHolder) ImmutableList(com.google.common.collect.ImmutableList) QueryRunner(org.apache.druid.query.QueryRunner) MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec) Sequences(org.apache.druid.java.util.common.guava.Sequences) Parameterized(org.junit.runners.Parameterized) Before(org.junit.Before) ImmutableSortedSet(com.google.common.collect.ImmutableSortedSet) DateTimes(org.apache.druid.java.util.common.DateTimes) Sequence(org.apache.druid.java.util.common.guava.Sequence) Execs(org.apache.druid.java.util.common.concurrent.Execs) ImmutableMap(com.google.common.collect.ImmutableMap) MergeSequence(org.apache.druid.java.util.common.guava.MergeSequence) ResponseContext(org.apache.druid.query.context.ResponseContext) DateTime(org.joda.time.DateTime) Set(java.util.Set) Test(org.junit.Test) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) List(java.util.List) RowSignature(org.apache.druid.segment.column.RowSignature) ColumnType(org.apache.druid.segment.column.ColumnType) DefaultGenericQueryMetricsFactory(org.apache.druid.query.DefaultGenericQueryMetricsFactory) SegmentId(org.apache.druid.timeline.SegmentId) Assert(org.junit.Assert) Comparator(java.util.Comparator) Collections(java.util.Collections)
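
As a brief aside (not part of the test above): the core conversion in this example is SegmentId.toDescriptor() feeding a MultipleSpecificSegmentSpec, which pins a query to an explicit set of segments. A minimal, hedged sketch of just that step follows; the class name SegmentSpecSketch and the "wiki"/"v1" values are illustrative, not taken from the source.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.query.SegmentDescriptor;
import org.apache.druid.query.spec.MultipleSpecificSegmentSpec;
import org.apache.druid.timeline.SegmentId;

public class SegmentSpecSketch {

    public static void main(String[] args) {
        // Three ids for the same datasource, interval and version, one per partition (values are illustrative).
        final List<SegmentId> ids = IntStream.range(0, 3)
            .mapToObj(partition -> SegmentId.of("wiki", Intervals.of("2020-01-01/2020-01-02"), "v1", partition))
            .collect(Collectors.toList());
        // toDescriptor() is the same conversion the test above uses to pin the query to specific segments.
        final List<SegmentDescriptor> descriptors = ids.stream().map(SegmentId::toDescriptor).collect(Collectors.toList());
        final MultipleSpecificSegmentSpec spec = new MultipleSpecificSegmentSpec(descriptors);
        System.out.println(spec.getIntervals());
    }
}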

Example 42 with SegmentId

Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

The class SegmentManager, method getIndexedTables.

/**
 * Returns the collection of {@link IndexedTable} for the entire timeline (since join conditions do not currently
 * consider the query's intervals), one table per joinable segment, provided the timeline exists.
 */
public Optional<Stream<ReferenceCountingIndexedTable>> getIndexedTables(DataSourceAnalysis analysis) {
    return getTimeline(analysis).map(timeline -> {
        // join doesn't currently consider intervals, so just consider all segments
        final Stream<ReferenceCountingSegment> segments = timeline
            .lookup(Intervals.ETERNITY)
            .stream()
            .flatMap(x -> StreamSupport.stream(x.getObject().payloads().spliterator(), false));
        final TableDataSource tableDataSource = getTableDataSource(analysis);
        ConcurrentHashMap<SegmentId, ReferenceCountingIndexedTable> tables = Optional
            .ofNullable(dataSources.get(tableDataSource.getName()))
            .map(DataSourceState::getTablesLookup)
            .orElseThrow(() -> new ISE("Datasource %s does not have IndexedTables", tableDataSource.getName()));
        return segments.map(segment -> tables.get(segment.getId())).filter(Objects::nonNull);
    });
}
Also used : ReferenceCountingSegment(org.apache.druid.segment.ReferenceCountingSegment) ReferenceCountingIndexedTable(org.apache.druid.segment.join.table.ReferenceCountingIndexedTable) DataSourceAnalysis(org.apache.druid.query.planning.DataSourceAnalysis) Intervals(org.apache.druid.java.util.common.Intervals) Inject(com.google.inject.Inject) SegmentLazyLoadFailCallback(org.apache.druid.segment.SegmentLazyLoadFailCallback) SegmentLoadingException(org.apache.druid.segment.loading.SegmentLoadingException) CollectionUtils(org.apache.druid.utils.CollectionUtils) SegmentLoader(org.apache.druid.segment.loading.SegmentLoader) PartitionChunk(org.apache.druid.timeline.partition.PartitionChunk) SettableSupplier(org.apache.druid.common.guava.SettableSupplier) Map(java.util.Map) StreamSupport(java.util.stream.StreamSupport) ShardSpec(org.apache.druid.timeline.partition.ShardSpec) EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) Closer(org.apache.druid.java.util.common.io.Closer) IndexedTable(org.apache.druid.segment.join.table.IndexedTable) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) IOException(java.io.IOException) TableDataSource(org.apache.druid.query.TableDataSource) Objects(java.util.Objects) Stream(java.util.stream.Stream) Ordering(com.google.common.collect.Ordering) DataSegment(org.apache.druid.timeline.DataSegment) Optional(java.util.Optional) VisibleForTesting(com.google.common.annotations.VisibleForTesting) SegmentId(org.apache.druid.timeline.SegmentId)
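
A hedged, standalone sketch of the lookup pattern above: segment ids index into a ConcurrentHashMap, which works because SegmentId is a value type with proper equals/hashCode. Here a String stands in for ReferenceCountingIndexedTable, and the class and datasource names are invented for illustration; this is not the production SegmentManager code.

import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.SegmentId;

public class TablesLookupSketch {

    // Stand-in for the per-datasource tablesLookup map; the real one maps to ReferenceCountingIndexedTable.
    private final ConcurrentHashMap<SegmentId, String> tablesLookup = new ConcurrentHashMap<>();

    public Optional<String> findTable(SegmentId segmentId) {
        // SegmentId implements equals/hashCode, so it can be used directly as a map key.
        return Optional.ofNullable(tablesLookup.get(segmentId));
    }

    public static void main(String[] args) {
        final TablesLookupSketch sketch = new TablesLookupSketch();
        final SegmentId id = SegmentId.of("lookup_ds", Intervals.ETERNITY, "v1", 0);
        sketch.tablesLookup.put(id, "indexed-table-for-" + id);
        System.out.println(sketch.findTable(id).orElse("missing"));
    }
}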

Example 43 with SegmentId

Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

The class ActionBasedUsedSegmentChecker, method findUsedSegments.

@Override
public Set<DataSegment> findUsedSegments(Set<SegmentIdWithShardSpec> segmentIds) throws IOException {
    // Group by dataSource
    final Map<String, Set<SegmentId>> idsByDataSource = new TreeMap<>();
    for (SegmentIdWithShardSpec segmentId : segmentIds) {
        idsByDataSource.computeIfAbsent(segmentId.getDataSource(), i -> new HashSet<>()).add(segmentId.asSegmentId());
    }
    final Set<DataSegment> usedSegments = new HashSet<>();
    for (Map.Entry<String, Set<SegmentId>> entry : idsByDataSource.entrySet()) {
        String dataSource = entry.getKey();
        Set<SegmentId> segmentIdsInDataSource = entry.getValue();
        final List<Interval> intervals = JodaUtils.condenseIntervals(Iterables.transform(segmentIdsInDataSource, SegmentId::getInterval));
        final Collection<DataSegment> usedSegmentsForIntervals = taskActionClient.submit(new RetrieveUsedSegmentsAction(dataSource, null, intervals, Segments.ONLY_VISIBLE));
        for (DataSegment segment : usedSegmentsForIntervals) {
            if (segmentIdsInDataSource.contains(segment.getId())) {
                usedSegments.add(segment);
            }
        }
    }
    return usedSegments;
}
Also used : Iterables(com.google.common.collect.Iterables) Collection(java.util.Collection) Segments(org.apache.druid.indexing.overlord.Segments) SegmentIdWithShardSpec(org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec) Set(java.util.Set) IOException(java.io.IOException) UsedSegmentChecker(org.apache.druid.segment.realtime.appenderator.UsedSegmentChecker) HashSet(java.util.HashSet) Interval(org.joda.time.Interval) List(java.util.List) JodaUtils(org.apache.druid.java.util.common.JodaUtils) TaskActionClient(org.apache.druid.indexing.common.actions.TaskActionClient) TreeMap(java.util.TreeMap) Map(java.util.Map) DataSegment(org.apache.druid.timeline.DataSegment) RetrieveUsedSegmentsAction(org.apache.druid.indexing.common.actions.RetrieveUsedSegmentsAction) SegmentId(org.apache.druid.timeline.SegmentId)
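
The grouping step above can be sketched in isolation: bucket SegmentIds by datasource, then let JodaUtils.condenseIntervals collapse adjacent or overlapping intervals before the metadata call. The standalone example below uses invented datasource names and a plain println instead of a task action; it is a sketch of the pattern, not the production path.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.JodaUtils;
import org.apache.druid.timeline.SegmentId;
import org.joda.time.Interval;

public class GroupByDataSourceSketch {

    public static void main(String[] args) {
        final Set<SegmentId> ids = new HashSet<>(Arrays.asList(
            SegmentId.of("wiki", Intervals.of("2020-01-01/2020-01-02"), "v1", 0),
            SegmentId.of("wiki", Intervals.of("2020-01-02/2020-01-03"), "v1", 0),
            SegmentId.of("metrics", Intervals.of("2020-01-01/2020-01-02"), "v1", 0)
        ));
        // Same bucketing as findUsedSegments: one entry per datasource.
        final Map<String, Set<SegmentId>> idsByDataSource = new TreeMap<>();
        for (SegmentId id : ids) {
            idsByDataSource.computeIfAbsent(id.getDataSource(), d -> new HashSet<>()).add(id);
        }
        for (Map.Entry<String, Set<SegmentId>> entry : idsByDataSource.entrySet()) {
            final List<Interval> intervals = new ArrayList<>();
            for (SegmentId id : entry.getValue()) {
                intervals.add(id.getInterval());
            }
            // Adjacent and overlapping intervals collapse into one, shrinking the metadata lookup.
            System.out.println(entry.getKey() + " -> " + JodaUtils.condenseIntervals(intervals));
        }
    }
}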

Example 44 with SegmentId

Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

The class AppenderatorImpl, method mergeAndPush.

/**
 * Merge segment, push to deep storage. Should only be used on segments that have been fully persisted. Must only
 * be run in the single-threaded pushExecutor.
 *
 * @param identifier    sink identifier
 * @param sink          sink to push
 * @param useUniquePath true if the segment should be written to a path with a unique identifier
 *
 * @return segment descriptor, or null if the sink is no longer valid
 */
@Nullable
private DataSegment mergeAndPush(final SegmentIdWithShardSpec identifier, final Sink sink, final boolean useUniquePath) {
    // noinspection ObjectEquality
    if (sinks.get(identifier) != sink) {
        log.warn("Sink for segment[%s] no longer valid, bailing out of mergeAndPush.", identifier);
        return null;
    }
    // Use a descriptor file to indicate that pushing has completed.
    final File persistDir = computePersistDir(identifier);
    final File mergedTarget = new File(persistDir, "merged");
    final File descriptorFile = computeDescriptorFile(identifier);
    // Sanity checks
    for (FireHydrant hydrant : sink) {
        if (sink.isWritable()) {
            throw new ISE("Expected sink to be no longer writable before mergeAndPush for segment[%s].", identifier);
        }
        synchronized (hydrant) {
            if (!hydrant.hasSwapped()) {
                throw new ISE("Expected sink to be fully persisted before mergeAndPush for segment[%s].", identifier);
            }
        }
    }
    try {
        if (descriptorFile.exists()) {
            if (useUniquePath) {
                // Don't reuse the descriptor, because the caller asked for a unique path. Leave the old one as-is, since
                // it might serve some unknown purpose.
                log.debug("Segment[%s] already pushed, but we want a unique path, so will push again with a new path.", identifier);
            } else {
                log.info("Segment[%s] already pushed, skipping.", identifier);
                return objectMapper.readValue(descriptorFile, DataSegment.class);
            }
        }
        removeDirectory(mergedTarget);
        if (mergedTarget.exists()) {
            throw new ISE("Merged target[%s] exists after removing?!", mergedTarget);
        }
        final File mergedFile;
        final long mergeFinishTime;
        final long startTime = System.nanoTime();
        List<QueryableIndex> indexes = new ArrayList<>();
        Closer closer = Closer.create();
        try {
            for (FireHydrant fireHydrant : sink) {
                // if batch, swap/persist did not memory map the incremental index, we need it mapped now:
                if (!isOpenSegments()) {
                    // sanity
                    Pair<File, SegmentId> persistedMetadata = persistedHydrantMetadata.get(fireHydrant);
                    if (persistedMetadata == null) {
                        throw new ISE("Persisted metadata for batch hydrant [%s] is null!", fireHydrant);
                    }
                    File persistedFile = persistedMetadata.lhs;
                    SegmentId persistedSegmentId = persistedMetadata.rhs;
                    // sanity:
                    if (persistedFile == null) {
                        throw new ISE("Persisted file for batch hydrant [%s] is null!", fireHydrant);
                    } else if (persistedSegmentId == null) {
                        throw new ISE("Persisted segmentId for batch hydrant in file [%s] is null!", persistedFile.getPath());
                    }
                    fireHydrant.swapSegment(new QueryableIndexSegment(indexIO.loadIndex(persistedFile), persistedSegmentId));
                }
                Pair<ReferenceCountingSegment, Closeable> segmentAndCloseable = fireHydrant.getAndIncrementSegment();
                final QueryableIndex queryableIndex = segmentAndCloseable.lhs.asQueryableIndex();
                log.debug("Segment[%s] adding hydrant[%s]", identifier, fireHydrant);
                indexes.add(queryableIndex);
                closer.register(segmentAndCloseable.rhs);
            }
            mergedFile = indexMerger.mergeQueryableIndex(
                indexes,
                schema.getGranularitySpec().isRollup(),
                schema.getAggregators(),
                schema.getDimensionsSpec(),
                mergedTarget,
                tuningConfig.getIndexSpec(),
                tuningConfig.getIndexSpecForIntermediatePersists(),
                new BaseProgressIndicator(),
                tuningConfig.getSegmentWriteOutMediumFactory(),
                tuningConfig.getMaxColumnsToMerge()
            );
            mergeFinishTime = System.nanoTime();
            log.debug("Segment[%s] built in %,dms.", identifier, (mergeFinishTime - startTime) / 1000000);
        } catch (Throwable t) {
            throw closer.rethrow(t);
        } finally {
            closer.close();
        }
        final DataSegment segmentToPush = sink.getSegment().withDimensions(IndexMerger.getMergedDimensionsFromQueryableIndexes(indexes, schema.getDimensionsSpec()));
        // Retry pushing segments because uploading to deep storage might fail especially for cloud storage types
        final DataSegment segment = RetryUtils.retry(// semantics.
        () -> dataSegmentPusher.push(mergedFile, segmentToPush, useUniquePath), exception -> exception instanceof Exception, 5);
        if (!isOpenSegments()) {
            // can generate OOMs during merge if enough of them are held back...
            for (FireHydrant fireHydrant : sink) {
                fireHydrant.swapSegment(null);
            }
        }
        final long pushFinishTime = System.nanoTime();
        objectMapper.writeValue(descriptorFile, segment);
        log.info("Segment[%s] of %,d bytes " + "built from %d incremental persist(s) in %,dms; " + "pushed to deep storage in %,dms. " + "Load spec is: %s", identifier, segment.getSize(), indexes.size(), (mergeFinishTime - startTime) / 1000000, (pushFinishTime - mergeFinishTime) / 1000000, objectMapper.writeValueAsString(segment.getLoadSpec()));
        return segment;
    } catch (Exception e) {
        metrics.incrementFailedHandoffs();
        log.warn(e, "Failed to push merged index for segment[%s].", identifier);
        throw new RuntimeException(e);
    }
}
Also used : Closer(org.apache.druid.java.util.common.io.Closer) QueryableIndexSegment(org.apache.druid.segment.QueryableIndexSegment) ReferenceCountingSegment(org.apache.druid.segment.ReferenceCountingSegment) SegmentId(org.apache.druid.timeline.SegmentId) Closeable(java.io.Closeable) ArrayList(java.util.ArrayList) DataSegment(org.apache.druid.timeline.DataSegment) IndexSizeExceededException(org.apache.druid.segment.incremental.IndexSizeExceededException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) QueryableIndex(org.apache.druid.segment.QueryableIndex) ISE(org.apache.druid.java.util.common.ISE) FireHydrant(org.apache.druid.segment.realtime.FireHydrant) File(java.io.File) BaseProgressIndicator(org.apache.druid.segment.BaseProgressIndicator) Nullable(javax.annotation.Nullable)
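
The push near the end relies on RetryUtils.retry with a predicate that treats any Exception as retryable and a maximum of five tries. A tiny, hedged sketch of that same pattern follows, with an in-memory counter standing in for the deep-storage pusher; the class name and the fake location string are invented for illustration.

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.druid.java.util.common.RetryUtils;

public class RetryPushSketch {

    public static void main(String[] args) throws Exception {
        final AtomicInteger attempts = new AtomicInteger();
        // Pretend the first two "uploads" fail transiently, the way cloud deep storage sometimes does.
        final String location = RetryUtils.retry(
            () -> {
                if (attempts.incrementAndGet() < 3) {
                    throw new RuntimeException("transient upload failure");
                }
                return "fake://deep-storage/segment";
            },
            e -> e instanceof Exception,
            5
        );
        System.out.println("pushed after " + attempts.get() + " attempts: " + location);
    }
}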

Example 45 with SegmentId

Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

The class SinkQuerySegmentWalker, method getQueryRunnerForSegments.

@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(final Query<T> query, final Iterable<SegmentDescriptor> specs) {
    // We only handle one particular dataSource. Make sure that's what we have, then ignore from here on out.
    final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
    // Sanity check: make sure the query is based on the table we're meant to handle.
    if (!analysis.getBaseTableDataSource().filter(ds -> dataSource.equals(ds.getName())).isPresent()) {
        throw new ISE("Cannot handle datasource: %s", analysis.getDataSource());
    }
    final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
    if (factory == null) {
        throw new ISE("Unknown query type[%s].", query.getClass());
    }
    final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
    final boolean skipIncrementalSegment = query.getContextValue(CONTEXT_SKIP_INCREMENTAL_SEGMENT, false);
    final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);
    // Make sure this query type can handle the subquery, if present.
    if (analysis.isQuery() && !toolChest.canPerformSubquery(((QueryDataSource) analysis.getDataSource()).getQuery())) {
        throw new ISE("Cannot handle subquery: %s", analysis.getDataSource());
    }
    // segmentMapFn maps each base Segment into a joined Segment if necessary.
    final Function<SegmentReference, SegmentReference> segmentMapFn = joinableFactoryWrapper.createSegmentMapFn(
        analysis.getJoinBaseTableFilter().map(Filters::toFilter).orElse(null),
        analysis.getPreJoinableClauses(),
        cpuTimeAccumulator,
        analysis.getBaseQuery().orElse(query)
    );
    // We compute the join cache key here itself so it doesn't need to be re-computed for every segment
    final Optional<byte[]> cacheKeyPrefix = analysis.isJoin() ? joinableFactoryWrapper.computeJoinDataSourceCacheKey(analysis) : Optional.of(StringUtils.EMPTY_BYTES);
    Iterable<QueryRunner<T>> perSegmentRunners = Iterables.transform(specs, descriptor -> {
        final PartitionChunk<Sink> chunk = sinkTimeline.findChunk(descriptor.getInterval(), descriptor.getVersion(), descriptor.getPartitionNumber());
        if (chunk == null) {
            return new ReportTimelineMissingSegmentQueryRunner<>(descriptor);
        }
        final Sink theSink = chunk.getObject();
        final SegmentId sinkSegmentId = theSink.getSegment().getId();
        Iterable<QueryRunner<T>> perHydrantRunners = new SinkQueryRunners<>(Iterables.transform(theSink, hydrant -> {
            // Hydrant might swap at any point, but if it's swapped at the start
            // then we know it's *definitely* swapped.
            final boolean hydrantDefinitelySwapped = hydrant.hasSwapped();
            if (skipIncrementalSegment && !hydrantDefinitelySwapped) {
                return new Pair<>(hydrant.getSegmentDataInterval(), new NoopQueryRunner<>());
            }
            // Prevent the underlying segment from swapping when its being iterated
            final Optional<Pair<SegmentReference, Closeable>> maybeSegmentAndCloseable = hydrant.getSegmentForQuery(segmentMapFn);
            // if optional isn't present, we failed to acquire reference to the segment or any joinables
            if (!maybeSegmentAndCloseable.isPresent()) {
                return new Pair<>(hydrant.getSegmentDataInterval(), new ReportTimelineMissingSegmentQueryRunner<>(descriptor));
            }
            final Pair<SegmentReference, Closeable> segmentAndCloseable = maybeSegmentAndCloseable.get();
            try {
                QueryRunner<T> runner = factory.createRunner(segmentAndCloseable.lhs);
                // 1) Only use the cache when the hydrant has definitely swapped (i.e. its data is immutable)
                // 2) Hydrants are not the same between replicas, make sure cache is local
                if (hydrantDefinitelySwapped && cache.isLocal()) {
                    StorageAdapter storageAdapter = segmentAndCloseable.lhs.asStorageAdapter();
                    long segmentMinTime = storageAdapter.getMinTime().getMillis();
                    long segmentMaxTime = storageAdapter.getMaxTime().getMillis();
                    Interval actualDataInterval = Intervals.utc(segmentMinTime, segmentMaxTime + 1);
                    runner = new CachingQueryRunner<>(makeHydrantCacheIdentifier(hydrant), cacheKeyPrefix, descriptor, actualDataInterval, objectMapper, cache, toolChest, runner, // Always populate in foreground regardless of config
                    new ForegroundCachePopulator(objectMapper, cachePopulatorStats, cacheConfig.getMaxEntrySize()), cacheConfig);
                }
                // Make it always use Closeable to decrement()
                runner = QueryRunnerHelper.makeClosingQueryRunner(runner, segmentAndCloseable.rhs);
                return new Pair<>(segmentAndCloseable.lhs.getDataInterval(), runner);
            } catch (Throwable e) {
                throw CloseableUtils.closeAndWrapInCatch(e, segmentAndCloseable.rhs);
            }
        }));
        return new SpecificSegmentQueryRunner<>(
            withPerSinkMetrics(
                new BySegmentQueryRunner<>(
                    sinkSegmentId,
                    descriptor.getInterval().getStart(),
                    factory.mergeRunners(DirectQueryProcessingPool.INSTANCE, perHydrantRunners)
                ),
                toolChest,
                sinkSegmentId,
                cpuTimeAccumulator
            ),
            new SpecificSegmentSpec(descriptor)
        );
    });
    final QueryRunner<T> mergedRunner = toolChest.mergeResults(factory.mergeRunners(queryProcessingPool, perSegmentRunners));
    return CPUTimeMetricQueryRunner.safeBuild(new FinalizeResultsQueryRunner<>(mergedRunner, toolChest), toolChest, emitter, cpuTimeAccumulator, true);
}
Also used : DirectQueryProcessingPool(org.apache.druid.query.DirectQueryProcessingPool) QueryRunnerHelper(org.apache.druid.query.QueryRunnerHelper) QueryProcessingPool(org.apache.druid.query.QueryProcessingPool) ForegroundCachePopulator(org.apache.druid.client.cache.ForegroundCachePopulator) StorageAdapter(org.apache.druid.segment.StorageAdapter) Pair(org.apache.druid.java.util.common.Pair) NoopQueryRunner(org.apache.druid.query.NoopQueryRunner) SegmentReference(org.apache.druid.segment.SegmentReference) SpecificSegmentQueryRunner(org.apache.druid.query.spec.SpecificSegmentQueryRunner) QueryRunner(org.apache.druid.query.QueryRunner) FinalizeResultsQueryRunner(org.apache.druid.query.FinalizeResultsQueryRunner) ReportTimelineMissingSegmentQueryRunner(org.apache.druid.query.ReportTimelineMissingSegmentQueryRunner) CacheConfig(org.apache.druid.client.cache.CacheConfig) StringUtils(org.apache.druid.java.util.common.StringUtils) JoinableFactoryWrapper(org.apache.druid.segment.join.JoinableFactoryWrapper) ISE(org.apache.druid.java.util.common.ISE) SpecificSegmentSpec(org.apache.druid.query.spec.SpecificSegmentSpec) BySegmentQueryRunner(org.apache.druid.query.BySegmentQueryRunner) SinkQueryRunners(org.apache.druid.query.SinkQueryRunners) QueryDataSource(org.apache.druid.query.QueryDataSource) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) Optional(java.util.Optional) FunctionalIterable(org.apache.druid.java.util.common.guava.FunctionalIterable) SegmentId(org.apache.druid.timeline.SegmentId) DataSourceAnalysis(org.apache.druid.query.planning.DataSourceAnalysis) Iterables(com.google.common.collect.Iterables) Intervals(org.apache.druid.java.util.common.Intervals) QueryMetrics(org.apache.druid.query.QueryMetrics) CachingQueryRunner(org.apache.druid.client.CachingQueryRunner) JoinableFactory(org.apache.druid.segment.join.JoinableFactory) Function(java.util.function.Function) PartitionChunk(org.apache.druid.timeline.partition.PartitionChunk) Interval(org.joda.time.Interval) MetricsEmittingQueryRunner(org.apache.druid.query.MetricsEmittingQueryRunner) Query(org.apache.druid.query.Query) CachePopulatorStats(org.apache.druid.client.cache.CachePopulatorStats) Sink(org.apache.druid.segment.realtime.plumber.Sink) QuerySegmentWalker(org.apache.druid.query.QuerySegmentWalker) EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) QueryRunnerFactoryConglomerate(org.apache.druid.query.QueryRunnerFactoryConglomerate) QueryToolChest(org.apache.druid.query.QueryToolChest) FireHydrant(org.apache.druid.segment.realtime.FireHydrant) AtomicLong(java.util.concurrent.atomic.AtomicLong) QueryRunnerFactory(org.apache.druid.query.QueryRunnerFactory) Closeable(java.io.Closeable) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) SegmentDescriptor(org.apache.druid.query.SegmentDescriptor) Cache(org.apache.druid.client.cache.Cache) Filters(org.apache.druid.segment.filter.Filters) CloseableUtils(org.apache.druid.utils.CloseableUtils) CPUTimeMetricQueryRunner(org.apache.druid.query.CPUTimeMetricQueryRunner)
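
In this walker the sink's SegmentId is passed around both as an object (for example to BySegmentQueryRunner) and, ultimately, as part of string identifiers such as cache keys and log lines. A small, hedged sketch of SegmentId's string round trip follows, assuming the static helpers SegmentId.of and SegmentId.tryParse; the class name and values are illustrative only.

import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.SegmentId;

public class SegmentIdRoundTripSketch {

    public static void main(String[] args) {
        final SegmentId id = SegmentId.of("wiki", Intervals.of("2020-01-01/2020-01-02"), "v1", 2);
        final String asString = id.toString();
        // tryParse takes the datasource as a hint because the id format joins its parts with underscores,
        // which are also legal inside datasource names.
        final SegmentId parsed = SegmentId.tryParse("wiki", asString);
        System.out.println(asString + " round-trips: " + id.equals(parsed));
    }
}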

Aggregations

SegmentId (org.apache.druid.timeline.SegmentId) 63
DataSegment (org.apache.druid.timeline.DataSegment) 32
Test (org.junit.Test) 21
Interval (org.joda.time.Interval) 14
ISE (org.apache.druid.java.util.common.ISE) 13
ArrayList (java.util.ArrayList) 12
Map (java.util.Map) 12
Set (java.util.Set) 12
ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource) 12
List (java.util.List) 11
ImmutableMap (com.google.common.collect.ImmutableMap) 10
IOException (java.io.IOException) 9
TreeMap (java.util.TreeMap) 9
CountDownLatch (java.util.concurrent.CountDownLatch) 9
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 8
Collectors (java.util.stream.Collectors) 8
Optional (java.util.Optional) 7
Sets (com.google.common.collect.Sets) 6
Nullable (javax.annotation.Nullable) 6
Response (javax.ws.rs.core.Response) 6