Example 1 with BaseProgressIndicator

Use of org.apache.druid.segment.BaseProgressIndicator in project druid by druid-io.

From the class StreamAppenderator, method mergeAndPush:

/**
 * Merge segment, push to deep storage. Should only be used on segments that have been fully persisted. Must only
 * be run in the single-threaded pushExecutor.
 *
 * @param identifier    sink identifier
 * @param sink          sink to push
 * @param useUniquePath true if the segment should be written to a path with a unique identifier
 *
 * @return segment descriptor, or null if the sink is no longer valid
 */
@Nullable
private DataSegment mergeAndPush(final SegmentIdWithShardSpec identifier, final Sink sink, final boolean useUniquePath) {
    // noinspection ObjectEquality
    if (sinks.get(identifier) != sink) {
        log.warn("Sink for segment[%s] no longer valid, bailing out of mergeAndPush.", identifier);
        return null;
    }
    // Use a descriptor file to indicate that pushing has completed.
    final File persistDir = computePersistDir(identifier);
    final File mergedTarget = new File(persistDir, "merged");
    final File descriptorFile = computeDescriptorFile(identifier);
    // Sanity checks
    for (FireHydrant hydrant : sink) {
        if (sink.isWritable()) {
            throw new ISE("Expected sink to be no longer writable before mergeAndPush for segment[%s].", identifier);
        }
        synchronized (hydrant) {
            if (!hydrant.hasSwapped()) {
                throw new ISE("Expected sink to be fully persisted before mergeAndPush for segment[%s].", identifier);
            }
        }
    }
    try {
        if (descriptorFile.exists()) {
            if (useUniquePath) {
                // Don't reuse the descriptor, because the caller asked for a unique path. Leave the old one as-is, since
                // it might serve some unknown purpose.
                log.debug("Segment[%s] already pushed, but we want a unique path, so will push again with a new path.", identifier);
            } else {
                log.info("Segment[%s] already pushed, skipping.", identifier);
                return objectMapper.readValue(descriptorFile, DataSegment.class);
            }
        }
        removeDirectory(mergedTarget);
        if (mergedTarget.exists()) {
            throw new ISE("Merged target[%s] exists after removing?!", mergedTarget);
        }
        final File mergedFile;
        final long mergeFinishTime;
        final long startTime = System.nanoTime();
        List<QueryableIndex> indexes = new ArrayList<>();
        Closer closer = Closer.create();
        try {
            for (FireHydrant fireHydrant : sink) {
                Pair<ReferenceCountingSegment, Closeable> segmentAndCloseable = fireHydrant.getAndIncrementSegment();
                final QueryableIndex queryableIndex = segmentAndCloseable.lhs.asQueryableIndex();
                log.debug("Segment[%s] adding hydrant[%s]", identifier, fireHydrant);
                indexes.add(queryableIndex);
                closer.register(segmentAndCloseable.rhs);
            }
            mergedFile = indexMerger.mergeQueryableIndex(
                indexes,
                schema.getGranularitySpec().isRollup(),
                schema.getAggregators(),
                schema.getDimensionsSpec(),
                mergedTarget,
                tuningConfig.getIndexSpec(),
                tuningConfig.getIndexSpecForIntermediatePersists(),
                new BaseProgressIndicator(),
                tuningConfig.getSegmentWriteOutMediumFactory(),
                tuningConfig.getMaxColumnsToMerge()
            );
            mergeFinishTime = System.nanoTime();
            log.debug("Segment[%s] built in %,dms.", identifier, (mergeFinishTime - startTime) / 1000000);
        } catch (Throwable t) {
            throw closer.rethrow(t);
        } finally {
            closer.close();
        }
        final DataSegment segmentToPush = sink.getSegment().withDimensions(
            IndexMerger.getMergedDimensionsFromQueryableIndexes(indexes, schema.getDimensionsSpec())
        );
        // Retry pushing segments because uploading to deep storage might fail especially for cloud storage types
        final DataSegment segment = RetryUtils.retry(
            () -> dataSegmentPusher.push(mergedFile, segmentToPush, useUniquePath),
            exception -> exception instanceof Exception,
            5
        );
        final long pushFinishTime = System.nanoTime();
        objectMapper.writeValue(descriptorFile, segment);
        log.info("Segment[%s] of %,d bytes " + "built from %d incremental persist(s) in %,dms; " + "pushed to deep storage in %,dms. " + "Load spec is: %s", identifier, segment.getSize(), indexes.size(), (mergeFinishTime - startTime) / 1000000, (pushFinishTime - mergeFinishTime) / 1000000, objectMapper.writeValueAsString(segment.getLoadSpec()));
        return segment;
    } catch (Exception e) {
        metrics.incrementFailedHandoffs();
        log.warn(e, "Failed to push merged index for segment[%s].", identifier);
        throw new RuntimeException(e);
    }
}
Also used: Closer (org.apache.druid.java.util.common.io.Closer), ReferenceCountingSegment (org.apache.druid.segment.ReferenceCountingSegment), Closeable (java.io.Closeable), ArrayList (java.util.ArrayList), DataSegment (org.apache.druid.timeline.DataSegment), IndexSizeExceededException (org.apache.druid.segment.incremental.IndexSizeExceededException), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException), QueryableIndex (org.apache.druid.segment.QueryableIndex), ISE (org.apache.druid.java.util.common.ISE), FireHydrant (org.apache.druid.segment.realtime.FireHydrant), File (java.io.File), BaseProgressIndicator (org.apache.druid.segment.BaseProgressIndicator), Nullable (javax.annotation.Nullable)
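
The descriptor file in this example doubles as an idempotency marker: if a previous run already pushed the segment (and the caller does not demand a unique path), the method short-circuits by deserializing the stored DataSegment instead of merging and pushing again. Below is a minimal, self-contained sketch of that write-marker-last pattern using Jackson; the PushResult type and doPush method are hypothetical stand-ins for Druid's DataSegment and deep-storage push, not part of Druid itself.

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.File;
import java.io.IOException;

public class DescriptorFileIdempotency
{
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Hypothetical result type standing in for Druid's DataSegment.
    public static class PushResult
    {
        public String location;  // where the artifact landed
        public long sizeBytes;   // how large it was
    }

    // Push once; subsequent calls read the recorded result instead of re-pushing.
    public static PushResult pushOnce(File descriptorFile) throws IOException
    {
        if (descriptorFile.exists()) {
            // A previous run completed the push; reuse its recorded descriptor.
            return MAPPER.readValue(descriptorFile, PushResult.class);
        }
        PushResult result = doPush();  // hypothetical expensive operation
        // Writing the descriptor last makes it a commit marker: its presence
        // implies the push above finished successfully.
        MAPPER.writeValue(descriptorFile, result);
        return result;
    }

    private static PushResult doPush()
    {
        PushResult r = new PushResult();
        r.location = "deep-storage://bucket/segment";
        r.sizeBytes = 42L;
        return r;
    }
}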

Example 2 with BaseProgressIndicator

Use of org.apache.druid.segment.BaseProgressIndicator in project druid by druid-io.

From the class PartialSegmentMergeTask, method mergeSegmentsInSamePartition:

private static Pair<File, List<String>> mergeSegmentsInSamePartition(
        DataSchema dataSchema,
        ParallelIndexTuningConfig tuningConfig,
        IndexIO indexIO,
        IndexMergerV9 merger,
        List<File> indexes,
        int maxNumSegmentsToMerge,
        File baseOutDir,
        int outDirSuffix
) throws IOException {
    int suffix = outDirSuffix;
    final List<File> mergedFiles = new ArrayList<>();
    List<String> dimensionNames = null;
    for (int i = 0; i < indexes.size(); i += maxNumSegmentsToMerge) {
        final List<File> filesToMerge = indexes.subList(i, Math.min(i + maxNumSegmentsToMerge, indexes.size()));
        final List<QueryableIndex> indexesToMerge = new ArrayList<>(filesToMerge.size());
        final Closer indexCleaner = Closer.create();
        for (File file : filesToMerge) {
            final QueryableIndex queryableIndex = indexIO.loadIndex(file);
            indexesToMerge.add(queryableIndex);
            indexCleaner.register(() -> {
                queryableIndex.close();
                file.delete();
            });
        }
        if (maxNumSegmentsToMerge >= indexes.size()) {
            dimensionNames = IndexMerger.getMergedDimensionsFromQueryableIndexes(indexesToMerge, dataSchema.getDimensionsSpec());
        }
        final File outDir = new File(baseOutDir, StringUtils.format("merged_%d", suffix++));
        mergedFiles.add(merger.mergeQueryableIndex(
            indexesToMerge,
            dataSchema.getGranularitySpec().isRollup(),
            dataSchema.getAggregators(),
            null,
            outDir,
            tuningConfig.getIndexSpec(),
            tuningConfig.getIndexSpecForIntermediatePersists(),
            new BaseProgressIndicator(),
            tuningConfig.getSegmentWriteOutMediumFactory(),
            tuningConfig.getMaxColumnsToMerge()
        ));
        indexCleaner.close();
    }
    if (mergedFiles.size() == 1) {
        return Pair.of(mergedFiles.get(0), Preconditions.checkNotNull(dimensionNames, "dimensionNames"));
    } else {
        return mergeSegmentsInSamePartition(dataSchema, tuningConfig, indexIO, merger, mergedFiles, maxNumSegmentsToMerge, baseOutDir, suffix);
    }
}
Also used: Closer (org.apache.druid.java.util.common.io.Closer), QueryableIndex (org.apache.druid.segment.QueryableIndex), ArrayList (java.util.ArrayList), File (java.io.File), BaseProgressIndicator (org.apache.druid.segment.BaseProgressIndicator)
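
mergeSegmentsInSamePartition merges at most maxNumSegmentsToMerge indexes per pass, then recurses on the intermediate outputs until a single merged file remains, which bounds how many QueryableIndexes are open at once. A minimal generic sketch of that recursion follows, merging strings instead of segment files; the combine method is a stand-in for the real mergeQueryableIndex call.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class BatchedRecursiveMerge
{
    // Merge at most maxPerPass items per pass, then recurse on the
    // intermediate results until a single merged item remains.
    public static String mergeAll(List<String> items, int maxPerPass)
    {
        List<String> merged = new ArrayList<>();
        for (int i = 0; i < items.size(); i += maxPerPass) {
            List<String> batch = items.subList(i, Math.min(i + maxPerPass, items.size()));
            merged.add(combine(batch));  // one intermediate output per batch
        }
        if (merged.size() == 1) {
            return merged.get(0);
        }
        // More than one intermediate output: merge the intermediates themselves.
        return mergeAll(merged, maxPerPass);
    }

    // Hypothetical combine step; Druid's real work here is mergeQueryableIndex.
    private static String combine(List<String> batch)
    {
        return String.join("+", batch);
    }

    public static void main(String[] args)
    {
        List<String> inputs = Arrays.asList("a", "b", "c", "d", "e");
        System.out.println(mergeAll(inputs, 2));  // prints a+b+c+d+e after three passes
    }
}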

Example 3 with BaseProgressIndicator

Use of org.apache.druid.segment.BaseProgressIndicator in project druid by druid-io.

From the class SegmentGenerator, method generate:

public QueryableIndex generate(final DataSegment dataSegment, final GeneratorSchemaInfo schemaInfo, final Granularity granularity, final int numRows) {
    // In case we need to generate hyperUniques.
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
    final String dataHash = Hashing.sha256()
        .newHasher()
        .putString(dataSegment.getId().toString(), StandardCharsets.UTF_8)
        .putString(schemaInfo.toString(), StandardCharsets.UTF_8)
        .putString(granularity.toString(), StandardCharsets.UTF_8)
        .putInt(numRows)
        .hash()
        .toString();
    final File outDir = new File(getSegmentDir(dataSegment.getId(), dataHash), "merged");
    if (outDir.exists()) {
        try {
            log.info("Found segment with hash[%s] cached in directory[%s].", dataHash, outDir);
            return TestHelper.getTestIndexIO().loadIndex(outDir);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    log.info("Writing segment with hash[%s] to directory[%s].", dataHash, outDir);
    // Use the segment identifier's hashCode as the generator seed.
    final DataGenerator dataGenerator = new DataGenerator(
        schemaInfo.getColumnSchemas(),
        dataSegment.getId().hashCode(),
        schemaInfo.getDataInterval(),
        numRows
    );
    final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
        .withDimensionsSpec(schemaInfo.getDimensionsSpec())
        .withMetrics(schemaInfo.getAggsArray())
        .withRollup(schemaInfo.isWithRollup())
        .withQueryGranularity(granularity)
        .build();
    final List<InputRow> rows = new ArrayList<>();
    final List<QueryableIndex> indexes = new ArrayList<>();
    for (int i = 0; i < numRows; i++) {
        final InputRow row = dataGenerator.nextRow();
        rows.add(row);
        if ((i + 1) % 20000 == 0) {
            log.info("%,d/%,d rows generated for[%s].", i + 1, numRows, dataSegment);
        }
        if (rows.size() % MAX_ROWS_IN_MEMORY == 0) {
            indexes.add(makeIndex(dataSegment.getId(), dataHash, indexes.size(), rows, indexSchema));
            rows.clear();
        }
    }
    log.info("%,d/%,d rows generated for[%s].", numRows, numRows, dataSegment);
    if (rows.size() > 0) {
        indexes.add(makeIndex(dataSegment.getId(), dataHash, indexes.size(), rows, indexSchema));
        rows.clear();
    }
    final QueryableIndex retVal;
    if (indexes.isEmpty()) {
        throw new ISE("No rows to index?");
    } else {
        try {
            final IndexSpec indexSpec = new IndexSpec(new RoaringBitmapSerdeFactory(true), null, null, null);
            retVal = TestHelper.getTestIndexIO().loadIndex(
                TestHelper.getTestIndexMergerV9(OffHeapMemorySegmentWriteOutMediumFactory.instance())
                    .mergeQueryableIndex(
                        indexes,
                        false,
                        schemaInfo.getAggs().stream().map(AggregatorFactory::getCombiningFactory).toArray(AggregatorFactory[]::new),
                        null,
                        outDir,
                        indexSpec,
                        indexSpec,
                        new BaseProgressIndicator(),
                        null,
                        -1
                    )
            );
            for (QueryableIndex index : indexes) {
                index.close();
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    log.info("Finished writing segment[%s] to[%s]", dataSegment, outDir);
    return retVal;
}
Also used: IndexSpec (org.apache.druid.segment.IndexSpec), ArrayList (java.util.ArrayList), HyperUniquesSerde (org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde), IOException (java.io.IOException), AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory), RoaringBitmapSerdeFactory (org.apache.druid.segment.data.RoaringBitmapSerdeFactory), QueryableIndex (org.apache.druid.segment.QueryableIndex), InputRow (org.apache.druid.data.input.InputRow), ISE (org.apache.druid.java.util.common.ISE), File (java.io.File), IncrementalIndexSchema (org.apache.druid.segment.incremental.IncrementalIndexSchema), BaseProgressIndicator (org.apache.druid.segment.BaseProgressIndicator)
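
SegmentGenerator avoids regenerating identical benchmark segments by deriving the output directory from a SHA-256 hash of everything that determines the segment's content (segment id, schema, granularity, row count): equal inputs map to the same directory, so a cached segment can be reloaded. A small sketch of that content-addressed caching idea with Guava's Hashing follows; the cacheRoot path and argument values are hypothetical examples.

import com.google.common.hash.Hashing;
import java.io.File;
import java.nio.charset.StandardCharsets;

public class ContentAddressedCache
{
    // Derive a deterministic directory name from the inputs that define the
    // artifact. Any change in the inputs changes the hash, and thus the directory.
    public static File cacheDir(File cacheRoot, String id, String schema, String granularity, int numRows)
    {
        String hash = Hashing.sha256()
                             .newHasher()
                             .putString(id, StandardCharsets.UTF_8)
                             .putString(schema, StandardCharsets.UTF_8)
                             .putString(granularity, StandardCharsets.UTF_8)
                             .putInt(numRows)
                             .hash()
                             .toString();
        return new File(cacheRoot, hash);
    }

    public static void main(String[] args)
    {
        File dir = cacheDir(new File("/tmp/segment-cache"), "seg-1", "basic-schema", "HOUR", 100_000);
        if (dir.exists()) {
            System.out.println("cache hit: " + dir);   // reuse the generated artifact
        } else {
            System.out.println("cache miss: " + dir);  // generate, then write into dir
        }
    }
}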

Example 4 with BaseProgressIndicator

Use of org.apache.druid.segment.BaseProgressIndicator in project druid by druid-io.

From the class RealtimePlumber, method persistAndMerge:

// Submits persist-n-merge task for a Sink to the mergeExecutor
private void persistAndMerge(final long truncatedTime, final Sink sink) {
    final String threadName = StringUtils.format("%s-%s-persist-n-merge", schema.getDataSource(), DateTimes.utc(truncatedTime));
    mergeExecutor.execute(new ThreadRenamingRunnable(threadName) {

        final Interval interval = sink.getInterval();

        Stopwatch mergeStopwatch = null;

        @Override
        public void doRun() {
            try {
                // Bail out if this sink has been abandoned by a previously-executed task.
                if (sinks.get(truncatedTime) != sink) {
                    log.info("Sink[%s] was abandoned, bailing out of persist-n-merge.", sink);
                    return;
                }
                // Use a file to indicate that pushing has completed.
                final File persistDir = computePersistDir(schema, interval);
                final File mergedTarget = new File(persistDir, "merged");
                final File isPushedMarker = new File(persistDir, "isPushedMarker");
                if (!isPushedMarker.exists()) {
                    removeSegment(sink, mergedTarget);
                    if (mergedTarget.exists()) {
                        log.warn("Merged target[%s] still exists after attempt to delete it; skipping push.", mergedTarget);
                        return;
                    }
                } else {
                    log.info("Already pushed sink[%s]", sink);
                    return;
                }
                /*
                 * Note: if the plumber crashes after persisting a subset of hydrants, data might be duplicated, because
                 * those hydrants will be read back while the older commitMetadata is used. Fixing this probably requires
                 * structural changes to the plumber.
                 */
                for (FireHydrant hydrant : sink) {
                    synchronized (hydrant) {
                        if (!hydrant.hasSwapped()) {
                            log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant, sink);
                            final int rowCount = persistHydrant(hydrant, schema, interval, null);
                            metrics.incrementRowOutputCount(rowCount);
                        }
                    }
                }
                final long mergeThreadCpuTime = JvmUtils.safeGetThreadCpuTime();
                mergeStopwatch = Stopwatch.createStarted();
                final File mergedFile;
                List<QueryableIndex> indexes = new ArrayList<>();
                Closer closer = Closer.create();
                try {
                    for (FireHydrant fireHydrant : sink) {
                        Pair<ReferenceCountingSegment, Closeable> segmentAndCloseable = fireHydrant.getAndIncrementSegment();
                        final QueryableIndex queryableIndex = segmentAndCloseable.lhs.asQueryableIndex();
                        log.info("Adding hydrant[%s]", fireHydrant);
                        indexes.add(queryableIndex);
                        closer.register(segmentAndCloseable.rhs);
                    }
                    mergedFile = indexMerger.mergeQueryableIndex(
                        indexes,
                        schema.getGranularitySpec().isRollup(),
                        schema.getAggregators(),
                        null,
                        mergedTarget,
                        config.getIndexSpec(),
                        config.getIndexSpecForIntermediatePersists(),
                        new BaseProgressIndicator(),
                        config.getSegmentWriteOutMediumFactory(),
                        -1
                    );
                } catch (Throwable t) {
                    throw closer.rethrow(t);
                } finally {
                    closer.close();
                }
                // emit merge metrics before publishing segment
                metrics.incrementMergeCpuTime(JvmUtils.safeGetThreadCpuTime() - mergeThreadCpuTime);
                metrics.incrementMergeTimeMillis(mergeStopwatch.elapsed(TimeUnit.MILLISECONDS));
                log.info("Pushing [%s] to deep storage", sink.getSegment().getId());
                DataSegment segment = dataSegmentPusher.push(
                    mergedFile,
                    sink.getSegment().withDimensions(IndexMerger.getMergedDimensionsFromQueryableIndexes(indexes, schema.getDimensionsSpec())),
                    false
                );
                log.info("Inserting [%s] to the metadata store", sink.getSegment().getId());
                segmentPublisher.publishSegment(segment);
                if (!isPushedMarker.createNewFile()) {
                    log.makeAlert("Failed to create marker file for [%s]", schema.getDataSource()).addData("interval", sink.getInterval()).addData("partitionNum", segment.getShardSpec().getPartitionNum()).addData("marker", isPushedMarker).emit();
                }
            } catch (Exception e) {
                metrics.incrementFailedHandoffs();
                log.makeAlert(e, "Failed to persist merged index[%s]", schema.getDataSource()).addData("interval", interval).emit();
                if (shuttingDown) {
                    // We're trying to shut down, and this segment failed to push. Let's just get rid of it.
                    // This call will also delete possibly-partially-written files, so we don't need to do it explicitly.
                    cleanShutdown = false;
                    abandonSegment(truncatedTime, sink);
                }
            } finally {
                if (mergeStopwatch != null) {
                    mergeStopwatch.stop();
                }
            }
        }
    });
    handoffNotifier.registerSegmentHandoffCallback(
        new SegmentDescriptor(sink.getInterval(), sink.getVersion(), config.getShardSpec().getPartitionNum()),
        mergeExecutor,
        new Runnable() {

        @Override
        public void run() {
            abandonSegment(sink.getInterval().getStartMillis(), sink);
            metrics.incrementHandOffCount();
        }
    });
}
Also used: Closer (org.apache.druid.java.util.common.io.Closer), Stopwatch (com.google.common.base.Stopwatch), DataSegment (org.apache.druid.timeline.DataSegment), IndexSizeExceededException (org.apache.druid.segment.incremental.IndexSizeExceededException), IOException (java.io.IOException), QueryableIndex (org.apache.druid.segment.QueryableIndex), SegmentDescriptor (org.apache.druid.query.SegmentDescriptor), ThreadRenamingRunnable (org.apache.druid.common.guava.ThreadRenamingRunnable), List (java.util.List), ArrayList (java.util.ArrayList), FireHydrant (org.apache.druid.segment.realtime.FireHydrant), File (java.io.File), Interval (org.joda.time.Interval), Pair (org.apache.druid.java.util.common.Pair), BaseProgressIndicator (org.apache.druid.segment.BaseProgressIndicator)
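
Every merge in these examples uses the same Closer idiom: register each resource as it is acquired, rethrow from the catch block, close in finally. That guarantees every hydrant segment opened before a merge failure is released, without masking the original exception. Druid's Closer (org.apache.druid.java.util.common.io.Closer) follows the same register/rethrow/close contract as Guava's com.google.common.io.Closer, which the hedged sketch below uses directly; the file paths and byte-reading work are placeholders for the real index-merging work.

import com.google.common.io.Closer;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

public class CloserIdiom
{
    public static long totalFirstBytes(List<String> paths) throws IOException
    {
        Closer closer = Closer.create();
        try {
            List<InputStream> streams = new ArrayList<>();
            for (String path : paths) {
                // Register immediately after opening, so a failure opening the
                // third stream still closes the first two.
                InputStream in = closer.register(new FileInputStream(path));
                streams.add(in);
            }
            long sum = 0;
            for (InputStream in : streams) {
                sum += in.read();  // stand-in for the real work (merging indexes)
            }
            return sum;
        } catch (Throwable t) {
            // rethrow() records t as the primary failure; exceptions thrown while
            // closing are suppressed rather than replacing it.
            throw closer.rethrow(t);
        } finally {
            closer.close();
        }
    }
}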

Example 5 with BaseProgressIndicator

Use of org.apache.druid.segment.BaseProgressIndicator in project druid by druid-io.

From the class AppenderatorImpl, method mergeAndPush:

/**
 * Merge segment, push to deep storage. Should only be used on segments that have been fully persisted. Must only
 * be run in the single-threaded pushExecutor.
 *
 * @param identifier    sink identifier
 * @param sink          sink to push
 * @param useUniquePath true if the segment should be written to a path with a unique identifier
 *
 * @return segment descriptor, or null if the sink is no longer valid
 */
@Nullable
private DataSegment mergeAndPush(final SegmentIdWithShardSpec identifier, final Sink sink, final boolean useUniquePath) {
    // noinspection ObjectEquality
    if (sinks.get(identifier) != sink) {
        log.warn("Sink for segment[%s] no longer valid, bailing out of mergeAndPush.", identifier);
        return null;
    }
    // Use a descriptor file to indicate that pushing has completed.
    final File persistDir = computePersistDir(identifier);
    final File mergedTarget = new File(persistDir, "merged");
    final File descriptorFile = computeDescriptorFile(identifier);
    // Sanity checks
    for (FireHydrant hydrant : sink) {
        if (sink.isWritable()) {
            throw new ISE("Expected sink to be no longer writable before mergeAndPush for segment[%s].", identifier);
        }
        synchronized (hydrant) {
            if (!hydrant.hasSwapped()) {
                throw new ISE("Expected sink to be fully persisted before mergeAndPush for segment[%s].", identifier);
            }
        }
    }
    try {
        if (descriptorFile.exists()) {
            if (useUniquePath) {
                // Don't reuse the descriptor, because the caller asked for a unique path. Leave the old one as-is, since
                // it might serve some unknown purpose.
                log.debug("Segment[%s] already pushed, but we want a unique path, so will push again with a new path.", identifier);
            } else {
                log.info("Segment[%s] already pushed, skipping.", identifier);
                return objectMapper.readValue(descriptorFile, DataSegment.class);
            }
        }
        removeDirectory(mergedTarget);
        if (mergedTarget.exists()) {
            throw new ISE("Merged target[%s] exists after removing?!", mergedTarget);
        }
        final File mergedFile;
        final long mergeFinishTime;
        final long startTime = System.nanoTime();
        List<QueryableIndex> indexes = new ArrayList<>();
        Closer closer = Closer.create();
        try {
            for (FireHydrant fireHydrant : sink) {
                // if batch, swap/persist did not memory map the incremental index, we need it mapped now:
                if (!isOpenSegments()) {
                    // sanity
                    Pair<File, SegmentId> persistedMetadata = persistedHydrantMetadata.get(fireHydrant);
                    if (persistedMetadata == null) {
                        throw new ISE("Persisted metadata for batch hydrant [%s] is null!", fireHydrant);
                    }
                    File persistedFile = persistedMetadata.lhs;
                    SegmentId persistedSegmentId = persistedMetadata.rhs;
                    // sanity:
                    if (persistedFile == null) {
                        throw new ISE("Persisted file for batch hydrant [%s] is null!", fireHydrant);
                    } else if (persistedSegmentId == null) {
                        throw new ISE("Persisted segmentId for batch hydrant in file [%s] is null!", persistedFile.getPath());
                    }
                    fireHydrant.swapSegment(new QueryableIndexSegment(indexIO.loadIndex(persistedFile), persistedSegmentId));
                }
                Pair<ReferenceCountingSegment, Closeable> segmentAndCloseable = fireHydrant.getAndIncrementSegment();
                final QueryableIndex queryableIndex = segmentAndCloseable.lhs.asQueryableIndex();
                log.debug("Segment[%s] adding hydrant[%s]", identifier, fireHydrant);
                indexes.add(queryableIndex);
                closer.register(segmentAndCloseable.rhs);
            }
            mergedFile = indexMerger.mergeQueryableIndex(
                indexes,
                schema.getGranularitySpec().isRollup(),
                schema.getAggregators(),
                schema.getDimensionsSpec(),
                mergedTarget,
                tuningConfig.getIndexSpec(),
                tuningConfig.getIndexSpecForIntermediatePersists(),
                new BaseProgressIndicator(),
                tuningConfig.getSegmentWriteOutMediumFactory(),
                tuningConfig.getMaxColumnsToMerge()
            );
            mergeFinishTime = System.nanoTime();
            log.debug("Segment[%s] built in %,dms.", identifier, (mergeFinishTime - startTime) / 1000000);
        } catch (Throwable t) {
            throw closer.rethrow(t);
        } finally {
            closer.close();
        }
        final DataSegment segmentToPush = sink.getSegment().withDimensions(
            IndexMerger.getMergedDimensionsFromQueryableIndexes(indexes, schema.getDimensionsSpec())
        );
        // Retry pushing segments because uploading to deep storage might fail especially for cloud storage types
        final DataSegment segment = RetryUtils.retry(
            () -> dataSegmentPusher.push(mergedFile, segmentToPush, useUniquePath),
            exception -> exception instanceof Exception,
            5
        );
        if (!isOpenSegments()) {
            // can generate OOMs during merge if enough of them are held back...
            for (FireHydrant fireHydrant : sink) {
                fireHydrant.swapSegment(null);
            }
        }
        final long pushFinishTime = System.nanoTime();
        objectMapper.writeValue(descriptorFile, segment);
        log.info("Segment[%s] of %,d bytes " + "built from %d incremental persist(s) in %,dms; " + "pushed to deep storage in %,dms. " + "Load spec is: %s", identifier, segment.getSize(), indexes.size(), (mergeFinishTime - startTime) / 1000000, (pushFinishTime - mergeFinishTime) / 1000000, objectMapper.writeValueAsString(segment.getLoadSpec()));
        return segment;
    } catch (Exception e) {
        metrics.incrementFailedHandoffs();
        log.warn(e, "Failed to push merged index for segment[%s].", identifier);
        throw new RuntimeException(e);
    }
}
Also used: Closer (org.apache.druid.java.util.common.io.Closer), QueryableIndexSegment (org.apache.druid.segment.QueryableIndexSegment), ReferenceCountingSegment (org.apache.druid.segment.ReferenceCountingSegment), SegmentId (org.apache.druid.timeline.SegmentId), Closeable (java.io.Closeable), ArrayList (java.util.ArrayList), DataSegment (org.apache.druid.timeline.DataSegment), IndexSizeExceededException (org.apache.druid.segment.incremental.IndexSizeExceededException), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException), QueryableIndex (org.apache.druid.segment.QueryableIndex), ISE (org.apache.druid.java.util.common.ISE), FireHydrant (org.apache.druid.segment.realtime.FireHydrant), File (java.io.File), BaseProgressIndicator (org.apache.druid.segment.BaseProgressIndicator), Nullable (javax.annotation.Nullable)
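
Both appenderators wrap the deep-storage push in RetryUtils.retry with a retry predicate and an attempt cap, since cloud-storage uploads fail transiently. The sketch below is a plain-Java stand-in for that retry-with-predicate pattern, not Druid's RetryUtils; it uses a simple growing pause where the real implementation applies its own backoff policy.

import java.util.concurrent.Callable;
import java.util.function.Predicate;

public class RetryWithPredicate
{
    // Run the task, retrying up to maxTries total attempts as long as the
    // thrown exception matches shouldRetry. The last failure is rethrown.
    public static <T> T retry(Callable<T> task, Predicate<Throwable> shouldRetry, int maxTries) throws Exception
    {
        Exception last = null;
        for (int attempt = 1; attempt <= maxTries; attempt++) {
            try {
                return task.call();
            } catch (Exception e) {
                last = e;
                if (!shouldRetry.test(e) || attempt == maxTries) {
                    throw e;
                }
                // Fixed growing pause between attempts; a production implementation
                // would typically back off exponentially with jitter.
                Thread.sleep(1000L * attempt);
            }
        }
        throw last;  // unreachable for maxTries >= 1, but keeps the compiler satisfied
    }

    public static void main(String[] args) throws Exception
    {
        // Mirrors the call shape in the examples above: retry any Exception, up to 5 tries.
        String pushed = retry(() -> "pushed", e -> e instanceof Exception, 5);
        System.out.println(pushed);
    }
}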

Aggregations

File (java.io.File): 6
ArrayList (java.util.ArrayList): 6
BaseProgressIndicator (org.apache.druid.segment.BaseProgressIndicator): 6
QueryableIndex (org.apache.druid.segment.QueryableIndex): 6
IOException (java.io.IOException): 5
Closer (org.apache.druid.java.util.common.io.Closer): 5
ISE (org.apache.druid.java.util.common.ISE): 4
IndexSizeExceededException (org.apache.druid.segment.incremental.IndexSizeExceededException): 4
FireHydrant (org.apache.druid.segment.realtime.FireHydrant): 4
DataSegment (org.apache.druid.timeline.DataSegment): 4
Closeable (java.io.Closeable): 3
Nullable (javax.annotation.Nullable): 3
ReferenceCountingSegment (org.apache.druid.segment.ReferenceCountingSegment): 3
ExecutionException (java.util.concurrent.ExecutionException): 2
Stopwatch (com.google.common.base.Stopwatch): 1
List (java.util.List): 1
ThreadRenamingRunnable (org.apache.druid.common.guava.ThreadRenamingRunnable): 1
InputRow (org.apache.druid.data.input.InputRow): 1
Pair (org.apache.druid.java.util.common.Pair): 1
SegmentDescriptor (org.apache.druid.query.SegmentDescriptor): 1