
Example 26 with FireHydrant

use of org.apache.druid.segment.realtime.FireHydrant in project druid by apache.

the class AppenderatorImpl method abandonSegment.

private ListenableFuture<?> abandonSegment(final SegmentIdWithShardSpec identifier, final Sink sink, final boolean removeOnDiskData) {
    // Ensure no future writes will be made to this sink.
    if (sink.finishWriting()) {
        // Decrement this sink's rows from the counters. We only count active sinks so that we don't double decrement,
        // i.e. those that haven't been persisted for *InMemory counters, or pushed to deep storage for the total counter.
        rowsCurrentlyInMemory.addAndGet(-sink.getNumRowsInMemory());
        bytesCurrentlyInMemory.addAndGet(-sink.getBytesInMemory());
        bytesCurrentlyInMemory.addAndGet(-calculateSinkMemoryInUsed());
        for (FireHydrant hydrant : sink) {
            // Decrement memory used by all memory-mapped hydrants
            if (!hydrant.equals(sink.getCurrHydrant())) {
                bytesCurrentlyInMemory.addAndGet(-calculateMMappedHydrantMemoryInUsed(hydrant));
            }
        }
        totalRows.addAndGet(-sink.getNumRows());
    }
    // Mark this identifier as dropping, so no future push tasks will pick it up.
    droppingSinks.add(identifier);
    // Wait for any outstanding pushes to finish, then abandon the segment inside the persist thread.
    return Futures.transform(pushBarrier(), new Function<Object, Void>() {

        @Nullable
        @Override
        public Void apply(@Nullable Object input) {
            if (!sinks.remove(identifier, sink)) {
                log.error("Sink for segment[%s] no longer valid, not abandoning.", identifier);
                return null;
            }
            metrics.setSinkCount(sinks.size());
            if (removeOnDiskData) {
                // Remove this segment from the committed list. This must be done from the persist thread.
                log.debug("Removing commit metadata for segment[%s].", identifier);
                try {
                    commitLock.lock();
                    final Committed oldCommit = readCommit();
                    if (oldCommit != null) {
                        writeCommit(oldCommit.without(identifier.toString()));
                    }
                } catch (Exception e) {
                    log.makeAlert(e, "Failed to update committed segments[%s]", schema.getDataSource()).addData("identifier", identifier.toString()).emit();
                    throw new RuntimeException(e);
                } finally {
                    commitLock.unlock();
                }
            }
            // Unannounce the segment.
            try {
                segmentAnnouncer.unannounceSegment(sink.getSegment());
            } catch (Exception e) {
                log.makeAlert(e, "Failed to unannounce segment[%s]", schema.getDataSource()).addData("identifier", identifier.toString()).emit();
            }
            droppingSinks.remove(identifier);
            sinkTimeline.remove(sink.getInterval(), sink.getVersion(), identifier.getShardSpec().createChunk(sink));
            for (FireHydrant hydrant : sink) {
                if (cache != null) {
                    cache.close(SinkQuerySegmentWalker.makeHydrantCacheIdentifier(hydrant));
                }
                hydrant.swapSegment(null);
                // remove hydrant from persisted metadata:
                persistedHydrantMetadata.remove(hydrant);
            }
            if (removeOnDiskData) {
                removeDirectory(computePersistDir(identifier));
            }
            log.info("Dropped segment[%s].", identifier);
            return null;
        }
    }, // starting to abandon segments
    persistExecutor);
}
Also used : FireHydrant(org.apache.druid.segment.realtime.FireHydrant) Nullable(javax.annotation.Nullable) IndexSizeExceededException(org.apache.druid.segment.incremental.IndexSizeExceededException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException)
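
The point of interest in abandonSegment is that the shared row/byte counters are decremented only when finishWriting() flips the sink out of the active set, so a repeated abandon or a concurrent persist cannot double-decrement. Below is a minimal, self-contained sketch of that pattern; CountedSink and the counter names are illustrative stand-ins, not Druid classes.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative stand-ins for the bookkeeping in abandonSegment; these are not Druid classes.
class CountedSink {
    private final AtomicBoolean writable = new AtomicBoolean(true);
    private final AtomicLong rowsInMemory = new AtomicLong();

    void add(long rows) {
        if (writable.get()) {
            rowsInMemory.addAndGet(rows);
        }
    }

    long getNumRowsInMemory() {
        return rowsInMemory.get();
    }

    // Mirrors Sink.finishWriting(): only the first caller sees true, so the shared
    // counters are decremented exactly once per sink.
    boolean finishWriting() {
        return writable.compareAndSet(true, false);
    }
}

public class AbandonCounters {
    private static final AtomicLong rowsCurrentlyInMemory = new AtomicLong();

    static void abandon(CountedSink sink) {
        if (sink.finishWriting()) {
            // Decrement only for sinks that were still active; a second abandon (or a
            // concurrent persist) cannot double-decrement the shared counter.
            rowsCurrentlyInMemory.addAndGet(-sink.getNumRowsInMemory());
        }
    }

    public static void main(String[] args) {
        CountedSink sink = new CountedSink();
        sink.add(100);
        rowsCurrentlyInMemory.addAndGet(100);
        abandon(sink);
        abandon(sink); // no-op for the counters
        System.out.println(rowsCurrentlyInMemory.get()); // prints 0
    }
}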

Example 27 with FireHydrant

use of org.apache.druid.segment.realtime.FireHydrant in project druid by apache.

the class BatchAppenderator method getSinkForIdentifierPath.

private Sink getSinkForIdentifierPath(SegmentIdWithShardSpec identifier, File identifierPath) throws IOException {
    // To avoid reading and listing of "merged" dir and other special files
    final File[] sinkFiles = identifierPath.listFiles((dir, fileName) -> !(Ints.tryParse(fileName) == null));
    if (sinkFiles == null) {
        throw new ISE("Problem reading persisted sinks in path[%s]", identifierPath);
    }
    Arrays.sort(sinkFiles, (o1, o2) -> Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName())));
    List<FireHydrant> hydrants = new ArrayList<>();
    for (File hydrantDir : sinkFiles) {
        final int hydrantNumber = Integer.parseInt(hydrantDir.getName());
        log.debug("Loading previously persisted partial segment at [%s]", hydrantDir);
        if (hydrantNumber != hydrants.size()) {
            throw new ISE("Missing hydrant [%,d] in identifier [%s].", hydrants.size(), identifier);
        }
        hydrants.add(new FireHydrant(new QueryableIndexSegment(indexIO.loadIndex(hydrantDir), identifier.asSegmentId()), hydrantNumber));
    }
    Sink retVal = new Sink(identifier.getInterval(), schema, identifier.getShardSpec(), identifier.getVersion(), tuningConfig.getAppendableIndexSpec(), tuningConfig.getMaxRowsInMemory(), maxBytesTuningConfig, useMaxMemoryEstimates, null, hydrants);
    // this sink is not writable
    retVal.finishWriting();
    return retVal;
}
Also used : QueryableIndexSegment(org.apache.druid.segment.QueryableIndexSegment) Sink(org.apache.druid.segment.realtime.plumber.Sink) ArrayList(java.util.ArrayList) ISE(org.apache.druid.java.util.common.ISE) FireHydrant(org.apache.druid.segment.realtime.FireHydrant) File(java.io.File)
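
getSinkForIdentifierPath rebuilds a Sink from directories whose names are the hydrant numbers: list only numeric names, sort numerically, and fail fast if the sequence has a gap. A plain-Java sketch of that directory scan follows; listHydrantDirs and its messages are illustrative, not the Druid implementation.

import java.io.File;
import java.util.Arrays;
import java.util.Comparator;

public class HydrantDirScan {
    static File[] listHydrantDirs(File identifierPath) {
        // Only directories with purely numeric names are hydrants; "merged" and other
        // special files are skipped by the filter.
        File[] dirs = identifierPath.listFiles((dir, name) -> name.matches("\\d+"));
        if (dirs == null) {
            throw new IllegalStateException("Problem reading persisted sinks in path " + identifierPath);
        }
        Arrays.sort(dirs, Comparator.comparingInt(f -> Integer.parseInt(f.getName())));
        // Hydrant numbers must be contiguous starting at 0, as in the ISE check above.
        for (int i = 0; i < dirs.length; i++) {
            if (Integer.parseInt(dirs[i].getName()) != i) {
                throw new IllegalStateException("Missing hydrant " + i + " in " + identifierPath);
            }
        }
        return dirs;
    }

    public static void main(String[] args) {
        for (File dir : listHydrantDirs(new File(args[0]))) {
            System.out.println(dir.getName());
        }
    }
}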

Example 28 with FireHydrant

use of org.apache.druid.segment.realtime.FireHydrant in project druid by apache.

the class StreamAppenderator method mergeAndPush.

/**
 * Merge segment, push to deep storage. Should only be used on segments that have been fully persisted. Must only
 * be run in the single-threaded pushExecutor.
 *
 * @param identifier    sink identifier
 * @param sink          sink to push
 * @param useUniquePath true if the segment should be written to a path with a unique identifier
 *
 * @return segment descriptor, or null if the sink is no longer valid
 */
@Nullable
private DataSegment mergeAndPush(final SegmentIdWithShardSpec identifier, final Sink sink, final boolean useUniquePath) {
    // noinspection ObjectEquality
    if (sinks.get(identifier) != sink) {
        log.warn("Sink for segment[%s] no longer valid, bailing out of mergeAndPush.", identifier);
        return null;
    }
    // Use a descriptor file to indicate that pushing has completed.
    final File persistDir = computePersistDir(identifier);
    final File mergedTarget = new File(persistDir, "merged");
    final File descriptorFile = computeDescriptorFile(identifier);
    // Sanity checks
    for (FireHydrant hydrant : sink) {
        if (sink.isWritable()) {
            throw new ISE("Expected sink to be no longer writable before mergeAndPush for segment[%s].", identifier);
        }
        synchronized (hydrant) {
            if (!hydrant.hasSwapped()) {
                throw new ISE("Expected sink to be fully persisted before mergeAndPush for segment[%s].", identifier);
            }
        }
    }
    try {
        if (descriptorFile.exists()) {
            if (useUniquePath) {
                // Don't reuse the descriptor, because the caller asked for a unique path. Leave the old one as-is, since
                // it might serve some unknown purpose.
                log.debug("Segment[%s] already pushed, but we want a unique path, so will push again with a new path.", identifier);
            } else {
                log.info("Segment[%s] already pushed, skipping.", identifier);
                return objectMapper.readValue(descriptorFile, DataSegment.class);
            }
        }
        removeDirectory(mergedTarget);
        if (mergedTarget.exists()) {
            throw new ISE("Merged target[%s] exists after removing?!", mergedTarget);
        }
        final File mergedFile;
        final long mergeFinishTime;
        final long startTime = System.nanoTime();
        List<QueryableIndex> indexes = new ArrayList<>();
        Closer closer = Closer.create();
        try {
            for (FireHydrant fireHydrant : sink) {
                Pair<ReferenceCountingSegment, Closeable> segmentAndCloseable = fireHydrant.getAndIncrementSegment();
                final QueryableIndex queryableIndex = segmentAndCloseable.lhs.asQueryableIndex();
                log.debug("Segment[%s] adding hydrant[%s]", identifier, fireHydrant);
                indexes.add(queryableIndex);
                closer.register(segmentAndCloseable.rhs);
            }
            mergedFile = indexMerger.mergeQueryableIndex(
                indexes,
                schema.getGranularitySpec().isRollup(),
                schema.getAggregators(),
                schema.getDimensionsSpec(),
                mergedTarget,
                tuningConfig.getIndexSpec(),
                tuningConfig.getIndexSpecForIntermediatePersists(),
                new BaseProgressIndicator(),
                tuningConfig.getSegmentWriteOutMediumFactory(),
                tuningConfig.getMaxColumnsToMerge()
            );
            mergeFinishTime = System.nanoTime();
            log.debug("Segment[%s] built in %,dms.", identifier, (mergeFinishTime - startTime) / 1000000);
        } catch (Throwable t) {
            throw closer.rethrow(t);
        } finally {
            closer.close();
        }
        final DataSegment segmentToPush = sink.getSegment().withDimensions(IndexMerger.getMergedDimensionsFromQueryableIndexes(indexes, schema.getDimensionsSpec()));
        // Retry pushing segments because uploading to deep storage might fail especially for cloud storage types
        final DataSegment segment = RetryUtils.retry(
            () -> dataSegmentPusher.push(mergedFile, segmentToPush, useUniquePath),
            exception -> exception instanceof Exception,
            5
        );
        final long pushFinishTime = System.nanoTime();
        objectMapper.writeValue(descriptorFile, segment);
        log.info("Segment[%s] of %,d bytes " + "built from %d incremental persist(s) in %,dms; " + "pushed to deep storage in %,dms. " + "Load spec is: %s", identifier, segment.getSize(), indexes.size(), (mergeFinishTime - startTime) / 1000000, (pushFinishTime - mergeFinishTime) / 1000000, objectMapper.writeValueAsString(segment.getLoadSpec()));
        return segment;
    } catch (Exception e) {
        metrics.incrementFailedHandoffs();
        log.warn(e, "Failed to push merged index for segment[%s].", identifier);
        throw new RuntimeException(e);
    }
}
Also used : Closer(org.apache.druid.java.util.common.io.Closer) ReferenceCountingSegment(org.apache.druid.segment.ReferenceCountingSegment) Closeable(java.io.Closeable) ArrayList(java.util.ArrayList) DataSegment(org.apache.druid.timeline.DataSegment) IndexSizeExceededException(org.apache.druid.segment.incremental.IndexSizeExceededException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) QueryableIndex(org.apache.druid.segment.QueryableIndex) ISE(org.apache.druid.java.util.common.ISE) FireHydrant(org.apache.druid.segment.realtime.FireHydrant) File(java.io.File) BaseProgressIndicator(org.apache.druid.segment.BaseProgressIndicator) Nullable(javax.annotation.Nullable)
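
mergeAndPush keeps every hydrant's segment reference open for the duration of the merge by registering the closeables on a Closer, rethrowing through it on failure, and closing it in a finally block. The sketch below shows the same register/rethrow/close shape using Guava's com.google.common.io.Closer, on the assumption that Druid's org.apache.druid.java.util.common.io.Closer behaves the same way; openResource is a made-up stand-in for the per-hydrant resource.

import com.google.common.io.Closer;

import java.io.Closeable;
import java.io.IOException;

public class CloserPattern {
    // Made-up stand-in for the per-hydrant resource that must stay open until the merge completes.
    static Closeable openResource(int i) {
        return () -> System.out.println("released resource " + i);
    }

    public static void main(String[] args) throws IOException {
        Closer closer = Closer.create();
        try {
            for (int i = 0; i < 3; i++) {
                // register() returns its argument, so a resource can be used after registration.
                closer.register(openResource(i));
            }
            // ... the merge that needs every resource to stay open would run here ...
            System.out.println("merged 3 inputs");
        } catch (Throwable t) {
            // rethrow() records the primary exception so close() can attach suppressed ones.
            throw closer.rethrow(t);
        } finally {
            // Closes everything that was registered, in reverse order of registration.
            closer.close();
        }
    }
}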

Example 29 with FireHydrant

use of org.apache.druid.segment.realtime.FireHydrant in project druid by apache.

the class Sink method makeNewCurrIndex.

private FireHydrant makeNewCurrIndex(long minTimestamp, DataSchema schema) {
    final IncrementalIndexSchema indexSchema = new IncrementalIndexSchema.Builder()
            .withMinTimestamp(minTimestamp)
            .withTimestampSpec(schema.getTimestampSpec())
            .withQueryGranularity(schema.getGranularitySpec().getQueryGranularity())
            .withDimensionsSpec(schema.getDimensionsSpec())
            .withMetrics(schema.getAggregators())
            .withRollup(schema.getGranularitySpec().isRollup())
            .build();
    // Build the incremental-index according to the spec that was chosen by the user
    final IncrementalIndex newIndex = appendableIndexSpec.builder()
            .setIndexSchema(indexSchema)
            .setMaxRowCount(maxRowsInMemory)
            .setMaxBytesInMemory(maxBytesInMemory)
            .setUseMaxMemoryEstimates(useMaxMemoryEstimates)
            .build();
    final FireHydrant old;
    synchronized (hydrantLock) {
        if (writable) {
            old = currHydrant;
            int newCount = 0;
            int numHydrants = hydrants.size();
            if (numHydrants > 0) {
                FireHydrant lastHydrant = hydrants.get(numHydrants - 1);
                newCount = lastHydrant.getCount() + 1;
                if (!indexSchema.getDimensionsSpec().hasCustomDimensions()) {
                    Map<String, ColumnCapabilities> oldCapabilities;
                    if (lastHydrant.hasSwapped()) {
                        oldCapabilities = new HashMap<>();
                        ReferenceCountingSegment segment = lastHydrant.getIncrementedSegment();
                        try {
                            QueryableIndex oldIndex = segment.asQueryableIndex();
                            for (String dim : oldIndex.getAvailableDimensions()) {
                                dimOrder.add(dim);
                                oldCapabilities.put(dim, oldIndex.getColumnHolder(dim).getCapabilities());
                            }
                        } finally {
                            segment.decrement();
                        }
                    } else {
                        IncrementalIndex oldIndex = lastHydrant.getIndex();
                        dimOrder.addAll(oldIndex.getDimensionOrder());
                        oldCapabilities = oldIndex.getColumnCapabilities();
                    }
                    newIndex.loadDimensionIterable(dimOrder, oldCapabilities);
                }
            }
            currHydrant = new FireHydrant(newIndex, newCount, getSegment().getId());
            if (old != null) {
                numRowsExcludingCurrIndex.addAndGet(old.getIndex().size());
            }
            hydrants.add(currHydrant);
        } else {
            // Oops, someone called finishWriting while we were making this new index.
            newIndex.close();
            throw new ISE("finishWriting() called during swap");
        }
    }
    return old;
}
Also used : ReferenceCountingSegment(org.apache.druid.segment.ReferenceCountingSegment) IncrementalIndex(org.apache.druid.segment.incremental.IncrementalIndex) QueryableIndex(org.apache.druid.segment.QueryableIndex) ISE(org.apache.druid.java.util.common.ISE) FireHydrant(org.apache.druid.segment.realtime.FireHydrant) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema) ColumnCapabilities(org.apache.druid.segment.column.ColumnCapabilities)
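
When makeNewCurrIndex swaps in a fresh in-memory index, it seeds the new index with the dimension order (and column capabilities) observed by the previous hydrant so the schema layout stays stable across hydrants. A stripped-down sketch of that carry-over using an insertion-ordered set follows; the class and method names are illustrative only.

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Illustrative only: dimOrder plays the role of the sink-level dimension order that
// makeNewCurrIndex passes to the new in-memory index.
public class DimensionOrderCarryOver {
    private static final Set<String> dimOrder = new LinkedHashSet<>();

    static List<String> newIndexDimensions(List<String> lastHydrantDims) {
        // Dimensions seen by earlier hydrants keep their position; new ones are appended.
        dimOrder.addAll(lastHydrantDims);
        return List.copyOf(dimOrder);
    }

    public static void main(String[] args) {
        System.out.println(newIndexDimensions(Arrays.asList("country", "device")));
        System.out.println(newIndexDimensions(Arrays.asList("country", "device", "browser")));
        // prints [country, device] then [country, device, browser]
    }
}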

Example 30 with FireHydrant

use of org.apache.druid.segment.realtime.FireHydrant in project druid by druid-io.

the class AppenderatorImpl method mergeAndPush.

/**
 * Merge segment, push to deep storage. Should only be used on segments that have been fully persisted. Must only
 * be run in the single-threaded pushExecutor.
 *
 * @param identifier    sink identifier
 * @param sink          sink to push
 * @param useUniquePath true if the segment should be written to a path with a unique identifier
 *
 * @return segment descriptor, or null if the sink is no longer valid
 */
@Nullable
private DataSegment mergeAndPush(final SegmentIdWithShardSpec identifier, final Sink sink, final boolean useUniquePath) {
    // noinspection ObjectEquality
    if (sinks.get(identifier) != sink) {
        log.warn("Sink for segment[%s] no longer valid, bailing out of mergeAndPush.", identifier);
        return null;
    }
    // Use a descriptor file to indicate that pushing has completed.
    final File persistDir = computePersistDir(identifier);
    final File mergedTarget = new File(persistDir, "merged");
    final File descriptorFile = computeDescriptorFile(identifier);
    // Sanity checks
    for (FireHydrant hydrant : sink) {
        if (sink.isWritable()) {
            throw new ISE("Expected sink to be no longer writable before mergeAndPush for segment[%s].", identifier);
        }
        synchronized (hydrant) {
            if (!hydrant.hasSwapped()) {
                throw new ISE("Expected sink to be fully persisted before mergeAndPush for segment[%s].", identifier);
            }
        }
    }
    try {
        if (descriptorFile.exists()) {
            if (useUniquePath) {
                // Don't reuse the descriptor, because the caller asked for a unique path. Leave the old one as-is, since
                // it might serve some unknown purpose.
                log.debug("Segment[%s] already pushed, but we want a unique path, so will push again with a new path.", identifier);
            } else {
                log.info("Segment[%s] already pushed, skipping.", identifier);
                return objectMapper.readValue(descriptorFile, DataSegment.class);
            }
        }
        removeDirectory(mergedTarget);
        if (mergedTarget.exists()) {
            throw new ISE("Merged target[%s] exists after removing?!", mergedTarget);
        }
        final File mergedFile;
        final long mergeFinishTime;
        final long startTime = System.nanoTime();
        List<QueryableIndex> indexes = new ArrayList<>();
        Closer closer = Closer.create();
        try {
            for (FireHydrant fireHydrant : sink) {
                // In batch mode, swap/persist did not memory-map the incremental index, so we need it mapped now:
                if (!isOpenSegments()) {
                    // sanity
                    Pair<File, SegmentId> persistedMetadata = persistedHydrantMetadata.get(fireHydrant);
                    if (persistedMetadata == null) {
                        throw new ISE("Persisted metadata for batch hydrant [%s] is null!", fireHydrant);
                    }
                    File persistedFile = persistedMetadata.lhs;
                    SegmentId persistedSegmentId = persistedMetadata.rhs;
                    // sanity:
                    if (persistedFile == null) {
                        throw new ISE("Persisted file for batch hydrant [%s] is null!", fireHydrant);
                    } else if (persistedSegmentId == null) {
                        throw new ISE("Persisted segmentId for batch hydrant in file [%s] is null!", persistedFile.getPath());
                    }
                    fireHydrant.swapSegment(new QueryableIndexSegment(indexIO.loadIndex(persistedFile), persistedSegmentId));
                }
                Pair<ReferenceCountingSegment, Closeable> segmentAndCloseable = fireHydrant.getAndIncrementSegment();
                final QueryableIndex queryableIndex = segmentAndCloseable.lhs.asQueryableIndex();
                log.debug("Segment[%s] adding hydrant[%s]", identifier, fireHydrant);
                indexes.add(queryableIndex);
                closer.register(segmentAndCloseable.rhs);
            }
            mergedFile = indexMerger.mergeQueryableIndex(
                indexes,
                schema.getGranularitySpec().isRollup(),
                schema.getAggregators(),
                schema.getDimensionsSpec(),
                mergedTarget,
                tuningConfig.getIndexSpec(),
                tuningConfig.getIndexSpecForIntermediatePersists(),
                new BaseProgressIndicator(),
                tuningConfig.getSegmentWriteOutMediumFactory(),
                tuningConfig.getMaxColumnsToMerge()
            );
            mergeFinishTime = System.nanoTime();
            log.debug("Segment[%s] built in %,dms.", identifier, (mergeFinishTime - startTime) / 1000000);
        } catch (Throwable t) {
            throw closer.rethrow(t);
        } finally {
            closer.close();
        }
        final DataSegment segmentToPush = sink.getSegment().withDimensions(IndexMerger.getMergedDimensionsFromQueryableIndexes(indexes, schema.getDimensionsSpec()));
        // Retry pushing segments because uploading to deep storage might fail especially for cloud storage types
        final DataSegment segment = RetryUtils.retry(
            () -> dataSegmentPusher.push(mergedFile, segmentToPush, useUniquePath),
            exception -> exception instanceof Exception,
            5
        );
        if (!isOpenSegments()) {
            // Drop the queryable indexes behind the hydrants; keeping them mapped can generate OOMs during merge if enough of them are held back...
            for (FireHydrant fireHydrant : sink) {
                fireHydrant.swapSegment(null);
            }
        }
        final long pushFinishTime = System.nanoTime();
        objectMapper.writeValue(descriptorFile, segment);
        log.info("Segment[%s] of %,d bytes " + "built from %d incremental persist(s) in %,dms; " + "pushed to deep storage in %,dms. " + "Load spec is: %s", identifier, segment.getSize(), indexes.size(), (mergeFinishTime - startTime) / 1000000, (pushFinishTime - mergeFinishTime) / 1000000, objectMapper.writeValueAsString(segment.getLoadSpec()));
        return segment;
    } catch (Exception e) {
        metrics.incrementFailedHandoffs();
        log.warn(e, "Failed to push merged index for segment[%s].", identifier);
        throw new RuntimeException(e);
    }
}
Also used : Closer(org.apache.druid.java.util.common.io.Closer) QueryableIndexSegment(org.apache.druid.segment.QueryableIndexSegment) ReferenceCountingSegment(org.apache.druid.segment.ReferenceCountingSegment) SegmentId(org.apache.druid.timeline.SegmentId) Closeable(java.io.Closeable) ArrayList(java.util.ArrayList) DataSegment(org.apache.druid.timeline.DataSegment) IndexSizeExceededException(org.apache.druid.segment.incremental.IndexSizeExceededException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) QueryableIndex(org.apache.druid.segment.QueryableIndex) ISE(org.apache.druid.java.util.common.ISE) FireHydrant(org.apache.druid.segment.realtime.FireHydrant) File(java.io.File) BaseProgressIndicator(org.apache.druid.segment.BaseProgressIndicator) Nullable(javax.annotation.Nullable)
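
The push itself is wrapped in RetryUtils.retry with a predicate and a maximum of 5 tries, because uploads to deep storage can fail transiently. Below is a plain-Java stand-in for that retry shape, assuming the same call-site semantics as in the example above; it is not Druid's RetryUtils.

import java.util.concurrent.Callable;
import java.util.function.Predicate;

public class RetrySketch {
    static <T> T retry(Callable<T> task, Predicate<Throwable> shouldRetry, int maxTries) throws Exception {
        for (int attempt = 1; ; attempt++) {
            try {
                return task.call();
            } catch (Exception e) {
                // Give up once the attempts are exhausted or the exception is not retriable.
                if (attempt >= maxTries || !shouldRetry.test(e)) {
                    throw e;
                }
                System.out.println("attempt " + attempt + " failed, retrying: " + e.getMessage());
            }
        }
    }

    public static void main(String[] args) throws Exception {
        int[] calls = {0};
        String result = retry(
            () -> {
                if (++calls[0] < 3) {
                    throw new RuntimeException("transient deep-storage failure");
                }
                return "pushed";
            },
            e -> e instanceof RuntimeException,
            5
        );
        System.out.println(result + " after " + calls[0] + " attempts"); // pushed after 3 attempts
    }
}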

Aggregations

FireHydrant (org.apache.druid.segment.realtime.FireHydrant): 38
IOException (java.io.IOException): 26
ArrayList (java.util.ArrayList): 26
IndexSizeExceededException (org.apache.druid.segment.incremental.IndexSizeExceededException): 22
File (java.io.File): 20
ISE (org.apache.druid.java.util.common.ISE): 20
QueryableIndex (org.apache.druid.segment.QueryableIndex): 16
ExecutionException (java.util.concurrent.ExecutionException): 12
Nullable (javax.annotation.Nullable): 12
QueryableIndexSegment (org.apache.druid.segment.QueryableIndexSegment): 12
ReferenceCountingSegment (org.apache.druid.segment.ReferenceCountingSegment): 12
Sink (org.apache.druid.segment.realtime.plumber.Sink): 12
Closer (org.apache.druid.java.util.common.io.Closer): 10
BaseProgressIndicator (org.apache.druid.segment.BaseProgressIndicator): 10
DataSegment (org.apache.druid.timeline.DataSegment): 10
Interval (org.joda.time.Interval): 10
Stopwatch (com.google.common.base.Stopwatch): 8
Closeable (java.io.Closeable): 8
List (java.util.List): 8
Pair (org.apache.druid.java.util.common.Pair): 8