Use of io.pravega.segmentstore.server.DataCorruptionException in project pravega by pravega.
The class SegmentAggregator, method flushPendingAppends.
/**
 * Flushes all Append Operations that can be flushed, up to the maximum allowed flush size.
 *
 * @param timeout Timeout for the operation.
 * @return A CompletableFuture that, when completed, will contain the result from the flush operation.
 */
private CompletableFuture<WriterFlushResult> flushPendingAppends(Duration timeout) {
    // Gather an InputStream made up of all the operations we can flush.
    BufferView flushData;
    try {
        flushData = getFlushData();
    } catch (DataCorruptionException ex) {
        return Futures.failedFuture(ex);
    }

    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushPendingAppends");

    // Flush them.
    TimeoutTimer timer = new TimeoutTimer(timeout);
    CompletableFuture<Void> flush;
    if (flushData == null || flushData.getLength() == 0) {
        flush = CompletableFuture.completedFuture(null);
    } else {
        flush = createSegmentIfNecessary(
                () -> this.storage.write(this.handle.get(), this.metadata.getStorageLength(), flushData.getReader(),
                        flushData.getLength(), timer.getRemaining()),
                timer.getRemaining());
    }

    return flush.thenApplyAsync(v -> {
        WriterFlushResult result = updateStatePostFlush(flushData);
        LoggerHelpers.traceLeave(log, this.traceObjectId, "flushPendingAppends", traceId, result);
        return result;
    }, this.executor).exceptionally(ex -> {
        if (Exceptions.unwrap(ex) instanceof BadOffsetException) {
            // We attempted to write at an offset that already contained other data. This can happen for a number of
            // reasons, but we do not have enough information here to determine why. We need to enter reconciliation
            // mode, which will determine the actual state of the segment in storage and take appropriate actions.
            setState(AggregatorState.ReconciliationNeeded);
        }

        // Rethrow all exceptions.
        throw new CompletionException(ex);
    });
}
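Note how the try/catch at the top converts the checked DataCorruptionException into a failed CompletableFuture rather than declaring it on the method: downstream stages then observe it through the future's exceptional completion, typically wrapped in a CompletionException, which is why the exceptionally handler above calls Exceptions.unwrap before inspecting the type. The following is a minimal, self-contained sketch of that propagation pattern using only JDK types; the class and method names are illustrative and not part of Pravega.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class FailedFutureDemo {
    // Stand-in for a checked exception such as DataCorruptionException.
    static class CorruptionException extends Exception {
        CorruptionException(String message) {
            super(message);
        }
    }

    // Mirrors the flushPendingAppends pattern: surface the checked exception as a failed future.
    static CompletableFuture<Integer> flush(boolean corrupt) {
        if (corrupt) {
            CompletableFuture<Integer> failed = new CompletableFuture<>();
            failed.completeExceptionally(new CorruptionException("unexpected data at flush offset"));
            return failed;
        }
        return CompletableFuture.completedFuture(42);
    }

    public static void main(String[] args) {
        flush(true)
                .thenApply(bytesFlushed -> bytesFlushed * 2)   // downstream stage, analogous to thenApplyAsync above
                .exceptionally(ex -> {
                    // The cause usually arrives wrapped in a CompletionException; unwrap it defensively,
                    // which is the role Exceptions.unwrap plays in the snippet above.
                    Throwable cause = (ex instanceof CompletionException && ex.getCause() != null) ? ex.getCause() : ex;
                    System.out.println("Flush failed: " + cause);
                    return -1;
                })
                .join();
    }
}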
Use of io.pravega.segmentstore.server.DataCorruptionException in project pravega by pravega.
The class TableCompactor, method compact.
/**
 * Performs a compaction of the Table Segment. Refer to this class' Javadoc for a description of the compaction process.
 *
 * @param timer Timer for the operation.
 * @return A CompletableFuture that, when completed, indicates the compaction has completed. When this future completes,
 * some of the Segment's Table Attributes may change to reflect the modifications to the Segment and/or compaction progress.
 * Notable exceptions:
 * <ul>
 * <li>{@link BadAttributeUpdateException} If the {@link TableAttributes#COMPACTION_OFFSET} changed while this method
 * was executing. In this case, no change will be performed and it can be resolved with a retry.</li>
 * </ul>
 */
CompletableFuture<Void> compact(TimeoutTimer timer) {
    long startOffset = getCompactionStartOffset();
    int maxLength = (int) Math.min(this.config.getMaxCompactionSize(), getLastIndexedOffset() - startOffset);
    if (startOffset < 0 || maxLength < 0) {
        // The Segment's Compaction offset must be a value between 0 and the current LastIndexedOffset.
        return Futures.failedFuture(new DataCorruptionException(String.format(
                "%s: '%s' has CompactionStartOffset=%s and CompactionLength=%s.",
                this.traceLogId, this.metadata.getName(), startOffset, maxLength)));
    } else if (maxLength == 0) {
        // Nothing to do.
        log.debug("{}: Up to date.", this.traceLogId);
        return CompletableFuture.completedFuture(null);
    }

    // Read the Table Entries beginning at the specified offset, without exceeding the given maximum length.
    return getRetryPolicy().runAsync(
            () -> readCandidates(startOffset, maxLength, timer)
                    .thenComposeAsync(candidates -> excludeObsolete(candidates, timer)
                            .thenComposeAsync(v -> copyCandidates(candidates, timer), this.executor), this.executor),
            this.executor);
}
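The guard at the top of compact is plain offset arithmetic: the compaction cursor must lie between 0 and the last indexed offset, so a negative difference can only mean the Segment's attributes are corrupted, while a zero difference means there is nothing left to compact. A standalone sketch of that computation with made-up numbers (not Pravega API):

// Reproduces the bounds computation at the start of compact() with concrete values.
public class CompactionBoundsDemo {
    static long computeMaxLength(long maxCompactionSize, long lastIndexedOffset, long compactionStartOffset) {
        return Math.min(maxCompactionSize, lastIndexedOffset - compactionStartOffset);
    }

    public static void main(String[] args) {
        long cap = 4 * 1024 * 1024;                              // per-pass compaction cap (illustrative value)
        System.out.println(computeMaxLength(cap, 1000, 200));    // 800  -> compact up to 800 bytes this pass
        System.out.println(computeMaxLength(cap, 1000, 1000));   // 0    -> up to date, nothing to do
        System.out.println(computeMaxLength(cap, 1000, 1500));   // -500 -> cursor is past the index: DataCorruptionException
    }
}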
Use of io.pravega.segmentstore.server.DataCorruptionException in project pravega by pravega.
The class WriterTableProcessor, method add.
// endregion
// region WriterSegmentProcessor Implementation
@Override
public void add(SegmentOperation operation) throws DataCorruptionException {
    Exceptions.checkNotClosed(this.closed.get(), this);
    Preconditions.checkArgument(operation.getStreamSegmentId() == this.connector.getMetadata().getId(),
            "Operation '%s' refers to a different Segment than this one (%s).", operation, this.connector.getMetadata().getId());
    Preconditions.checkArgument(operation.getSequenceNumber() != Operation.NO_SEQUENCE_NUMBER,
            "Operation '%s' does not have a Sequence Number assigned.", operation);
    if (this.connector.getMetadata().isDeleted() || !(operation instanceof CachedStreamSegmentAppendOperation)) {
        // Segment is either deleted or this is not an append operation. Nothing for us to do here.
        return;
    }

    CachedStreamSegmentAppendOperation append = (CachedStreamSegmentAppendOperation) operation;
    if (this.lastAddedOffset.get() >= 0) {
        // We have processed at least one operation so far. Verify operations are contiguous.
        if (this.lastAddedOffset.get() != append.getStreamSegmentOffset()) {
            throw new DataCorruptionException(String.format("Wrong offset for Operation '%s'. Expected: %s, actual: %d.",
                    operation, this.lastAddedOffset, append.getStreamSegmentOffset()));
        }
    } else {
        // We have not processed any operations yet. Verify that this one begins at (or before) the last indexed
        // offset and is not skipping any updates.
        if (this.aggregator.getLastIndexedOffset() < append.getStreamSegmentOffset()) {
            throw new DataCorruptionException(String.format("Operation '%s' begins after TABLE_INDEXED_OFFSET. Expected: %s, actual: %d.",
                    operation, this.aggregator.getLastIndexedOffset(), append.getStreamSegmentOffset()));
        }
    }

    if (append.getStreamSegmentOffset() >= this.aggregator.getLastIndexedOffset()) {
        // Operation has not been indexed yet; add it to the internal list so we can process it.
        // NOTE: appends that contain more than one TableEntry (for batch updates) will be indexed atomically (either
        // all Table Entries are indexed or none), so it is safe to compare this with the first offset of the append.
        this.aggregator.add(append);
        this.lastAddedOffset.set(append.getLastStreamSegmentOffset());
        log.debug("{}: Add {} (State={}).", this.traceObjectId, operation, this.aggregator);
    } else {
        log.debug("{}: Skipped {} (State={}).", this.traceObjectId, operation, this.aggregator);
    }
}
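The ordering rules in add() reduce to two pieces of state: the end offset of the last append that was queued (used to enforce contiguity) and the aggregator's last indexed offset (used to decide whether an append still needs indexing or can be skipped). Below is a simplified, hypothetical restatement of just those checks, using plain longs in place of the operation types; it sketches the logic and is not the actual WriterTableProcessor API.

// Hypothetical simplification of the offset checks performed by WriterTableProcessor.add().
public class AppendOrderingDemo {
    private long lastAddedOffset = -1;      // end offset of the last queued append (-1: nothing queued yet)
    private final long lastIndexedOffset;   // everything below this offset has already been indexed

    AppendOrderingDemo(long lastIndexedOffset) {
        this.lastIndexedOffset = lastIndexedOffset;
    }

    /** Returns true if the append was queued for indexing, false if it was skipped as already indexed. */
    boolean add(long appendOffset, int appendLength) throws Exception {
        if (this.lastAddedOffset >= 0) {
            // Already queuing: every new append must start exactly where the previous one ended.
            if (this.lastAddedOffset != appendOffset) {
                throw new Exception("Wrong offset: expected " + this.lastAddedOffset + ", actual " + appendOffset);
            }
        } else if (this.lastIndexedOffset < appendOffset) {
            // The first append we see must not begin past the indexed prefix, otherwise updates would be skipped.
            throw new Exception("Append begins after last indexed offset " + this.lastIndexedOffset);
        }
        if (appendOffset >= this.lastIndexedOffset) {
            this.lastAddedOffset = appendOffset + appendLength;   // queue it; remember where the next one must start
            return true;
        }
        return false;                                             // already covered by the index; skip
    }

    public static void main(String[] args) throws Exception {
        AppendOrderingDemo p = new AppendOrderingDemo(100);
        System.out.println(p.add(60, 40));    // false: [60, 100) is already indexed
        System.out.println(p.add(100, 20));   // true:  first unindexed append; the next one must start at 120
        System.out.println(p.add(120, 10));   // true:  contiguous with the previous append
        // p.add(200, 5) would now throw: expected offset 130, actual 200.
    }
}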
Use of io.pravega.segmentstore.server.DataCorruptionException in project pravega by pravega.
The class StorageWriter, method getProcessor.
// endregion
// region Helpers
/**
 * Gets, or creates, the ProcessorCollection (SegmentAggregator and related processors) for the given StreamSegment.
 *
 * @param streamSegmentId The Id of the StreamSegment to get the aggregator for.
 * @return A CompletableFuture that, when completed, will contain the requested ProcessorCollection.
 */
private CompletableFuture<ProcessorCollection> getProcessor(long streamSegmentId) {
    ProcessorCollection existingProcessor = this.processors.getOrDefault(streamSegmentId, null);
    if (existingProcessor != null) {
        if (closeIfNecessary(existingProcessor).isClosed()) {
            // Existing SegmentAggregator has become stale (most likely due to its SegmentMetadata being evicted),
            // so it has been closed and we need to create a new one.
            this.processors.remove(streamSegmentId);
        } else {
            return CompletableFuture.completedFuture(existingProcessor);
        }
    }

    // Get the SegmentAggregator's Metadata.
    UpdateableSegmentMetadata segmentMetadata = this.dataSource.getStreamSegmentMetadata(streamSegmentId);
    if (segmentMetadata == null) {
        return Futures.failedFuture(new DataCorruptionException(String.format(
                "No StreamSegment with id '%d' is registered in the metadata.", streamSegmentId)));
    }

    // Then create the aggregator, and only register it after a successful initialization. Otherwise we risk
    // having a registered aggregator that is not initialized.
    SegmentAggregator segmentAggregator = new SegmentAggregator(segmentMetadata, this.dataSource, this.storage, this.config, this.timer, this.executor);
    AttributeAggregator attributeAggregator = segmentMetadata.getType().isTransientSegment()
            ? null
            : new AttributeAggregator(segmentMetadata, this.dataSource, this.config, this.timer, this.executor);
    ProcessorCollection pc = new ProcessorCollection(segmentAggregator, attributeAggregator, this.createProcessors.apply(segmentMetadata));
    try {
        CompletableFuture<Void> init = segmentAggregator.initialize(this.config.getFlushTimeout());
        Futures.exceptionListener(init, ex -> segmentAggregator.close());
        return init.thenApply(ignored -> {
            this.processors.put(streamSegmentId, pc);
            return pc;
        });
    } catch (Exception ex) {
        pc.close();
        throw ex;
    }
}
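The noteworthy part of getProcessor is the registration order: the aggregators are created eagerly, but the ProcessorCollection only enters the map after its asynchronous initialization succeeds, and the aggregator is closed if initialization fails, so no caller can ever observe a registered-but-uninitialized entry. The following is a small, generic sketch of that pattern using only JDK types; the Processor class and its methods are stand-ins rather than Pravega API, and the sketch makes no attempt to guard against concurrent callers creating duplicates.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

public class RegisterAfterInitDemo {
    // Stand-in for SegmentAggregator: a component that needs async initialization and cleanup on failure.
    static class Processor implements AutoCloseable {
        final long segmentId;
        Processor(long segmentId) { this.segmentId = segmentId; }
        CompletableFuture<Void> initialize() {
            return CompletableFuture.runAsync(() -> { /* e.g. fetch segment state from storage */ });
        }
        @Override
        public void close() { /* release resources */ }
    }

    private final ConcurrentHashMap<Long, Processor> processors = new ConcurrentHashMap<>();

    CompletableFuture<Processor> getProcessor(long segmentId) {
        Processor existing = this.processors.get(segmentId);
        if (existing != null) {
            return CompletableFuture.completedFuture(existing);
        }
        Processor created = new Processor(segmentId);
        CompletableFuture<Void> init = created.initialize();
        // On a failed initialization, close the half-built processor; it was never registered,
        // so no other caller can observe it (the role Futures.exceptionListener plays above).
        init.whenComplete((v, ex) -> {
            if (ex != null) {
                created.close();
            }
        });
        // Register only after initialization succeeds, mirroring the thenApply in getProcessor above.
        return init.thenApply(v -> {
            this.processors.put(segmentId, created);
            return created;
        });
    }

    public static void main(String[] args) {
        RegisterAfterInitDemo demo = new RegisterAfterInitDemo();
        Processor p = demo.getProcessor(7L).join();
        System.out.println("Registered processor for segment " + p.segmentId);
    }
}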