Use of io.pravega.segmentstore.server.DataCorruptionException in project pravega by pravega.
The class SegmentAggregator, method mergeWith.
/**
 * Merges the Transaction StreamSegment with the given metadata into this one at the current offset.
 *
 * @param transactionMetadata The metadata of the Transaction StreamSegment to merge.
 * @param mergeOp             The MergeTransactionOperation that triggered this merger.
 * @param timer               Timer for the operation.
 * @return A CompletableFuture that, when completed, will contain the number of bytes that were merged into this
 * StreamSegment. If failed, the Future will contain the exception that caused it.
 */
private CompletableFuture<FlushResult> mergeWith(UpdateableSegmentMetadata transactionMetadata, MergeTransactionOperation mergeOp, TimeoutTimer timer) {
    if (transactionMetadata.isDeleted()) {
        return Futures.failedFuture(new DataCorruptionException(String.format(
                "Attempted to merge with deleted Transaction segment '%s'.", transactionMetadata.getName())));
    }

    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "mergeWith",
            transactionMetadata.getId(), transactionMetadata.getName(), transactionMetadata.isSealedInStorage());
    FlushResult result = new FlushResult();
    if (!transactionMetadata.isSealedInStorage() || transactionMetadata.getLength() > transactionMetadata.getStorageLength()) {
        // Nothing to do. Given Transaction is not eligible for merger yet.
        LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
        return CompletableFuture.completedFuture(result);
    }

    AtomicLong mergedLength = new AtomicLong();
    return this.storage
            .getStreamSegmentInfo(transactionMetadata.getName(), timer.getRemaining())
            .thenAcceptAsync(transProperties -> {
                // Check that the Storage agrees with our metadata (if not, we have a problem ...)
                if (transProperties.getLength() != transactionMetadata.getStorageLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format(
                            "Transaction Segment '%s' cannot be merged into parent '%s' because its metadata disagrees with the Storage. Metadata.StorageLength=%d, Storage.StorageLength=%d",
                            transactionMetadata.getName(), this.metadata.getName(), transactionMetadata.getStorageLength(), transProperties.getLength())));
                }

                if (transProperties.getLength() != mergeOp.getLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format(
                            "Transaction Segment '%s' cannot be merged into parent '%s' because the declared length in the operation disagrees with the Storage. Operation.Length=%d, Storage.StorageLength=%d",
                            transactionMetadata.getName(), this.metadata.getName(), mergeOp.getLength(), transProperties.getLength())));
                }

                mergedLength.set(transProperties.getLength());
            }, this.executor)
            .thenComposeAsync(v1 -> storage.concat(this.handle.get(), mergeOp.getStreamSegmentOffset(), transactionMetadata.getName(), timer.getRemaining()), this.executor)
            .thenComposeAsync(v2 -> storage.getStreamSegmentInfo(this.metadata.getName(), timer.getRemaining()), this.executor)
            .thenApplyAsync(segmentProperties -> {
                // We have processed a MergeTransactionOperation, pop the first operation off and decrement the counter.
                StorageOperation processedOperation = this.operations.removeFirst();
                assert processedOperation instanceof MergeTransactionOperation : "First outstanding operation was not a MergeTransactionOperation";
                assert ((MergeTransactionOperation) processedOperation).getTransactionSegmentId() == transactionMetadata.getId()
                        : "First outstanding operation was a MergeTransactionOperation for the wrong Transaction id.";
                int newCount = this.mergeTransactionCount.decrementAndGet();
                assert newCount >= 0 : "Negative value for mergeTransactionCount";

                // Post-merger validation. Verify we are still in agreement with the storage.
                long expectedNewLength = this.metadata.getStorageLength() + mergedLength.get();
                if (segmentProperties.getLength() != expectedNewLength) {
throw new CompletionException(new DataCorruptionException(String.format("Transaction Segment '%s' was merged into parent '%s' but the parent segment has an unexpected StorageLength after the merger. Previous=%d, MergeLength=%d, Expected=%d, Actual=%d", transactionMetadata.getName(), this.metadata.getName(), segmentProperties.getLength(), mergedLength.get(), expectedNewLength, segmentProperties.getLength())));
                }

                updateMetadata(segmentProperties);
                updateMetadataForTransactionPostMerger(transactionMetadata);
                this.lastFlush.set(this.timer.getElapsed());
                result.withMergedBytes(mergedLength.get());
                LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
                return result;
            }, this.executor)
            .exceptionally(ex -> {
                Throwable realEx = Exceptions.unwrap(ex);
                if (realEx instanceof BadOffsetException || realEx instanceof StreamSegmentNotExistsException) {
                    // We either attempted to write at an offset that already contained other data or the Transaction
                    // Segment no longer exists. This can happen for a number of reasons, but we do not have enough
                    // information here to determine why. We need to enter reconciliation mode, and hope for the best.
                    setState(AggregatorState.ReconciliationNeeded);
                }

                // Rethrow all exceptions.
                throw new CompletionException(ex);
            });
}
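Note that mergeWith never throws synchronously: every validation failure is funneled into the returned CompletableFuture, and any intermediate stage re-wraps the cause in a CompletionException, which is why the exceptionally handler calls Exceptions.unwrap before inspecting the type. Below is a minimal, self-contained sketch of that propagation pattern; the class and method names are illustrative stand-ins, not Pravega code.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public final class FailurePropagationSketch {
    // Stand-in for io.pravega.segmentstore.server.DataCorruptionException, so this sketch compiles alone.
    static final class DataCorruptionException extends Exception {
        DataCorruptionException(String message) { super(message); }
    }

    // Mirrors the mergeWith pattern: report validation failures through the future, never by throwing.
    static CompletableFuture<Long> merge(boolean deleted) {
        if (deleted) {
            CompletableFuture<Long> failed = new CompletableFuture<>();
            failed.completeExceptionally(new DataCorruptionException("Attempted to merge with deleted Transaction segment."));
            return failed;
        }
        return CompletableFuture.completedFuture(100L);
    }

    public static void main(String[] args) {
        merge(true)
                .thenApply(mergedBytes -> mergedBytes) // any intermediate stage re-wraps the failure
                .exceptionally(ex -> {
                    // Like Exceptions.unwrap: peel off the CompletionException to reach the real cause.
                    Throwable real = ex instanceof CompletionException ? ex.getCause() : ex;
                    System.out.println("Merge failed: " + real.getMessage());
                    return -1L;
                })
                .join();
    }
}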
Use of io.pravega.segmentstore.server.DataCorruptionException in project pravega by pravega.
The class SegmentAggregator, method getFlushArgs.
/**
 * Returns a FlushArgs object containing the data that needs to be flushed to Storage.
 *
 * @return The aggregated object that can be used for flushing.
 * @throws DataCorruptionException If unable to retrieve the required data from the Data Source.
 */
private FlushArgs getFlushArgs() throws DataCorruptionException {
    StorageOperation first = this.operations.getFirst();
    if (!(first instanceof AggregatedAppendOperation)) {
        // Nothing to flush - first operation is not an AggregatedAppend.
        return new FlushArgs(null, 0);
    }

    AggregatedAppendOperation appendOp = (AggregatedAppendOperation) first;
    int length = (int) appendOp.getLength();
    InputStream data = this.dataSource.getAppendData(appendOp.getStreamSegmentId(), appendOp.getStreamSegmentOffset(), length);
    if (data == null) {
        if (this.metadata.isDeleted()) {
            // Segment was deleted - nothing more to do.
            return new FlushArgs(null, 0);
        }
        throw new DataCorruptionException(String.format("Unable to retrieve CacheContents for '%s'.", appendOp));
    }

    appendOp.seal();
    return new FlushArgs(data, length);
}
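getFlushArgs signals "nothing to flush" by returning FlushArgs(null, 0) rather than null or an exception, so the caller needs only one code path. The sketch below shows how a consumer might honor that contract; the FlushArgs stand-in and the flush helper are assumptions for illustration, not the real caller inside SegmentAggregator.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public final class FlushArgsSketch {
    // Illustrative stand-in for the FlushArgs returned by getFlushArgs().
    static final class FlushArgs {
        final InputStream data; // null means there is nothing to flush
        final int length;       // number of bytes readable from 'data'
        FlushArgs(InputStream data, int length) { this.data = data; this.length = length; }
    }

    static void flush(FlushArgs args) throws IOException {
        if (args.data == null || args.length == 0) {
            return; // Nothing to flush - mirrors the FlushArgs(null, 0) cases above.
        }
        byte[] buffer = args.data.readNBytes(args.length);
        // A real caller would now hand these bytes to Storage at the segment's write offset.
        System.out.printf("Would write %d byte(s) to Storage.%n", buffer.length);
    }

    public static void main(String[] args) throws IOException {
        flush(new FlushArgs(null, 0)); // no-op
        flush(new FlushArgs(new ByteArrayInputStream("hello".getBytes()), 5));
    }
}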
Use of io.pravega.segmentstore.server.DataCorruptionException in project pravega by pravega.
The class ContainerRecoverCommand, method execute.
@Override
public void execute() throws Exception {
    ensureArgCount(1);
    int containerId = getIntArg(0);
    @Cleanup val context = createContext();
    val readIndexConfig = getCommandArgs().getState().getConfigBuilder().build().getConfig(ReadIndexConfig::builder);

    // We create a special "read-only" BK log that will not be doing fencing or otherwise interfere with an active
    // container. As a result, due to the nature of BK, it is possible that it may not contain all the latest writes,
    // since the Bookies may not have yet synchronized the LAC on the last (active) ledger.
    @Cleanup val log = context.logFactory.createDebugLogWrapper(containerId);
    val bkLog = log.asReadOnly();
    val recoveryState = new RecoveryState();
    val callbacks = new DebugRecoveryProcessor.OperationCallbacks(
            recoveryState::newOperation,
            op -> recoveryState.operationComplete(op, null),
            recoveryState::operationComplete);
    @Cleanup val rp = DebugRecoveryProcessor.create(containerId, bkLog, context.containerConfig, readIndexConfig,
            getCommandArgs().getState().getExecutor(), callbacks);
    try {
        rp.performRecovery();
        output("Recovery complete: %d DataFrame(s) containing %d Operation(s).",
                recoveryState.dataFrameCount, recoveryState.operationCount);
    } catch (Exception ex) {
        output("Recovery FAILED: %d DataFrame(s) containing %d Operation(s) were able to be recovered.",
                recoveryState.dataFrameCount, recoveryState.operationCount);
        ex.printStackTrace(getOut());
        Throwable cause = Exceptions.unwrap(ex);
        if (cause instanceof DataCorruptionException) {
            unwrapDataCorruptionException((DataCorruptionException) cause);
        }
    }
}
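The RecoveryState class wired into the callbacks above is not shown. The following is a hypothetical reconstruction inferred only from its call sites: newOperation fires per recovered Operation, operationComplete receives a null Throwable on success, and the dataFrameCount/operationCount fields are read by output. The real inner class in ContainerRecoverCommand may differ in signatures and bookkeeping.

// Hypothetical sketch only; the Object parameter types are placeholders for
// Pravega's Operation and DataFrame record types.
final class RecoveryState {
    int operationCount = 0;
    int dataFrameCount = 0;

    // Called once per Operation read back from the DurableDataLog.
    void newOperation(Object operation, Object dataFrameEntries) {
        this.operationCount++;
        // A real implementation would inspect 'dataFrameEntries' and increment
        // dataFrameCount whenever a new DataFrame address is encountered.
    }

    // Called when processing of an Operation finishes; 'failure' is null on success.
    void operationComplete(Object operation, Throwable failure) {
        if (failure != null) {
            failure.printStackTrace();
        }
    }
}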