Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in the pravega project:
class SegmentAggregator, method mergeWith.
/**
 * Merges the Transaction StreamSegment with given metadata into this one at the current offset.
 *
 * @param transactionMetadata The metadata of the Transaction StreamSegment to merge.
 * @param mergeOp             The MergeTransactionOperation being processed; declares the target offset in this
 *                            Segment and the expected length of the Transaction Segment.
 * @param timer               Timer for the operation.
 * @return A CompletableFuture that, when completed, will contain the number of bytes that were merged into this
 * StreamSegment. If failed, the Future will contain the exception that caused it.
 */
private CompletableFuture<FlushResult> mergeWith(UpdateableSegmentMetadata transactionMetadata, MergeTransactionOperation mergeOp, TimeoutTimer timer) {
    if (transactionMetadata.isDeleted()) {
        return Futures.failedFuture(new DataCorruptionException(String.format("Attempted to merge with deleted Transaction segment '%s'.", transactionMetadata.getName())));
    }

    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "mergeWith", transactionMetadata.getId(), transactionMetadata.getName(), transactionMetadata.isSealedInStorage());
    FlushResult result = new FlushResult();
    if (!transactionMetadata.isSealedInStorage() || transactionMetadata.getLength() > transactionMetadata.getStorageLength()) {
        // Nothing to do. Given Transaction is not eligible for merger yet: it must be sealed in Storage and fully flushed.
        LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
        return CompletableFuture.completedFuture(result);
    }

    AtomicLong mergedLength = new AtomicLong();
    return this.storage
            .getStreamSegmentInfo(transactionMetadata.getName(), timer.getRemaining())
            .thenAcceptAsync(transProperties -> {
                // Check that the Storage agrees with our metadata (if not, we have a problem ...)
                if (transProperties.getLength() != transactionMetadata.getStorageLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format("Transaction Segment '%s' cannot be merged into parent '%s' because its metadata disagrees with the Storage. Metadata.StorageLength=%d, Storage.StorageLength=%d", transactionMetadata.getName(), this.metadata.getName(), transactionMetadata.getStorageLength(), transProperties.getLength())));
                }

                if (transProperties.getLength() != mergeOp.getLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format("Transaction Segment '%s' cannot be merged into parent '%s' because the declared length in the operation disagrees with the Storage. Operation.Length=%d, Storage.StorageLength=%d", transactionMetadata.getName(), this.metadata.getName(), mergeOp.getLength(), transProperties.getLength())));
                }

                mergedLength.set(transProperties.getLength());
            }, this.executor)
            .thenComposeAsync(v1 -> storage.concat(this.handle.get(), mergeOp.getStreamSegmentOffset(), transactionMetadata.getName(), timer.getRemaining()), this.executor)
            .thenComposeAsync(v2 -> storage.getStreamSegmentInfo(this.metadata.getName(), timer.getRemaining()), this.executor)
            .thenApplyAsync(segmentProperties -> {
                // We have processed a MergeTransactionOperation, pop the first operation off and decrement the counter.
                StorageOperation processedOperation = this.operations.removeFirst();
                assert processedOperation != null && processedOperation instanceof MergeTransactionOperation : "First outstanding operation was not a MergeTransactionOperation";
                assert ((MergeTransactionOperation) processedOperation).getTransactionSegmentId() == transactionMetadata.getId() : "First outstanding operation was a MergeTransactionOperation for the wrong Transaction id.";
                int newCount = this.mergeTransactionCount.decrementAndGet();
                assert newCount >= 0 : "Negative value for mergeTransactionCount";

                // Post-merger validation. Verify we are still in agreement with the storage.
                long expectedNewLength = this.metadata.getStorageLength() + mergedLength.get();
                if (segmentProperties.getLength() != expectedNewLength) {
                    // BUGFIX: 'Previous' must report the pre-merge StorageLength from our metadata; it previously
                    // (and uselessly) repeated the post-merge Storage length that is already shown as 'Actual'.
                    throw new CompletionException(new DataCorruptionException(String.format("Transaction Segment '%s' was merged into parent '%s' but the parent segment has an unexpected StorageLength after the merger. Previous=%d, MergeLength=%d, Expected=%d, Actual=%d", transactionMetadata.getName(), this.metadata.getName(), this.metadata.getStorageLength(), mergedLength.get(), expectedNewLength, segmentProperties.getLength())));
                }

                updateMetadata(segmentProperties);
                updateMetadataForTransactionPostMerger(transactionMetadata);

                this.lastFlush.set(this.timer.getElapsed());
                result.withMergedBytes(mergedLength.get());
                LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
                return result;
            }, this.executor)
            .exceptionally(ex -> {
                Throwable realEx = Exceptions.unwrap(ex);
                if (realEx instanceof BadOffsetException || realEx instanceof StreamSegmentNotExistsException) {
                    // We either attempted to write at an offset that already contained other data or the Transaction
                    // Segment no longer exists. This can happen for a number of reasons, but we do not have enough
                    // information here to determine why. We need to enter reconciliation mode, and hope for the best.
                    setState(AggregatorState.ReconciliationNeeded);
                }

                // Rethrow all exceptions.
                throw new CompletionException(ex);
            });
}
Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in the pravega project:
class SegmentAggregator, method mergeIfNecessary.
/**
 * Executes a merger of a Transaction StreamSegment into this one.
 * Conditions for merger:
 * <ul>
 * <li> This StreamSegment is stand-alone (not a Transaction).
 * <li> The next outstanding operation is a MergeTransactionOperation for a Transaction StreamSegment of this StreamSegment.
 * <li> The StreamSegment to merge is not deleted, it is sealed and is fully flushed to Storage.
 * </ul>
 * Effects of the merger:
 * <ul>
 * <li> The entire contents of the given Transaction StreamSegment will be concatenated to this StreamSegment as one unit.
 * <li> The metadata for this StreamSegment will be updated to reflect the new length of this StreamSegment.
 * <li> The given Transaction Segment will cease to exist.
 * </ul>
 * <p>
 * Note that various other data integrity checks are done pre and post merger as part of this operation which are meant
 * to ensure the StreamSegment is not in a corrupted state.
 *
 * @param flushResult The flush result from the previous chained operation.
 * @param timer       Timer for the operation.
 * @return A CompletableFuture that, when completed, will contain the number of bytes that were merged into this
 * StreamSegment. If failed, the Future will contain the exception that caused it.
 */
private CompletableFuture<FlushResult> mergeIfNecessary(FlushResult flushResult, TimeoutTimer timer) {
    ensureInitializedAndNotClosed();
    assert !this.metadata.isTransaction() : "Cannot merge into a Transaction StreamSegment.";
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "mergeIfNecessary");

    StorageOperation first = this.operations.getFirst();
    // Note: 'instanceof' yields false for null, so this single check also covers an empty operation queue.
    if (!(first instanceof MergeTransactionOperation)) {
        // Either no operation or first operation is not a MergeTransaction. Nothing to do.
        LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeIfNecessary", traceId, flushResult);
        return CompletableFuture.completedFuture(flushResult);
    }

    MergeTransactionOperation mergeTransactionOperation = (MergeTransactionOperation) first;
    UpdateableSegmentMetadata transactionMetadata = this.dataSource.getStreamSegmentMetadata(mergeTransactionOperation.getTransactionSegmentId());
    return mergeWith(transactionMetadata, mergeTransactionOperation, timer)
            .thenApply(mergeResult -> {
                // Fold the merge byte count into the cumulative flush result before handing it back to the caller.
                flushResult.withFlushResult(mergeResult);
                LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeIfNecessary", traceId, flushResult);
                return flushResult;
            });
}
Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in the pravega project:
class SegmentAggregator, method getFlushArgs.
/**
 * Returns a FlushArgs which contains the data needing to be flushed to Storage.
 *
 * @return The aggregated object that can be used for flushing.
 * @throws DataCorruptionException If a unable to retrieve required data from the Data Source.
 */
private FlushArgs getFlushArgs() throws DataCorruptionException {
    StorageOperation firstOp = this.operations.getFirst();
    if (!(firstOp instanceof AggregatedAppendOperation)) {
        // First outstanding operation is not an AggregatedAppend (or queue is empty): nothing to flush.
        return new FlushArgs(null, 0);
    }

    AggregatedAppendOperation appendOp = (AggregatedAppendOperation) firstOp;
    int appendLength = (int) appendOp.getLength();
    InputStream appendData = this.dataSource.getAppendData(appendOp.getStreamSegmentId(), appendOp.getStreamSegmentOffset(), appendLength);
    if (appendData == null) {
        if (this.metadata.isDeleted()) {
            // The Segment was deleted out from under us; there is nothing left to flush.
            return new FlushArgs(null, 0);
        }

        // The Segment still exists, so missing cache data indicates corruption.
        throw new DataCorruptionException(String.format("Unable to retrieve CacheContents for '%s'.", appendOp));
    }

    // Seal the aggregated append so no further data is added to it while it is being flushed.
    appendOp.seal();
    return new FlushArgs(appendData, appendLength);
}
Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in the pravega project:
class SegmentAggregator, method reconcile.
/**
 * Attempts to reconcile the outstanding operations against the actual state of the Segment in Storage.
 * Each outstanding operation is verified (in order) against the Storage snapshot captured in the current
 * ReconciliationState; once an operation crossing the Storage length boundary has been reconciled, the loop
 * stops, the metadata is refreshed from Storage and the aggregator returns to the Writing state.
 *
 * @param timer Timer for the operation.
 * @return A CompletableFuture that, when completed, will contain the aggregated FlushResult of all reconciled
 * operations. If failed, the Future will contain the exception that caused it.
 */
private CompletableFuture<FlushResult> reconcile(TimeoutTimer timer) {
// Precondition: reconcile() may only run while the aggregator is in the Reconciling state.
assert this.state.get() == AggregatorState.Reconciling : "reconcile cannot be called if state == " + this.state;
ReconciliationState rc = this.reconciliationState.get();
assert rc != null : "reconciliationState is null";
// Snapshot of the Segment's state in Storage, captured when reconciliation was initiated.
SegmentProperties storageInfo = rc.getStorageInfo();
long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "reconcile", rc);
// Process each Operation in sequence, as long as its starting offset is less than ReconciliationState.getStorageInfo().getLength()
FlushResult result = new FlushResult();
// Set to true once an operation's last offset reaches/passes the Storage length; ends the loop.
AtomicBoolean exceededStorageLength = new AtomicBoolean(false);
return Futures.loop(() -> this.operations.size() > 0 && !exceededStorageLength.get(), () -> {
// Peek (do not remove) the first operation; reconcileOperation is responsible for dequeuing it.
StorageOperation op = this.operations.getFirst();
return reconcileOperation(op, storageInfo, timer).thenApply(partialFlushResult -> {
if (op.getLastStreamSegmentOffset() >= storageInfo.getLength()) {
// This operation crosses the boundary of StorageLength. It has been reconciled,
// and as such it is the last operation that we need to inspect.
exceededStorageLength.set(true);
}

log.info("{}: Reconciled {} ({}).", this.traceObjectId, op, partialFlushResult);
return partialFlushResult;
});
}, result::withFlushResult, this.executor).thenApply(v -> {
// Loop finished: sync our metadata with the Storage snapshot and resume normal (Writing) operation.
updateMetadata(storageInfo);
this.reconciliationState.set(null);
setState(AggregatorState.Writing);
LoggerHelpers.traceLeave(log, this.traceObjectId, "reconcile", traceId, result);
return result;
});
}
Aggregations