Usage of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in the pravega project:
class OperationLogTestBase, method getExpectedLengths.
/**
 * Given a list of LogOperations, calculates the final lengths of the StreamSegments that are encountered, by inspecting
 * every StreamSegmentAppendOperation and MergeSegmentOperation. All other types of Log Operations are ignored.
 *
 * @param operations The operations to inspect. Every operation must already be completed (asserted);
 *                   exceptionally-completed operations are skipped.
 * @return A Map of StreamSegmentId to expected final length. Segments that were merged into another
 *         segment are removed from the result (their length is folded into the merge target).
 */
private AbstractMap<Long, Integer> getExpectedLengths(Collection<OperationWithCompletion> operations) {
    HashMap<Long, Integer> result = new HashMap<>();
    for (OperationWithCompletion o : operations) {
        Assert.assertTrue("Operation is not completed.", o.completion.isDone());
        if (o.completion.isCompletedExceptionally()) {
            // This is a failed operation; ignore it.
            continue;
        }
        if (o.operation instanceof StreamSegmentAppendOperation) {
            // Accumulate the append's length onto its target segment.
            StreamSegmentAppendOperation appendOperation = (StreamSegmentAppendOperation) o.operation;
            result.merge(appendOperation.getStreamSegmentId(), appendOperation.getData().getLength(), Integer::sum);
        } else if (o.operation instanceof MergeSegmentOperation) {
            // Fold the source segment's accumulated length into the target, then drop the source
            // (a merged segment ceases to exist on its own).
            MergeSegmentOperation mergeOperation = (MergeSegmentOperation) o.operation;
            result.merge(mergeOperation.getStreamSegmentId(), result.getOrDefault(mergeOperation.getSourceSegmentId(), 0), Integer::sum);
            result.remove(mergeOperation.getSourceSegmentId());
        }
    }
    return result;
}
Usage of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in the pravega project:
class OperationLogTestBase, method getExpectedContents.
/**
 * Given a list of Log Operations, generates an InputStream for each encountered StreamSegment that contains the final
 * contents of that StreamSegment. Only considers operations of type StreamSegmentAppendOperation and MergeSegmentOperation.
 *
 * @param operations The operations to inspect. Every operation must already be completed (asserted);
 *                   exceptionally-completed operations are skipped.
 * @return A Map of StreamSegmentId to an InputStream that concatenates, in order, all data appended to
 *         (or merged into) that segment. Merged-away source segments do not appear in the result.
 */
private AbstractMap<Long, InputStream> getExpectedContents(Collection<OperationWithCompletion> operations) {
    HashMap<Long, List<InputStream>> partialContents = new HashMap<>();
    for (OperationWithCompletion o : operations) {
        Assert.assertTrue("Operation is not completed.", o.completion.isDone());
        if (o.completion.isCompletedExceptionally()) {
            // This is failed operation; ignore it.
            continue;
        }
        if (o.operation instanceof StreamSegmentAppendOperation) {
            // Append the operation's data reader to the target segment's list of partial streams.
            StreamSegmentAppendOperation appendOperation = (StreamSegmentAppendOperation) o.operation;
            partialContents.computeIfAbsent(appendOperation.getStreamSegmentId(), id -> new ArrayList<>())
                           .add(appendOperation.getData().getReader());
        } else if (o.operation instanceof MergeSegmentOperation) {
            // Move the source segment's accumulated streams onto the end of the target's, then drop the source.
            MergeSegmentOperation mergeOperation = (MergeSegmentOperation) o.operation;
            List<InputStream> targetSegmentContents = partialContents.computeIfAbsent(mergeOperation.getStreamSegmentId(), id -> new ArrayList<>());
            List<InputStream> sourceSegmentContents = partialContents.remove(mergeOperation.getSourceSegmentId());
            if (sourceSegmentContents != null) {
                // Guard against a source segment that never received any appends (previously this would NPE).
                targetSegmentContents.addAll(sourceSegmentContents);
            }
        }
    }
    // Construct final result: concatenate each segment's partial streams, in order, into a single stream.
    HashMap<Long, InputStream> result = new HashMap<>();
    for (Map.Entry<Long, List<InputStream>> e : partialContents.entrySet()) {
        result.put(e.getKey(), new SequenceInputStream(Iterators.asEnumeration(e.getValue().iterator())));
    }
    return result;
}
Usage of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in the pravega project:
class OperationMetadataUpdaterTests, method mergeTransaction.
/**
 * Merges the given transaction segment into its parent via the given updater and, when a reference
 * metadata is supplied, mirrors the expected outcome onto it (transaction marked merged, parent grown).
 */
private void mergeTransaction(long transactionId, OperationMetadataUpdater updater, UpdateableContainerMetadata referenceMetadata) throws Exception {
    // The merge target is recorded as an attribute (PARENT_ID) on the transaction segment.
    long targetSegmentId = updater.getStreamSegmentMetadata(transactionId).getAttributes().get(PARENT_ID);
    val mergeOp = new MergeSegmentOperation(targetSegmentId, transactionId);
    process(mergeOp, updater);
    if (referenceMetadata == null) {
        return;
    }
    // Keep the reference metadata in sync with what the updater should have produced.
    referenceMetadata.getStreamSegmentMetadata(transactionId).markMerged();
    val targetMetadata = referenceMetadata.getStreamSegmentMetadata(targetSegmentId);
    targetMetadata.setLength(targetMetadata.getLength() + mergeOp.getLength());
}
Usage of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in the pravega project:
class SegmentAggregator, method reconcileMergeOperation.
/**
 * Attempts to reconcile the given MergeSegmentOperation.
 *
 * @param op The Operation to reconcile.
 * @param storageInfo The current state of the Segment in Storage.
 * @param timer Timer for the operation
 * @return A CompletableFuture containing a FlushResult with the number of bytes reconciled, or failed with a ReconciliationFailureException,
 * if the operation cannot be reconciled, based on the in-memory metadata or the current state of the Segment in Storage.
 */
private CompletableFuture<WriterFlushResult> reconcileMergeOperation(MergeSegmentOperation op, SegmentProperties storageInfo, TimeoutTimer timer) {
    // Verify that the transaction segment is still registered in metadata.
    UpdateableSegmentMetadata transactionMeta = this.dataSource.getStreamSegmentMetadata(op.getSourceSegmentId());
    if (transactionMeta == null) {
        return Futures.failedFuture(new ReconciliationFailureException(String.format("Cannot reconcile operation '%s' because the source segment is missing from the metadata.", op), this.metadata, storageInfo));
    }
    // Verify that the operation fits fully within this segment (mergers are atomic - they either merge all or nothing).
    if (op.getLastStreamSegmentOffset() > storageInfo.getLength()) {
        return Futures.failedFuture(new ReconciliationFailureException(String.format("Cannot reconcile operation '%s' because the source segment is not fully merged into the target.", op), this.metadata, storageInfo));
    }
    // Verify that the transaction segment does not exist in Storage anymore.
    return this.storage.exists(transactionMeta.getName(), timer.getRemaining()).thenComposeAsync(exists -> {
        if (exists) {
            return Futures.failedFuture(new ReconciliationFailureException(String.format("Cannot reconcile operation '%s' because the transaction segment still exists in Storage.", op), this.metadata, storageInfo));
        }
        // Clear out any attributes.
        return this.dataSource.deleteAllAttributes(transactionMeta, timer.getRemaining());
    }, this.executor).thenApplyAsync(v -> {
        // Reconciliation complete. Pop the first operation off the list and update the metadata for the transaction segment.
        StorageOperation processedOperation = this.operations.removeFirst();
        // instanceof is null-safe, so a single check covers both "non-null" and "correct type".
        assert processedOperation instanceof MergeSegmentOperation : "First outstanding operation was not a MergeSegmentOperation";
        int newCount = this.mergeTransactionCount.decrementAndGet();
        assert newCount >= 0 : "Negative value for mergeTransactionCount";
        // Since the operation is already reconciled, the StorageLength of this Segment must be at least
        // the last offset of the operation. We are about to invoke ReadIndex.completeMerge(), which requires
        // that this value be set to at least the last offset of the merged Segment, so we need to ensure it's
        // set now. This will also be set at the end of reconciliation, but we cannot wait until then to invoke
        // the callbacks.
        long minStorageLength = processedOperation.getLastStreamSegmentOffset();
        if (this.metadata.getStorageLength() < minStorageLength) {
            this.metadata.setStorageLength(minStorageLength);
        }
        updateMetadataForTransactionPostMerger(transactionMeta, processedOperation.getStreamSegmentId());
        return new WriterFlushResult().withMergedBytes(op.getLength());
    }, this.executor);
}
Usage of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in the pravega project:
class SegmentAggregator, method initialize.
// endregion
// region Operations
/**
 * Initializes the SegmentAggregator by pulling information from the given Storage.
 *
 * @param timeout Timeout for the operation.
 * @return A CompletableFuture that, when completed, will indicate that the operation finished successfully. If any
 * errors occurred during the operation, the Future will be completed with the appropriate exception.
 */
CompletableFuture<Void> initialize(Duration timeout) {
    // Preconditions: not closed, not already initialized, and no Storage handle acquired yet.
    Exceptions.checkNotClosed(isClosed(), this);
    Preconditions.checkState(this.state.get() == AggregatorState.NotInitialized, "SegmentAggregator has already been initialized.");
    assert this.handle.get() == null : "non-null handle but state == " + this.state.get();
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "initialize");
    if (this.metadata.isDeleted()) {
        // Segment is dead on arrival. Delete it from Storage (if it exists) and do not bother to do anything else with it).
        // This is a rather uncommon case, but it can happen in one of two cases: 1) the segment has been deleted
        // immediately after creation or 2) after a container recovery.
        log.info("{}: Segment '{}' is marked as Deleted in Metadata. Attempting Storage delete.", this.traceObjectId, this.metadata.getName());
        // Best-effort delete: a missing segment is treated as already deleted rather than an error.
        return Futures.exceptionallyExpecting(this.storage.openWrite(this.metadata.getName()).thenComposeAsync(handle -> this.storage.delete(handle, timeout), this.executor), ex -> ex instanceof StreamSegmentNotExistsException, // It's OK if already deleted.
        null).thenRun(() -> {
            updateMetadataPostDeletion(this.metadata);
            log.info("{}: Segment '{}' is marked as Deleted in Metadata and has been deleted from Storage. Ignoring all further operations on it.", this.traceObjectId, this.metadata.getName());
            setState(AggregatorState.Writing);
            LoggerHelpers.traceLeave(log, this.traceObjectId, "initialize", traceId);
        });
    }
    // Segment not deleted.
    return openWrite(this.metadata.getName(), this.handle, timeout).thenAcceptAsync(segmentInfo -> {
        // Check & Update StorageLength in metadata.
        if (this.metadata.getStorageLength() != segmentInfo.getLength()) {
            if (this.metadata.getStorageLength() >= 0) {
                // Only log warning if the StorageLength has actually been initialized, but is different.
                log.info("{}: SegmentMetadata has a StorageLength ({}) that is different than the actual one ({}) - updating metadata.", this.traceObjectId, this.metadata.getStorageLength(), segmentInfo.getLength());
            }
            // It is very important to keep this value up-to-date and correct.
            this.metadata.setStorageLength(segmentInfo.getLength());
        }
        // Check if the Storage segment is sealed, but it's not in metadata (this is 100% indicative of some data corruption happening).
        if (segmentInfo.isSealed()) {
            if (!this.metadata.isSealed()) {
                // Fatal: Storage is ahead of metadata in a way that cannot be reconciled here.
                throw new CompletionException(new DataCorruptionException(String.format("Segment '%s' is sealed in Storage but not in the metadata.", this.metadata.getName())));
            }
            if (!this.metadata.isSealedInStorage()) {
                this.metadata.markSealedInStorage();
                log.info("{}: Segment is sealed in Storage but metadata does not reflect that - updating metadata.", this.traceObjectId);
            }
        }
        log.info("{}: Initialized. StorageLength = {}, Sealed = {}.", this.traceObjectId, segmentInfo.getLength(), segmentInfo.isSealed());
        LoggerHelpers.traceLeave(log, this.traceObjectId, "initialize", traceId);
        setState(AggregatorState.Writing);
    }, this.executor).exceptionally(ex -> {
        ex = Exceptions.unwrap(ex);
        if (ex instanceof StreamSegmentNotExistsException) {
            // Segment does not exist in Storage. There are two possibilities here:
            if (this.metadata.getStorageLength() == 0 && !this.metadata.isDeletedInStorage()) {
                // Segment has never been created because there was nothing to write to it. As long as we know
                // its expected length is zero, this is a valid case.
                this.handle.set(null);
                log.info("{}: Initialized. Segment does not exist in Storage but Metadata indicates it should be empty.", this.traceObjectId);
                if (this.metadata.isSealed() && this.metadata.getLength() == 0) {
                    // Truly an empty segment that is sealed; mark it as such in Storage.
                    this.metadata.markSealedInStorage();
                    log.info("{}: Segment does not exist in Storage, but Metadata indicates it is empty and sealed - marking as sealed in storage.", this.traceObjectId);
                }
            } else {
                // Segment does not exist anymore. This is a real possibility during recovery, in the following cases:
                // * We already processed a Segment Deletion but did not have a chance to checkpoint metadata
                // * We processed a MergeSegmentOperation but did not have a chance to ack/truncate the DataSource
                // Update metadata, just in case it is not already updated.
                updateMetadataPostDeletion(this.metadata);
                log.info("{}: Segment '{}' does not exist in Storage. Ignoring all further operations on it.", this.traceObjectId, this.metadata.getName());
            }
            // Either way, the aggregator is usable (or permanently inert) - move to the Writing state.
            setState(AggregatorState.Writing);
            LoggerHelpers.traceLeave(log, this.traceObjectId, "initialize", traceId);
        } else {
            // Other kind of error - re-throw.
            throw new CompletionException(ex);
        }
        return null;
    });
}
Aggregations