use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
the class ContainerReadIndexTests method testStorageFailedReads.
/**
* Tests the ability to handle Storage read failures.
*/
@Test
public void testStorageFailedReads() {
    // Create all segments (Storage and Metadata).
    @Cleanup TestContext context = new TestContext();
    ArrayList<Long> segmentIds = createSegments(context);
    createSegmentsInStorage(context);

    // Read beyond the actual Storage offset (the metadata is corrupt).
    long testSegmentId = segmentIds.get(0);
    UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(testSegmentId);
    sm.setStorageLength(1024 * 1024);
    sm.setLength(1024 * 1024);
    AssertExtensions.assertThrows("Unexpected exception when attempting to read beyond the Segment length in Storage.", () -> {
        @Cleanup ReadResult readResult = context.readIndex.read(testSegmentId, 0, 100, TIMEOUT);
        Assert.assertTrue("Unexpected value from hasNext() when there should be at least one ReadResultEntry.", readResult.hasNext());
        ReadResultEntry entry = readResult.next();
        Assert.assertEquals("Unexpected ReadResultEntryType.", ReadResultEntryType.Storage, entry.getType());
        entry.requestContent(TIMEOUT);
        entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    }, ex -> ex instanceof ArrayIndexOutOfBoundsException);

    // Segment does not exist (it exists in Metadata, but not in Storage).
    val handle = context.storage.openWrite(sm.getName()).join();
    context.storage.delete(handle, TIMEOUT).join();
    AssertExtensions.assertThrows("Unexpected exception when attempting to read from a segment that exists in Metadata, but not in Storage.", () -> {
        @Cleanup ReadResult readResult = context.readIndex.read(testSegmentId, 0, 100, TIMEOUT);
        Assert.assertTrue("Unexpected value from hasNext() when there should be at least one ReadResultEntry.", readResult.hasNext());
        ReadResultEntry entry = readResult.next();
        Assert.assertEquals("Unexpected ReadResultEntryType.", ReadResultEntryType.Storage, entry.getType());
        entry.requestContent(TIMEOUT);
        entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    }, ex -> ex instanceof StreamSegmentNotExistsException);
}
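The corruption trick in this test is simply pushing the in-memory metadata (setStorageLength/setLength) past what Storage actually holds, then asserting that the resulting read future fails with the expected exception type. As a minimal, self-contained sketch of that async assertion pattern (not Pravega's actual AssertExtensions; all names below are invented for illustration):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.function.Predicate;

final class AsyncAssertSketch {
    // Wait for the future, then verify it failed with an exception matching the tester.
    static void assertFutureThrows(String message, CompletableFuture<?> future, Predicate<Throwable> tester) {
        try {
            future.join();
            throw new AssertionError(message + " (no exception was thrown)");
        } catch (CompletionException ex) {
            // join() wraps the failure cause in a CompletionException; unwrap and test it.
            if (!tester.test(ex.getCause())) {
                throw new AssertionError(message + " (unexpected exception: " + ex.getCause() + ")", ex.getCause());
            }
        }
    }
}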
use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
the class SegmentAggregator method reconcileMergeOperation.
/**
* Attempts to reconcile the given MergeTransactionOperation.
*
* @param op          The Operation to reconcile.
* @param storageInfo The current state of the Segment in Storage.
* @param timer       Timer for the operation.
* @return A CompletableFuture containing a FlushResult with the number of bytes reconciled, or failed with a
* ReconciliationFailureException if the operation cannot be reconciled based on the in-memory metadata or the
* current state of the Segment in Storage.
*/
private CompletableFuture<FlushResult> reconcileMergeOperation(MergeTransactionOperation op, SegmentProperties storageInfo, TimeoutTimer timer) {
    // Verify that the transaction segment is still registered in metadata.
    UpdateableSegmentMetadata transactionMeta = this.dataSource.getStreamSegmentMetadata(op.getTransactionSegmentId());
    if (transactionMeta == null || transactionMeta.isDeleted()) {
        return Futures.failedFuture(new ReconciliationFailureException(String.format("Cannot reconcile operation '%s' because the transaction segment is deleted or missing from the metadata.", op), this.metadata, storageInfo));
    }

    // Verify that the operation fits fully within this segment (mergers are atomic - they either merge all or nothing).
    if (op.getLastStreamSegmentOffset() > storageInfo.getLength()) {
        return Futures.failedFuture(new ReconciliationFailureException(String.format("Cannot reconcile operation '%s' because the transaction segment is not fully merged into the parent.", op), this.metadata, storageInfo));
    }

    // Verify that the transaction segment does not exist in Storage anymore.
    return this.storage.exists(transactionMeta.getName(), timer.getRemaining()).thenApplyAsync(exists -> {
        if (exists) {
            throw new CompletionException(new ReconciliationFailureException(String.format("Cannot reconcile operation '%s' because the transaction segment still exists in Storage.", op), this.metadata, storageInfo));
        }

        // Pop the first operation off the list and update the metadata for the transaction segment.
        StorageOperation processedOperation = this.operations.removeFirst();
        assert processedOperation != null && processedOperation instanceof MergeTransactionOperation : "First outstanding operation was not a MergeTransactionOperation";
        int newCount = this.mergeTransactionCount.decrementAndGet();
        assert newCount >= 0 : "Negative value for mergeTransactionCount";
        updateMetadataForTransactionPostMerger(transactionMeta);
        return new FlushResult().withMergedBytes(op.getLength());
    }, this.executor);
}
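Note the two failure styles used above: a precondition that fails synchronously is returned as an already-failed future (Futures.failedFuture), while a check that fails inside an async stage throws a CompletionException so the whole chain completes exceptionally. A simplified sketch of that shape using only java.util.concurrent (ReconcileSketch and its parameter names are invented stand-ins, not the Pravega API):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

final class ReconcileSketch {
    static CompletableFuture<Long> reconcile(boolean registeredInMetadata, CompletableFuture<Boolean> existsInStorage, long mergedBytes) {
        if (!registeredInMetadata) {
            // Synchronous precondition failure: return a pre-failed future.
            CompletableFuture<Long> failed = new CompletableFuture<>();
            failed.completeExceptionally(new IllegalStateException("segment missing from metadata"));
            return failed;
        }
        return existsInStorage.thenApply(exists -> {
            if (exists) {
                // Failure inside an async stage: wrap and throw to fail the chain.
                throw new CompletionException(new IllegalStateException("segment still exists in Storage"));
            }
            return mergedBytes;
        });
    }
}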
use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
the class SegmentAggregator method mergeWith.
/**
* Merges the Transaction StreamSegment with the given metadata into this one at the current offset.
*
* @param transactionMetadata The metadata of the Transaction StreamSegment to merge.
* @param mergeOp             The MergeTransactionOperation being processed.
* @param timer               Timer for the operation.
* @return A CompletableFuture that, when completed, will contain the number of bytes that were merged into this
* StreamSegment. If failed, the Future will contain the exception that caused it.
*/
private CompletableFuture<FlushResult> mergeWith(UpdateableSegmentMetadata transactionMetadata, MergeTransactionOperation mergeOp, TimeoutTimer timer) {
    if (transactionMetadata.isDeleted()) {
        return Futures.failedFuture(new DataCorruptionException(String.format("Attempted to merge with deleted Transaction segment '%s'.", transactionMetadata.getName())));
    }

    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "mergeWith", transactionMetadata.getId(), transactionMetadata.getName(), transactionMetadata.isSealedInStorage());
    FlushResult result = new FlushResult();
    if (!transactionMetadata.isSealedInStorage() || transactionMetadata.getLength() > transactionMetadata.getStorageLength()) {
        // Nothing to do. Given Transaction is not eligible for merger yet.
        LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
        return CompletableFuture.completedFuture(result);
    }

    AtomicLong mergedLength = new AtomicLong();
    return this.storage
            .getStreamSegmentInfo(transactionMetadata.getName(), timer.getRemaining())
            .thenAcceptAsync(transProperties -> {
                // Check that the Storage agrees with our metadata (if not, we have a problem ...)
                if (transProperties.getLength() != transactionMetadata.getStorageLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format("Transaction Segment '%s' cannot be merged into parent '%s' because its metadata disagrees with the Storage. Metadata.StorageLength=%d, Storage.StorageLength=%d", transactionMetadata.getName(), this.metadata.getName(), transactionMetadata.getStorageLength(), transProperties.getLength())));
                }

                if (transProperties.getLength() != mergeOp.getLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format("Transaction Segment '%s' cannot be merged into parent '%s' because the declared length in the operation disagrees with the Storage. Operation.Length=%d, Storage.StorageLength=%d", transactionMetadata.getName(), this.metadata.getName(), mergeOp.getLength(), transProperties.getLength())));
                }

                mergedLength.set(transProperties.getLength());
            }, this.executor)
            .thenComposeAsync(v1 -> storage.concat(this.handle.get(), mergeOp.getStreamSegmentOffset(), transactionMetadata.getName(), timer.getRemaining()), this.executor)
            .thenComposeAsync(v2 -> storage.getStreamSegmentInfo(this.metadata.getName(), timer.getRemaining()), this.executor)
            .thenApplyAsync(segmentProperties -> {
                // We have processed a MergeTransactionOperation, pop the first operation off and decrement the counter.
                StorageOperation processedOperation = this.operations.removeFirst();
                assert processedOperation != null && processedOperation instanceof MergeTransactionOperation : "First outstanding operation was not a MergeTransactionOperation";
                assert ((MergeTransactionOperation) processedOperation).getTransactionSegmentId() == transactionMetadata.getId() : "First outstanding operation was a MergeTransactionOperation for the wrong Transaction id.";
                int newCount = this.mergeTransactionCount.decrementAndGet();
                assert newCount >= 0 : "Negative value for mergeTransactionCount";

                // Post-merger validation. Verify we are still in agreement with the storage.
                long expectedNewLength = this.metadata.getStorageLength() + mergedLength.get();
                if (segmentProperties.getLength() != expectedNewLength) {
throw new CompletionException(new DataCorruptionException(String.format("Transaction Segment '%s' was merged into parent '%s' but the parent segment has an unexpected StorageLength after the merger. Previous=%d, MergeLength=%d, Expected=%d, Actual=%d", transactionMetadata.getName(), this.metadata.getName(), segmentProperties.getLength(), mergedLength.get(), expectedNewLength, segmentProperties.getLength())));
                }

                updateMetadata(segmentProperties);
                updateMetadataForTransactionPostMerger(transactionMetadata);
                this.lastFlush.set(this.timer.getElapsed());
                result.withMergedBytes(mergedLength.get());
                LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
                return result;
            }, this.executor)
            .exceptionally(ex -> {
                Throwable realEx = Exceptions.unwrap(ex);
                if (realEx instanceof BadOffsetException || realEx instanceof StreamSegmentNotExistsException) {
                    // We either attempted to write at an offset that already contained other data or the Transaction
                    // Segment no longer exists. This can happen for a number of reasons, but we do not have enough
                    // information here to determine why. We need to enter reconciliation mode, and hope for the best.
                    setState(AggregatorState.ReconciliationNeeded);
                }

                // Rethrow all exceptions.
                throw new CompletionException(ex);
            });
}
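The exceptionally(...) stage at the end is a recover-then-rethrow: for the two exception types that reconciliation can plausibly repair, it flips the aggregator's state, but it still rethrows so the caller observes the merge as failed. A minimal generic sketch of that pattern (names are invented; IllegalStateException stands in for BadOffsetException / StreamSegmentNotExistsException):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

final class RecoverThenRethrowSketch {
    static <T> CompletableFuture<T> markRecoverable(CompletableFuture<T> future, Runnable enterReconciliation) {
        return future.exceptionally(ex -> {
            // Stage failures usually arrive wrapped in CompletionException; unwrap first.
            Throwable cause = (ex instanceof CompletionException && ex.getCause() != null) ? ex.getCause() : ex;
            if (cause instanceof IllegalStateException) {
                // Analogous to setState(AggregatorState.ReconciliationNeeded) above.
                enterReconciliation.run();
            }
            // Rethrow so downstream stages and callers still see the failure.
            throw new CompletionException(cause);
        });
    }
}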
use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
the class SegmentAggregator method mergeIfNecessary.
/**
* Executes a merger of a Transaction StreamSegment into this one.
* Conditions for merger:
* <ul>
* <li> This StreamSegment is stand-alone (not a Transaction).
* <li> The next outstanding operation is a MergeTransactionOperation for a Transaction StreamSegment of this StreamSegment.
* <li> The StreamSegment to merge is not deleted, is sealed, and is fully flushed to Storage.
* </ul>
* Effects of the merger:
* <ul>
* <li> The entire contents of the given Transaction StreamSegment will be concatenated to this StreamSegment as one unit.
* <li> The metadata for this StreamSegment will be updated to reflect the new length of this StreamSegment.
* <li> The given Transaction Segment will cease to exist.
* </ul>
* <p>
* Note that various other data integrity checks are performed pre- and post-merger as part of this operation; these
* are meant to ensure the StreamSegment is not in a corrupted state.
*
* @param flushResult The flush result from the previous chained operation.
* @param timer Timer for the operation.
* @return A CompletableFuture that, when completed, will contain the number of bytes that were merged into this
* StreamSegment. If failed, the Future will contain the exception that caused it.
*/
private CompletableFuture<FlushResult> mergeIfNecessary(FlushResult flushResult, TimeoutTimer timer) {
    ensureInitializedAndNotClosed();
    assert !this.metadata.isTransaction() : "Cannot merge into a Transaction StreamSegment.";
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "mergeIfNecessary");
    StorageOperation first = this.operations.getFirst();
    if (first == null || !(first instanceof MergeTransactionOperation)) {
        // Either no operation or first operation is not a MergeTransaction. Nothing to do.
        LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeIfNecessary", traceId, flushResult);
        return CompletableFuture.completedFuture(flushResult);
    }

    MergeTransactionOperation mergeTransactionOperation = (MergeTransactionOperation) first;
    UpdateableSegmentMetadata transactionMetadata = this.dataSource.getStreamSegmentMetadata(mergeTransactionOperation.getTransactionSegmentId());
    return mergeWith(transactionMetadata, mergeTransactionOperation, timer).thenApply(mergeResult -> {
        flushResult.withFlushResult(mergeResult);
        LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeIfNecessary", traceId, flushResult);
        return flushResult;
    });
}
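mergeIfNecessary is built to be chained after a flush stage: it receives the running FlushResult, merges in any additional bytes, and returns the same accumulator. A hedged sketch of that chaining shape, with Tally as an invented stand-in for FlushResult (the real SegmentAggregator pipeline has more stages):

import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

final class FlushChainSketch {
    // Invented stand-in for FlushResult: a mutable tally that later stages fold into.
    static final class Tally {
        long flushedBytes;
        long mergedBytes;

        Tally with(Tally other) {
            this.flushedBytes += other.flushedBytes;
            this.mergedBytes += other.mergedBytes;
            return this;
        }
    }

    // Mirrors the shape of flush(...).thenCompose(r -> mergeIfNecessary(r, timer)):
    // the merge stage receives the running result and returns it, augmented.
    static CompletableFuture<Tally> flushThenMerge(CompletableFuture<Tally> flushStage, Function<Tally, CompletableFuture<Tally>> mergeIfNecessary) {
        return flushStage.thenCompose(mergeIfNecessary);
    }
}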
use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
the class StreamSegmentContainerMetadataTests method testGetEvictionCandidates.
/**
* Tests the ability to identify Segment Metadatas that are not in use anymore and are eligible for eviction.
* 1. Creates a number of segments, 1/4 of which have transactions.
* 2. All transactions are set to expire at a particular time and the segments expire in two separate stages.
* 3. Truncates repeatedly and at each step verifies that the correct segments were identified as candidates.
* 4. "Expires" all transactions and verifies that all dependent segments (which are eligible) are also identified.
* 5. "Expires" all segments and verifies they are all identified as candidates.
*/
@Test
public void testGetEvictionCandidates() {
    // Expire each segment at a different stage.
    final long firstStageExpiration = SEGMENT_COUNT;
    final long transactionExpiration = firstStageExpiration + SEGMENT_COUNT;
    final long finalExpiration = transactionExpiration + SEGMENT_COUNT;

    // Create a number of segments, out of which every 4th one has a transaction (25%).
    // Each segment has a 'LastKnownSequenceNumber' set in incremental order.
    final ArrayList<Long> segments = new ArrayList<>();
    final HashMap<Long, Long> transactions = new HashMap<>();
    final StreamSegmentContainerMetadata m = new MetadataBuilder(CONTAINER_ID).buildAs();
    populateSegmentsForEviction(segments, transactions, m);
    for (int i = 0; i < segments.size(); i++) {
        UpdateableSegmentMetadata segmentMetadata = m.getStreamSegmentMetadata(segments.get(i));
        if (segmentMetadata.isTransaction()) {
            // All transactions expire at once, in a second step.
            segmentMetadata.setLastUsed(transactionExpiration);
        } else if (i % 2 == 0) {
            // 1/2 of segments expire at the end.
            segmentMetadata.setLastUsed(finalExpiration);
        } else {
            // The rest of the segments expire in the first stage.
            segmentMetadata.setLastUsed(firstStageExpiration);
        }
    }

    // Add one segment that will be deleted. This should be evicted as soon as its LastUsed is before the truncation point.
    final long deletedSegmentId = segments.size();
    UpdateableSegmentMetadata deletedSegment = m.mapStreamSegmentId(getName(deletedSegmentId), deletedSegmentId);
    deletedSegment.markDeleted();
    deletedSegment.setLastUsed(firstStageExpiration);
    segments.add(deletedSegmentId);
    // Verify that not-yet-truncated operations will not be selected for eviction.
    val truncationPoints = Arrays.asList(0L, firstStageExpiration, transactionExpiration, finalExpiration, finalExpiration + 1);
    Collection<SegmentMetadata> evictionCandidates;
    for (long truncatedSeqNo : truncationPoints) {
        // Simulate a truncation.
        m.removeTruncationMarkers(truncatedSeqNo);

        // Try to evict everything.
        evictionCandidates = m.getEvictionCandidates(finalExpiration + 1, Integer.MAX_VALUE);
        checkEvictedSegmentCandidates(evictionCandidates, transactions, m, finalExpiration + 1, truncatedSeqNo);
    }

    // Now we expire transactions.
    evictionCandidates = m.getEvictionCandidates(transactionExpiration + 1, Integer.MAX_VALUE);
    checkEvictedSegmentCandidates(evictionCandidates, transactions, m, transactionExpiration + 1, Long.MAX_VALUE);

    // Now we expire all segments.
    evictionCandidates = m.getEvictionCandidates(finalExpiration + 1, Integer.MAX_VALUE);
    checkEvictedSegmentCandidates(evictionCandidates, transactions, m, finalExpiration + 1, Long.MAX_VALUE);

    // Check that, in the end, all segments in the metadata have been selected for eviction.
    Assert.assertEquals("Not all segments were evicted.", segments.size(), evictionCandidates.size());
}
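The test's two knobs, the eviction cutoff passed to getEvictionCandidates and the truncation point passed to removeTruncationMarkers, combine into one rule: a segment becomes an eviction candidate only when it is both stale and covered by the truncated portion of the log. A toy model of that selection rule (not the actual StreamSegmentContainerMetadata logic; it only assumes the behavior the test asserts):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

final class EvictionSketch {
    // A segment is a candidate only if its LastUsed value is below the eviction
    // cutoff AND the log has been truncated at least that far.
    static List<Long> candidates(Map<Long, Long> lastUsedBySegmentId, long cutoff, long truncatedSeqNo) {
        List<Long> result = new ArrayList<>();
        lastUsedBySegmentId.forEach((segmentId, lastUsed) -> {
            if (lastUsed < cutoff && lastUsed <= truncatedSeqNo) {
                result.add(segmentId);
            }
        });
        return result;
    }
}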