Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.
The class SegmentAggregatorTests, method testDeletedSegmentInMetadata.
/**
* Tests the case when a Segment is deleted in the Metadata and ReadIndex.
* Note that we are not testing the case when the Segment is deleted only in Storage, since the SegmentContainer
* first deletes in the Metadata and then in Storage, and that would simply throw a StreamSegmentNotExistsException,
* which will subside as soon as the Metadata is updated and the StorageWriter cleans up the SegmentAggregator.
*/
@Test
public void testDeletedSegmentInMetadata() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();

    // Add one operation big enough to trigger a Flush.
    byte[] appendData = new byte[config.getFlushThresholdBytes() + 1];
    StorageOperation appendOp = generateAppendAndUpdateMetadata(SEGMENT_ID, appendData, context);
    context.segmentAggregator.add(appendOp);
    Assert.assertTrue("Unexpected value returned by mustFlush() (size threshold).", context.segmentAggregator.mustFlush());

    // Delete the segment in the Metadata & ReadIndex. We want to make sure we do this while the flush() method is
    // running, hence the callback (flush() has a check at the beginning that exits if the metadata indicates deleted).
    context.dataSource.setOnGetAppendData(() -> {
        context.containerMetadata.deleteStreamSegment(context.segmentAggregator.getMetadata().getName());
        context.dataSource.clearAppendData();
    });

    // Call flush() and inspect the result.
    FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
    Assert.assertEquals("Not expecting any bytes to be flushed.", 0, flushResult.getFlushedBytes());
    Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    Assert.assertFalse("Unexpected value returned by mustFlush() after flush.", context.segmentAggregator.mustFlush());
}
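The empty FlushResult asserted above relies on the early-exit guard the comment mentions: flush() checks the metadata before reading any append data. A minimal sketch of such a guard, using hypothetical stand-in types rather than Pravega's actual SegmentAggregator internals:

import java.time.Duration;
import java.util.concurrent.CompletableFuture;

// Hypothetical stand-ins for illustration only; FlushResultSketch mirrors an empty FlushResult.
class FlushResultSketch {
    long getFlushedBytes() { return 0; }
    long getMergedBytes() { return 0; }
}

class DeletedGuardSketch {
    private volatile boolean deletedInMetadata; // set when the Metadata marks the segment deleted

    CompletableFuture<FlushResultSketch> flush(Duration timeout) {
        if (this.deletedInMetadata) {
            // A deleted segment has nothing to flush; report an empty result so the
            // StorageWriter can clean up this aggregator.
            return CompletableFuture.completedFuture(new FlushResultSketch());
        }
        // ... actual flushing logic would go here ...
        return CompletableFuture.completedFuture(new FlushResultSketch());
    }
}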
Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.
The class SegmentAggregatorTests, method testTruncateAndSeal.
/**
* Tests the flush() method with StreamSegmentTruncateOperations after the segment has been Sealed.
*/
@Test
public void testTruncateAndSeal() throws Exception {
    // Add some data and intersperse with truncates.
    final int appendCount = 1000;
    final int truncateEvery = 20;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Accumulate some Appends.
    AtomicLong outstandingSize = new AtomicLong();
    SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize);
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);
        if (i % truncateEvery == 1) {
            StorageOperation truncateOp = generateTruncateAndUpdateMetadata(SEGMENT_ID, context);
            context.segmentAggregator.add(truncateOp);
            sequenceNumbers.record(truncateOp);
        }
    }

    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);

    // Add another Truncate Operation, after the Seal.
    StorageOperation lastTruncateOp = generateTruncateAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(lastTruncateOp);

    FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
    Assert.assertEquals("Expected the entire Aggregator to be flushed.", outstandingSize.get(), flushResult.getFlushedBytes());
    Assert.assertFalse("Unexpected value returned by mustFlush() after flushing.", context.segmentAggregator.mustFlush());
    Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flushing.", Operation.NO_SEQUENCE_NUMBER, context.segmentAggregator.getLowestUncommittedSequenceNumber());

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageInfo.getLength());
    Assert.assertTrue("Unexpected sealed status in Storage.", storageInfo.isSealed());
    Assert.assertEquals("Unexpected truncation offset in Storage.", lastTruncateOp.getStreamSegmentOffset(), context.storage.getTruncationOffset(context.segmentAggregator.getMetadata().getName()));
    context.storage.read(InMemoryStorage.newHandle(context.segmentAggregator.getMetadata().getName(), false), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
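SequenceNumberCalculator is test infrastructure that is not shown in this excerpt. Based on how the test uses it, a minimal tracker might look like the following sketch (the class and its internals are assumptions, including the Long.MIN_VALUE stand-in for Operation.NO_SEQUENCE_NUMBER):

import java.util.concurrent.ConcurrentSkipListSet;

// Hypothetical tracker: records sequence numbers of added operations and reports
// the lowest one that has not yet been committed to Storage.
class SequenceNumberTrackerSketch {
    static final long NO_SEQUENCE_NUMBER = Long.MIN_VALUE; // stand-in constant
    private final ConcurrentSkipListSet<Long> uncommitted = new ConcurrentSkipListSet<>();

    void record(long sequenceNumber) {
        this.uncommitted.add(sequenceNumber);
    }

    void committedUpTo(long sequenceNumber) {
        // Drop everything at or below the highest flushed sequence number.
        this.uncommitted.headSet(sequenceNumber, true).clear();
    }

    long getLowestUncommittedSequenceNumber() {
        return this.uncommitted.isEmpty() ? NO_SEQUENCE_NUMBER : this.uncommitted.first();
    }
}

After a full flush the set would be empty, which matches the test's expectation that getLowestUncommittedSequenceNumber() returns Operation.NO_SEQUENCE_NUMBER.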
Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.
The class SegmentAggregatorTests, method testReconcileSeal.
/**
* Tests the ability of the SegmentAggregator to reconcile StreamSegmentSealOperations.
*/
@Test
public void testReconcileSeal() throws Exception {
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();

    // The seal actually succeeds in Storage, but we throw an error so the caller believes it didn't.
    context.storage.setSealInterceptor((segmentName, storage) -> {
        storage.seal(writeHandle(segmentName), TIMEOUT).join();
        throw new IntentionalException(String.format("S=%s", segmentName));
    });

    // Attempt to seal.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);

    // First time: attempt to flush/seal, which must end in failure.
    AssertExtensions.assertThrows(
            "IntentionalException did not propagate to flush() caller.",
            () -> context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS),
            ex -> Exceptions.unwrap(ex) instanceof IntentionalException);
    context.storage.setSealInterceptor(null);

    // Second time: we are in reconciliation mode, so flush() must succeed (and update the internal state based on Storage).
    context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Verify the outcome.
    Assert.assertTrue("Segment not marked as sealed in Storage (in metadata).", context.segmentAggregator.getMetadata().isSealedInStorage());
    Assert.assertTrue("SegmentAggregator not closed.", context.segmentAggregator.isClosed());
}
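The interceptor trick above (perform the operation, then report a failure) is what forces the aggregator into reconciliation: on the retry it finds the segment already sealed in Storage and updates its own state instead of failing again. A generic sketch of the same pattern, with hypothetical names rather than the real TestStorage hook:

import java.util.function.BiConsumer;

// Hypothetical storage wrapper illustrating the "succeed, then report failure" hook.
class InterceptingStorageSketch {
    private volatile BiConsumer<String, InterceptingStorageSketch> sealInterceptor;
    private volatile boolean sealed;

    void setSealInterceptor(BiConsumer<String, InterceptingStorageSketch> interceptor) {
        this.sealInterceptor = interceptor;
    }

    // The actual seal; an interceptor may invoke this directly before throwing.
    void sealInternal(String segmentName) {
        this.sealed = true;
    }

    void seal(String segmentName) {
        BiConsumer<String, InterceptingStorageSketch> interceptor = this.sealInterceptor;
        if (interceptor != null) {
            // A test interceptor can call sealInternal() and then throw, so the caller
            // observes a failure even though the segment is already sealed.
            interceptor.accept(segmentName, this);
        }
        sealInternal(segmentName);
    }

    boolean isSealed() {
        return this.sealed;
    }
}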
Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.
The class OperationProcessorTests, method testWithInvalidOperations.
/**
* Tests the ability of the OperationProcessor to process Operations when encountering invalid operations (such as
* appends to StreamSegments that do not exist or to those that are sealed). This covers the following exceptions:
* * StreamSegmentNotExistsException
* * StreamSegmentSealedException
* * General MetadataUpdateException.
*/
@Test
public void testWithInvalidOperations() throws Exception {
    int streamSegmentCount = 10;
    int appendsPerStreamSegment = 40;
    // We are going to prematurely seal this StreamSegment.
    long sealedStreamSegmentId = 6;
    // We are going to prematurely mark this StreamSegment as deleted.
    long deletedStreamSegmentId = 8;
    // This is a bogus StreamSegment that does not exist.
    long nonExistentStreamSegmentId;
    @Cleanup TestContext context = new TestContext();

    // Generate some test data (no need to complicate ourselves with Transactions here; that is tested in the no-failure test).
    HashSet<Long> streamSegmentIds = createStreamSegmentsInMetadata(streamSegmentCount, context.metadata);
    nonExistentStreamSegmentId = streamSegmentIds.size();
    streamSegmentIds.add(nonExistentStreamSegmentId);
    context.metadata.getStreamSegmentMetadata(sealedStreamSegmentId).markSealed();
    context.metadata.getStreamSegmentMetadata(deletedStreamSegmentId).markDeleted();
    List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);

    // Set up an OperationProcessor and start it.
    @Cleanup TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, MAX_DATA_LOG_APPEND_SIZE, executorService());
    dataLog.initialize(TIMEOUT);
    @Cleanup OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater, dataLog, getNoOpCheckpointPolicy(), executorService());
    operationProcessor.startAsync().awaitRunning();

    // Process all generated operations.
    List<OperationWithCompletion> completionFutures = processOperations(operations, operationProcessor);

    // Wait for all such operations to complete. We are expecting exceptions, so verify that we get them.
    AssertExtensions.assertThrows(
            "No operations failed.",
            OperationWithCompletion.allOf(completionFutures)::join,
            ex -> ex instanceof MetadataUpdateException || ex instanceof StreamSegmentException);

    HashSet<Long> streamSegmentsWithNoContents = new HashSet<>();
    streamSegmentsWithNoContents.add(sealedStreamSegmentId);
    streamSegmentsWithNoContents.add(deletedStreamSegmentId);
    streamSegmentsWithNoContents.add(nonExistentStreamSegmentId);

    // Verify that the "right" operations failed, while the others succeeded.
    for (OperationWithCompletion oc : completionFutures) {
        if (oc.operation instanceof StorageOperation) {
            long streamSegmentId = ((StorageOperation) oc.operation).getStreamSegmentId();
            if (streamSegmentsWithNoContents.contains(streamSegmentId)) {
                Assert.assertTrue("Completion future for invalid StreamSegment " + streamSegmentId + " did not complete exceptionally.", oc.completion.isCompletedExceptionally());
                Predicate<Throwable> errorValidator;
                if (streamSegmentId == sealedStreamSegmentId) {
                    errorValidator = ex -> ex instanceof StreamSegmentSealedException;
                } else if (streamSegmentId == deletedStreamSegmentId) {
                    errorValidator = ex -> ex instanceof StreamSegmentNotExistsException;
                } else {
                    errorValidator = ex -> ex instanceof MetadataUpdateException;
                }
                AssertExtensions.assertThrows("Unexpected exception for failed Operation.", oc.completion::join, errorValidator);
                continue;
            }
        }

        // If we get here, we must verify no exception was thrown.
        oc.completion.join();
    }

    performLogOperationChecks(completionFutures, context.memoryLog, dataLog, context.metadata);
    performMetadataChecks(streamSegmentIds, streamSegmentsWithNoContents, new HashMap<>(), completionFutures, context.metadata, false, false);
    performReadIndexChecks(completionFutures, context.readIndex);
    operationProcessor.stopAsync().awaitTerminated();
}
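The if/else chain that selects errorValidator can also be written as a lookup table keyed by segment id. A small sketch of that alternative (this helper class is hypothetical; the exception classes it would be populated with are the ones the test already expects):

import java.util.Map;
import java.util.function.Predicate;

// Hypothetical helper: maps a failing segment id to the exception class expected for it.
class ExpectedErrorsSketch {
    private final Map<Long, Class<? extends Throwable>> expected;

    ExpectedErrorsSketch(Map<Long, Class<? extends Throwable>> expected) {
        this.expected = expected;
    }

    Predicate<Throwable> validatorFor(long segmentId) {
        Class<? extends Throwable> expectedClass = this.expected.get(segmentId);
        // Unknown ids accept nothing, so an unexpected failure still trips the assertion.
        return ex -> expectedClass != null && expectedClass.isInstance(ex);
    }
}

It would be populated with Map.of(sealedStreamSegmentId, StreamSegmentSealedException.class, deletedStreamSegmentId, StreamSegmentNotExistsException.class, nonExistentStreamSegmentId, MetadataUpdateException.class).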
Use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.
The class SegmentAggregator, method reconcileMergeOperation.
/**
 * Attempts to reconcile the given MergeTransactionOperation.
 *
 * @param op          The Operation to reconcile.
 * @param storageInfo The current state of the Segment in Storage.
 * @param timer       Timer for the operation.
 * @return A CompletableFuture containing a FlushResult with the number of bytes reconciled, or failed with a
 * ReconciliationFailureException if the operation cannot be reconciled, based on the in-memory metadata or the
 * current state of the Segment in Storage.
 */
private CompletableFuture<FlushResult> reconcileMergeOperation(MergeTransactionOperation op, SegmentProperties storageInfo, TimeoutTimer timer) {
    // Verify that the transaction segment is still registered in the metadata.
    UpdateableSegmentMetadata transactionMeta = this.dataSource.getStreamSegmentMetadata(op.getTransactionSegmentId());
    if (transactionMeta == null || transactionMeta.isDeleted()) {
        return Futures.failedFuture(new ReconciliationFailureException(
                String.format("Cannot reconcile operation '%s' because the transaction segment is deleted or missing from the metadata.", op),
                this.metadata, storageInfo));
    }

    // Verify that the operation fits fully within this segment (mergers are atomic - they either merge everything or nothing).
    if (op.getLastStreamSegmentOffset() > storageInfo.getLength()) {
        return Futures.failedFuture(new ReconciliationFailureException(
                String.format("Cannot reconcile operation '%s' because the transaction segment is not fully merged into the parent.", op),
                this.metadata, storageInfo));
    }

    // Verify that the transaction segment no longer exists in Storage.
    return this.storage.exists(transactionMeta.getName(), timer.getRemaining()).thenApplyAsync(exists -> {
        if (exists) {
            throw new CompletionException(new ReconciliationFailureException(
                    String.format("Cannot reconcile operation '%s' because the transaction segment still exists in Storage.", op),
                    this.metadata, storageInfo));
        }

        // Pop the first operation off the list and update the metadata for the transaction segment.
        StorageOperation processedOperation = this.operations.removeFirst();
        assert processedOperation instanceof MergeTransactionOperation : "First outstanding operation was not a MergeTransactionOperation";
        int newCount = this.mergeTransactionCount.decrementAndGet();
        assert newCount >= 0 : "Negative value for mergeTransactionCount";
        updateMetadataForTransactionPostMerger(transactionMeta);
        return new FlushResult().withMergedBytes(op.getLength());
    }, this.executor);
}
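updateMetadataForTransactionPostMerger is not shown in this excerpt. A plausible minimal version, sketched here as an assumption based on the surrounding reconciliation logic rather than the actual Pravega implementation:

// Hypothetical stand-in for the metadata interface, kept minimal so the sketch is self-contained.
interface TransactionMetadataSketch {
    void markDeleted();
}

class PostMergerSketch {
    // Assumed behavior: once the merge is reconciled, the transaction segment no longer
    // exists in Storage, so mark it deleted in the in-memory metadata. The real method
    // may also do more, such as notifying the data source that the merge completed.
    void updateMetadataForTransactionPostMerger(TransactionMetadataSketch transactionMeta) {
        transactionMeta.markDeleted();
    }
}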