Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
The class DurableLogTests, method testTruncateWithRecovery.
/**
 * Tests the truncate() method while performing recovery.
 */
@Test
public void testTruncateWithRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;

    // Set up a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup
    TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup
    Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup
    InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup
    CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup
    ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    List<Operation> originalOperations;

    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();

        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();

        // Get a list of all the operations, before any truncation.
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());

        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }

    // Truncate up to each MetadataCheckpointOperation and:
    // * If the DataLog was truncated:
    // ** Shut down the DurableLog, re-start it (forcing recovery) and verify the remaining operations are as expected.
    // At the end, verify all operations and all entries in the DataLog were truncated.
    DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
    try {
        durableLog.startAsync().awaitRunning();
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            if (!(currentOperation instanceof MetadataCheckpointOperation)) {
                // We can only truncate on MetadataCheckpointOperations.
                continue;
            }

            truncationOccurred.set(false);
            durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
            if (truncationOccurred.get()) {
                // Close the current DurableLog and start a brand new one, forcing recovery.
                durableLog.close();
                durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
                durableLog.startAsync().awaitRunning();
                dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));

                // Verify all operations up to, and including, this one have been removed.
                Iterator<Operation> reader = durableLog.read(-1, 2, TIMEOUT).join();
                Assert.assertTrue("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.hasNext());
                verifyFirstItemIsMetadataCheckpoint(reader);
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.next();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                }
            }
        }
    } finally {
        // This closes whichever instance the variable currently refers to, not necessarily the first one.
        durableLog.close();
    }
}
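The truncate-and-recover cycle in the loop above can be distilled into a small helper. The sketch below is illustrative only: it reuses the constructor, truncate(), read() and lifecycle calls shown in the test, but the helper name and its parameters are made up for this example and are not part of the Pravega codebase.

// Hypothetical helper (name and signature assumed): truncate at a checkpoint's sequence number,
// then force recovery by replacing the DurableLog instance, and return the recovered log.
private DurableLog truncateAndRecover(DurableLog durableLog, MetadataCheckpointOperation checkpoint,
                                      UpdateableContainerMetadata metadata,
                                      TestDurableDataLogFactory dataLogFactory, ReadIndex readIndex) {
    // Truncation is only valid at a MetadataCheckpointOperation (see the loop above).
    durableLog.truncate(checkpoint.getSequenceNumber(), TIMEOUT).join();

    // Close the current instance and start a fresh one, which recovers from the (now truncated) DurableDataLog.
    durableLog.close();
    DurableLog recovered = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
    recovered.startAsync().awaitRunning();

    // After recovery, the log must not be empty: a MetadataCheckpointOperation always remains at the head.
    Iterator<Operation> reader = recovered.read(-1, 1, TIMEOUT).join();
    Assert.assertTrue("Log should not be empty after truncation.", reader.hasNext());
    return recovered;
}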
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
The class StreamSegmentContainer, method mergeTransaction.
@Override
public CompletableFuture<Void> mergeTransaction(String transactionName, Duration timeout) {
    ensureRunning();
    logRequest("mergeTransaction", transactionName);
    this.metrics.mergeTxn();
    TimeoutTimer timer = new TimeoutTimer(timeout);
    return this.segmentMapper.getOrAssignStreamSegmentId(transactionName, timer.getRemaining(), transactionId -> {
        SegmentMetadata transactionMetadata = this.metadata.getStreamSegmentMetadata(transactionId);
        if (transactionMetadata == null) {
            throw new CompletionException(new StreamSegmentNotExistsException(transactionName));
        }

        Operation op = new MergeTransactionOperation(transactionMetadata.getParentId(), transactionMetadata.getId());
        return this.durableLog.add(op, timer.getRemaining());
    }).thenComposeAsync(v -> this.stateStore.remove(transactionName, timer.getRemaining()), this.executor);
}
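For context, a caller-side sketch of the method above: mergeTransaction maps the Transaction name to a Segment id, appends a MergeTransactionOperation to the DurableLog, and finally removes the Transaction's entry from the state store; the returned future completes only after all of these steps. The container variable, Transaction name and timeout below are assumptions made for illustration.

// Hypothetical usage (names assumed): merge a Transaction into its parent StreamSegment.
CompletableFuture<Void> merge = container.mergeTransaction("a-transaction-segment-name", Duration.ofSeconds(30));
merge.whenComplete((v, ex) -> {
    if (ex != null && ex.getCause() instanceof StreamSegmentNotExistsException) {
        // As in the method above, a missing Transaction surfaces as a StreamSegmentNotExistsException
        // (wrapped in a CompletionException) rather than completing normally.
    }
});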
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
The class MemoryStateUpdaterTests, method testRecoveryMode.
/**
 * Tests the ability of the MemoryStateUpdater to delegate Enter/Exit recovery mode calls to the ReadIndex.
 */
@Test
public void testRecoveryMode() throws Exception {
    // Check that the calls are properly delegated to the ReadIndex.
    SequencedItemList<Operation> opLog = new SequencedItemList<>();
    ArrayList<TestReadIndex.MethodInvocation> methodInvocations = new ArrayList<>();
    TestReadIndex readIndex = new TestReadIndex(methodInvocations::add);
    MemoryStateUpdater updater = new MemoryStateUpdater(opLog, readIndex, Runnables.doNothing());
    UpdateableContainerMetadata metadata1 = new MetadataBuilder(1).build();
    updater.enterRecoveryMode(metadata1);
    updater.exitRecoveryMode(true);

    Assert.assertEquals("Unexpected number of method invocations.", 2, methodInvocations.size());
    TestReadIndex.MethodInvocation enterRecovery = methodInvocations.get(0);
    Assert.assertEquals("ReadIndex.enterRecoveryMode was not called when expected.", TestReadIndex.ENTER_RECOVERY_MODE, enterRecovery.methodName);
    Assert.assertEquals("ReadIndex.enterRecoveryMode was called with the wrong arguments.", metadata1, enterRecovery.args.get("recoveryMetadataSource"));
    TestReadIndex.MethodInvocation exitRecovery = methodInvocations.get(1);
    Assert.assertEquals("ReadIndex.exitRecoveryMode was not called when expected.", TestReadIndex.EXIT_RECOVERY_MODE, exitRecovery.methodName);
    Assert.assertEquals("ReadIndex.exitRecoveryMode was called with the wrong arguments.", true, exitRecovery.args.get("successfulRecovery"));
}
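The same Enter/Exit calls, as a caller would typically pair them. This is only a minimal sketch: it assumes an existing MemoryStateUpdater named updater and a recovery-time metadata view named recoveryMetadata, and the replay step is elided.

// Minimal sketch of the recovery-mode handshake verified above. The boolean passed to
// exitRecoveryMode(...) indicates whether recovery succeeded and, as the test shows, is
// forwarded to ReadIndex.exitRecoveryMode as "successfulRecovery".
updater.enterRecoveryMode(recoveryMetadata);
boolean successfulRecovery = false;
try {
    // ... replay the recovered Operations here ...
    successfulRecovery = true;
} finally {
    updater.exitRecoveryMode(successfulRecovery);
}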
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
The class OperationProcessorTests, method testWithInvalidOperations.
/**
 * Tests the ability of the OperationProcessor to process Operations when encountering invalid operations (such as
 * appends to StreamSegments that do not exist or to those that are sealed). This covers the following exceptions:
 * * StreamSegmentNotExistsException
 * * StreamSegmentSealedException
 * * General MetadataUpdateException.
 */
@Test
public void testWithInvalidOperations() throws Exception {
    int streamSegmentCount = 10;
    int appendsPerStreamSegment = 40;

    // We are going to prematurely seal this StreamSegment.
    long sealedStreamSegmentId = 6;

    // We are going to prematurely mark this StreamSegment as deleted.
    long deletedStreamSegmentId = 8;

    // This is a bogus StreamSegment that does not exist.
    long nonExistentStreamSegmentId;
    @Cleanup
    TestContext context = new TestContext();

    // Generate some test data (no need to complicate ourselves with Transactions here; that is tested in the no-failure test).
    HashSet<Long> streamSegmentIds = createStreamSegmentsInMetadata(streamSegmentCount, context.metadata);
    nonExistentStreamSegmentId = streamSegmentIds.size();
    streamSegmentIds.add(nonExistentStreamSegmentId);
    context.metadata.getStreamSegmentMetadata(sealedStreamSegmentId).markSealed();
    context.metadata.getStreamSegmentMetadata(deletedStreamSegmentId).markDeleted();
    List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);

    // Set up an OperationProcessor and start it.
    @Cleanup
    TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, MAX_DATA_LOG_APPEND_SIZE, executorService());
    dataLog.initialize(TIMEOUT);
    @Cleanup
    OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater, dataLog, getNoOpCheckpointPolicy(), executorService());
    operationProcessor.startAsync().awaitRunning();

    // Process all generated operations.
    List<OperationWithCompletion> completionFutures = processOperations(operations, operationProcessor);

    // Wait for all such operations to complete. We are expecting exceptions, so verify that we do get them.
    AssertExtensions.assertThrows("No operations failed.", OperationWithCompletion.allOf(completionFutures)::join, ex -> ex instanceof MetadataUpdateException || ex instanceof StreamSegmentException);
    HashSet<Long> streamSegmentsWithNoContents = new HashSet<>();
    streamSegmentsWithNoContents.add(sealedStreamSegmentId);
    streamSegmentsWithNoContents.add(deletedStreamSegmentId);
    streamSegmentsWithNoContents.add(nonExistentStreamSegmentId);

    // Verify that the "right" operations failed, while the others succeeded.
    for (OperationWithCompletion oc : completionFutures) {
        if (oc.operation instanceof StorageOperation) {
            long streamSegmentId = ((StorageOperation) oc.operation).getStreamSegmentId();
            if (streamSegmentsWithNoContents.contains(streamSegmentId)) {
                Assert.assertTrue("Completion future for invalid StreamSegment " + streamSegmentId + " did not complete exceptionally.", oc.completion.isCompletedExceptionally());
                Predicate<Throwable> errorValidator;
                if (streamSegmentId == sealedStreamSegmentId) {
                    errorValidator = ex -> ex instanceof StreamSegmentSealedException;
                } else if (streamSegmentId == deletedStreamSegmentId) {
                    errorValidator = ex -> ex instanceof StreamSegmentNotExistsException;
                } else {
                    errorValidator = ex -> ex instanceof MetadataUpdateException;
                }

                AssertExtensions.assertThrows("Unexpected exception for failed Operation.", oc.completion::join, errorValidator);
                continue;
            }
        }

        // If we get here, we must verify no exception was thrown.
        oc.completion.join();
    }
    performLogOperationChecks(completionFutures, context.memoryLog, dataLog, context.metadata);
    performMetadataChecks(streamSegmentIds, streamSegmentsWithNoContents, new HashMap<>(), completionFutures, context.metadata, false, false);
    performReadIndexChecks(completionFutures, context.readIndex);
    operationProcessor.stopAsync().awaitTerminated();
}
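The if/else chain above pairs each invalid StreamSegment with the exception its appends are expected to fail with. Factored out as a helper (the method name and parameters below are made up for this example), the mapping reads:

// Hypothetical helper: choose the expected failure for an append, based on the Segment's state.
private Predicate<Throwable> expectedAppendFailure(long streamSegmentId, long sealedId, long deletedId) {
    if (streamSegmentId == sealedId) {
        return ex -> ex instanceof StreamSegmentSealedException;      // Appends to a sealed Segment.
    } else if (streamSegmentId == deletedId) {
        return ex -> ex instanceof StreamSegmentNotExistsException;   // Appends to a deleted Segment.
    } else {
        return ex -> ex instanceof MetadataUpdateException;           // Appends to an unknown Segment id.
    }
}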
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
The class OperationProcessorTests, method testWithOperationSerializationFailures.
/**
 * Tests the ability of the OperationProcessor to process Operations when serialization errors happen.
 */
@Test
public void testWithOperationSerializationFailures() throws Exception {
    int streamSegmentCount = 10;
    int appendsPerStreamSegment = 80;

    // Fail every X appends encountered.
    int failAppendFrequency = 7;
    @Cleanup
    TestContext context = new TestContext();

    // Generate some test data (no need to complicate ourselves with Transactions here; that is tested in the no-failure test).
    HashSet<Long> streamSegmentIds = createStreamSegmentsInMetadata(streamSegmentCount, context.metadata);
    List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);

    // Replace some of the Append Operations with a FailedStreamSegmentAppendOperation. Some operations fail at the
    // beginning, some at the end of the serialization.
    int appendCount = 0;
    HashSet<Integer> failedOperationIndices = new HashSet<>();
    for (int i = 0; i < operations.size(); i++) {
        if (operations.get(i) instanceof StreamSegmentAppendOperation) {
            if ((appendCount++) % failAppendFrequency == 0) {
                operations.set(i, new FailedStreamSegmentAppendOperation((StreamSegmentAppendOperation) operations.get(i)));
                failedOperationIndices.add(i);
            }
        }
    }

    // Set up an OperationProcessor and start it.
    @Cleanup
    TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, MAX_DATA_LOG_APPEND_SIZE, executorService());
    dataLog.initialize(TIMEOUT);
    @Cleanup
    OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater, dataLog, getNoOpCheckpointPolicy(), executorService());
    operationProcessor.startAsync().awaitRunning();

    // Process all generated operations.
    List<OperationWithCompletion> completionFutures = processOperations(operations, operationProcessor);

    // Wait for all such operations to complete. We are expecting exceptions, so verify that we do get them.
    AssertExtensions.assertThrows("No operations failed.", OperationWithCompletion.allOf(completionFutures)::join, ex -> ex instanceof IntentionalException);

    // Verify that the "right" operations failed, while the others succeeded.
    for (int i = 0; i < completionFutures.size(); i++) {
        OperationWithCompletion oc = completionFutures.get(i);
        if (failedOperationIndices.contains(i)) {
            AssertExtensions.assertThrows("Unexpected exception for failed Operation.", oc.completion::join, ex -> ex instanceof IntentionalException);
        } else {
            // Verify no exception was thrown.
            oc.completion.join();
        }
    }
    performLogOperationChecks(completionFutures, context.memoryLog, dataLog, context.metadata);
    performMetadataChecks(streamSegmentIds, new HashSet<>(), new HashMap<>(), completionFutures, context.metadata, false, false);
    performReadIndexChecks(completionFutures, context.readIndex);
    operationProcessor.stopAsync().awaitTerminated();
}
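The verification loop above generalizes to any fault-injection test that records which operation indices were tampered with. A possible extraction (the helper name is assumed; the behavior is copied from the loop above):

// Hypothetical helper: operations at the recorded indices must fail with IntentionalException
// (raised by FailedStreamSegmentAppendOperation during serialization); all others must succeed.
private void verifyInjectedFailures(List<OperationWithCompletion> completionFutures, Set<Integer> failedOperationIndices) {
    for (int i = 0; i < completionFutures.size(); i++) {
        OperationWithCompletion oc = completionFutures.get(i);
        if (failedOperationIndices.contains(i)) {
            AssertExtensions.assertThrows("Unexpected exception for failed Operation.",
                    oc.completion::join, ex -> ex instanceof IntentionalException);
        } else {
            // Unaffected Operations must complete without throwing.
            oc.completion.join();
        }
    }
}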