Example 16 with TestDurableDataLog

Use of io.pravega.segmentstore.server.TestDurableDataLog in project pravega by pravega.

From the class DurableLogTests, method testTruncateWithRecovery.

/**
 * Tests the truncate() method while performing recovery.
 */
@Test
public void testTruncateWithRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Setup a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    List<Operation> originalOperations;
    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Get a list of all the operations, before any truncation.
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Truncate up to each MetadataCheckpointOperation and:
    // * If the DataLog was truncated:
    // ** Shut down the DurableLog, restart it (forcing recovery), and verify the remaining operations are as expected.
    // At the end, verify all operations and all entries in the DataLog were truncated.
    DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
    try {
        durableLog.startAsync().awaitRunning();
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            if (!(currentOperation instanceof MetadataCheckpointOperation)) {
                // We can only truncate on MetadataCheckpointOperations.
                continue;
            }
            truncationOccurred.set(false);
            durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
            if (truncationOccurred.get()) {
                // Close current DurableLog and start a brand new one, forcing recovery.
                durableLog.close();
                durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
                durableLog.startAsync().awaitRunning();
                dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
                // Verify all operations up to, and including this one have been removed.
                Iterator<Operation> reader = durableLog.read(-1, 2, TIMEOUT).join();
                Assert.assertTrue("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.hasNext());
                verifyFirstItemIsMetadataCheckpoint(reader);
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.next();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                }
            }
        }
    } finally {
        // This closes whatever current instance this variable refers to, not necessarily the first one.
        durableLog.close();
    }
}
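
The test above never constructs the TestDurableDataLog directly; it captures it by passing a creation callback (dataLog::set) to TestDurableDataLogFactory, then hooks truncation detection through setTruncateCallback. Below is a minimal sketch of that capture-by-callback pattern under hypothetical names (CapturingFactory is not a Pravega class; the real factory does considerably more):

import java.util.function.Consumer;
import java.util.function.Supplier;

// Hypothetical illustration of the capture-by-callback pattern used above:
// the factory reports every instance it creates, so a test can hold a
// reference to an object it never builds directly.
final class CapturingFactory<T> {
    private final Supplier<T> constructor;
    private final Consumer<T> onCreate;

    CapturingFactory(Supplier<T> constructor, Consumer<T> onCreate) {
        this.constructor = constructor;
        this.onCreate = onCreate;
    }

    T create() {
        T instance = this.constructor.get();
        // Mirrors TestDurableDataLogFactory invoking dataLog::set in the test.
        this.onCreate.accept(instance);
        return instance;
    }
}

// Usage (hypothetical): store the created instance in an AtomicReference,
// exactly as the test does with dataLog::set, e.g.
//   AtomicReference<MyLog> captured = new AtomicReference<>();
//   new CapturingFactory<>(MyLog::new, captured::set).create();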
Also used : TestDurableDataLog(io.pravega.segmentstore.server.TestDurableDataLog) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) ProbeOperation(io.pravega.segmentstore.server.logs.operations.ProbeOperation) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Cleanup(lombok.Cleanup) CacheManager(io.pravega.segmentstore.server.reading.CacheManager) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) ReadIndex(io.pravega.segmentstore.server.ReadIndex) InMemoryCacheFactory(io.pravega.segmentstore.storage.mocks.InMemoryCacheFactory) AtomicReference(java.util.concurrent.atomic.AtomicReference) InMemoryDurableDataLogFactory(io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory) Storage(io.pravega.segmentstore.storage.Storage) TestDurableDataLogFactory(io.pravega.segmentstore.server.TestDurableDataLogFactory) Test(org.junit.Test)

Example 17 with TestDurableDataLog

Use of io.pravega.segmentstore.server.TestDurableDataLog in project pravega by pravega.

From the class OperationProcessorTests, method testWithInvalidOperations.

/**
 * Tests the ability of the OperationProcessor to process Operations when encountering invalid operations (such as
 * appends to StreamSegments that do not exist or to those that are sealed). This covers the following exceptions:
 * * StreamSegmentNotExistsException
 * * StreamSegmentSealedException
 * * General MetadataUpdateException.
 */
@Test
public void testWithInvalidOperations() throws Exception {
    int streamSegmentCount = 10;
    int appendsPerStreamSegment = 40;
    // We are going to prematurely seal this StreamSegment.
    long sealedStreamSegmentId = 6;
    // We are going to prematurely mark this StreamSegment as deleted.
    long deletedStreamSegmentId = 8;
    // This is a bogus StreamSegment, that does not exist.
    long nonExistentStreamSegmentId;
    @Cleanup TestContext context = new TestContext();
    // Generate some test data (no need to complicate ourselves with Transactions here; that is tested in the no-failure test).
    HashSet<Long> streamSegmentIds = createStreamSegmentsInMetadata(streamSegmentCount, context.metadata);
    nonExistentStreamSegmentId = streamSegmentIds.size();
    streamSegmentIds.add(nonExistentStreamSegmentId);
    context.metadata.getStreamSegmentMetadata(sealedStreamSegmentId).markSealed();
    context.metadata.getStreamSegmentMetadata(deletedStreamSegmentId).markDeleted();
    List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
    // Setup an OperationProcessor and start it.
    @Cleanup TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, MAX_DATA_LOG_APPEND_SIZE, executorService());
    dataLog.initialize(TIMEOUT);
    @Cleanup OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater, dataLog, getNoOpCheckpointPolicy(), executorService());
    operationProcessor.startAsync().awaitRunning();
    // Process all generated operations.
    List<OperationWithCompletion> completionFutures = processOperations(operations, operationProcessor);
    // Wait for all such operations to complete. We are expecting exceptions, so verify that we do.
    AssertExtensions.assertThrows("No operations failed.", OperationWithCompletion.allOf(completionFutures)::join, ex -> ex instanceof MetadataUpdateException || ex instanceof StreamSegmentException);
    HashSet<Long> streamSegmentsWithNoContents = new HashSet<>();
    streamSegmentsWithNoContents.add(sealedStreamSegmentId);
    streamSegmentsWithNoContents.add(deletedStreamSegmentId);
    streamSegmentsWithNoContents.add(nonExistentStreamSegmentId);
    // Verify that the "right" operations failed, while the others succeeded.
    for (OperationWithCompletion oc : completionFutures) {
        if (oc.operation instanceof StorageOperation) {
            long streamSegmentId = ((StorageOperation) oc.operation).getStreamSegmentId();
            if (streamSegmentsWithNoContents.contains(streamSegmentId)) {
                Assert.assertTrue("Completion future for invalid StreamSegment " + streamSegmentId + " did not complete exceptionally.", oc.completion.isCompletedExceptionally());
                Predicate<Throwable> errorValidator;
                if (streamSegmentId == sealedStreamSegmentId) {
                    errorValidator = ex -> ex instanceof StreamSegmentSealedException;
                } else if (streamSegmentId == deletedStreamSegmentId) {
                    errorValidator = ex -> ex instanceof StreamSegmentNotExistsException;
                } else {
                    errorValidator = ex -> ex instanceof MetadataUpdateException;
                }
                AssertExtensions.assertThrows("Unexpected exception for failed Operation.", oc.completion::join, errorValidator);
                continue;
            }
        }
        // If we get here, we must verify no exception was thrown.
        oc.completion.join();
    }
    performLogOperationChecks(completionFutures, context.memoryLog, dataLog, context.metadata);
    performMetadataChecks(streamSegmentIds, streamSegmentsWithNoContents, new HashMap<>(), completionFutures, context.metadata, false, false);
    performReadIndexChecks(completionFutures, context.readIndex);
    operationProcessor.stopAsync().awaitTerminated();
}
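
The errorValidator branch above encodes the expected failure for each class of bad segment. As a standalone illustration, the same selection can be written as a small factory of Predicate<Throwable> values; the helper name is hypothetical, and MetadataUpdateException is assumed to sit alongside these tests in io.pravega.segmentstore.server.logs:

import io.pravega.segmentstore.contracts.StreamSegmentNotExistsException;
import io.pravega.segmentstore.contracts.StreamSegmentSealedException;
import java.util.function.Predicate;

// Hypothetical helper mirroring the errorValidator selection above. The
// exception types are Pravega's; the class itself is illustrative only.
// Assumes it lives in the same package as MetadataUpdateException.
final class ExpectedFailure {
    static Predicate<Throwable> forSegment(long segmentId, long sealedId, long deletedId) {
        if (segmentId == sealedId) {
            // Appends to a sealed segment are rejected with this exception.
            return ex -> ex instanceof StreamSegmentSealedException;
        } else if (segmentId == deletedId) {
            // Deleted segments behave as if they do not exist.
            return ex -> ex instanceof StreamSegmentNotExistsException;
        } else {
            // Unmapped (bogus) segment ids fail metadata validation.
            return ex -> ex instanceof MetadataUpdateException;
        }
    }
}

In the verification loop above, the predicate chosen this way is exactly what AssertExtensions.assertThrows evaluates against each failed completion.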
Also used : OperationSerializer(io.pravega.segmentstore.server.logs.operations.OperationSerializer) Storage(io.pravega.segmentstore.storage.Storage) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) AssertExtensions(io.pravega.test.common.AssertExtensions) ProbeOperation(io.pravega.segmentstore.server.logs.operations.ProbeOperation) RequiredArgsConstructor(lombok.RequiredArgsConstructor) Cleanup(lombok.Cleanup) LogAddress(io.pravega.segmentstore.storage.LogAddress) ArrayView(io.pravega.common.util.ArrayView) SequencedItemList(io.pravega.common.util.SequencedItemList) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) InMemoryStorageFactory(io.pravega.segmentstore.storage.mocks.InMemoryStorageFactory) Duration(java.time.Duration) Operation(io.pravega.segmentstore.server.logs.operations.Operation) CloseableIterator(io.pravega.common.util.CloseableIterator) CacheFactory(io.pravega.segmentstore.storage.CacheFactory) ServiceListeners(io.pravega.segmentstore.server.ServiceListeners) CancellationException(java.util.concurrent.CancellationException) Predicate(java.util.function.Predicate) Collection(java.util.Collection) CacheManager(io.pravega.segmentstore.server.reading.CacheManager) CompletionException(java.util.concurrent.CompletionException) DataLogWriterNotPrimaryException(io.pravega.segmentstore.storage.DataLogWriterNotPrimaryException) Collectors(java.util.stream.Collectors) ErrorInjector(io.pravega.test.common.ErrorInjector) List(java.util.List) InMemoryCacheFactory(io.pravega.segmentstore.storage.mocks.InMemoryCacheFactory) ObjectClosedException(io.pravega.common.ObjectClosedException) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) StreamSegmentException(io.pravega.segmentstore.contracts.StreamSegmentException) ConfigHelpers(io.pravega.segmentstore.server.ConfigHelpers) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) Supplier(java.util.function.Supplier) TruncationMarkerRepository(io.pravega.segmentstore.server.TruncationMarkerRepository) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) Runnables(com.google.common.util.concurrent.Runnables) ReadIndexConfig(io.pravega.segmentstore.server.reading.ReadIndexConfig) Timeout(org.junit.rules.Timeout) DurableDataLogException(io.pravega.segmentstore.storage.DurableDataLogException) OperationComparer(io.pravega.segmentstore.server.logs.operations.OperationComparer) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) DurableDataLog(io.pravega.segmentstore.storage.DurableDataLog) Iterator(java.util.Iterator) IntentionalException(io.pravega.test.common.IntentionalException) lombok.val(lombok.val) IOException(java.io.IOException) Test(org.junit.Test) TestDurableDataLog(io.pravega.segmentstore.server.TestDurableDataLog) Service(com.google.common.util.concurrent.Service) TimeUnit(java.util.concurrent.TimeUnit) AbstractMap(java.util.AbstractMap) Rule(org.junit.Rule) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) QueueStats(io.pravega.segmentstore.storage.QueueStats) ReadIndex(io.pravega.segmentstore.server.ReadIndex) Assert(org.junit.Assert) Collections(java.util.Collections)

Example 18 with TestDurableDataLog

Use of io.pravega.segmentstore.server.TestDurableDataLog in project pravega by pravega.

From the class OperationProcessorTests, method testWithOperationSerializationFailures.

/**
 * Tests the ability of the OperationProcessor to process Operations when serialization errors occur.
 */
@Test
public void testWithOperationSerializationFailures() throws Exception {
    int streamSegmentCount = 10;
    int appendsPerStreamSegment = 80;
    // Fail every X appends encountered.
    int failAppendFrequency = 7;
    @Cleanup TestContext context = new TestContext();
    // Generate some test data (no need to complicate ourselves with Transactions here; that is tested in the no-failure test).
    HashSet<Long> streamSegmentIds = createStreamSegmentsInMetadata(streamSegmentCount, context.metadata);
    List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
    // Replace some of the Append Operations with a FailedAppendOperation. Some operations fail at the beginning,
    // some at the end of the serialization.
    int appendCount = 0;
    HashSet<Integer> failedOperationIndices = new HashSet<>();
    for (int i = 0; i < operations.size(); i++) {
        if (operations.get(i) instanceof StreamSegmentAppendOperation) {
            if ((appendCount++) % failAppendFrequency == 0) {
                operations.set(i, new FailedStreamSegmentAppendOperation((StreamSegmentAppendOperation) operations.get(i)));
                failedOperationIndices.add(i);
            }
        }
    }
    // Setup an OperationProcessor and start it.
    @Cleanup TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, MAX_DATA_LOG_APPEND_SIZE, executorService());
    dataLog.initialize(TIMEOUT);
    @Cleanup OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater, dataLog, getNoOpCheckpointPolicy(), executorService());
    operationProcessor.startAsync().awaitRunning();
    // Process all generated operations.
    List<OperationWithCompletion> completionFutures = processOperations(operations, operationProcessor);
    // Wait for all such operations to complete. We are expecting exceptions, so verify that we do.
    AssertExtensions.assertThrows("No operations failed.", OperationWithCompletion.allOf(completionFutures)::join, ex -> ex instanceof IntentionalException);
    // Verify that the "right" operations failed, while the others succeeded.
    for (int i = 0; i < completionFutures.size(); i++) {
        OperationWithCompletion oc = completionFutures.get(i);
        if (failedOperationIndices.contains(i)) {
            AssertExtensions.assertThrows("Unexpected exception for failed Operation.", oc.completion::join, ex -> ex instanceof IntentionalException);
        } else {
            // Verify no exception was thrown.
            oc.completion.join();
        }
    }
    performLogOperationChecks(completionFutures, context.memoryLog, dataLog, context.metadata);
    performMetadataChecks(streamSegmentIds, new HashSet<>(), new HashMap<>(), completionFutures, context.metadata, false, false);
    performReadIndexChecks(completionFutures, context.readIndex);
    operationProcessor.stopAsync().awaitTerminated();
}
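
FailedStreamSegmentAppendOperation comes from Pravega's test sources and is not shown here. The sketch below, with all names hypothetical, only illustrates the idea the comment above describes: an append whose serialization fails deterministically, either before any content is written or after all of it has been:

import java.io.IOException;
import java.io.OutputStream;

// Hypothetical stand-in for a serialization-failing operation. The real test
// class surfaces IntentionalException; a plain IOException is used here to
// keep the sketch self-contained.
final class FailingAppendPayload {
    private final byte[] data;
    private final boolean failAtEnd;

    FailingAppendPayload(byte[] data, boolean failAtEnd) {
        this.data = data;
        this.failAtEnd = failAtEnd;
    }

    void serialize(OutputStream target) throws IOException {
        if (!this.failAtEnd) {
            // Failure "at the beginning": nothing reaches the log.
            throw new IOException("intentional: failing before serialization starts");
        }
        target.write(this.data);
        // Failure "at the end": content was written, but the operation still fails.
        throw new IOException("intentional: failing after serialization completed");
    }
}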
Also used : TestDurableDataLog(io.pravega.segmentstore.server.TestDurableDataLog) ProbeOperation(io.pravega.segmentstore.server.logs.operations.ProbeOperation) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Cleanup(lombok.Cleanup) IntentionalException(io.pravega.test.common.IntentionalException) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 19 with TestDurableDataLog

Use of io.pravega.segmentstore.server.TestDurableDataLog in project pravega by pravega.

From the class OperationProcessorTests, method testWithDataLogNotPrimaryException.

/**
 * Tests the ability of the OperationProcessor to handle a DataLogWriterNotPrimaryException.
 */
@Test
public void testWithDataLogNotPrimaryException() throws Exception {
    int streamSegmentCount = 1;
    int appendsPerStreamSegment = 1;
    @Cleanup TestContext context = new TestContext();
    // Generate some test data (no need to complicate ourselves with Transactions here; that is tested in the no-failure test).
    HashSet<Long> streamSegmentIds = createStreamSegmentsInMetadata(streamSegmentCount, context.metadata);
    List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
    // Setup an OperationProcessor and start it.
    @Cleanup TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, MAX_DATA_LOG_APPEND_SIZE, executorService());
    dataLog.initialize(TIMEOUT);
    @Cleanup OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater, dataLog, getNoOpCheckpointPolicy(), executorService());
    operationProcessor.startAsync().awaitRunning();
    ErrorInjector<Exception> aSyncErrorInjector = new ErrorInjector<>(count -> true, () -> new CompletionException(new DataLogWriterNotPrimaryException("intentional")));
    dataLog.setAppendErrorInjectors(null, aSyncErrorInjector);
    // Process all generated operations.
    List<OperationWithCompletion> completionFutures = processOperations(operations, operationProcessor);
    // Wait for all such operations to complete. We are expecting exceptions, so verify that we do.
    AssertExtensions.assertThrows("No operations failed.", OperationWithCompletion.allOf(completionFutures)::join, ex -> ex instanceof IOException || ex instanceof DataLogWriterNotPrimaryException);
    // Verify that the OperationProcessor automatically shuts down and that it has the right failure cause.
    ServiceListeners.awaitShutdown(operationProcessor, TIMEOUT, false);
    Assert.assertEquals("OperationProcessor is not in a failed state after fence-out detected.", Service.State.FAILED, operationProcessor.state());
    Assert.assertTrue("OperationProcessor did not fail with the correct exception.", operationProcessor.failureCause() instanceof DataLogWriterNotPrimaryException);
}
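
ErrorInjector here is Pravega's test utility (io.pravega.test.common.ErrorInjector), constructed above from a count predicate and an exception supplier. A compact re-sketch of the mechanics those arguments suggest, hypothetical and not the actual Pravega implementation, might look like this:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import java.util.function.Supplier;

// Hypothetical re-sketch of the error-injection idea: each call increments a
// counter; when the trigger predicate matches, the supplied exception is
// thrown. In the test above, count -> true makes every async append fail with
// a CompletionException wrapping DataLogWriterNotPrimaryException.
final class CountingErrorInjector<T extends Exception> {
    private final Predicate<Integer> trigger;
    private final Supplier<T> errorSupplier;
    private final AtomicInteger count = new AtomicInteger();

    CountingErrorInjector(Predicate<Integer> trigger, Supplier<T> errorSupplier) {
        this.trigger = trigger;
        this.errorSupplier = errorSupplier;
    }

    void throwIfNeeded() throws T {
        if (this.trigger.test(this.count.getAndIncrement())) {
            throw this.errorSupplier.get();
        }
    }
}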
Also used : DataLogWriterNotPrimaryException(io.pravega.segmentstore.storage.DataLogWriterNotPrimaryException) TestDurableDataLog(io.pravega.segmentstore.server.TestDurableDataLog) ErrorInjector(io.pravega.test.common.ErrorInjector) ProbeOperation(io.pravega.segmentstore.server.logs.operations.ProbeOperation) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) IOException(java.io.IOException) Cleanup(lombok.Cleanup) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) CancellationException(java.util.concurrent.CancellationException) CompletionException(java.util.concurrent.CompletionException) ObjectClosedException(io.pravega.common.ObjectClosedException) StreamSegmentException(io.pravega.segmentstore.contracts.StreamSegmentException) DurableDataLogException(io.pravega.segmentstore.storage.DurableDataLogException) IntentionalException(io.pravega.test.common.IntentionalException) Test(org.junit.Test)

Aggregations

TestDurableDataLog (io.pravega.segmentstore.server.TestDurableDataLog): 19 uses
Test (org.junit.Test): 19 uses
Cleanup (lombok.Cleanup): 15 uses
ErrorInjector (io.pravega.test.common.ErrorInjector): 12 uses
IOException (java.io.IOException): 12 uses
ObjectClosedException (io.pravega.common.ObjectClosedException): 11 uses
ProbeOperation (io.pravega.segmentstore.server.logs.operations.ProbeOperation): 11 uses
Operation (io.pravega.segmentstore.server.logs.operations.Operation): 10 uses
StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation): 10 uses
StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation): 10 uses
HashSet (java.util.HashSet): 10 uses
lombok.val (lombok.val): 10 uses
AssertExtensions (io.pravega.test.common.AssertExtensions): 9 uses
IntentionalException (io.pravega.test.common.IntentionalException): 9 uses
Duration (java.time.Duration): 9 uses
ArrayList (java.util.ArrayList): 9 uses
List (java.util.List): 9 uses
Predicate (java.util.function.Predicate): 9 uses
Assert (org.junit.Assert): 9 uses
Rule (org.junit.Rule): 9 uses