Example 1 with ContainerReadIndex

Use of io.pravega.segmentstore.server.reading.ContainerReadIndex in project pravega, from class DurableLogTests, method testTruncateWithoutRecovery.

// endregion
// region Truncation
/**
 * Tests the truncate() method without doing any recovery.
 */
@Test
public void testTruncateWithoutRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Setup a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    @Cleanup ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, storage, cacheManager, executorService());
    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Hook up a listener to figure out when truncation actually happens.
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        Set<Long> streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, durableLog);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        // Process all operations.
        OperationWithCompletion.allOf(processOperations(queuedOperations, durableLog)).join();
        // Add a MetadataCheckpointOperation at the end, after everything else has processed. This ensures that it
        // sits in a DataFrame by itself and enables us to truncate everything at the end.
        processOperation(new MetadataCheckpointOperation(), durableLog).completion.join();
        awaitLastOperationAdded(durableLog, metadata);
        // Get a list of all the operations, before truncation.
        List<Operation> originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        boolean fullTruncationPossible = false;
        long currentTruncatedSeqNo = originalOperations.get(0).getSequenceNumber();
        // At the end, verify all operations and all entries in the DataLog were truncated.
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            truncationOccurred.set(false);
            if (currentOperation instanceof MetadataCheckpointOperation) {
                // Perform the truncation.
                durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
                awaitLastOperationAdded(durableLog, metadata);
                if (currentOperation.getSequenceNumber() != currentTruncatedSeqNo) {
                    // The operation we truncated at is not the first in the log, so an actual
                    // truncation must have occurred.
                    Assert.assertTrue("No truncation occurred even though a valid Truncation Point was passed: " + currentOperation.getSequenceNumber(), truncationOccurred.get());
                    // Now verify that we get a StorageMetadataCheckpointOperation queued.
                    AssertExtensions.assertGreaterThan("Expected an operation to be queued as part of truncation.", 0, durableLog.getInMemoryOperationLog().size());
                    val readAfterTruncate = durableLog.read(1, TIMEOUT).join();
                    Assert.assertTrue("Expected a StorageMetadataCheckpointOperation to be queued as part of truncation.", readAfterTruncate.poll() instanceof StorageMetadataCheckpointOperation);
                }
                if (i == originalOperations.size() - 1) {
                    // Sometimes the Truncation Point is on the same DataFrame as other data, and it's the last DataFrame;
                    // In that case, it cannot be truncated, since truncating the frame would mean losing the Checkpoint as well.
                    fullTruncationPossible = durableLog.getInMemoryOperationLog().size() == 0;
                }
            } else {
                // Verify we are not allowed to truncate on non-valid Truncation Points.
                AssertExtensions.assertSuppliedFutureThrows("DurableLog allowed truncation on a non-MetadataCheckpointOperation.", () -> durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT), ex -> ex instanceof IllegalArgumentException);
                Assert.assertFalse("Not expecting a truncation to have occurred.", truncationOccurred.get());
            }
        }
        // Verify that we can still queue operations to the DurableLog and they can be read.
        // In this case we'll just queue some StreamSegmentMapOperations.
        StreamSegmentMapOperation newOp = new StreamSegmentMapOperation(StreamSegmentInformation.builder().name("foo").build());
        if (!fullTruncationPossible) {
            // We were not able to do a full truncation before. Do one now, since we are guaranteed to have a new DataFrame available.
            MetadataCheckpointOperation lastCheckpoint = new MetadataCheckpointOperation();
            durableLog.add(lastCheckpoint, OperationPriority.Normal, TIMEOUT).join();
            awaitLastOperationAdded(durableLog, metadata);
            durableLog.truncate(lastCheckpoint.getSequenceNumber(), TIMEOUT).join();
        }
        durableLog.add(newOp, OperationPriority.Normal, TIMEOUT).join();
        awaitLastOperationAdded(durableLog, metadata);
        // Full Checkpoint + Storage Checkpoint (auto-added) + new op.
        final int expectedOperationCount = 3;
        List<Operation> newOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        Assert.assertEquals("Unexpected number of operations added after full truncation.", expectedOperationCount, newOperations.size());
        Assert.assertTrue("Expecting the first operation after full truncation to be a MetadataCheckpointOperation.", newOperations.get(0) instanceof MetadataCheckpointOperation);
        Assert.assertTrue("Expecting a StorageMetadataCheckpointOperation to be auto-added after full truncation.", newOperations.get(1) instanceof StorageMetadataCheckpointOperation);
        Assert.assertEquals("Unexpected Operation encountered after full truncation.", newOp, newOperations.get(2));
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
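
These tests call a readUpToSequenceNumber helper that the snippet does not show. The sketch below is a plausible reconstruction, not the actual pravega helper; consistent with the durableLog.read(1, TIMEOUT).join().poll() call above, it assumes DurableLog.read(maxCount, timeout) returns a CompletableFuture holding a Queue of the next operations, and that read() waits until at least one operation is available.

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;
import io.pravega.segmentstore.server.logs.DurableLog;
import io.pravega.segmentstore.server.logs.operations.Operation;

// Hypothetical reconstruction of the readUpToSequenceNumber helper used in these tests.
private static List<Operation> readUpToSequenceNumber(DurableLog durableLog, long targetSeqNo, Duration timeout) {
    List<Operation> result = new ArrayList<>();
    long lastSeqNo = -1;
    // Keep polling until we have seen an operation at or beyond the target sequence number.
    while (lastSeqNo < targetSeqNo) {
        Queue<Operation> batch = durableLog.read(1000, timeout).join();
        for (Operation op : batch) {
            result.add(op);
            lastSeqNo = op.getSequenceNumber();
        }
    }
    return result;
}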

Example 2 with ContainerReadIndex

Use of io.pravega.segmentstore.server.reading.ContainerReadIndex in project pravega, from class DurableLogTests, method testTruncateWithStorageMetadataCheckpoints.

/**
 * Tests the ability of the truncate() method to auto-queue (and wait for) mini-metadata checkpoints containing items
 * that are not updated via normal operations. Such items include StorageLength and IsSealedInStorage.
 */
@Test
public void testTruncateWithStorageMetadataCheckpoints() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Setup a DurableLog and start it.
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    val metadata1 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    @Cleanup val readIndex1 = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, storage, cacheManager, executorService());
    Set<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex1, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, durableLog);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Update the metadata with Storage-related data: set arbitrary StorageLengths and mark about half of the sealed segments as sealed in Storage.
        long storageOffset = 0;
        for (long segmentId : streamSegmentIds) {
            val sm = metadata1.getStreamSegmentMetadata(segmentId);
            sm.setStorageLength(Math.min(storageOffset, sm.getLength()));
            storageOffset++;
            if (sm.isSealed() && storageOffset % 2 == 0) {
                sm.markSealedInStorage();
            }
        }
        // Truncate at the last possible truncation point.
        val originalOperations = readUpToSequenceNumber(durableLog, metadata1.getOperationSequenceNumber());
        long lastCheckpointSeqNo = -1;
        for (Operation o : originalOperations) {
            if (o instanceof MetadataCheckpointOperation) {
                lastCheckpointSeqNo = o.getSequenceNumber();
            }
        }
        AssertExtensions.assertGreaterThan("Could not find any truncation points.", 0, lastCheckpointSeqNo);
        durableLog.truncate(lastCheckpointSeqNo, TIMEOUT).join();
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Start a second DurableLog and then verify the metadata.
    val metadata2 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup val readIndex2 = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, storage, cacheManager, executorService());
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex2, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Check Metadata1 vs Metadata2
        for (long segmentId : streamSegmentIds) {
            val sm1 = metadata1.getStreamSegmentMetadata(segmentId);
            val sm2 = metadata2.getStreamSegmentMetadata(segmentId);
            Assert.assertEquals("StorageLength differs for recovered segment " + segmentId, sm1.getStorageLength(), sm2.getStorageLength());
            Assert.assertEquals("IsSealedInStorage differs for recovered segment " + segmentId, sm1.isSealedInStorage(), sm2.isSealedInStorage());
        }
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
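
The metadata comparison at the end is the crux of this test: StorageLength and IsSealedInStorage are carried into the retained part of the log only by the StorageMetadataCheckpointOperation that truncate() auto-queues. As a minimal sketch, that check can be factored into a standalone helper (hypothetical name; it uses only the accessors already exercised above):

import java.util.Set;
import io.pravega.segmentstore.server.UpdateableContainerMetadata;
import lombok.val;
import org.junit.Assert;

// Hypothetical helper capturing the post-recovery check from the test above.
static void assertStorageMetadataRecovered(Set<Long> segmentIds, UpdateableContainerMetadata original, UpdateableContainerMetadata recovered) {
    for (long segmentId : segmentIds) {
        val sm1 = original.getStreamSegmentMetadata(segmentId);
        val sm2 = recovered.getStreamSegmentMetadata(segmentId);
        Assert.assertEquals("StorageLength differs for recovered segment " + segmentId, sm1.getStorageLength(), sm2.getStorageLength());
        Assert.assertEquals("IsSealedInStorage differs for recovered segment " + segmentId, sm1.isSealedInStorage(), sm2.isSealedInStorage());
    }
}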

Example 3 with ContainerReadIndex

Use of io.pravega.segmentstore.server.reading.ContainerReadIndex in project pravega, from class DurableLogTests, method testRecoveryWithNoFailures.

// endregion
// region Recovery
/**
 * Tests the DurableLog recovery process in a scenario when there are no failures during the process.
 */
@Test
public void testRecoveryWithNoFailures() throws Exception {
    int streamSegmentCount = 50;
    int transactionsPerStreamSegment = 2;
    int appendsPerStreamSegment = 20;
    boolean mergeTransactions = true;
    boolean sealStreamSegments = true;
    // Setup a DurableLog and start it.
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    Set<Long> streamSegmentIds;
    AbstractMap<Long, Long> transactions;
    List<OperationWithCompletion> completionFutures;
    List<Operation> originalOperations;
    // First DurableLog. We use this for generating data.
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, durableLog);
        transactions = createTransactionsWithOperations(streamSegmentIds, transactionsPerStreamSegment, metadata, durableLog);
        List<Operation> operations = generateOperations(streamSegmentIds, transactions, appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, mergeTransactions, sealStreamSegments);
        // Process all generated operations and wait for them to complete
        completionFutures = processOperations(operations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Get a list of all the operations, before recovery.
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Second DurableLog. We use this for recovery.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        List<Operation> recoveredOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        assertRecoveredOperationsMatch(originalOperations, recoveredOperations);
        performMetadataChecks(streamSegmentIds, new HashSet<>(), transactions, completionFutures, metadata, mergeTransactions, sealStreamSegments);
        performReadIndexChecks(completionFutures, readIndex);
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
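
Recovery here hinges on one design point: both DurableLog instances share the same DurableDataLogFactory and Storage, while each gets fresh metadata and a fresh ContainerReadIndex; startAsync() on the second instance rebuilds the metadata by replaying the shared data log. A minimal sketch of that pattern as a factory method follows (hypothetical name; the DurableLogConfig, ReadIndexConfig, and DurableDataLogFactory package locations are assumptions matching the constructors used above, and for brevity the ReadIndex created here is not closed, whereas the real tests manage it with try-with-resources):

import java.util.concurrent.ScheduledExecutorService;
import io.pravega.segmentstore.server.CacheManager;
import io.pravega.segmentstore.server.MetadataBuilder;
import io.pravega.segmentstore.server.ReadIndex;
import io.pravega.segmentstore.server.UpdateableContainerMetadata;
import io.pravega.segmentstore.server.logs.DurableLog;
import io.pravega.segmentstore.server.logs.DurableLogConfig;
import io.pravega.segmentstore.server.reading.ContainerReadIndex;
import io.pravega.segmentstore.server.reading.ReadIndexConfig;
import io.pravega.segmentstore.storage.DurableDataLogFactory;
import io.pravega.segmentstore.storage.Storage;

// Hypothetical convenience method packaging the recovery pattern above.
static DurableLog startRecoveredLog(int containerId, DurableLogConfig logConfig, ReadIndexConfig readIndexConfig,
                                    DurableDataLogFactory dataLogFactory, Storage storage,
                                    CacheManager cacheManager, ScheduledExecutorService executor) {
    // Fresh metadata: recovery wipes it and rebuilds it from the shared data log.
    UpdateableContainerMetadata metadata = new MetadataBuilder(containerId).build();
    ReadIndex readIndex = new ContainerReadIndex(readIndexConfig, metadata, storage, cacheManager, executor);
    DurableLog durableLog = new DurableLog(logConfig, metadata, dataLogFactory, readIndex, executor);
    // Replays every frame in the data log; throws if recovery fails.
    durableLog.startAsync().awaitRunning();
    return durableLog;
}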

Example 4 with ContainerReadIndex

Use of io.pravega.segmentstore.server.reading.ContainerReadIndex in project pravega, from class DurableLogTests, method testRecoveryFailures.

/**
 * Tests the DurableLog recovery process in a scenario when there are failures during the process
 * (these may or may not be DataCorruptionExceptions).
 */
@Test
public void testRecoveryFailures() throws Exception {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Fail DataLog reads after X reads.
    int failReadAfter = 2;
    // Setup a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    Set<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    // First DurableLog. We use this for generating data.
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, durableLog);
        List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        // Process all generated operations and wait for them to complete
        completionFutures = processOperations(operations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Recovery failure due to DataLog Failures.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // Inject an artificial error into the DataLog read after a few reads.
        ErrorInjector<Exception> readNextInjector = new ErrorInjector<>(count -> count > failReadAfter, () -> new DataLogNotAvailableException("intentional"));
        dataLog.get().setReadErrorInjectors(null, readNextInjector);
        // Verify the exception thrown from startAsync() is of the right kind. This exception will be wrapped in
        // multiple layers, so we need to dig deep into it.
        AssertExtensions.assertThrows("Recovery did not fail properly when expecting DurableDataLogException.", () -> durableLog.startAsync().awaitRunning(), ex -> {
            if (ex instanceof IllegalStateException) {
                ex = ex.getCause();
            }
            if (ex == null) {
                try {
                    // We need this to enter a FAILED state to get its failure cause.
                    durableLog.awaitTerminated();
                } catch (Exception ex2) {
                    ex = durableLog.failureCause();
                }
            }
            ex = Exceptions.unwrap(ex);
            return ex instanceof DataLogNotAvailableException && ex.getMessage().equals("intentional");
        });
    }
    // Recovery failure due to DataCorruptionException.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // Reset error injectors to nothing.
        dataLog.get().setReadErrorInjectors(null, null);
        AtomicInteger readCounter = new AtomicInteger();
        dataLog.get().setReadInterceptor(readItem -> {
            if (readCounter.incrementAndGet() > failReadAfter && readItem.getLength() > DataFrame.MIN_ENTRY_LENGTH_NEEDED) {
                // Mangle the payload: overwrite its contents with an empty, sealed DataFrame,
                // which carries a bogus previous sequence number.
                DataFrame df = DataFrame.ofSize(readItem.getLength());
                df.seal();
                CompositeArrayView serialization = df.getData();
                return new InjectedReadItem(serialization.getReader(), serialization.getLength(), readItem.getAddress());
            }
            return readItem;
        });
        // Verify the exception thrown from startAsync() is of the right kind. This exception will be wrapped in
        // multiple layers, so we need to dig deep into it.
        AssertExtensions.assertThrows("Recovery did not fail properly when expecting DataCorruptionException.", () -> durableLog.startAsync().awaitRunning(), ex -> {
            if (ex instanceof IllegalStateException) {
                ex = ex.getCause();
            }
            return Exceptions.unwrap(ex) instanceof DataCorruptionException;
        });
        // Verify that the underlying DurableDataLog has been disabled.
        val disabledDataLog = dataLogFactory.createDurableDataLog(CONTAINER_ID);
        AssertExtensions.assertThrows("DurableDataLog has not been disabled following a recovery failure with DataCorruptionException.", () -> disabledDataLog.initialize(TIMEOUT), ex -> ex instanceof DataLogDisabledException);
    }
}
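
The two assertThrows predicates above repeat the same unwrap-and-match steps. As a sketch, that idiom can be factored into a reusable predicate (hypothetical helper; it omits the awaitTerminated()/failureCause() fallback that the first predicate also performs). Exceptions.unwrap is the io.pravega.common utility already used above.

import io.pravega.common.Exceptions;

// Hypothetical predicate for matching a recovery failure cause. Guava's Service wraps
// startup failures in IllegalStateException, and futures may add further layers, which
// Exceptions.unwrap removes.
static boolean recoveryFailedWith(Throwable ex, Class<? extends Throwable> expected) {
    if (ex instanceof IllegalStateException && ex.getCause() != null) {
        ex = ex.getCause();
    }
    return expected.isInstance(Exceptions.unwrap(ex));
}

With it, the second check above reduces to ex -> recoveryFailedWith(ex, DataCorruptionException.class).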

Example 5 with ContainerReadIndex

Use of io.pravega.segmentstore.server.reading.ContainerReadIndex in project pravega, from class DurableLogTests, method testRecoveryWithDisabledDataLog.

/**
 * Verifies the ability of the DurableLog to recover (delayed start) using a disabled DurableDataLog. This verifies
 * the ability to shut down correctly while still waiting for the DataLog to become enabled, as well as to detect
 * that it did become enabled and then resume normal operations.
 */
@Test
public void testRecoveryWithDisabledDataLog() throws Exception {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    @Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    // Write some data to the log. We'll read it later.
    Set<Long> streamSegmentIds;
    List<Operation> originalOperations;
    List<OperationWithCompletion> completionFutures;
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // DurableLog should start properly.
        durableLog.startAsync().awaitRunning();
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, durableLog);
        List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(operations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
    }
    // Disable the DurableDataLog. This requires us to initialize the log, then disable it.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // DurableLog should start properly.
        durableLog.startAsync().awaitRunning();
        CompletableFuture<Void> online = durableLog.awaitOnline();
        Assert.assertTrue("awaitOnline() returned an incomplete future.", Futures.isSuccessful(online));
        Assert.assertFalse("Not expecting an offline DurableLog.", durableLog.isOffline());
        dataLog.get().disable();
    }
    // Verify that the DurableLog starts properly and that all operations throw appropriate exceptions.
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // DurableLog should start properly.
        durableLog.startAsync().awaitRunning();
        CompletableFuture<Void> online = durableLog.awaitOnline();
        Assert.assertFalse("awaitOnline() returned a completed future.", online.isDone());
        Assert.assertTrue("Expecting an offline DurableLog.", durableLog.isOffline());
        // Verify all operations fail with the right exception.
        AssertExtensions.assertSuppliedFutureThrows("add() did not fail with the right exception when offline.", () -> durableLog.add(new StreamSegmentSealOperation(123), OperationPriority.Normal, TIMEOUT), ex -> ex instanceof ContainerOfflineException);
        AssertExtensions.assertSuppliedFutureThrows("read() did not fail with the right exception when offline.", () -> durableLog.read(1, TIMEOUT), ex -> ex instanceof ContainerOfflineException);
        AssertExtensions.assertSuppliedFutureThrows("truncate() did not fail with the right exception when offline.", () -> durableLog.truncate(0, TIMEOUT), ex -> ex instanceof ContainerOfflineException);
        // Verify we can also shut it down properly from this state.
        durableLog.stopAsync().awaitTerminated();
        Assert.assertTrue("awaitOnline() returned future did not fail when DurableLog shut down.", online.isCompletedExceptionally());
    }
    // Verify that, when the DurableDataLog becomes enabled, the DurableLog can pick up the change and resume normal operations.
    // Verify that the DurableLog starts properly and that all operations throw appropriate exceptions.
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // DurableLog should start properly.
        durableLog.startAsync().awaitRunning();
        CompletableFuture<Void> online = durableLog.awaitOnline();
        Assert.assertFalse("awaitOnline() returned a completed future.", online.isDone());
        // Enable the underlying data log and await for recovery to finish.
        dataLog.get().enable();
        online.get(START_RETRY_DELAY_MILLIS * 100, TimeUnit.MILLISECONDS);
        Assert.assertFalse("Not expecting an offline DurableLog after re-enabling.", durableLog.isOffline());
        // Verify we can still read the data that we wrote before the DataLog was disabled.
        List<Operation> recoveredOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        assertRecoveredOperationsMatch(originalOperations, recoveredOperations);
        performMetadataChecks(streamSegmentIds, new HashSet<>(), new HashMap<>(), completionFutures, metadata, false, false);
        performReadIndexChecks(completionFutures, readIndex);
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
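
The last block exercises the delayed-start contract: startAsync() succeeds even while the data log is disabled, isOffline() reports true, and awaitOnline() completes only once the data log is enabled again. A minimal sketch of the resume step, using only the calls shown above (the helper name is hypothetical):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import io.pravega.segmentstore.server.TestDurableDataLog;
import io.pravega.segmentstore.server.logs.DurableLog;

// Hypothetical helper: enable the underlying data log and block until the DurableLog
// finishes its delayed recovery and comes online.
static void enableAndAwaitOnline(DurableLog durableLog, TestDurableDataLog dataLog, long timeoutMillis) throws Exception {
    CompletableFuture<Void> online = durableLog.awaitOnline();
    if (!online.isDone()) {
        dataLog.enable();
        // Blocks until the delayed recovery completes (or times out).
        online.get(timeoutMillis, TimeUnit.MILLISECONDS);
    }
    if (durableLog.isOffline()) {
        throw new IllegalStateException("DurableLog is still offline after enabling the data log.");
    }
}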

Aggregations

CacheManager (io.pravega.segmentstore.server.CacheManager): 9 usages
MetadataBuilder (io.pravega.segmentstore.server.MetadataBuilder): 9 usages
TestDurableDataLogFactory (io.pravega.segmentstore.server.TestDurableDataLogFactory): 9 usages
StreamSegmentMapOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation): 9 usages
ContainerReadIndex (io.pravega.segmentstore.server.reading.ContainerReadIndex): 9 usages
Storage (io.pravega.segmentstore.storage.Storage): 9 usages
CacheStorage (io.pravega.segmentstore.storage.cache.CacheStorage): 9 usages
DirectMemoryCache (io.pravega.segmentstore.storage.cache.DirectMemoryCache): 9 usages
InMemoryDurableDataLogFactory (io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory): 9 usages
Cleanup (lombok.Cleanup): 9 usages
Test (org.junit.Test): 9 usages
ReadIndex (io.pravega.segmentstore.server.ReadIndex): 8 usages
CachedStreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation): 8 usages
DeleteSegmentOperation (io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation): 8 usages
MergeSegmentOperation (io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation): 8 usages
MetadataCheckpointOperation (io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation): 8 usages
Operation (io.pravega.segmentstore.server.logs.operations.Operation): 8 usages
StorageMetadataCheckpointOperation (io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation): 8 usages
StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation): 8 usages
StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation): 8 usages