Example 6 with Operation

use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.

From class ContainerMetadataUpdateTransactionTests, method testAcceptStreamSegmentTruncate.

/**
 * Tests the acceptOperation method with StreamSegmentTruncate operations.
 */
@Test
public void testAcceptStreamSegmentTruncate() throws Exception {
    val metadata = createMetadata();
    val append = createAppendNoOffset();
    // Here we also Seal the segment, since the preProcessStreamSegmentTruncate test did not cover sealing.
    val seal = createSeal();
    final long truncateOffset = SEGMENT_LENGTH + append.getLength() / 2;
    val truncate = createTruncate(truncateOffset);
    // Apply all operations in order, in the same transaction. This helps verify that, should these operations happen
    // concurrently, they are applied to the metadata in the correct order.
    val txn1 = createUpdateTransaction(metadata);
    for (Operation o : Arrays.asList(append, seal, truncate)) {
        txn1.preProcessOperation(o);
        txn1.acceptOperation(o);
    }
    // Attempt some more invalid truncate operations.
    AssertExtensions.assertThrows("preProcessOperation accepted a truncate operation with wrong offset (smaller).", () -> txn1.preProcessOperation(createTruncate(truncateOffset - 1)), ex -> ex instanceof BadOffsetException);
    AssertExtensions.assertThrows("preProcessOperation accepted a truncate operation with wrong offset (larger).", () -> txn1.preProcessOperation(createTruncate(truncateOffset + append.getLength())), ex -> ex instanceof BadOffsetException);
    // Verify the Update Transaction has been updated, but the metadata has not yet been touched.
    val sm = metadata.getStreamSegmentMetadata(SEGMENT_ID);
    Assert.assertEquals("Unexpected StartOffset in UpdateTransaction.", truncateOffset, txn1.getStreamSegmentMetadata(SEGMENT_ID).getStartOffset());
    Assert.assertEquals("Unexpected StartOffset in Metadata pre-commit.", 0, sm.getStartOffset());
    // Commit and verify that the metadata has been correctly updated.
    txn1.commit(metadata);
    Assert.assertEquals("Unexpected StartOffset in Metadata post-commit.", truncateOffset, sm.getStartOffset());
    Assert.assertEquals("Unexpected Length in Metadata post-commit.", append.getStreamSegmentOffset() + append.getLength(), sm.getLength());
    Assert.assertTrue("Unexpected Sealed status in Metadata post-commit.", sm.isSealed());
    // Verify a single truncate operation in a fresh transaction (checks that it reads from the actual metadata when needed).
    val op2 = createTruncate(truncateOffset + 1);
    val txn2 = createUpdateTransaction(metadata);
    txn2.preProcessOperation(op2);
    txn2.acceptOperation(op2);
    txn2.commit(metadata);
    Assert.assertEquals("Unexpected StartOffset in Metadata post-commit (second).", op2.getStreamSegmentOffset(), sm.getStartOffset());
    // Verify truncating the entire segment.
    val op3 = createTruncate(sm.getLength());
    val txn3 = createUpdateTransaction(metadata);
    txn3.preProcessOperation(op3);
    txn3.acceptOperation(op3);
    txn3.commit(metadata);
    Assert.assertEquals("Unexpected StartOffset in Metadata when truncating entire segment.", sm.getLength(), sm.getStartOffset());
}
Also used : lombok.val(lombok.val) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) StreamSegmentTruncateOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation) UpdateAttributesOperation(io.pravega.segmentstore.server.logs.operations.UpdateAttributesOperation) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) MergeTransactionOperation(io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) Test(org.junit.Test)
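
The test above exercises the update transaction's three-phase lifecycle: preProcessOperation only validates, acceptOperation applies the change to the transaction's private view, and commit publishes the accumulated changes to the container metadata. A minimal sketch of that flow, assuming it runs inside a ContainerMetadataUpdateTransactionTests method declared throws Exception, so the createMetadata, createUpdateTransaction and createSeal helpers resolve:

val metadata = createMetadata();
val txn = createUpdateTransaction(metadata);
Operation op = createSeal();
// Phase 1: validate against the transaction's pending state; throws on conflict
// (e.g. BadOffsetException for a bad truncate offset) and changes nothing.
txn.preProcessOperation(op);
// Phase 2: apply the operation to the transaction's view; the container metadata
// itself is still untouched, as the pre-commit assertions above demonstrate.
txn.acceptOperation(op);
// Phase 3: publish everything accumulated so far into the container metadata.
txn.commit(metadata);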

Example 7 with Operation

use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.

From class ContainerMetadataUpdateTransactionTests, method testMaxAttributeLimit.

/**
 * Tests the ability of the ContainerMetadataUpdateTransaction to enforce the maximum attribute limit on Segments.
 */
@Test
public void testMaxAttributeLimit() throws Exception {
    // We check all operations that can update attributes.
    val ops = new HashMap<String, Function<Collection<AttributeUpdate>, Operation>>();
    ops.put("UpdateAttributes", u -> new UpdateAttributesOperation(SEGMENT_ID, u));
    ops.put("Append", u -> new StreamSegmentAppendOperation(SEGMENT_ID, DEFAULT_APPEND_DATA, u));
    // Set the maximum allowed number of attributes on a segment.
    UpdateableContainerMetadata metadata = createMetadata();
    val initialUpdates = new ArrayList<AttributeUpdate>(SegmentMetadata.MAXIMUM_ATTRIBUTE_COUNT);
    val expectedValues = new HashMap<UUID, Long>();
    for (int i = 0; i < SegmentMetadata.MAXIMUM_ATTRIBUTE_COUNT; i++) {
        UUID attributeId;
        do {
            attributeId = UUID.randomUUID();
        } while (expectedValues.containsKey(attributeId));
        initialUpdates.add(new AttributeUpdate(attributeId, AttributeUpdateType.None, i));
        expectedValues.put(attributeId, (long) i);
    }
    // And load them up into an UpdateTransaction.
    val txn = createUpdateTransaction(metadata);
    val initialOp = new UpdateAttributesOperation(SEGMENT_ID, initialUpdates);
    txn.preProcessOperation(initialOp);
    txn.acceptOperation(initialOp);
    // Each of the checks below only invokes preProcessOperation(), which is responsible for validation, so no changes are made to the UpdateTransaction.
    for (val opGenerator : ops.entrySet()) {
        // Value replacement.
        val replacementUpdates = new ArrayList<AttributeUpdate>();
        int i = 0;
        for (val e : expectedValues.entrySet()) {
            AttributeUpdate u;
            switch((i++) % 4) {
                case 0:
                    u = new AttributeUpdate(e.getKey(), AttributeUpdateType.ReplaceIfEquals, e.getValue() + 1, e.getValue());
                    break;
                case 1:
                    u = new AttributeUpdate(e.getKey(), AttributeUpdateType.ReplaceIfGreater, e.getValue() + 1);
                    break;
                case 2:
                    u = new AttributeUpdate(e.getKey(), AttributeUpdateType.Accumulate, 1);
                    break;
                default:
                    u = new AttributeUpdate(e.getKey(), AttributeUpdateType.Replace, 1);
                    break;
            }
            replacementUpdates.add(u);
        }
        // This should not throw anything.
        txn.preProcessOperation(opGenerator.getValue().apply(replacementUpdates));
        // Removal - this should not throw anything either.
        val toRemoveId = initialUpdates.get(0).getAttributeId();
        val toRemoveUpdate = new AttributeUpdate(toRemoveId, AttributeUpdateType.Replace, SegmentMetadata.NULL_ATTRIBUTE_VALUE);
        txn.preProcessOperation(opGenerator.getValue().apply(Collections.singleton(toRemoveUpdate)));
        // Addition - this should throw.
        UUID toAddId;
        do {
            toAddId = UUID.randomUUID();
        } while (expectedValues.containsKey(toAddId));
        val toAddUpdate = new AttributeUpdate(toAddId, AttributeUpdateType.None, 1);
        AssertExtensions.assertThrows("Too many attributes were accepted for operation " + opGenerator.getKey(), () -> txn.preProcessOperation(opGenerator.getValue().apply(Collections.singleton(toAddUpdate))), ex -> ex instanceof TooManyAttributesException);
        // Removal+Addition+Replacement: this particular setup should not throw anything.
        val mixedUpdates = Arrays.asList(new AttributeUpdate(toAddId, AttributeUpdateType.None, 1), new AttributeUpdate(toRemoveId, AttributeUpdateType.Replace, SegmentMetadata.NULL_ATTRIBUTE_VALUE), new AttributeUpdate(initialUpdates.get(1).getAttributeId(), AttributeUpdateType.Replace, 10));
        txn.preProcessOperation(opGenerator.getValue().apply(mixedUpdates));
    }
}
Also used : lombok.val(lombok.val) AttributeUpdate(io.pravega.segmentstore.contracts.AttributeUpdate) TooManyAttributesException(io.pravega.segmentstore.contracts.TooManyAttributesException) UpdateAttributesOperation(io.pravega.segmentstore.server.logs.operations.UpdateAttributesOperation) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Collection(java.util.Collection) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) StreamSegmentTruncateOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) MergeTransactionOperation(io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) UUID(java.util.UUID) Test(org.junit.Test)
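
For reference, the switch above cycles through the conditional update flavors of AttributeUpdateType. The glosses below are inferred from how this test uses them, not quoted from the javadoc; key and currentValue are illustrative stand-ins for an attribute id and its current value:

UUID key = UUID.randomUUID();
long currentValue = 0;
// ReplaceIfEquals: set the new value only if the current value equals the supplied comparison value.
new AttributeUpdate(key, AttributeUpdateType.ReplaceIfEquals, currentValue + 1, currentValue);
// ReplaceIfGreater: set the new value only if it is greater than the current value.
new AttributeUpdate(key, AttributeUpdateType.ReplaceIfGreater, currentValue + 1);
// Accumulate: add the supplied value to the current value.
new AttributeUpdate(key, AttributeUpdateType.Accumulate, 1);
// Replace: overwrite unconditionally.
new AttributeUpdate(key, AttributeUpdateType.Replace, 1);
// None: no conditional check; the test uses it to insert brand-new attributes.
new AttributeUpdate(key, AttributeUpdateType.None, 1);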

Example 8 with Operation

use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.

From class DurableLogTests, method testRecoveryFailures.

/**
 * Tests the DurableLog recovery process in a scenario when there are failures during the process
 * (these may or may not be DataCorruptionExceptions).
 */
@Test
public void testRecoveryFailures() throws Exception {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Fail DataLog reads after X reads.
    int failReadAfter = 2;
    // Setup a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    // First DurableLog. We use this for generating data.
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        // Process all generated operations and wait for them to complete
        completionFutures = processOperations(operations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Recovery failure due to DataLog Failures.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // Inject an artificial error into DataLog reads after a few successful ones.
        ErrorInjector<Exception> readNextInjector = new ErrorInjector<>(count -> count > failReadAfter, () -> new DataLogNotAvailableException("intentional"));
        dataLog.get().setReadErrorInjectors(null, readNextInjector);
        // Verify the exception thrown from startAsync() is of the right kind. This exception will be wrapped in
        // multiple layers, so we need to dig deep into it.
        AssertExtensions.assertThrows("Recovery did not fail properly when expecting DurableDataLogException.", () -> durableLog.startAsync().awaitRunning(), ex -> {
            if (ex instanceof IllegalStateException) {
                ex = ex.getCause();
            }
            if (ex == null) {
                try {
                    // We need this to enter a FAILED state to get its failure cause.
                    durableLog.awaitTerminated();
                } catch (Exception ex2) {
                    ex = durableLog.failureCause();
                }
            }
            ex = Exceptions.unwrap(ex);
            return ex instanceof DataLogNotAvailableException && ex.getMessage().equals("intentional");
        });
    }
    // Recovery failure due to DataCorruptionException.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // Reset error injectors to nothing.
        dataLog.get().setReadErrorInjectors(null, null);
        AtomicInteger readCounter = new AtomicInteger();
        dataLog.get().setReadInterceptor(readItem -> {
            if (readCounter.incrementAndGet() > failReadAfter && readItem.getLength() > DataFrame.MIN_ENTRY_LENGTH_NEEDED) {
                // Tamper with the payload: overwrite its contents with a DataFrame that has a bogus
                // previous sequence number.
                DataFrame df = DataFrame.ofSize(readItem.getLength());
                df.seal();
                ArrayView serialization = df.getData();
                return new InjectedReadItem(serialization.getReader(), serialization.getLength(), readItem.getAddress());
            }
            return readItem;
        });
        // Verify the exception thrown from startAsync() is of the right kind. This exception will be wrapped in
        // multiple layers, so we need to dig deep into it.
        AssertExtensions.assertThrows("Recovery did not fail properly when expecting DataCorruptionException.", () -> durableLog.startAsync().awaitRunning(), ex -> {
            if (ex instanceof IllegalStateException) {
                ex = ex.getCause();
            }
            return Exceptions.unwrap(ex) instanceof DataCorruptionException;
        });
        // Verify that the underlying DurableDataLog has been disabled.
        val disabledDataLog = dataLogFactory.createDurableDataLog(CONTAINER_ID);
        AssertExtensions.assertThrows("DurableDataLog has not been disabled following a recovery failure with DataCorruptionException.", () -> disabledDataLog.initialize(TIMEOUT), ex -> ex instanceof DataLogDisabledException);
    }
}
Also used : TestDurableDataLog(io.pravega.segmentstore.server.TestDurableDataLog) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) ProbeOperation(io.pravega.segmentstore.server.logs.operations.ProbeOperation) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Cleanup(lombok.Cleanup) DataLogDisabledException(io.pravega.segmentstore.storage.DataLogDisabledException) CacheManager(io.pravega.segmentstore.server.reading.CacheManager) lombok.val(lombok.val) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ErrorInjector(io.pravega.test.common.ErrorInjector) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) ReadIndex(io.pravega.segmentstore.server.ReadIndex) InMemoryCacheFactory(io.pravega.segmentstore.storage.mocks.InMemoryCacheFactory) AtomicReference(java.util.concurrent.atomic.AtomicReference) InMemoryDurableDataLogFactory(io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) TimeoutException(java.util.concurrent.TimeoutException) DataLogNotAvailableException(io.pravega.segmentstore.storage.DataLogNotAvailableException) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) ContainerOfflineException(io.pravega.segmentstore.server.ContainerOfflineException) CompletionException(java.util.concurrent.CompletionException) DataLogWriterNotPrimaryException(io.pravega.segmentstore.storage.DataLogWriterNotPrimaryException) StreamSegmentException(io.pravega.segmentstore.contracts.StreamSegmentException) DurableDataLogException(io.pravega.segmentstore.storage.DurableDataLogException) IntentionalException(io.pravega.test.common.IntentionalException) IOException(java.io.IOException) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) Storage(io.pravega.segmentstore.storage.Storage) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TestDurableDataLogFactory(io.pravega.segmentstore.server.TestDurableDataLogFactory) ArrayView(io.pravega.common.util.ArrayView) Test(org.junit.Test)
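
The predicate handed to assertThrows above digs through several wrapping layers before it can inspect the root cause, and the same dance appears in both failure scenarios. A hedged sketch of that logic as a standalone helper (Service is Guava's; Exceptions.unwrap is io.pravega.common.Exceptions; the helper name is made up for illustration):

private static Throwable unwrapServiceFailure(Throwable ex, com.google.common.util.concurrent.Service service) {
    // awaitRunning() reports a startup failure as an IllegalStateException wrapping the real cause.
    if (ex instanceof IllegalStateException) {
        ex = ex.getCause();
    }
    if (ex == null) {
        try {
            // The cause may only become available once the service reaches the FAILED state.
            service.awaitTerminated();
        } catch (Exception ex2) {
            ex = service.failureCause();
        }
    }
    // Strip CompletionException/ExecutionException layers to reach the root cause.
    return Exceptions.unwrap(ex);
}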

Example 9 with Operation

use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.

From class DurableLogTests, method testTruncateWithStorageMetadataCheckpoints.

/**
 * Tests the ability of the truncate() method to auto-queue (and wait for) mini-metadata checkpoints containing items
 * that are not updated via normal operations. Such items include StorageLength and IsSealedInStorage.
 */
@Test
public void testTruncateWithStorageMetadataCheckpoints() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Setup a DurableLog and start it.
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    val metadata1 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup val readIndex1 = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, cacheFactory, storage, cacheManager, executorService());
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex1, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata1, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Update the metadata with Storage-related data: set some arbitrary StorageLengths and mark roughly half of the already-sealed segments as sealed in storage.
        long storageOffset = 0;
        for (long segmentId : streamSegmentIds) {
            val sm = metadata1.getStreamSegmentMetadata(segmentId);
            sm.setStorageLength(Math.min(storageOffset, sm.getLength()));
            storageOffset++;
            if (sm.isSealed() && storageOffset % 2 == 0) {
                sm.markSealedInStorage();
            }
        }
        // Truncate at the last possible truncation point.
        val originalOperations = readUpToSequenceNumber(durableLog, metadata1.getOperationSequenceNumber());
        long lastCheckpointSeqNo = -1;
        for (Operation o : originalOperations) {
            if (o instanceof MetadataCheckpointOperation) {
                lastCheckpointSeqNo = o.getSequenceNumber();
            }
        }
        AssertExtensions.assertGreaterThan("Could not find any truncation points.", 0, lastCheckpointSeqNo);
        durableLog.truncate(lastCheckpointSeqNo, TIMEOUT).join();
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Start a second DurableLog and verify the metadata; the Storage-only fields survive recovery because truncate() auto-queued a checkpoint capturing them.
    val metadata2 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup val readIndex2 = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, cacheFactory, storage, cacheManager, executorService());
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex2, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Check Metadata1 vs Metadata2
        for (long segmentId : streamSegmentIds) {
            val sm1 = metadata1.getStreamSegmentMetadata(segmentId);
            val sm2 = metadata2.getStreamSegmentMetadata(segmentId);
            Assert.assertEquals("StorageLength differs for recovered segment " + segmentId, sm1.getStorageLength(), sm2.getStorageLength());
            Assert.assertEquals("IsSealedInStorage differs for recovered segment " + segmentId, sm1.isSealedInStorage(), sm2.isSealedInStorage());
        }
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
Also used : lombok.val(lombok.val) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) InMemoryCacheFactory(io.pravega.segmentstore.storage.mocks.InMemoryCacheFactory) InMemoryDurableDataLogFactory(io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory) ProbeOperation(io.pravega.segmentstore.server.logs.operations.ProbeOperation) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Cleanup(lombok.Cleanup) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) Storage(io.pravega.segmentstore.storage.Storage) CacheManager(io.pravega.segmentstore.server.reading.CacheManager) TestDurableDataLogFactory(io.pravega.segmentstore.server.TestDurableDataLogFactory) Test(org.junit.Test)
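
The Storage-side fields set in the first phase are exactly the ones no regular log operation records, which is why the auto-queued checkpoint matters. A brief sketch of the two setters involved (segmentId and storageLength are illustrative values):

val sm = metadata1.getStreamSegmentMetadata(segmentId);
// Bytes of this segment persisted to (Tier-2) Storage; must not exceed the segment's length.
sm.setStorageLength(Math.min(storageLength, sm.getLength()));
// Records that the seal has been applied in Storage too; only meaningful for a sealed segment.
sm.markSealedInStorage();
// Neither field travels in a normal operation; truncate() snapshots them via an auto-queued
// StorageMetadataCheckpointOperation, which is what the second DurableLog recovers above.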

Example 10 with Operation

use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.

From class DurableLogTests, method testRecoveryWithNoFailures.

// endregion
// region Recovery
/**
 * Tests the DurableLog recovery process in a scenario when there are no failures during the process.
 */
@Test
public void testRecoveryWithNoFailures() throws Exception {
    int streamSegmentCount = 50;
    int transactionsPerStreamSegment = 2;
    int appendsPerStreamSegment = 20;
    boolean mergeTransactions = true;
    boolean sealStreamSegments = true;
    // Setup a DurableLog and start it.
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    HashSet<Long> streamSegmentIds;
    AbstractMap<Long, Long> transactions;
    List<OperationWithCompletion> completionFutures;
    List<Operation> originalOperations;
    // First DurableLog. We use this for generating data.
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        transactions = createTransactionsWithOperations(streamSegmentIds, transactionsPerStreamSegment, metadata, durableLog, storage);
        List<Operation> operations = generateOperations(streamSegmentIds, transactions, appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, mergeTransactions, sealStreamSegments);
        // Process all generated operations and wait for them to complete
        completionFutures = processOperations(operations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Get a list of all the operations, before recovery.
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Second DurableLog. We use this for recovery.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        List<Operation> recoveredOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        assertRecoveredOperationsMatch(originalOperations, recoveredOperations);
        performMetadataChecks(streamSegmentIds, new HashSet<>(), transactions, completionFutures, metadata, mergeTransactions, sealStreamSegments);
        performReadIndexChecks(completionFutures, readIndex);
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
Also used : MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) ReadIndex(io.pravega.segmentstore.server.ReadIndex) InMemoryCacheFactory(io.pravega.segmentstore.storage.mocks.InMemoryCacheFactory) InMemoryDurableDataLogFactory(io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) ProbeOperation(io.pravega.segmentstore.server.logs.operations.ProbeOperation) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) Cleanup(lombok.Cleanup) Storage(io.pravega.segmentstore.storage.Storage) CacheManager(io.pravega.segmentstore.server.reading.CacheManager) TestDurableDataLogFactory(io.pravega.segmentstore.server.TestDurableDataLogFactory) Test(org.junit.Test)
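
All three DurableLog examples lean on the same queue-then-wait idiom for pushing generated operations through the log. A short note on it, inferred from its usage here (processOperations and OperationWithCompletion are test utilities from DurableLogTests):

// processOperations(...) queues each Operation on the DurableLog and pairs it with the
// CompletableFuture the log returns for it.
List<OperationWithCompletion> completionFutures = processOperations(operations, durableLog);
// allOf(...) combines the per-operation futures into one; join() blocks until every queued
// operation has been processed, surfacing the first failure if any operation was rejected.
OperationWithCompletion.allOf(completionFutures).join();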

Aggregations

Operation (io.pravega.segmentstore.server.logs.operations.Operation): 51
StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation): 46
StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation): 41
ProbeOperation (io.pravega.segmentstore.server.logs.operations.ProbeOperation): 27
Test (org.junit.Test): 27
MetadataCheckpointOperation (io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation): 26
Cleanup (lombok.Cleanup): 24
StreamSegmentMapOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation): 23
MergeTransactionOperation (io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation): 22
StorageMetadataCheckpointOperation (io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation): 22
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException): 21
DataCorruptionException (io.pravega.segmentstore.server.DataCorruptionException): 19
CompletionException (java.util.concurrent.CompletionException): 19
UpdateableContainerMetadata (io.pravega.segmentstore.server.UpdateableContainerMetadata): 18
StreamSegmentSealOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation): 18
Storage (io.pravega.segmentstore.storage.Storage): 18
Duration (java.time.Duration): 18
AtomicReference (java.util.concurrent.atomic.AtomicReference): 18
StreamSegmentTruncateOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation): 17
Exceptions (io.pravega.common.Exceptions): 16