
Example 6 with ReadIndex

Use of io.pravega.segmentstore.server.ReadIndex in project pravega.

From class MemoryStateUpdaterTests, method testProcess:

/**
 * Tests the functionality of the process() method.
 */
@Test
public void testProcess() throws Exception {
    int segmentCount = 10;
    int operationCountPerType = 5;
    // Add to MTL + Add to ReadIndex (append; beginMerge).
    SequencedItemList<Operation> opLog = new SequencedItemList<>();
    ArrayList<TestReadIndex.MethodInvocation> methodInvocations = new ArrayList<>();
    TestReadIndex readIndex = new TestReadIndex(methodInvocations::add);
    AtomicInteger flushCallbackCallCount = new AtomicInteger();
    MemoryStateUpdater updater = new MemoryStateUpdater(opLog, readIndex, flushCallbackCallCount::incrementAndGet);
    ArrayList<Operation> operations = populate(updater, segmentCount, operationCountPerType);
    // Verify they were properly processed.
    int triggerFutureCount = (int) methodInvocations.stream().filter(mi -> mi.methodName.equals(TestReadIndex.TRIGGER_FUTURE_READS)).count();
    int addCount = methodInvocations.size() - triggerFutureCount;
    Assert.assertEquals("Unexpected number of items added to ReadIndex.", operations.size() - segmentCount * operationCountPerType, addCount);
    Assert.assertEquals("Unexpected number of calls to the ReadIndex triggerFutureReads method.", 1, triggerFutureCount);
    Assert.assertEquals("Unexpected number of calls to the flushCallback provided in the constructor.", 1, flushCallbackCallCount.get());
    // Verify add calls.
    Iterator<Operation> logIterator = opLog.read(-1, operations.size());
    int currentIndex = -1;
    int currentReadIndex = -1;
    while (logIterator.hasNext()) {
        currentIndex++;
        Operation expected = operations.get(currentIndex);
        Operation actual = logIterator.next();
        if (expected instanceof StorageOperation) {
            currentReadIndex++;
            TestReadIndex.MethodInvocation invokedMethod = methodInvocations.get(currentReadIndex);
            if (expected instanceof StreamSegmentAppendOperation) {
                Assert.assertTrue("StreamSegmentAppendOperation was not added as a CachedStreamSegmentAppendOperation to the Memory Log.", actual instanceof CachedStreamSegmentAppendOperation);
                StreamSegmentAppendOperation appendOp = (StreamSegmentAppendOperation) expected;
                Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was not added to the ReadIndex.", TestReadIndex.APPEND, invokedMethod.methodName);
                Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", appendOp.getStreamSegmentId(), invokedMethod.args.get("streamSegmentId"));
                Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", appendOp.getStreamSegmentOffset(), invokedMethod.args.get("offset"));
                Assert.assertEquals("Append with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", appendOp.getData(), invokedMethod.args.get("data"));
            } else if (expected instanceof MergeTransactionOperation) {
                MergeTransactionOperation mergeOp = (MergeTransactionOperation) expected;
                Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was not added to the ReadIndex.", TestReadIndex.BEGIN_MERGE, invokedMethod.methodName);
                Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", mergeOp.getStreamSegmentId(), invokedMethod.args.get("targetStreamSegmentId"));
                Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", mergeOp.getStreamSegmentOffset(), invokedMethod.args.get("offset"));
                Assert.assertEquals("Merge with SeqNo " + expected.getSequenceNumber() + " was added to the ReadIndex with wrong arguments.", mergeOp.getTransactionSegmentId(), invokedMethod.args.get("sourceStreamSegmentId"));
            }
        }
    }
    // Verify triggerFutureReads args.
    @SuppressWarnings("unchecked")
    Collection<Long> triggerSegmentIds = (Collection<Long>) methodInvocations.stream()
            .filter(mi -> mi.methodName.equals(TestReadIndex.TRIGGER_FUTURE_READS))
            .findFirst().get().args.get("streamSegmentIds");
    val expectedSegmentIds = operations.stream()
            .filter(op -> op instanceof SegmentOperation)
            .map(op -> ((SegmentOperation) op).getStreamSegmentId())
            .collect(Collectors.toSet());
    AssertExtensions.assertContainsSameElements("ReadIndex.triggerFutureReads() was called with the wrong set of StreamSegmentIds.", expectedSegmentIds, triggerSegmentIds);
    // Test DataCorruptionException: this operation has no SequenceNumber set, so it should trigger a DCE.
    AssertExtensions.assertThrows("MemoryStateUpdater accepted an operation that was out of order.",
            () -> updater.process(new MergeTransactionOperation(1, 2)),
            ex -> ex instanceof DataCorruptionException);
}
Also used : MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) StreamSegmentInformation(io.pravega.segmentstore.contracts.StreamSegmentInformation) AssertExtensions(io.pravega.test.common.AssertExtensions) Exceptions(io.pravega.common.Exceptions) ContainerMetadata(io.pravega.segmentstore.server.ContainerMetadata) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) SequencedItemList(io.pravega.common.util.SequencedItemList) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Runnables(com.google.common.util.concurrent.Runnables) Duration(java.time.Duration) Timeout(org.junit.rules.Timeout) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) Iterator(java.util.Iterator) Collection(java.util.Collection) lombok.val(lombok.val) SegmentOperation(io.pravega.segmentstore.server.logs.operations.SegmentOperation) Test(org.junit.Test) Collectors(java.util.stream.Collectors) Consumer(java.util.function.Consumer) AbstractMap(java.util.AbstractMap) Rule(org.junit.Rule) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) ThreadPooledTestSuite(io.pravega.test.common.ThreadPooledTestSuite) ReadIndex(io.pravega.segmentstore.server.ReadIndex) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) Assert(org.junit.Assert) MergeTransactionOperation(io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation) ReadResult(io.pravega.segmentstore.contracts.ReadResult) InputStream(java.io.InputStream)
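
The test above depends on TestReadIndex, a recording stub declared elsewhere in MemoryStateUpdaterTests. A minimal sketch of the recording pattern it implements follows; only the three method-name constants and the methodName/args fields are taken from the test itself, while everything else (class name, constructor shape, the append signature, the sink wiring) is assumed for illustration.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

// Sketch of a recording stub in the style of TestReadIndex: every intercepted
// call is captured as a MethodInvocation and handed to a sink, which the test
// wires to methodInvocations::add.
class RecordingReadIndexSketch {
    static final String APPEND = "append";
    static final String BEGIN_MERGE = "beginMerge";
    static final String TRIGGER_FUTURE_READS = "triggerFutureReads";

    private final Consumer<MethodInvocation> invocationSink;

    RecordingReadIndexSketch(Consumer<MethodInvocation> invocationSink) {
        this.invocationSink = invocationSink;
    }

    // The real stub implements io.pravega.segmentstore.server.ReadIndex; this
    // sketch shows the recording pattern for a single method only.
    void append(long streamSegmentId, long offset, byte[] data) {
        MethodInvocation invocation = new MethodInvocation(APPEND);
        invocation.args.put("streamSegmentId", streamSegmentId);
        invocation.args.put("offset", offset);
        invocation.args.put("data", data);
        this.invocationSink.accept(invocation);
    }

    static class MethodInvocation {
        final String methodName;
        final Map<String, Object> args = new HashMap<>();

        MethodInvocation(String methodName) {
            this.methodName = methodName;
        }
    }
}

The value of this pattern is that assertions can run against the exact arguments MemoryStateUpdater passed to the ReadIndex, as the argument-by-argument checks in the loop above demonstrate.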

Example 7 with ReadIndex

Use of io.pravega.segmentstore.server.ReadIndex in project pravega.

From class DurableLogTests, method testTruncateWithRecovery:

/**
 * Tests the truncate() method while performing recovery.
 */
@Test
public void testTruncateWithRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Setup a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    List<Operation> originalOperations;
    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Get a list of all the operations, before any truncation.
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Truncate up to each MetadataCheckpointOperation and, if the DataLog was actually truncated,
    // shut down the DurableLog, restart it (forcing recovery) and verify the remaining operations are as expected.
    // At the end, verify all operations and all entries in the DataLog were truncated.
    DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
    try {
        durableLog.startAsync().awaitRunning();
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            if (!(currentOperation instanceof MetadataCheckpointOperation)) {
                // We can only truncate on MetadataCheckpointOperations.
                continue;
            }
            truncationOccurred.set(false);
            durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
            if (truncationOccurred.get()) {
                // Close current DurableLog and start a brand new one, forcing recovery.
                durableLog.close();
                durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
                durableLog.startAsync().awaitRunning();
                dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
                // Verify all operations up to, and including this one have been removed.
                Iterator<Operation> reader = durableLog.read(-1, 2, TIMEOUT).join();
                Assert.assertTrue("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.hasNext());
                verifyFirstItemIsMetadataCheckpoint(reader);
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.next();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                }
            }
        }
    } finally {
        // This closes whatever current instance this variable refers to, not necessarily the first one.
        durableLog.close();
    }
}
Also used : TestDurableDataLog(io.pravega.segmentstore.server.TestDurableDataLog) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) ProbeOperation(io.pravega.segmentstore.server.logs.operations.ProbeOperation) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Cleanup(lombok.Cleanup) CacheManager(io.pravega.segmentstore.server.reading.CacheManager) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) ReadIndex(io.pravega.segmentstore.server.ReadIndex) InMemoryCacheFactory(io.pravega.segmentstore.storage.mocks.InMemoryCacheFactory) AtomicReference(java.util.concurrent.atomic.AtomicReference) InMemoryDurableDataLogFactory(io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory) Storage(io.pravega.segmentstore.storage.Storage) TestDurableDataLogFactory(io.pravega.segmentstore.server.TestDurableDataLogFactory) Test(org.junit.Test)
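
Two helpers used above, readUpToSequenceNumber and verifyFirstItemIsMetadataCheckpoint, are defined elsewhere in DurableLogTests. A plausible sketch of the latter follows, assuming only what the call site shows: it consumes the first item from the iterator and asserts it is a MetadataCheckpointOperation (the assertion messages are invented).

// Hypothetical sketch of the verifyFirstItemIsMetadataCheckpoint helper; the
// real implementation lives elsewhere in DurableLogTests. It consumes the first
// item so that the caller's next read returns the operation after the checkpoint.
private void verifyFirstItemIsMetadataCheckpoint(Iterator<Operation> logIterator) {
    Assert.assertTrue("Expected a non-empty log.", logIterator.hasNext());
    Operation firstOp = logIterator.next();
    Assert.assertTrue("First operation was not a MetadataCheckpointOperation: " + firstOp,
            firstOp instanceof MetadataCheckpointOperation);
}

Consuming the checkpoint here is what allows the caller to compare reader.next() directly against originalOperations.get(i + 1) in the loop above.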

Example 8 with ReadIndex

Use of io.pravega.segmentstore.server.ReadIndex in project pravega.

From class DurableLogTests, method testRecoveryWithMetadataCleanup:

/**
 * Tests the following recovery scenario:
 * 1. A Segment is created and recorded in the metadata, with some optional operations executed on it.
 * 2. The segment is evicted from the metadata.
 * 3. The segment is reactivated (with a new metadata mapping) - possibly due to an append. No truncation since #2.
 * 4. Recovery.
 */
@Test
public void testRecoveryWithMetadataCleanup() throws Exception {
    final long truncatedSeqNo = Integer.MAX_VALUE;
    // Setup a DurableLog and start it.
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    long segmentId;
    // First DurableLog. We use this for generating data.
    val metadata1 = (StreamSegmentContainerMetadata) new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    SegmentProperties originalSegmentInfo;
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Create the segment.
        val segmentIds = createStreamSegmentsWithOperations(1, metadata1, durableLog, storage);
        segmentId = segmentIds.stream().findFirst().orElse(-1L);
        // Evict the segment.
        val sm1 = metadata1.getStreamSegmentMetadata(segmentId);
        originalSegmentInfo = sm1.getSnapshot();
        // Simulate a truncation. This is needed in order to trigger a cleanup.
        metadata1.removeTruncationMarkers(truncatedSeqNo);
        val cleanedUpSegments = metadata1.cleanup(Collections.singleton(sm1), truncatedSeqNo);
        Assert.assertEquals("Unexpected number of segments evicted.", 1, cleanedUpSegments.size());
        // Map the segment again.
        val reMapOp = new StreamSegmentMapOperation(originalSegmentInfo);
        reMapOp.setStreamSegmentId(segmentId);
        durableLog.add(reMapOp, TIMEOUT).join();
        // Stop.
        durableLog.stopAsync().awaitTerminated();
    }
    // Recovery #1. This should work well.
    val metadata2 = (StreamSegmentContainerMetadata) new MetadataBuilder(CONTAINER_ID).build();
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Get segment info from the recovered metadata.
        val recoveredSegmentInfo = metadata2.getStreamSegmentMetadata(segmentId).getSnapshot();
        Assert.assertEquals("Unexpected length from recovered segment.", originalSegmentInfo.getLength(), recoveredSegmentInfo.getLength());
        // Now evict the segment again ...
        val sm = metadata2.getStreamSegmentMetadata(segmentId);
        // Simulate a truncation. This is needed in order to trigger a cleanup.
        metadata2.removeTruncationMarkers(truncatedSeqNo);
        val cleanedUpSegments = metadata2.cleanup(Collections.singleton(sm), truncatedSeqNo);
        Assert.assertEquals("Unexpected number of segments evicted.", 1, cleanedUpSegments.size());
        // ... and re-map it with a new Id. This is a perfectly valid operation, and we can't prevent it.
        durableLog.add(new StreamSegmentMapOperation(originalSegmentInfo), TIMEOUT).join();
        // Stop.
        durableLog.stopAsync().awaitTerminated();
    }
    // Recovery #2. This should fail due to the same segment mapped multiple times with different ids.
    val metadata3 = (StreamSegmentContainerMetadata) new MetadataBuilder(CONTAINER_ID).build();
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata3, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata3, dataLogFactory, readIndex, executorService())) {
        AssertExtensions.assertThrows("Recovery did not fail with the expected exception in case of multi-mapping", () -> durableLog.startAsync().awaitRunning(), ex -> ex instanceof IllegalStateException && ex.getCause() instanceof DataCorruptionException && ex.getCause().getCause() instanceof MetadataUpdateException);
    }
}
Also used : lombok.val(lombok.val) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) ReadIndex(io.pravega.segmentstore.server.ReadIndex) InMemoryCacheFactory(io.pravega.segmentstore.storage.mocks.InMemoryCacheFactory) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) InMemoryDurableDataLogFactory(io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory) Cleanup(lombok.Cleanup) StreamSegmentContainerMetadata(io.pravega.segmentstore.server.containers.StreamSegmentContainerMetadata) Storage(io.pravega.segmentstore.storage.Storage) CacheManager(io.pravega.segmentstore.server.reading.CacheManager) TestDurableDataLogFactory(io.pravega.segmentstore.server.TestDurableDataLogFactory) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) Test(org.junit.Test)
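
Both DurableLogTests cases on this page rely on the createStreamSegmentsWithOperations helper, which is not reproduced here. A minimal sketch of what such a helper could look like follows, under the assumption that it creates each segment in Storage and maps it into the container through a StreamSegmentMapOperation; the segment naming and the handling of the metadata parameter are illustrative, not the project's actual implementation.

// Hypothetical sketch of a createStreamSegmentsWithOperations-style helper.
// Assumption: each segment is created in Storage, then mapped into the container
// by adding a StreamSegmentMapOperation to the DurableLog; the operation carries
// the assigned segment id once the add completes.
private HashSet<Long> createStreamSegmentsWithOperations(int count, UpdateableContainerMetadata metadata,
                                                         DurableLog durableLog, Storage storage) {
    // metadata is accepted to mirror the call sites above; the real helper may
    // consult or update it directly.
    HashSet<Long> segmentIds = new HashSet<>();
    for (int i = 0; i < count; i++) {
        String name = "Segment_" + i;
        // Create the segment in Storage and obtain its properties.
        SegmentProperties properties = storage.create(name, TIMEOUT).join();
        // Map it into the container metadata via the DurableLog.
        StreamSegmentMapOperation mapOp = new StreamSegmentMapOperation(properties);
        durableLog.add(mapOp, TIMEOUT).join();
        segmentIds.add(mapOp.getStreamSegmentId());
    }
    return segmentIds;
}

Reading the assigned id back from the completed map operation mirrors how testRecoveryWithMetadataCleanup obtains segmentId from the returned set before evicting and re-mapping the segment.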

Aggregations

ReadIndex (io.pravega.segmentstore.server.ReadIndex): 8
Test (org.junit.Test): 8
MetadataBuilder (io.pravega.segmentstore.server.MetadataBuilder): 7
TestDurableDataLogFactory (io.pravega.segmentstore.server.TestDurableDataLogFactory): 7
UpdateableContainerMetadata (io.pravega.segmentstore.server.UpdateableContainerMetadata): 7
StreamSegmentMapOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation): 7
Storage (io.pravega.segmentstore.storage.Storage): 7
InMemoryCacheFactory (io.pravega.segmentstore.storage.mocks.InMemoryCacheFactory): 7
InMemoryDurableDataLogFactory (io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory): 7
Cleanup (lombok.Cleanup): 7
Operation (io.pravega.segmentstore.server.logs.operations.Operation): 6
StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation): 6
StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation): 6
CacheManager (io.pravega.segmentstore.server.reading.CacheManager): 6
ContainerReadIndex (io.pravega.segmentstore.server.reading.ContainerReadIndex): 6
MetadataCheckpointOperation (io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation): 5
ProbeOperation (io.pravega.segmentstore.server.logs.operations.ProbeOperation): 5
StorageMetadataCheckpointOperation (io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation): 5
AtomicReference (java.util.concurrent.atomic.AtomicReference): 5
TestDurableDataLog (io.pravega.segmentstore.server.TestDurableDataLog): 4