Example 1 with TestDurableDataLog

Use of io.pravega.segmentstore.server.TestDurableDataLog in project pravega, from the class DataFrameBuilderTests, method testAppendNoFailure:

private void testAppendNoFailure(int delayMillis) throws Exception {
    // Happy case: append a bunch of data, and make sure the frames that get output contain it.
    ArrayList<TestLogItem> records = DataFrameTestHelpers.generateLogItems(RECORD_COUNT / 2, SMALL_RECORD_MIN_SIZE, SMALL_RECORD_MAX_SIZE, 0);
    records.addAll(DataFrameTestHelpers.generateLogItems(RECORD_COUNT / 2, LARGE_RECORD_MIN_SIZE, LARGE_RECORD_MAX_SIZE, records.size()));
    try (TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, FRAME_SIZE, delayMillis, executorService())) {
        dataLog.initialize(TIMEOUT);
        val order = new HashMap<DataFrameBuilder.CommitArgs, Integer>();
        List<DataFrameBuilder.CommitArgs> commitFrames = Collections.synchronizedList(new ArrayList<>());
        BiConsumer<Throwable, DataFrameBuilder.CommitArgs> errorCallback = (ex, a) -> Assert.fail(String.format("Unexpected error occurred upon commit. %s", ex));
        val args = new DataFrameBuilder.Args(DataFrameTestHelpers.appendOrder(order), commitFrames::add, errorCallback, executorService());
        try (DataFrameBuilder<TestLogItem> b = new DataFrameBuilder<>(dataLog, SERIALIZER, args)) {
            for (TestLogItem item : records) {
                b.append(item);
            }
            b.close();
        }
        // Wait for all the frames' commit callbacks to be invoked. Even though the DataFrameBuilder waits (upon close)
        // for the OrderedItemProcessor to finish, there are other chained callbacks that still need to complete (such
        // as the one collecting frames in the list above).
        await(() -> commitFrames.size() >= order.size(), delayMillis);
        // It is quite likely that acks will arrive out of order. The DataFrameBuilder has no responsibility for
        // rearrangement; that should be done by its user.
        commitFrames.sort(Comparator.comparingInt(order::get));
        // Check the correctness of the commit callback.
        AssertExtensions.assertGreaterThan("Not enough Data Frames were generated.", 1, commitFrames.size());
        DataFrameBuilder.CommitArgs previousCommitArgs = null;
        for (val ca : commitFrames) {
            if (previousCommitArgs != null) {
                AssertExtensions.assertGreaterThanOrEqual("CommitArgs.getLastFullySerializedSequenceNumber() is not monotonically increasing.", previousCommitArgs.getLastFullySerializedSequenceNumber(), ca.getLastFullySerializedSequenceNumber());
                AssertExtensions.assertGreaterThanOrEqual("CommitArgs.getLastStartedSequenceNumber() is not monotonically increasing.", previousCommitArgs.getLastStartedSequenceNumber(), ca.getLastStartedSequenceNumber());
                AssertExtensions.assertGreaterThanOrEqual("CommitArgs.getLogAddress() is not monotonically increasing.", previousCommitArgs.getLogAddress().getSequence(), ca.getLogAddress().getSequence());
            }
            previousCommitArgs = ca;
        }
        // Read all entries in the Log and interpret them as DataFrames, then verify the records can be reconstructed.
        val frames = dataLog.getAllEntries(readItem -> DataFrame.read(readItem.getPayload(), readItem.getLength(), readItem.getAddress()));
        DataFrameTestHelpers.checkReadRecords(frames, records, r -> new ByteArraySegment(r.getFullSerialization()));
    }
}
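The await() call near the middle of this test is a polling helper defined elsewhere in the test suite and not shown in the snippet. A minimal sketch of such a condition poller, where the name, signature, and the use of the suite's TIMEOUT constant as the overall deadline are all assumptions made for illustration:

// Hypothetical sketch of the await() condition poller used above; the real helper
// lives elsewhere in the test suite, so this is an assumed shape, not its code.
private static void await(Supplier<Boolean> condition, int pollMillis) throws Exception {
    long deadlineNanos = System.nanoTime() + TIMEOUT.toNanos();
    while (!condition.get()) {
        if (System.nanoTime() >= deadlineNanos) {
            throw new TimeoutException("Condition was not satisfied within the allotted timeout.");
        }
        Thread.sleep(Math.max(1, pollMillis));
    }
}

Note that the test sorts commitFrames by the recorded append order before asserting monotonicity, because acks may arrive out of order and the DataFrameBuilder deliberately leaves reordering to its caller.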
Also used: io.pravega.common.Exceptions, io.pravega.common.ObjectClosedException, io.pravega.common.function.Callbacks, io.pravega.common.util.ByteArraySegment, io.pravega.segmentstore.server.TestDurableDataLog, io.pravega.test.common.AssertExtensions, io.pravega.test.common.ErrorInjector, io.pravega.test.common.IntentionalException, io.pravega.test.common.ThreadPooledTestSuite, java.io.IOException, java.time.Duration, java.util.ArrayList, java.util.Collections, java.util.Comparator, java.util.HashMap, java.util.HashSet, java.util.List, java.util.concurrent.TimeoutException, java.util.concurrent.atomic.AtomicInteger, java.util.concurrent.atomic.AtomicReference, java.util.function.BiConsumer, java.util.function.Predicate, java.util.function.Supplier, java.util.stream.Collectors, lombok.Cleanup, lombok.val, org.junit.Assert, org.junit.Rule, org.junit.Test, org.junit.rules.Timeout

Example 2 with TestDurableDataLog

Use of io.pravega.segmentstore.server.TestDurableDataLog in project pravega, from the class DataFrameReaderTests, method testReadsWithDeserializationFailure:

/**
 * Tests the case when the DataFrameReader reads from a log and encounters LogItem SerializationExceptions.
 */
@Test
public void testReadsWithDeserializationFailure() throws Exception {
    // Fail deserialization every X records (write-wise).
    int failDeserializationEvery = 11;
    ArrayList<TestLogItem> records = DataFrameTestHelpers.generateLogItems(100, SMALL_RECORD_MIN_SIZE, SMALL_RECORD_MAX_SIZE, 0);
    records.addAll(DataFrameTestHelpers.generateLogItems(100, LARGE_RECORD_MIN_SIZE, LARGE_RECORD_MAX_SIZE, records.size()));
    try (TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, FRAME_SIZE, executorService())) {
        dataLog.initialize(TIMEOUT);
        BiConsumer<Throwable, DataFrameBuilder.CommitArgs> errorCallback = (ex, a) -> Assert.fail(String.format("Unexpected error occurred upon commit. %s", ex));
        val args = new DataFrameBuilder.Args(Callbacks::doNothing, Callbacks::doNothing, errorCallback, executorService());
        try (DataFrameBuilder<TestLogItem> b = new DataFrameBuilder<>(dataLog, SERIALIZER, args)) {
            for (TestLogItem r : records) {
                b.append(r);
            }
        }
        ErrorInjector<SerializationException> errorInjector = new ErrorInjector<>(count -> count % failDeserializationEvery == 0, () -> new SerializationException("TestLogItem.deserialize intentional"));
        TestSerializer logItemFactory = new TestSerializer();
        logItemFactory.setDeserializationErrorInjector(errorInjector);
        testReadWithException(dataLog, logItemFactory, ex -> ex instanceof DataCorruptionException);
    }
}
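The injected failures follow a count-based schedule: with failDeserializationEvery = 11, the predicate count -> count % 11 == 0 throws on every 11th deserialization attempt, so most records still deserialize and the reader must detect the corruption mid-stream. A minimal sketch of this count-based injection pattern (an illustration only; the real io.pravega.test.common.ErrorInjector exposes more, such as the getLastCycleException() used in the next example):

// Sketch of a count-based error injector, assuming a simplified shape of the
// pattern that io.pravega.test.common.ErrorInjector implements.
class CountingErrorInjector<T extends Exception> {
    private final Predicate<Integer> shouldFail;
    private final Supplier<T> exceptionFactory;
    private final AtomicInteger count = new AtomicInteger();

    CountingErrorInjector(Predicate<Integer> shouldFail, Supplier<T> exceptionFactory) {
        this.shouldFail = shouldFail;
        this.exceptionFactory = exceptionFactory;
    }

    // Invoked once per intercepted attempt; throws on the attempts the predicate selects.
    void throwIfNecessary() throws T {
        if (this.shouldFail.test(this.count.incrementAndGet())) {
            throw this.exceptionFactory.get();
        }
    }
}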
Also used: io.pravega.common.Exceptions, io.pravega.common.ObjectClosedException, io.pravega.common.function.Callbacks, io.pravega.common.io.SerializationException, io.pravega.segmentstore.server.DataCorruptionException, io.pravega.segmentstore.server.TestDurableDataLog, io.pravega.segmentstore.storage.DataLogNotAvailableException, io.pravega.segmentstore.storage.DurableDataLog, io.pravega.test.common.AssertExtensions, io.pravega.test.common.ErrorInjector, io.pravega.test.common.ThreadPooledTestSuite, java.io.IOException, java.time.Duration, java.util.ArrayList, java.util.Collection, java.util.HashSet, java.util.List, java.util.function.BiConsumer, java.util.function.Predicate, lombok.val, org.junit.Assert, org.junit.Rule, org.junit.Test, org.junit.rules.Timeout

Example 3 with TestDurableDataLog

Use of io.pravega.segmentstore.server.TestDurableDataLog in project pravega, from the class DataFrameReaderTests, method testReadsWithDataLogFailure:

/**
 * Tests the case when the DataFrameReader reads from a log and encounters read failures:
 * 1. On the initial read.
 * 2. Somewhere in the middle of reading.
 */
@Test
public void testReadsWithDataLogFailure() throws Exception {
    // Fail reads synchronously every X attempts.
    int failReadSyncEvery = 3;
    ArrayList<TestLogItem> records = DataFrameTestHelpers.generateLogItems(100, SMALL_RECORD_MIN_SIZE, SMALL_RECORD_MAX_SIZE, 0);
    records.addAll(DataFrameTestHelpers.generateLogItems(100, LARGE_RECORD_MIN_SIZE, LARGE_RECORD_MAX_SIZE, records.size()));
    try (TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, FRAME_SIZE, executorService())) {
        dataLog.initialize(TIMEOUT);
        BiConsumer<Throwable, DataFrameBuilder.CommitArgs> errorCallback = (ex, a) -> Assert.fail(String.format("Unexpected error occurred upon commit. %s", ex));
        val args = new DataFrameBuilder.Args(Callbacks::doNothing, Callbacks::doNothing, errorCallback, executorService());
        try (DataFrameBuilder<TestLogItem> b = new DataFrameBuilder<>(dataLog, SERIALIZER, args)) {
            for (TestLogItem r : records) {
                b.append(r);
            }
        }
        TestSerializer logItemFactory = new TestSerializer();
        // Test 1: Initial call to getReader.
        // Fail every time.
        ErrorInjector<Exception> getReaderErrorInjector = new ErrorInjector<>(
                count -> true,
                () -> new DataLogNotAvailableException("intentional getReader exception"));
        dataLog.setReadErrorInjectors(getReaderErrorInjector, null);
        AssertExtensions.assertThrows("No exception or wrong type of exception thrown by getNext() with exception thrown by getReader().", () -> new DataFrameReader<>(dataLog, logItemFactory, CONTAINER_ID), ex -> Exceptions.unwrap(ex) == getReaderErrorInjector.getLastCycleException());
        // Test 2: Failures during getNext().
        ErrorInjector<Exception> readErrorInjector = new ErrorInjector<>(count -> count % failReadSyncEvery == 0, () -> new DataLogNotAvailableException("intentional getNext exception"));
        dataLog.setReadErrorInjectors(null, readErrorInjector);
        testReadWithException(dataLog, logItemFactory, ex -> ex == readErrorInjector.getLastCycleException());
    }
}
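As a worked example of the cadence in Test 2: with failReadSyncEvery = 3 and an injector that counts attempts (assumed here to be 1-based), attempts 3, 6, 9, ... fail while the intervening reads succeed, so the reader makes partial progress before each injected failure:

// Self-contained illustration of the failure cadence; the 1-based counting is an assumption.
import java.util.stream.IntStream;

public class FailureCadenceDemo {
    public static void main(String[] args) {
        int failReadSyncEvery = 3;
        IntStream.rangeClosed(1, 10).forEach(count -> System.out.println(
                "read attempt " + count + (count % failReadSyncEvery == 0 ? " -> injected failure" : " -> succeeds")));
    }
}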
Also used: io.pravega.common.Exceptions, io.pravega.common.ObjectClosedException, io.pravega.common.function.Callbacks, io.pravega.common.io.SerializationException, io.pravega.segmentstore.server.DataCorruptionException, io.pravega.segmentstore.server.TestDurableDataLog, io.pravega.segmentstore.storage.DataLogNotAvailableException, io.pravega.segmentstore.storage.DurableDataLog, io.pravega.test.common.AssertExtensions, io.pravega.test.common.ErrorInjector, io.pravega.test.common.ThreadPooledTestSuite, java.io.IOException, java.time.Duration, java.util.ArrayList, java.util.Collection, java.util.HashSet, java.util.List, java.util.function.BiConsumer, java.util.function.Predicate, lombok.val, org.junit.Assert, org.junit.Rule, org.junit.Test, org.junit.rules.Timeout

Example 4 with TestDurableDataLog

Use of io.pravega.segmentstore.server.TestDurableDataLog in project pravega, from the class DurableLogTests, method testRecoveryFailures:

/**
 * Tests the DurableLog recovery process in a scenario where failures occur during recovery
 * (these may or may not be DataCorruptionExceptions).
 */
@Test
public void testRecoveryFailures() throws Exception {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Fail DataLog reads after X reads.
    int failReadAfter = 2;
    // Setup a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    // First DurableLog. We use this for generating data.
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after starting the DurableLog because,
        // as part of recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        // Process all generated operations and wait for them to complete
        completionFutures = processOperations(operations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Recovery failure due to DataLog Failures.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // Inject an artificial error into DataLog reads after a few successful ones.
        ErrorInjector<Exception> readNextInjector = new ErrorInjector<>(count -> count > failReadAfter, () -> new DataLogNotAvailableException("intentional"));
        dataLog.get().setReadErrorInjectors(null, readNextInjector);
        // Verify the exception thrown from startAsync() is of the right kind. This exception will be wrapped in
        // multiple layers, so we need to dig deep into it.
        AssertExtensions.assertThrows("Recovery did not fail properly when expecting DurableDataLogException.", () -> durableLog.startAsync().awaitRunning(), ex -> {
            if (ex instanceof IllegalStateException) {
                ex = ex.getCause();
            }
            if (ex == null) {
                try {
                    // We need the service to enter a FAILED state before we can read its failure cause.
                    durableLog.awaitTerminated();
                } catch (Exception ex2) {
                    ex = durableLog.failureCause();
                }
            }
            ex = Exceptions.unwrap(ex);
            return ex instanceof DataLogNotAvailableException && ex.getMessage().equals("intentional");
        });
    }
    // Recovery failure due to DataCorruptionException.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // Reset error injectors to nothing.
        dataLog.get().setReadErrorInjectors(null, null);
        AtomicInteger readCounter = new AtomicInteger();
        dataLog.get().setReadInterceptor(readItem -> {
            if (readCounter.incrementAndGet() > failReadAfter && readItem.getLength() > DataFrame.MIN_ENTRY_LENGTH_NEEDED) {
                // Tamper with the payload: overwrite its contents with a DataFrame that has
                // a bogus previous sequence number.
                DataFrame df = DataFrame.ofSize(readItem.getLength());
                df.seal();
                ArrayView serialization = df.getData();
                return new InjectedReadItem(serialization.getReader(), serialization.getLength(), readItem.getAddress());
            }
            return readItem;
        });
        // Verify the exception thrown from startAsync() is of the right kind. This exception will be wrapped in
        // multiple layers, so we need to dig deep into it.
        AssertExtensions.assertThrows("Recovery did not fail properly when expecting DataCorruptionException.", () -> durableLog.startAsync().awaitRunning(), ex -> {
            if (ex instanceof IllegalStateException) {
                ex = ex.getCause();
            }
            return Exceptions.unwrap(ex) instanceof DataCorruptionException;
        });
        // Verify that the underlying DurableDataLog has been disabled.
        val disabledDataLog = dataLogFactory.createDurableDataLog(CONTAINER_ID);
        AssertExtensions.assertThrows("DurableDataLog has not been disabled following a recovery failure with DataCorruptionException.", () -> disabledDataLog.initialize(TIMEOUT), ex -> ex instanceof DataLogDisabledException);
    }
}
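The predicate passed to assertThrows has to dig through several wrappers: awaitRunning() surfaces the recovery failure as an IllegalStateException, whose cause chain eventually contains the injected exception. A hedged sketch of that digging, using a plain cause-chain walk in place of io.pravega.common.Exceptions.unwrap() (whose exact policy, e.g. around CompletionException, is not shown here):

// Sketch of the layered unwrapping the predicate above performs. The cause-chain
// walk is an assumed stand-in for Exceptions.unwrap(), not its actual implementation.
static Throwable unwrapRecoveryFailure(Throwable ex) {
    if (ex instanceof IllegalStateException && ex.getCause() != null) {
        // awaitRunning() wraps the service's failure cause in an IllegalStateException.
        ex = ex.getCause();
    }
    while (ex.getCause() != null && ex.getCause() != ex) {
        // Walk any remaining wrappers down to the root cause.
        ex = ex.getCause();
    }
    return ex;
}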
Also used: io.pravega.common.util.ArrayView, io.pravega.segmentstore.contracts.StreamSegmentException, io.pravega.segmentstore.contracts.StreamSegmentNotExistsException, io.pravega.segmentstore.contracts.StreamSegmentSealedException, io.pravega.segmentstore.server.ContainerOfflineException, io.pravega.segmentstore.server.DataCorruptionException, io.pravega.segmentstore.server.MetadataBuilder, io.pravega.segmentstore.server.ReadIndex, io.pravega.segmentstore.server.TestDurableDataLog, io.pravega.segmentstore.server.TestDurableDataLogFactory, io.pravega.segmentstore.server.UpdateableContainerMetadata, io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation, io.pravega.segmentstore.server.logs.operations.Operation, io.pravega.segmentstore.server.logs.operations.ProbeOperation, io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation, io.pravega.segmentstore.server.logs.operations.StorageOperation, io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation, io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation, io.pravega.segmentstore.server.reading.CacheManager, io.pravega.segmentstore.server.reading.ContainerReadIndex, io.pravega.segmentstore.storage.DataLogDisabledException, io.pravega.segmentstore.storage.DataLogNotAvailableException, io.pravega.segmentstore.storage.DataLogWriterNotPrimaryException, io.pravega.segmentstore.storage.DurableDataLogException, io.pravega.segmentstore.storage.Storage, io.pravega.segmentstore.storage.mocks.InMemoryCacheFactory, io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory, io.pravega.test.common.ErrorInjector, io.pravega.test.common.IntentionalException, java.io.IOException, java.util.concurrent.CompletionException, java.util.concurrent.TimeoutException, java.util.concurrent.atomic.AtomicInteger, java.util.concurrent.atomic.AtomicReference, lombok.Cleanup, lombok.val, org.junit.Test

Example 5 with TestDurableDataLog

Use of io.pravega.segmentstore.server.TestDurableDataLog in project pravega, from the class DurableLogTests, method testTruncateWithoutRecovery:

// endregion
// region Truncation
/**
 * Tests the truncate() method without doing any recovery.
 */
@Test
public void testTruncateWithoutRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Setup a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Hook up a listener to figure out when truncation actually happens.
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
        // Generate some test data (we need to do this after starting the DurableLog because,
        // as part of recovery, it wipes away all existing metadata).
        HashSet<Long> streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        // Add one of these at the end to ensure we can truncate everything.
        queuedOperations.add(new MetadataCheckpointOperation());
        List<OperationWithCompletion> completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Get a list of all the operations, before truncation.
        List<Operation> originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        boolean fullTruncationPossible = false;
        // At the end, verify all operations and all entries in the DataLog were truncated.
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            truncationOccurred.set(false);
            if (currentOperation instanceof MetadataCheckpointOperation) {
                // Need to figure out if the operation we're about to truncate to is actually the first in the log;
                // in that case, we should not be expecting any truncation.
                boolean isTruncationPointFirstOperation = durableLog.read(-1, 1, TIMEOUT).join().next() instanceof MetadataCheckpointOperation;
                // Perform the truncation.
                durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
                if (!isTruncationPointFirstOperation) {
                    Assert.assertTrue("No truncation occurred even though a valid Truncation Point was passed: " + currentOperation.getSequenceNumber(), truncationOccurred.get());
                }
                // Verify all operations up to, and including this one have been removed.
                Iterator<Operation> reader = durableLog.read(-1, 2, TIMEOUT).join();
                Assert.assertTrue("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.hasNext());
                verifyFirstItemIsMetadataCheckpoint(reader);
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.next();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                } else {
                    // Sometimes the Truncation Point is on the same DataFrame as other data, and it's the last DataFrame;
                    // In that case, it cannot be truncated, since truncating the frame would mean losing the Checkpoint as well.
                    fullTruncationPossible = !reader.hasNext();
                }
            } else {
                // Verify we are not allowed to truncate on non-valid Truncation Points.
                AssertExtensions.assertThrows("DurableLog allowed truncation on a non-MetadataCheckpointOperation.", () -> durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT), ex -> ex instanceof IllegalArgumentException);
                // Verify the Operation Log is still intact.
                Iterator<Operation> reader = durableLog.read(-1, 1, TIMEOUT).join();
                Assert.assertTrue("No elements left in the log even though no truncation occurred.", reader.hasNext());
                Operation firstOp = reader.next();
                AssertExtensions.assertLessThanOrEqual("It appears that Operations were removed from the Log even though no truncation happened.", currentOperation.getSequenceNumber(), firstOp.getSequenceNumber());
            }
        }
        // Verify that we can still queue operations to the DurableLog and they can be read.
        // In this case we'll just queue some StreamSegmentMapOperations.
        StreamSegmentMapOperation newOp = new StreamSegmentMapOperation(StreamSegmentInformation.builder().name("foo").build());
        if (!fullTruncationPossible) {
            // We were not able to do a full truncation before. Do one now, since we are guaranteed to have a new DataFrame available.
            MetadataCheckpointOperation lastCheckpoint = new MetadataCheckpointOperation();
            durableLog.add(lastCheckpoint, TIMEOUT).join();
            durableLog.truncate(lastCheckpoint.getSequenceNumber(), TIMEOUT).join();
        }
        durableLog.add(newOp, TIMEOUT).join();
        // Full Checkpoint + Storage Checkpoint (auto-added) + new op.
        final int expectedOperationCount = 3;
        List<Operation> newOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        Assert.assertEquals("Unexpected number of operations added after full truncation.", expectedOperationCount, newOperations.size());
        Assert.assertTrue("Expecting the first operation after full truncation to be a MetadataCheckpointOperation.", newOperations.get(0) instanceof MetadataCheckpointOperation);
        Assert.assertTrue("Expecting a StorageMetadataCheckpointOperation to be auto-added after full truncation.", newOperations.get(1) instanceof StorageMetadataCheckpointOperation);
        Assert.assertEquals("Unexpected Operation encountered after full truncation.", newOp, newOperations.get(2));
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
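Two conventions from this test worth restating: read(-1, n, TIMEOUT) starts at the head of the log, and after any truncation the head must still be a MetadataCheckpointOperation (the log is never left empty). A condensed, hedged restatement of that post-truncation invariant, built from the same calls the test itself uses:

// Restatement of the post-truncation invariant checked above, using only calls that
// appear in the test; the read(-1, ...) "read from head" semantics are as exercised there.
Iterator<Operation> head = durableLog.read(-1, 1, TIMEOUT).join();
Assert.assertTrue("Log must never be empty after truncation.", head.hasNext());
Assert.assertTrue("Head of log must be a MetadataCheckpointOperation.",
        head.next() instanceof MetadataCheckpointOperation);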
Also used: io.pravega.segmentstore.server.MetadataBuilder, io.pravega.segmentstore.server.ReadIndex, io.pravega.segmentstore.server.TestDurableDataLog, io.pravega.segmentstore.server.TestDurableDataLogFactory, io.pravega.segmentstore.server.UpdateableContainerMetadata, io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation, io.pravega.segmentstore.server.logs.operations.Operation, io.pravega.segmentstore.server.logs.operations.ProbeOperation, io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation, io.pravega.segmentstore.server.logs.operations.StorageOperation, io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation, io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation, io.pravega.segmentstore.server.reading.CacheManager, io.pravega.segmentstore.server.reading.ContainerReadIndex, io.pravega.segmentstore.storage.Storage, io.pravega.segmentstore.storage.mocks.InMemoryCacheFactory, io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory, java.util.concurrent.atomic.AtomicReference, lombok.Cleanup, org.junit.Test

Aggregations

TestDurableDataLog (io.pravega.segmentstore.server.TestDurableDataLog): 19 usages
Test (org.junit.Test): 19 usages
Cleanup (lombok.Cleanup): 15 usages
ErrorInjector (io.pravega.test.common.ErrorInjector): 12 usages
IOException (java.io.IOException): 12 usages
ObjectClosedException (io.pravega.common.ObjectClosedException): 11 usages
ProbeOperation (io.pravega.segmentstore.server.logs.operations.ProbeOperation): 11 usages
Operation (io.pravega.segmentstore.server.logs.operations.Operation): 10 usages
StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation): 10 usages
StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation): 10 usages
HashSet (java.util.HashSet): 10 usages
lombok.val (lombok.val): 10 usages
AssertExtensions (io.pravega.test.common.AssertExtensions): 9 usages
IntentionalException (io.pravega.test.common.IntentionalException): 9 usages
Duration (java.time.Duration): 9 usages
ArrayList (java.util.ArrayList): 9 usages
List (java.util.List): 9 usages
Predicate (java.util.function.Predicate): 9 usages
Assert (org.junit.Assert): 9 usages
Rule (org.junit.Rule): 9 usages