Example 21 with Storage

use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.

the class IdempotentStorageTestBase method testWrite.

/**
 * Tests the write() method.
 *
 * @throws Exception if an unexpected error occurred.
 */
@Override
@Test(timeout = 30000)
public void testWrite() throws Exception {
    String segmentName = "foo_write";
    int appendCount = 100;
    try (Storage s = createStorage()) {
        s.initialize(DEFAULT_EPOCH);
        s.create(segmentName, TIMEOUT).join();
        // Invalid handle.
        val readOnlyHandle = s.openRead(segmentName).join();
        assertThrows("write() did not throw for read-only handle.", () -> s.write(readOnlyHandle, 0, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT), ex -> ex instanceof IllegalArgumentException);
        assertThrows("write() did not throw for handle pointing to inexistent segment.", () -> s.write(createInexistentSegmentHandle(s, false), 0, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException);
        val writeHandle = s.openWrite(segmentName).join();
        long offset = 0;
        for (int j = 0; j < appendCount; j++) {
            byte[] writeData = String.format("Segment_%s_Append_%d", segmentName, j).getBytes();
            ByteArrayInputStream dataStream = new ByteArrayInputStream(writeData);
            s.write(writeHandle, offset, dataStream, writeData.length, TIMEOUT).join();
            offset += writeData.length;
        }
        // Check bad offset.
        final long finalOffset = offset;
        assertThrows("write() did not throw bad offset write (larger).", () -> s.write(writeHandle, finalOffset + 1, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT), ex -> ex instanceof BadOffsetException);
        // Check post-delete write.
        s.delete(writeHandle, TIMEOUT).join();
        assertThrows("write() did not throw for a deleted StreamSegment.", () -> s.write(writeHandle, 0, new ByteArrayInputStream(new byte[1]), 1, TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException);
    }
}
Also used : lombok.val(lombok.val) Storage(io.pravega.segmentstore.storage.Storage) ByteArrayInputStream(java.io.ByteArrayInputStream) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) Test(org.junit.Test)
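
For orientation, here is the same asynchronous Storage API reduced to a minimal write-then-read round trip. This is a sketch, not part of the test suite; it assumes the scaffolding above (createStorage(), DEFAULT_EPOCH, TIMEOUT), and the segment name and payload are illustrative.

try (Storage s = createStorage()) {
    s.initialize(DEFAULT_EPOCH);
    s.create("demo", TIMEOUT).join();
    val writeHandle = s.openWrite("demo").join();
    // Append at offset 0; write() requires the offset to match the current segment length.
    byte[] data = "hello".getBytes();
    s.write(writeHandle, 0, new ByteArrayInputStream(data), data.length, TIMEOUT).join();
    // Read back through a read-only handle and verify the round trip.
    byte[] readBack = new byte[data.length];
    int bytesRead = s.read(s.openRead("demo").join(), 0, readBack, 0, readBack.length, TIMEOUT).join();
    Assert.assertEquals(data.length, bytesRead);
    Assert.assertArrayEquals(data, readBack);
}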

Example 22 with Storage

use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.

the class FileSystemStorageTest method testWrite.

// region Write tests with metrics checks
/**
 * Tests the write() method.
 *
 * @throws Exception if an unexpected error occurred.
 */
@Override
@Test(timeout = 30000)
public void testWrite() throws Exception {
    String segmentName = "foo_write";
    int appendCount = 100;
    try (Storage s = createStorage()) {
        s.initialize(DEFAULT_EPOCH);
        s.create(segmentName, TIMEOUT).join();
        long expectedMetricsSize = FileSystemMetrics.WRITE_BYTES.get();
        long expectedMetricsSuccesses = FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents();
        // Invalid handle.
        val readOnlyHandle = s.openRead(segmentName).join();
        assertThrows("write() did not throw for read-only handle.", () -> s.write(readOnlyHandle, 0, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT), ex -> ex instanceof IllegalArgumentException);
        assertThrows("write() did not throw for handle pointing to inexistent segment.", () -> s.write(createInexistentSegmentHandle(s, false), 0, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException);
        Assert.assertEquals("WRITE_BYTES should not change in case of unsuccessful writes", expectedMetricsSize, FileSystemMetrics.WRITE_BYTES.get());
        Assert.assertEquals("WRITE_LATENCY should not increase the count of successful events in case of unsuccessful writes", expectedMetricsSuccesses, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());
        val writeHandle = s.openWrite(segmentName).join();
        long offset = 0;
        for (int j = 0; j < appendCount; j++) {
            byte[] writeData = String.format("Segment_%s_Append_%d", segmentName, j).getBytes();
            ByteArrayInputStream dataStream = new ByteArrayInputStream(writeData);
            s.write(writeHandle, offset, dataStream, writeData.length, TIMEOUT).join();
            expectedMetricsSize += writeData.length;
            expectedMetricsSuccesses += 1;
            Assert.assertEquals("WRITE_LATENCY should increase the count of successful events in case of successful writes", expectedMetricsSuccesses, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());
            Assert.assertEquals("WRITE_BYTES should increase by the size of successful writes", expectedMetricsSize, FileSystemMetrics.WRITE_BYTES.get());
            offset += writeData.length;
        }
        // Check bad offset.
        final long finalOffset = offset;
        assertThrows("write() did not throw bad offset write (larger).", () -> s.write(writeHandle, finalOffset + 1, new ByteArrayInputStream("h".getBytes()), 1, TIMEOUT), ex -> ex instanceof BadOffsetException);
        Assert.assertEquals("WRITE_BYTES should not change in case of unsuccessful writes", expectedMetricsSize, FileSystemMetrics.WRITE_BYTES.get());
        Assert.assertEquals("WRITE_LATENCY should not increase the count of successful events in case of unsuccessful writes", expectedMetricsSuccesses, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());
        // Check post-delete write.
        s.delete(writeHandle, TIMEOUT).join();
        assertThrows("write() did not throw for a deleted StreamSegment.", () -> s.write(writeHandle, 0, new ByteArrayInputStream(new byte[1]), 1, TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException);
        Assert.assertEquals("WRITE_BYTES should not change in case of unsuccessful writes", expectedMetricsSize, FileSystemMetrics.WRITE_BYTES.get());
        Assert.assertEquals("WRITE_LATENCY should not increase the count of successful events in case of unsuccessful writes", expectedMetricsSuccesses, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());
    }
}
Also used : lombok.val(lombok.val) Storage(io.pravega.segmentstore.storage.Storage) ByteArrayInputStream(java.io.ByteArrayInputStream) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) Test(org.junit.Test)
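
Every metrics assertion in this test follows one snapshot-act-compare pattern. Condensed below as a sketch, assuming s, writeHandle, offset and TIMEOUT as bound in the test body.

// Snapshot the counters before acting.
long bytesBefore = FileSystemMetrics.WRITE_BYTES.get();
long successesBefore = FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents();
byte[] payload = "x".getBytes();
s.write(writeHandle, offset, new ByteArrayInputStream(payload), payload.length, TIMEOUT).join();
// A successful write advances both counters by exactly one operation's worth;
// a failed write (as asserted above) must leave both unchanged.
Assert.assertEquals(bytesBefore + payload.length, FileSystemMetrics.WRITE_BYTES.get());
Assert.assertEquals(successesBefore + 1, FileSystemMetrics.WRITE_LATENCY.toOpStatsData().getNumSuccessfulEvents());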

Example 23 with Storage

use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.

the class RollingStorageTestBase method testSuccessiveConcats.

/**
 * Tests a scenario that would concatenate various segments successively into an initially empty segment while not
 * producing an excessive number of chunks. The initial concat will use header merge since the segment has no chunks,
 * but successive concats should unseal that last chunk and concat to it using the native method.
 * <p>
 * NOTE: this could be moved down into RollingStorageTests.java; however, keeping it here ensures that unseal() is
 * exercised by every class that derives from this base, which covers all of the Storage implementations.
 *
 * @throws Exception If one occurred.
 */
@Test
public void testSuccessiveConcats() throws Exception {
    final String segmentName = "Segment";
    final int writeLength = 21;
    final int concatCount = 10;
    @Cleanup val s = createStorage();
    s.initialize(1);
    // Create Target Segment with infinite rolling. Do not write anything to it yet.
    val writeHandle = s.create(segmentName, SegmentRollingPolicy.NO_ROLLING, TIMEOUT).thenCompose(v -> s.openWrite(segmentName)).join();
    final Random rnd = new Random(0);
    byte[] writeBuffer = new byte[writeLength];
    val writeStream = new ByteArrayOutputStream();
    for (int i = 0; i < concatCount; i++) {
        // Create a source segment, write a little bit to it, then seal & merge it.
        String sourceSegment = segmentName + "_Source_" + i;
        val sourceHandle = s.create(sourceSegment, TIMEOUT).thenCompose(v -> s.openWrite(sourceSegment)).join();
        rnd.nextBytes(writeBuffer);
        s.write(sourceHandle, 0, new ByteArrayInputStream(writeBuffer), writeBuffer.length, TIMEOUT).join();
        s.seal(sourceHandle, TIMEOUT).join();
        s.concat(writeHandle, writeStream.size(), sourceSegment, TIMEOUT).join();
        writeStream.write(writeBuffer);
    }
    // Write directly to the target segment - this ensures that writes themselves won't create a new chunk if the
    // write can still fit into the last chunk.
    rnd.nextBytes(writeBuffer);
    s.write(writeHandle, writeStream.size(), new ByteArrayInputStream(writeBuffer), writeBuffer.length, TIMEOUT).join();
    writeStream.write(writeBuffer);
    // Get a read handle, which will also fetch the number of chunks for us.
    val readHandle = (RollingSegmentHandle) s.openRead(segmentName).join();
    Assert.assertEquals("Unexpected number of chunks created.", 1, readHandle.chunks().size());
    val writtenData = writeStream.toByteArray();
    byte[] readBuffer = new byte[writtenData.length];
    int bytesRead = s.read(readHandle, 0, readBuffer, 0, readBuffer.length, TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes read.", readBuffer.length, bytesRead);
    Assert.assertArrayEquals("Unexpected data read back.", writtenData, readBuffer);
}
Also used : lombok.val(lombok.val) Storage(io.pravega.segmentstore.storage.Storage) ByteArrayInputStream(java.io.ByteArrayInputStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) StorageTestBase(io.pravega.segmentstore.storage.StorageTestBase) SegmentRollingPolicy(io.pravega.segmentstore.storage.SegmentRollingPolicy) SyncStorage(io.pravega.segmentstore.storage.SyncStorage) AsyncStorageWrapper(io.pravega.segmentstore.storage.AsyncStorageWrapper) Cleanup(lombok.Cleanup) Random(java.util.Random) Test(org.junit.Test) Assert(org.junit.Assert)
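
For contrast with SegmentRollingPolicy.NO_ROLLING above: a finite rolling policy caps each chunk, so the segment rolls into a new chunk once the cap is reached. A sketch under the same scaffolding; the 1 KB cap and segment name are illustrative, and the single-argument SegmentRollingPolicy constructor is assumed to take the maximum chunk length.

val policy = new SegmentRollingPolicy(1024); // assumed: roll over to a new chunk every 1024 bytes
val handle = s.create("RollingSegment", policy, TIMEOUT)
        .thenCompose(v -> s.openWrite("RollingSegment"))
        .join();
// Writing more than 1024 bytes through this handle should span multiple chunks,
// whereas the NO_ROLLING target in the test above stays at exactly one chunk.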

Example 24 with Storage

use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.

the class StreamSegmentContainerTests method testFutureReads.

/**
 * Tests the ability to perform future (tail) reads. Scenarios tested include:
 * * Regular appends
 * * Segment sealing
 * * Transaction merging.
 */
@Test
public void testFutureReads() throws Exception {
    final int nonSealReadLimit = 100;
    @Cleanup TestContext context = new TestContext();
    context.container.startAsync().awaitRunning();
    // 1. Create the StreamSegments.
    ArrayList<String> segmentNames = createSegments(context);
    HashMap<String, ArrayList<String>> transactionsBySegment = createTransactions(segmentNames, context);
    activateAllSegments(segmentNames, context);
    transactionsBySegment.values().forEach(s -> activateAllSegments(s, context));
    HashMap<String, ReadResult> readsBySegment = new HashMap<>();
    ArrayList<AsyncReadResultProcessor> readProcessors = new ArrayList<>();
    HashSet<String> segmentsToSeal = new HashSet<>();
    HashMap<String, ByteArrayOutputStream> readContents = new HashMap<>();
    HashMap<String, TestReadResultHandler> entryHandlers = new HashMap<>();
    // 2. Set up tail reads. The first half of the segments will read up to Integer.MAX_VALUE bytes; the rest will
    // read only nonSealReadLimit bytes (the read should stop upon reaching the limit).
    for (int i = 0; i < segmentNames.size(); i++) {
        String segmentName = segmentNames.get(i);
        ByteArrayOutputStream readContentsStream = new ByteArrayOutputStream();
        readContents.put(segmentName, readContentsStream);
        ReadResult readResult;
        if (i < segmentNames.size() / 2) {
            // We're going to seal this one at one point.
            segmentsToSeal.add(segmentName);
            readResult = context.container.read(segmentName, 0, Integer.MAX_VALUE, TIMEOUT).join();
        } else {
            // Just a regular one, nothing special.
            readResult = context.container.read(segmentName, 0, nonSealReadLimit, TIMEOUT).join();
        }
        // The Read callback is only accumulating data in this test; we will then compare it against the real data.
        TestReadResultHandler entryHandler = new TestReadResultHandler(readContentsStream, TIMEOUT);
        entryHandlers.put(segmentName, entryHandler);
        readsBySegment.put(segmentName, readResult);
        readProcessors.add(AsyncReadResultProcessor.process(readResult, entryHandler, executorService()));
    }
    // 3. Add some appends.
    HashMap<String, Long> lengths = new HashMap<>();
    HashMap<String, ByteArrayOutputStream> segmentContents = new HashMap<>();
    appendToParentsAndTransactions(segmentNames, transactionsBySegment, lengths, segmentContents, context);
    // 4. Merge all the Transactions.
    mergeTransactions(transactionsBySegment, lengths, segmentContents, context);
    // 5. Add more appends (to the parent segments)
    ArrayList<CompletableFuture<Void>> operationFutures = new ArrayList<>();
    for (int i = 0; i < 5; i++) {
        for (String segmentName : segmentNames) {
            byte[] appendData = getAppendData(segmentName, APPENDS_PER_SEGMENT + i);
            operationFutures.add(context.container.append(segmentName, appendData, null, TIMEOUT));
            lengths.put(segmentName, lengths.getOrDefault(segmentName, 0L) + appendData.length);
            recordAppend(segmentName, appendData, segmentContents);
        }
    }
    segmentsToSeal.forEach(segmentName -> operationFutures.add(Futures.toVoid(context.container.sealStreamSegment(segmentName, TIMEOUT))));
    Futures.allOf(operationFutures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Now wait for all the reads to complete, and verify their results against the expected output.
    Futures.allOf(entryHandlers.values().stream().map(h -> h.getCompleted()).collect(Collectors.toList())).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    readProcessors.forEach(AsyncReadResultProcessor::close);
    // Check to see if any errors got thrown (and caught) during the reading process.
    for (Map.Entry<String, TestReadResultHandler> e : entryHandlers.entrySet()) {
        Throwable err = e.getValue().getError().get();
        if (err != null) {
            // The next check (see below) will verify if the segments were properly read.
            if (!(err instanceof StreamSegmentSealedException && segmentsToSeal.contains(e.getKey()))) {
                Assert.fail("Unexpected error happened while processing Segment " + e.getKey() + ": " + e.getValue().getError().get());
            }
        }
    }
    // Check that all the ReadResults are closed
    for (Map.Entry<String, ReadResult> e : readsBySegment.entrySet()) {
        Assert.assertTrue("Read result is not closed for segment " + e.getKey(), e.getValue().isClosed());
    }
    // Compare, byte-by-byte, the outcome of the tail reads.
    Assert.assertEquals("Unexpected number of segments were read.", segmentContents.size(), readContents.size());
    for (String segmentName : segmentNames) {
        boolean isSealed = segmentsToSeal.contains(segmentName);
        byte[] expectedData = segmentContents.get(segmentName).toByteArray();
        byte[] actualData = readContents.get(segmentName).toByteArray();
        int expectedLength = isSealed ? (int) (long) lengths.get(segmentName) : nonSealReadLimit;
        Assert.assertEquals("Unexpected read length for segment " + segmentName, expectedLength, actualData.length);
        AssertExtensions.assertArrayEquals("Unexpected read contents for segment " + segmentName, expectedData, 0, actualData, 0, actualData.length);
    }
    // 6. Writer moving data to Storage.
    waitForSegmentsInStorage(segmentNames, context).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    checkStorage(segmentContents, lengths, context);
}
Also used : Arrays(java.util.Arrays) Storage(io.pravega.segmentstore.storage.Storage) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) Cleanup(lombok.Cleanup) StorageWriterFactory(io.pravega.segmentstore.server.writer.StorageWriterFactory) Future(java.util.concurrent.Future) ReadResultEntryContents(io.pravega.segmentstore.contracts.ReadResultEntryContents) InMemoryStorageFactory(io.pravega.segmentstore.storage.mocks.InMemoryStorageFactory) Duration(java.time.Duration) Map(java.util.Map) AsyncReadResultProcessor(io.pravega.segmentstore.server.reading.AsyncReadResultProcessor) ContainerReadIndexFactory(io.pravega.segmentstore.server.reading.ContainerReadIndexFactory) InMemoryDurableDataLogFactory(io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory) StreamSegmentStore(io.pravega.segmentstore.contracts.StreamSegmentStore) DurableLogFactory(io.pravega.segmentstore.server.logs.DurableLogFactory) Attributes(io.pravega.segmentstore.contracts.Attributes) DurableLogConfig(io.pravega.segmentstore.server.logs.DurableLogConfig) Writer(io.pravega.segmentstore.server.Writer) SegmentContainerFactory(io.pravega.segmentstore.server.SegmentContainerFactory) ThreadPooledTestSuite(io.pravega.test.common.ThreadPooledTestSuite) SyncStorage(io.pravega.segmentstore.storage.SyncStorage) Futures(io.pravega.common.concurrent.Futures) ByteArrayOutputStream(java.io.ByteArrayOutputStream) TooManyActiveSegmentsException(io.pravega.segmentstore.contracts.TooManyActiveSegmentsException) Exceptions(io.pravega.common.Exceptions) StorageFactory(io.pravega.segmentstore.storage.StorageFactory) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) Runnables(com.google.common.util.concurrent.Runnables) ReadIndexConfig(io.pravega.segmentstore.server.reading.ReadIndexConfig) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) Timeout(org.junit.rules.Timeout) ConfigurationException(io.pravega.common.util.ConfigurationException) StreamHelpers(io.pravega.common.io.StreamHelpers) WriterFactory(io.pravega.segmentstore.server.WriterFactory) Properties(java.util.Properties) DurableDataLog(io.pravega.segmentstore.storage.DurableDataLog) Executor(java.util.concurrent.Executor) lombok.val(lombok.val) OperationLog(io.pravega.segmentstore.server.OperationLog) Test(org.junit.Test) Service(com.google.common.util.concurrent.Service) AtomicLong(java.util.concurrent.atomic.AtomicLong) OperationLogFactory(io.pravega.segmentstore.server.OperationLogFactory) SegmentContainer(io.pravega.segmentstore.server.SegmentContainer) Assert(org.junit.Assert) WriterConfig(io.pravega.segmentstore.server.writer.WriterConfig) SneakyThrows(lombok.SneakyThrows) AssertExtensions(io.pravega.test.common.AssertExtensions) RequiredArgsConstructor(lombok.RequiredArgsConstructor) TimeoutException(java.util.concurrent.TimeoutException) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) ReadIndexFactory(io.pravega.segmentstore.server.ReadIndexFactory) AttributeUpdate(io.pravega.segmentstore.contracts.AttributeUpdate) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) SegmentHandle(io.pravega.segmentstore.storage.SegmentHandle) AbstractService(com.google.common.util.concurrent.AbstractService) CacheFactory(io.pravega.segmentstore.storage.CacheFactory) ServiceListeners(io.pravega.segmentstore.server.ServiceListeners) ContainerOfflineException(io.pravega.segmentstore.server.ContainerOfflineException) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ReadResultEntryType(io.pravega.segmentstore.contracts.ReadResultEntryType) UUID(java.util.UUID) DataLogWriterNotPrimaryException(io.pravega.segmentstore.storage.DataLogWriterNotPrimaryException) Collectors(java.util.stream.Collectors) SegmentMetadataComparer(io.pravega.segmentstore.server.SegmentMetadataComparer) StreamSegmentNameUtils(io.pravega.shared.segment.StreamSegmentNameUtils) List(java.util.List) InMemoryCacheFactory(io.pravega.segmentstore.storage.mocks.InMemoryCacheFactory) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) DurableDataLogFactory(io.pravega.segmentstore.storage.DurableDataLogFactory) ReadResult(io.pravega.segmentstore.contracts.ReadResult) Setter(lombok.Setter) Getter(lombok.Getter) ConfigHelpers(io.pravega.segmentstore.server.ConfigHelpers) AsyncStorageWrapper(io.pravega.segmentstore.storage.AsyncStorageWrapper) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) HashSet(java.util.HashSet) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) ReadResultEntry(io.pravega.segmentstore.contracts.ReadResultEntry) ExecutorService(java.util.concurrent.ExecutorService) ExecutorServiceHelpers.newScheduledThreadPool(io.pravega.common.concurrent.ExecutorServiceHelpers.newScheduledThreadPool) TimeoutTimer(io.pravega.common.TimeoutTimer) RollingStorage(io.pravega.segmentstore.storage.rolling.RollingStorage) IntentionalException(io.pravega.test.common.IntentionalException) StreamSegmentMergedException(io.pravega.segmentstore.contracts.StreamSegmentMergedException) TestReadResultHandler(io.pravega.segmentstore.server.reading.TestReadResultHandler) TestDurableDataLogFactory(io.pravega.segmentstore.server.TestDurableDataLogFactory) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) Rule(org.junit.Rule) TypedProperties(io.pravega.common.util.TypedProperties) AttributeUpdateType(io.pravega.segmentstore.contracts.AttributeUpdateType) ReadIndex(io.pravega.segmentstore.server.ReadIndex) Collections(java.util.Collections)
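
Steps 5 and 6 rely on a single gather-then-await idiom: collect the per-segment futures, combine them with Futures.allOf, and bound the wait with the shared TIMEOUT. Reduced to its skeleton below; segmentNames, context and getAppendData() are the test's own fixtures.

ArrayList<CompletableFuture<Void>> ops = new ArrayList<>();
for (String segmentName : segmentNames) {
    byte[] appendData = getAppendData(segmentName, 0);
    ops.add(context.container.append(segmentName, appendData, null, TIMEOUT));
}
// get() throws TimeoutException if any append is still pending after TIMEOUT,
// failing the test instead of hanging it.
Futures.allOf(ops).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);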

Example 25 with Storage

use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.

the class DurableLogTests method testTruncateWithRecovery.

/**
 * Tests the truncate() method while performing recovery.
 */
@Test
public void testTruncateWithRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Setup a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    @Cleanup ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
    HashSet<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    List<Operation> originalOperations;
    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Get a list of all the operations, before any truncation.
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Truncate up to each MetadataCheckpointOperation and:
    // * If the DataLog was truncated:
    // ** Shut down DurableLog, re-start it (recovery) and verify the remaining operations are as expected.
    // At the end, verify all operations and all entries in the DataLog were truncated.
    DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
    try {
        durableLog.startAsync().awaitRunning();
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            if (!(currentOperation instanceof MetadataCheckpointOperation)) {
                // We can only truncate on MetadataCheckpointOperations.
                continue;
            }
            truncationOccurred.set(false);
            durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
            if (truncationOccurred.get()) {
                // Close current DurableLog and start a brand new one, forcing recovery.
                durableLog.close();
                durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
                durableLog.startAsync().awaitRunning();
                dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
                // Verify all operations up to, and including this one have been removed.
                Iterator<Operation> reader = durableLog.read(-1, 2, TIMEOUT).join();
                Assert.assertTrue("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.hasNext());
                verifyFirstItemIsMetadataCheckpoint(reader);
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.next();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                }
            }
        }
    } finally {
        // This closes whatever current instance this variable refers to, not necessarily the first one.
        durableLog.close();
    }
}
Also used : TestDurableDataLog(io.pravega.segmentstore.server.TestDurableDataLog) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) ProbeOperation(io.pravega.segmentstore.server.logs.operations.ProbeOperation) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Cleanup(lombok.Cleanup) CacheManager(io.pravega.segmentstore.server.reading.CacheManager) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) ReadIndex(io.pravega.segmentstore.server.ReadIndex) InMemoryCacheFactory(io.pravega.segmentstore.storage.mocks.InMemoryCacheFactory) AtomicReference(java.util.concurrent.atomic.AtomicReference) InMemoryDurableDataLogFactory(io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory) Storage(io.pravega.segmentstore.storage.Storage) TestDurableDataLogFactory(io.pravega.segmentstore.server.TestDurableDataLogFactory) Test(org.junit.Test)
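
The recovery idiom that drives the loop above, reduced to its skeleton: closing a DurableLog and constructing a new one over the same metadata, DataLog factory and ReadIndex forces a full recovery pass on startup. Arguments exactly as in the test; this is a distillation, not additional test code.

durableLog.close();
durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
durableLog.startAsync().awaitRunning();
// After recovery the log must still begin with a MetadataCheckpointOperation;
// truncation always leaves (or creates) a leading checkpoint.
Iterator<Operation> reader = durableLog.read(-1, 2, TIMEOUT).join();
Assert.assertTrue("Log should never be empty after truncation.", reader.hasNext());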

Aggregations

Storage (io.pravega.segmentstore.storage.Storage)32 Test (org.junit.Test)22 lombok.val (lombok.val)18 StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException)15 Operation (io.pravega.segmentstore.server.logs.operations.Operation)15 Duration (java.time.Duration)15 Cleanup (lombok.Cleanup)15 ByteArrayInputStream (java.io.ByteArrayInputStream)14 Futures (io.pravega.common.concurrent.Futures)13 DataCorruptionException (io.pravega.segmentstore.server.DataCorruptionException)13 SegmentMetadata (io.pravega.segmentstore.server.SegmentMetadata)13 StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation)13 CompletableFuture (java.util.concurrent.CompletableFuture)13 StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation)12 Exceptions (io.pravega.common.Exceptions)11 UpdateableSegmentMetadata (io.pravega.segmentstore.server.UpdateableSegmentMetadata)11 CompletionException (java.util.concurrent.CompletionException)11 AtomicLong (java.util.concurrent.atomic.AtomicLong)11 InputStream (java.io.InputStream)10 UpdateableContainerMetadata (io.pravega.segmentstore.server.UpdateableContainerMetadata)9