
Example 11 with Storage

Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.

From class DurableLogTests, method testRecoveryWithDisabledDataLog.

/**
 * Verifies the ability of the DurableLog to recover (delayed start) using a disabled DurableDataLog. This verifies
 * the ability to shut down correctly while still waiting for the DataLog to become enabled, as well as the ability
 * to detect that it became enabled and then resume normal operations.
 */
@Test
public void testRecoveryWithDisabledDataLog() throws Exception {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    @Cleanup InMemoryCacheFactory cacheFactory = new InMemoryCacheFactory();
    @Cleanup CacheManager cacheManager = new CacheManager(DEFAULT_READ_INDEX_CONFIG.getCachePolicy(), executorService());
    // Write some data to the log. We'll read it later.
    HashSet<Long> streamSegmentIds;
    List<Operation> originalOperations;
    List<OperationWithCompletion> completionFutures;
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // DurableLog should start properly.
        durableLog.startAsync().awaitRunning();
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, metadata, durableLog, storage);
        List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(operations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
    }
    // Disable the DurableDataLog. This requires us to initialize the log, then disable it.
    metadata = new MetadataBuilder(CONTAINER_ID).build();
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // DurableLog should start properly.
        durableLog.startAsync().awaitRunning();
        CompletableFuture<Void> online = durableLog.awaitOnline();
        Assert.assertTrue("awaitOnline() returned an incomplete future.", Futures.isSuccessful(online));
        Assert.assertFalse("Not expecting an offline DurableLog.", durableLog.isOffline());
        dataLog.get().disable();
    }
    // Verify that the DurableLog starts properly and that all operations throw appropriate exceptions.
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // DurableLog should start properly.
        durableLog.startAsync().awaitRunning();
        CompletableFuture<Void> online = durableLog.awaitOnline();
        Assert.assertFalse("awaitOnline() returned a completed future.", online.isDone());
        Assert.assertTrue("Expecting an offline DurableLog.", durableLog.isOffline());
        // Verify all operations fail with the right exception.
        AssertExtensions.assertThrows("add() did not fail with the right exception when offline.", () -> durableLog.add(new ProbeOperation(), TIMEOUT), ex -> ex instanceof ContainerOfflineException);
        AssertExtensions.assertThrows("read() did not fail with the right exception when offline.", () -> durableLog.read(0, 1, TIMEOUT), ex -> ex instanceof ContainerOfflineException);
        AssertExtensions.assertThrows("truncate() did not fail with the right exception when offline.", () -> durableLog.truncate(0, TIMEOUT), ex -> ex instanceof ContainerOfflineException);
        AssertExtensions.assertThrows("operationProcessingBarrier() did not fail with the right exception when offline.", () -> durableLog.operationProcessingBarrier(TIMEOUT), ex -> ex instanceof ContainerOfflineException);
        // Verify we can also shut it down properly from this state.
        durableLog.stopAsync().awaitTerminated();
        Assert.assertTrue("awaitOnline() returned future did not fail when DurableLog shut down.", online.isCompletedExceptionally());
    }
    // Verify that, when the DurableDataLog becomes enabled, the DurableLog picks up the change and resumes normal operations.
    dataLog.set(null);
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, cacheFactory, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        // DurableLog should start properly.
        durableLog.startAsync().awaitRunning();
        CompletableFuture<Void> online = durableLog.awaitOnline();
        Assert.assertFalse("awaitOnline() returned a completed future.", online.isDone());
        // Enable the underlying data log and wait for recovery to finish.
        dataLog.get().enable();
        online.get(START_RETRY_DELAY_MILLIS * 100, TimeUnit.MILLISECONDS);
        Assert.assertFalse("Not expecting an offline DurableLog after re-enabling.", durableLog.isOffline());
        // Verify we can still read the data that we wrote before the DataLog was disabled.
        List<Operation> recoveredOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        assertRecoveredOperationsMatch(originalOperations, recoveredOperations);
        performMetadataChecks(streamSegmentIds, new HashSet<>(), new HashMap<>(), completionFutures, metadata, false, false);
        performReadIndexChecks(completionFutures, readIndex);
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
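
Distilled from the test above, here is a minimal sketch of the delayed-start pattern a caller could follow. It is illustrative only; it assumes an already-constructed DurableLog and reuses the test's TIMEOUT constant.

durableLog.startAsync().awaitRunning();
if (durableLog.isOffline()) {
    // While offline, add()/read()/truncate() fail with ContainerOfflineException,
    // so wait until the underlying DurableDataLog is re-enabled and recovery completes.
    CompletableFuture<Void> online = durableLog.awaitOnline();
    online.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
// The log is now online; a ProbeOperation is a cheap end-to-end check of the operation pipeline.
durableLog.add(new ProbeOperation(), TIMEOUT).join();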

Example 12 with Storage

Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.

From class OperationLogTestBase, method createStreamSegmentsWithOperations.

/**
 * Creates a number of StreamSegments in the given Metadata and OperationLog.
 */
HashSet<Long> createStreamSegmentsWithOperations(int streamSegmentCount, ContainerMetadata containerMetadata, OperationLog durableLog, Storage storage) {
    StreamSegmentMapper mapper = new StreamSegmentMapper(containerMetadata, durableLog, new InMemoryStateStore(), NO_OP_METADATA_CLEANUP, storage, ForkJoinPool.commonPool());
    HashSet<Long> result = new HashSet<>();
    for (int i = 0; i < streamSegmentCount; i++) {
        String name = getStreamSegmentName(i);
        long streamSegmentId = mapper.createNewStreamSegment(name, null, Duration.ZERO).thenCompose(v -> mapper.getOrAssignStreamSegmentId(name, Duration.ZERO)).join();
        result.add(streamSegmentId);
    }
    return result;
}

Example 13 with Storage

Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.

From class OperationLogTestBase, method createTransactionsWithOperations.

/**
 * Creates a number of Transaction Segments in the given Metadata and OperationLog.
 */
AbstractMap<Long, Long> createTransactionsWithOperations(HashSet<Long> streamSegmentIds, int transactionsPerStreamSegment, ContainerMetadata containerMetadata, OperationLog durableLog, Storage storage) {
    HashMap<Long, Long> result = new HashMap<>();
    StreamSegmentMapper mapper = new StreamSegmentMapper(containerMetadata, durableLog, new InMemoryStateStore(), NO_OP_METADATA_CLEANUP, storage, ForkJoinPool.commonPool());
    for (long streamSegmentId : streamSegmentIds) {
        String streamSegmentName = containerMetadata.getStreamSegmentMetadata(streamSegmentId).getName();
        for (int i = 0; i < transactionsPerStreamSegment; i++) {
            long transactionId = mapper.createNewTransactionStreamSegment(streamSegmentName, UUID.randomUUID(), null, Duration.ZERO).thenCompose(v -> mapper.getOrAssignStreamSegmentId(v, Duration.ZERO)).join();
            result.put(transactionId, streamSegmentId);
        }
    }
    return result;
}
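
The two helpers above (Examples 12 and 13) are typically chained: create the parent segments first, then the transactions on top of them. A minimal usage sketch, assuming metadata, durableLog and storage are initialized as in Example 11 (the counts are illustrative):

// Create 50 parent segments, then two transactions per parent.
HashSet<Long> segmentIds = createStreamSegmentsWithOperations(50, metadata, durableLog, storage);
// Maps each transactionId to its parent streamSegmentId.
AbstractMap<Long, Long> transactionsToParents = createTransactionsWithOperations(segmentIds, 2, metadata, durableLog, storage);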

Example 14 with Storage

Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.

From class ContainerReadIndexTests, method testCacheEviction.

/**
 * Tests the ability to evict entries from the ReadIndex under various conditions:
 * * If an entry is aged out
 * * If an entry is pushed out because of cache space pressure.
 * <p>
 * This also verifies that certain entries, such as RedirectReadIndexEntries and entries after the Storage Offset are
 * not removed.
 * <p>
 * The test proceeds as follows (it's pretty subtle, because there aren't many ways to hook into the ReadIndex and see what it's doing):
 * 1. It creates a number of segments and populates each of them in Storage up to offset N/2-1 (this is called pre-storage)
 * 2. It populates the ReadIndex for each of those segments from offset N/2 to offset N-1 (this is called post-storage)
 * 3. It loads all the data from Storage into the ReadIndex, in entries of size equal to those already loaded in step #2.
 * 3a. At this point, all the entries added in step #2 have Generations 0..A/4-1, and those added in step #3 have Generations A/4..A-1
 * 4. Append more data at the end. This forces the generation to increase to 1.25A.
 * 4a. Nothing should be evicted from the cache now, since the earliest items are all post-storage.
 * 5. We 'touch' (read) the first 1/3 of pre-storage entries (offsets 0..N/4).
 * 5a. At this point, those entries (offsets 0..N/6) will have the newest generations (1.25A..1.5A)
 * 6. We append more data (equivalent to the data we touched)
 * 6a. Nothing should be evicted, since those generations that were just eligible for removal were touched and bumped up.
 * 7. We forcefully increase the current generation by 1 (without touching the ReadIndex)
 * 7a. At this point, we expect all the pre-storage items, except the touched ones, to be evicted. This covers generations 0.25A..0.75A.
 * 8. Update the metadata and indicate that all the post-storage entries are now pre-storage and bump the generation by 0.75A.
 * 8a. At this point, we expect all former post-storage items and pre-storage items to be evicted (in this order).
 * <p>
 * The final order of eviction (in terms of offsets, for each segment) is:
 * * 0.25N..0.75N, 0.75N..N, N..1.25N, 0..0.25N, 1.25N..1.5N (remember that we added quite a few items after the initial run).
 */
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testCacheEviction() throws Exception {
    // Create a CachePolicy with a set number of generations and a known max size.
    // Each generation contains exactly one entry, so the number of generations is also the number of entries.
    final int appendSize = 100;
    // This also doubles as number of generations (each generation, we add one append for each segment).
    final int entriesPerSegment = 100;
    final int cacheMaxSize = SEGMENT_COUNT * entriesPerSegment * appendSize;
    // 25% of the entries are beyond the StorageOffset
    final int postStorageEntryCount = entriesPerSegment / 4;
    // 75% of the entries are before the StorageOffset.
    final int preStorageEntryCount = entriesPerSegment - postStorageEntryCount;
    CachePolicy cachePolicy = new CachePolicy(cacheMaxSize, Duration.ofMillis(1000 * 2 * entriesPerSegment), Duration.ofMillis(1000));
    // To properly test this, we want predictable storage reads.
    ReadIndexConfig config = ConfigHelpers.withInfiniteCachePolicy(ReadIndexConfig.builder().with(ReadIndexConfig.STORAGE_READ_ALIGNMENT, appendSize)).build();
    ArrayList<CacheKey> removedKeys = new ArrayList<>();
    @Cleanup TestContext context = new TestContext(config, cachePolicy);
    // Record every cache removal.
    context.cacheFactory.cache.removeCallback = removedKeys::add;
    // Create the segments (metadata + storage).
    ArrayList<Long> segmentIds = createSegments(context);
    createSegmentsInStorage(context);
    // Populate the Storage with appropriate data.
    byte[] preStorageData = new byte[preStorageEntryCount * appendSize];
    for (long segmentId : segmentIds) {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        val handle = context.storage.openWrite(sm.getName()).join();
        context.storage.write(handle, 0, new ByteArrayInputStream(preStorageData), preStorageData.length, TIMEOUT).join();
        sm.setStorageLength(preStorageData.length);
        sm.setLength(preStorageData.length);
    }
    // Callback that appends one entry at the end of the given segment id.
    Consumer<Long> appendOneEntry = segmentId -> {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        byte[] data = new byte[appendSize];
        long offset = sm.getLength();
        sm.setLength(offset + data.length);
        try {
            context.readIndex.append(segmentId, offset, data);
        } catch (StreamSegmentNotExistsException ex) {
            throw new CompletionException(ex);
        }
    };
    // Populate the ReadIndex with the Append entries (post-StorageOffset)
    for (int i = 0; i < postStorageEntryCount; i++) {
        segmentIds.forEach(appendOneEntry);
        // Each time we make a round of appends (one per segment), we increment the generation in the CacheManager.
        context.cacheManager.applyCachePolicy();
    }
    // Read all the data from Storage, making sure we carefully associate them with the proper generation.
    for (int i = 0; i < preStorageEntryCount; i++) {
        long offset = i * appendSize;
        for (long segmentId : segmentIds) {
            @Cleanup ReadResult result = context.readIndex.read(segmentId, offset, appendSize, TIMEOUT);
            ReadResultEntry resultEntry = result.next();
            Assert.assertEquals("Unexpected type of ReadResultEntry when trying to load up data into the ReadIndex Cache.", ReadResultEntryType.Storage, resultEntry.getType());
            resultEntry.requestContent(TIMEOUT);
            ReadResultEntryContents contents = resultEntry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.assertFalse("Not expecting more data to be available for reading.", result.hasNext());
            Assert.assertEquals("Unexpected ReadResultEntry length when trying to load up data into the ReadIndex Cache.", appendSize, contents.getLength());
        }
        context.cacheManager.applyCachePolicy();
    }
    Assert.assertEquals("Not expecting any removed Cache entries at this point (cache is not full).", 0, removedKeys.size());
    // Append more data (equivalent to all post-storage entries), and verify that NO entries are being evicted (we cannot evict post-storage entries).
    for (int i = 0; i < postStorageEntryCount; i++) {
        segmentIds.forEach(appendOneEntry);
        context.cacheManager.applyCachePolicy();
    }
    Assert.assertEquals("Not expecting any removed Cache entries at this point (only eligible entries were post-storage).", 0, removedKeys.size());
    // 'Touch' the first few entries read from storage. This should move them to the back of the queue (they won't be the first ones to be evicted).
    int touchCount = preStorageEntryCount / 3;
    for (int i = 0; i < touchCount; i++) {
        long offset = i * appendSize;
        for (long segmentId : segmentIds) {
            @Cleanup ReadResult result = context.readIndex.read(segmentId, offset, appendSize, TIMEOUT);
            ReadResultEntry resultEntry = result.next();
            Assert.assertEquals("Unexpected type of ReadResultEntry when trying to load up data into the ReadIndex Cache.", ReadResultEntryType.Cache, resultEntry.getType());
        }
    }
    // Append more data (equivalent to the amount of data we 'touched'), and verify that the entries we just touched are not being removed.
    for (int i = 0; i < touchCount; i++) {
        segmentIds.forEach(appendOneEntry);
        context.cacheManager.applyCachePolicy();
    }
    Assert.assertEquals("Not expecting any removed Cache entries at this point (we touched old entries and they now have the newest generation).", 0, removedKeys.size());
    // Increment the generations so that we are caught up to just before the generation where the "touched" items now live.
    context.cacheManager.applyCachePolicy();
    // We expect all but the 'touchCount' pre-Storage entries to be removed.
    int expectedRemovalCount = (preStorageEntryCount - touchCount) * SEGMENT_COUNT;
    Assert.assertEquals("Unexpected number of removed entries after having forced out all pre-storage entries.", expectedRemovalCount, removedKeys.size());
    // Now update the metadata and indicate that all the post-storage data has been moved to storage.
    segmentIds.forEach(segmentId -> {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        sm.setStorageLength(sm.getLength());
    });
    // We add one artificial entry, which we'll be touching forever and ever; this forces the CacheManager to
    // update its current generation every time. We will be ignoring this entry for our test.
    SegmentMetadata readSegment = context.metadata.getStreamSegmentMetadata(segmentIds.get(0));
    appendOneEntry.accept(readSegment.getId());
    // Now evict everything (whether by size or by aging out).
    for (int i = 0; i < cachePolicy.getMaxGenerations(); i++) {
        @Cleanup ReadResult result = context.readIndex.read(readSegment.getId(), readSegment.getLength() - appendSize, appendSize, TIMEOUT);
        result.next();
        context.cacheManager.applyCachePolicy();
    }
    int expectedRemovalCountPerSegment = entriesPerSegment + touchCount + postStorageEntryCount;
    int expectedTotalRemovalCount = SEGMENT_COUNT * expectedRemovalCountPerSegment;
    Assert.assertEquals("Unexpected number of removed entries after having forced out all the entries.", expectedTotalRemovalCount, removedKeys.size());
    // Finally, verify that the evicted items are in the correct order (for each segment). See this test's description for details.
    for (long segmentId : segmentIds) {
        List<CacheKey> segmentRemovedKeys = removedKeys.stream().filter(key -> key.getStreamSegmentId() == segmentId).collect(Collectors.toList());
        Assert.assertEquals("Unexpected number of removed entries for segment " + segmentId, expectedRemovalCountPerSegment, segmentRemovedKeys.size());
        // The correct order of eviction (N = entriesPerSegment) is: 0.25N..0.75N, 0.75N..N, N..1.25N, 0..0.25N, 1.25N..1.5N.
        // This is equivalent to the following checks:
        // 0.25N..1.25N
        checkOffsets(segmentRemovedKeys, segmentId, 0, entriesPerSegment, entriesPerSegment * appendSize / 4, appendSize);
        // 0..0.25N
        checkOffsets(segmentRemovedKeys, segmentId, entriesPerSegment, entriesPerSegment / 4, 0, appendSize);
        // 1.25N..1.5N
        checkOffsets(segmentRemovedKeys, segmentId, entriesPerSegment + entriesPerSegment / 4, entriesPerSegment / 4, (int) (entriesPerSegment * appendSize * 1.25), appendSize);
    }
}
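
To make the eviction order described in the doc comment concrete, here is a small illustrative sketch (not part of the test) that prints the expected per-segment eviction ranges in byte offsets, hard-coding the test's constants N = entriesPerSegment = 100 and appendSize = 100:

// Expected per-segment eviction order, as described in the doc comment above.
int n = 100;          // entriesPerSegment
int appendSize = 100; // bytes per entry
int[][] rangesInEntries = {
    { n / 4, 3 * n / 4 },     // untouched pre-storage entries:  0.25N..0.75N
    { 3 * n / 4, n },         // former post-storage entries:    0.75N..N
    { n, n + n / 4 },         // second batch of appends:        N..1.25N
    { 0, n / 4 },             // 'touched' entries:              0..0.25N
    { n + n / 4, n + n / 2 }  // final batch of appends:         1.25N..1.5N
};
for (int[] r : rangesInEntries) {
    System.out.printf("evict offsets [%d, %d)%n", r[0] * appendSize, r[1] * appendSize);
}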

Example 15 with Storage

Use of io.pravega.segmentstore.storage.Storage in project pravega by pravega.

From class ContainerReadIndexTests, method testReadDirect.

/**
 * Tests the readDirect() method on the ReadIndex.
 */
@Test
public void testReadDirect() throws Exception {
    final int randomAppendLength = 1024;
    @Cleanup TestContext context = new TestContext();
    ArrayList<Long> segmentIds = new ArrayList<>();
    final long segmentId = createSegment(0, context);
    final UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    segmentIds.add(segmentId);
    HashMap<Long, ArrayList<Long>> transactionsBySegment = createTransactions(segmentIds, 1, context);
    final long mergedTxId = transactionsBySegment.get(segmentId).get(0);
    // Add data to all segments.
    HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
    transactionsBySegment.values().forEach(segmentIds::addAll);
    appendData(segmentIds, segmentContents, context);
    // Mark everything so far (minus a few bytes) as being written to storage.
    segmentMetadata.setStorageLength(segmentMetadata.getLength() - 100);
    // Now partially merge the transaction.
    final long mergedTxOffset = beginMergeTransaction(mergedTxId, segmentMetadata, segmentContents, context);
    // Add one more append after all of this.
    final long endOfMergedDataOffset = segmentMetadata.getLength();
    byte[] appendData = new byte[randomAppendLength];
    new Random(0).nextBytes(appendData);
    appendSingleWrite(segmentId, appendData, context);
    recordAppend(segmentId, appendData, segmentContents);
    // Verify we are not allowed to read from the range which has already been committed to Storage (invalid arguments).
    for (AtomicLong offset = new AtomicLong(0); offset.get() < segmentMetadata.getStorageLength(); offset.incrementAndGet()) {
        AssertExtensions.assertThrows(String.format("readDirect allowed reading from an illegal offset (%s).", offset), () -> context.readIndex.readDirect(segmentId, offset.get(), 1), ex -> ex instanceof IllegalArgumentException);
    }
    // Verify that any reads overlapping a merged transaction return null (that is, we cannot retrieve the requested data).
    for (long offset = mergedTxOffset - 1; offset < endOfMergedDataOffset; offset++) {
        InputStream resultStream = context.readIndex.readDirect(segmentId, offset, 2);
        Assert.assertNull("readDirect() returned data overlapping a partially merged transaction", resultStream);
    }
    // Verify that we can read from any other offset.
    final byte[] expectedData = segmentContents.get(segmentId).toByteArray();
    BiConsumer<Long, Long> verifyReadResult = (startOffset, endOffset) -> {
        int readLength = (int) (endOffset - startOffset);
        while (readLength > 0) {
            InputStream actualDataStream;
            try {
                actualDataStream = context.readIndex.readDirect(segmentId, startOffset, readLength);
            } catch (StreamSegmentNotExistsException ex) {
                throw new CompletionException(ex);
            }
            Assert.assertNotNull(String.format("Unexpected result when data is readily available for Offset = %s, Length = %s.", startOffset, readLength), actualDataStream);
            byte[] actualData = new byte[readLength];
            try {
                int bytesCopied = StreamHelpers.readAll(actualDataStream, actualData, 0, readLength);
                Assert.assertEquals(String.format("Unexpected number of bytes read for Offset = %s, Length = %s (pre-partial-merge).", startOffset, readLength), readLength, bytesCopied);
            } catch (IOException ex) {
                // Technically not possible.
                throw new UncheckedIOException(ex);
            }
            AssertExtensions.assertArrayEquals("Unexpected data read from the segment at offset " + startOffset, expectedData, startOffset.intValue(), actualData, 0, actualData.length);
            // Setup the read for the next test (where we read 1 less byte than now).
            readLength--;
            if (readLength % 2 == 0) {
                // For every 2 bytes of decreased read length, increase the start offset by 1. This allows for a greater
                // number of combinations to be tested.
                startOffset++;
            }
        }
    };
    // Verify that we can read the cached data just after the StorageLength but before the merged transaction.
    verifyReadResult.accept(segmentMetadata.getStorageLength(), mergedTxOffset);
    // Verify that we can read the cached data just after the merged transaction but before the end of the segment.
    verifyReadResult.accept(endOfMergedDataOffset, segmentMetadata.getLength());
}
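
As the test above demonstrates, readDirect() has a narrow contract: offsets below the StorageLength are rejected with IllegalArgumentException, and a null result means the requested range is not fully cached (for example, it overlaps a partially merged transaction). A hedged caller-side sketch, reusing the test's context and TIMEOUT, with checked exceptions omitted for brevity:

InputStream direct = context.readIndex.readDirect(segmentId, offset, length);
if (direct == null) {
    // Range not fully cached (e.g., it overlaps a partially merged transaction):
    // fall back to a regular read(), which can fetch from Storage as needed.
    @Cleanup ReadResult fallback = context.readIndex.read(segmentId, offset, length, TIMEOUT);
    // ... consume the ReadResultEntries from 'fallback' ...
} else {
    byte[] data = new byte[length];
    StreamHelpers.readAll(direct, data, 0, length);
}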

Aggregations

Storage (io.pravega.segmentstore.storage.Storage): 32 usages
Test (org.junit.Test): 22 usages
lombok.val (lombok.val): 18 usages
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException): 15 usages
Operation (io.pravega.segmentstore.server.logs.operations.Operation): 15 usages
Duration (java.time.Duration): 15 usages
Cleanup (lombok.Cleanup): 15 usages
ByteArrayInputStream (java.io.ByteArrayInputStream): 14 usages
Futures (io.pravega.common.concurrent.Futures): 13 usages
DataCorruptionException (io.pravega.segmentstore.server.DataCorruptionException): 13 usages
SegmentMetadata (io.pravega.segmentstore.server.SegmentMetadata): 13 usages
StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation): 13 usages
CompletableFuture (java.util.concurrent.CompletableFuture): 13 usages
StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation): 12 usages
Exceptions (io.pravega.common.Exceptions): 11 usages
UpdateableSegmentMetadata (io.pravega.segmentstore.server.UpdateableSegmentMetadata): 11 usages
CompletionException (java.util.concurrent.CompletionException): 11 usages
AtomicLong (java.util.concurrent.atomic.AtomicLong): 11 usages
InputStream (java.io.InputStream): 10 usages
UpdateableContainerMetadata (io.pravega.segmentstore.server.UpdateableContainerMetadata): 9 usages