Search in sources :

Example 6 with DirectMemoryCache

Use of io.pravega.segmentstore.storage.cache.DirectMemoryCache in the pravega project.

From the class CacheManagerTests, method testCleanupListeners.

/**
 * Tests the ability to register, invoke and auto-unregister {@link ThrottleSourceListener} instances.
 */
@Test
public void testCleanupListeners() {
    final CachePolicy policy = new CachePolicy(1024, Duration.ofHours(1), Duration.ofHours(1));
    @Cleanup
    val cache = new DirectMemoryCache(policy.getMaxSize());
    @Cleanup
    TestCacheManager manager = new TestCacheManager(policy, cache, executorService());
    TestClient client = new TestClient();
    manager.register(client);
    TestCleanupListener firstListener = new TestCleanupListener();
    TestCleanupListener secondListener = new TestCleanupListener();
    manager.getUtilizationProvider().registerCleanupListener(firstListener);
    manager.getUtilizationProvider().registerCleanupListener(secondListener);
    // Make every cleanup iteration report that something got evicted.
    client.setUpdateGenerationsImpl((current, oldest, essentialOnly) -> true);
    // First iteration: both listeners are registered, so both must fire.
    client.setCacheStatus(0, 0);
    // Seed the cache with a single byte so the cleanup pass has something to do.
    cache.insert(new ByteArraySegment(new byte[1]));
    manager.runOneIteration();
    Assert.assertEquals("Expected cleanup listener to be invoked the first time.", 1, firstListener.getCallCount());
    Assert.assertEquals("Expected cleanup listener to be invoked the first time.", 1, secondListener.getCallCount());
    // Second iteration: a closed listener must be skipped (and auto-unregistered).
    secondListener.setClosed(true);
    client.setCacheStatus(0, 1);
    manager.runOneIteration();
    Assert.assertEquals("Expected cleanup listener to be invoked the second time.", 2, firstListener.getCallCount());
    Assert.assertEquals("Not expecting cleanup listener to be invoked the second time for closed listener.", 1, secondListener.getCallCount());
    // Re-registering the closed listener should be a no-op.
    manager.getUtilizationProvider().registerCleanupListener(secondListener);
}
Also used : lombok.val(lombok.val) DirectMemoryCache(io.pravega.segmentstore.storage.cache.DirectMemoryCache) ByteArraySegment(io.pravega.common.util.ByteArraySegment) Cleanup(lombok.Cleanup) Test(org.junit.Test)

Example 7 with DirectMemoryCache

Use of io.pravega.segmentstore.storage.cache.DirectMemoryCache in the pravega project.

From the class CacheManagerTests, method testCacheFullCleanup.

/**
 * Tests the ability to auto-cleanup the cache if it indicates it has reached capacity and needs some eviction(s)
 * in order to accommodate more data.
 */
@Test
public void testCacheFullCleanup() {
    final CachePolicy policy = new CachePolicy(1024, Duration.ofHours(1), Duration.ofHours(1));
    @Cleanup
    val cache = new DirectMemoryCache(policy.getMaxSize());
    int maxCacheSize = (int) cache.getState().getMaxBytes();
    @Cleanup
    TestCacheManager manager = new TestCacheManager(policy, cache, executorService());
    TestClient client = new TestClient();
    manager.register(client);
    // Occupy half of the cache up-front.
    int firstLength = maxCacheSize / 2;
    val firstWrite = cache.insert(new ByteArraySegment(new byte[firstLength]));
    // When a cleanup is requested, count it and evict the first entry to free up space.
    val cleanupCount = new AtomicInteger(0);
    client.setCacheStatus(0, 1);
    client.setUpdateGenerationsImpl((newGen, oldGen, essentialOnly) -> {
        cleanupCount.incrementAndGet();
        cache.delete(firstWrite);
        return true;
    });
    // This insertion would exceed capacity, which must trigger exactly one cleanup.
    int secondLength = maxCacheSize / 2 + 1;
    val secondWrite = cache.insert(new ByteArraySegment(new byte[secondLength]));
    // Verify the cleanup ran and the new entry fully replaced the evicted one.
    Assert.assertEquals("Unexpected number of cleanup requests.", 1, cleanupCount.get());
    Assert.assertEquals("New entry was not inserted.", secondLength, cache.get(secondWrite).getLength());
    Assert.assertEquals("Unexpected number of stored bytes.", secondLength, cache.getState().getStoredBytes());
}
Also used : lombok.val(lombok.val) DirectMemoryCache(io.pravega.segmentstore.storage.cache.DirectMemoryCache) ByteArraySegment(io.pravega.common.util.ByteArraySegment) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Cleanup(lombok.Cleanup) Test(org.junit.Test)

Example 8 with DirectMemoryCache

Use of io.pravega.segmentstore.storage.cache.DirectMemoryCache in the pravega project.

From the class CacheManagerTests, method testApplyPolicyConcurrency.

/**
 * Tests the ability to handle concurrent requests to {@link  CacheManager#applyCachePolicy()}.
 */
@Test
public void testApplyPolicyConcurrency() throws Exception {
    final CachePolicy policy = new CachePolicy(1024, Duration.ofHours(1), Duration.ofHours(1));
    @Cleanup
    val cache = new DirectMemoryCache(policy.getMaxSize());
    int maxCacheSize = (int) cache.getState().getMaxBytes();
    @Cleanup
    TestCacheManager manager = new TestCacheManager(policy, cache, executorService());
    TestClient client = new TestClient();
    manager.register(client);
    // Pre-load the cache to 75% of capacity so subsequent inserts force cleanups.
    int initialLength = maxCacheSize * 3 / 4;
    val initialWrite = cache.insert(new ByteArraySegment(new byte[initialLength]));
    // Coordination primitives: announce the first cleanup, hold it open, and flag any overlap.
    val cleanupStarted = new CompletableFuture<Void>();
    val cleanupGate = new CompletableFuture<Void>();
    val cleanupCount = new AtomicInteger(0);
    val overlapDetected = new AtomicBoolean(false);
    client.setCacheStatus(0, 1);
    client.setUpdateGenerationsImpl((newGen, oldGen, essentialOnly) -> {
        int invocation = cleanupCount.incrementAndGet();
        if (invocation == 1) {
            // First cleanup request: signal that it started, block until the test
            // releases the gate, then free space by deleting the initial entry (once).
            cleanupStarted.complete(null);
            cleanupGate.join();
            cache.delete(initialWrite);
        } else if (!cleanupGate.isDone()) {
            // A subsequent cleanup ran while the first one was still blocked:
            // that is a forbidden concurrent invocation.
            overlapDetected.set(true);
        }
        return true;
    });
    // Kick off a write that fills up the cache and therefore requires a cleanup.
    int length1 = maxCacheSize / 3;
    val firstInsert = CompletableFuture.supplyAsync(() -> cache.insert(new ByteArraySegment(new byte[length1])), executorService());
    // Wait until that write has actually requested a cleanup.
    cleanupStarted.get(CLEANUP_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    // Launch a second cache-filling write while the first cleanup is still blocked.
    int length2 = length1 + 1;
    val secondInsert = CompletableFuture.supplyAsync(() -> cache.insert(new ByteArraySegment(new byte[length2])), executorService());
    // Release the first cleanup and collect both write results.
    cleanupGate.complete(null);
    val write1 = firstInsert.get(CLEANUP_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    val write2 = secondInsert.get(CLEANUP_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
    // Verify that things did work as intended.
    Assert.assertFalse("Concurrent call to applyCachePolicy detected.", overlapDetected.get());
    AssertExtensions.assertGreaterThanOrEqual("Unexpected number of cleanup requests.", 1, cleanupCount.get());
    Assert.assertEquals("Unexpected entry #2.", length1, cache.get(write1).getLength());
    Assert.assertEquals("Unexpected entry #3.", length2, cache.get(write2).getLength());
}
Also used : lombok.val(lombok.val) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) DirectMemoryCache(io.pravega.segmentstore.storage.cache.DirectMemoryCache) CompletableFuture(java.util.concurrent.CompletableFuture) ByteArraySegment(io.pravega.common.util.ByteArraySegment) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Cleanup(lombok.Cleanup) Test(org.junit.Test)

Example 9 with DirectMemoryCache

Use of io.pravega.segmentstore.storage.cache.DirectMemoryCache in the pravega project.

From the class DurableLogTests, method testRecoveryWithIncrementalCheckpoints.

/**
 * Tests the DurableLog recovery process when there are multiple {@link MetadataCheckpointOperation}s added, with each
 * such checkpoint including information about evicted segments or segments which had their storage state modified.
 * Creates 50 segments, applies a different mutation to each disjoint slice of them (delete, merge, evict,
 * storage-state change) with a checkpoint after each batch, then recovers into a second DurableLog and verifies
 * that the recovered metadata and read index match the first instance's state.
 */
@Test
public void testRecoveryWithIncrementalCheckpoints() throws Exception {
    final int streamSegmentCount = 50;
    // Setup a DurableLog and start it.
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    // First DurableLog. We use this for generating data.
    val metadata1 = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    // Declared outside the try-block so the recovery phase below can verify these segments are gone.
    List<Long> deletedIds;
    Set<Long> evictIds;
    try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Create some segments and slice them into disjoint groups, one per mutation exercised below.
        val segmentIds = new ArrayList<>(createStreamSegmentsWithOperations(streamSegmentCount, durableLog));
        deletedIds = segmentIds.subList(0, 5);
        val mergedFromIds = segmentIds.subList(5, 10);
        // Must be same length as mergeFrom
        val mergedToIds = segmentIds.subList(10, 15);
        evictIds = new HashSet<>(segmentIds.subList(15, 20));
        val changeStorageStateIds = segmentIds.subList(20, segmentIds.size() - 5);
        // Append something to each segment, except those earmarked for eviction (so they remain
        // eligible eviction candidates later).
        for (val segmentId : segmentIds) {
            if (!evictIds.contains(segmentId)) {
                durableLog.add(new StreamSegmentAppendOperation(segmentId, generateAppendData((int) (long) segmentId), null), OperationPriority.Normal, TIMEOUT).join();
            }
        }
        // Checkpoint 1.
        durableLog.checkpoint(TIMEOUT).join();
        // Delete some segments.
        for (val segmentId : deletedIds) {
            durableLog.add(new DeleteSegmentOperation(segmentId), OperationPriority.Normal, TIMEOUT).join();
        }
        // Checkpoint 2.
        durableLog.checkpoint(TIMEOUT).join();
        // Merge some segments (each source must be sealed before it can be merged).
        for (int i = 0; i < mergedFromIds.size(); i++) {
            durableLog.add(new StreamSegmentSealOperation(mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
            durableLog.add(new MergeSegmentOperation(mergedToIds.get(i), mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
        }
        // Checkpoint 3.
        durableLog.checkpoint(TIMEOUT).join();
        // Evict some segments. Truncation markers are removed first so the metadata considers
        // the candidates safe to evict.
        val evictableContainerMetadata = (EvictableMetadata) metadata1;
        metadata1.removeTruncationMarkers(metadata1.getOperationSequenceNumber());
        val toEvict = evictableContainerMetadata.getEvictionCandidates(Integer.MAX_VALUE, segmentIds.size()).stream().filter(m -> evictIds.contains(m.getId())).collect(Collectors.toList());
        val evicted = evictableContainerMetadata.cleanup(toEvict, Integer.MAX_VALUE);
        // Sanity check: exactly the segments we earmarked for eviction were evicted.
        AssertExtensions.assertContainsSameElements("", evictIds, evicted.stream().map(SegmentMetadata::getId).collect(Collectors.toList()));
        // Checkpoint 4.
        durableLog.checkpoint(TIMEOUT).join();
        // Update storage state for some segments. The modulo checks deliberately overlap so
        // individual segments may receive more than one kind of storage-state change.
        for (val segmentId : changeStorageStateIds) {
            val sm = metadata1.getStreamSegmentMetadata(segmentId);
            if (segmentId % 3 == 0) {
                sm.setStorageLength(sm.getLength());
            }
            if (segmentId % 4 == 0) {
                sm.markSealed();
                sm.markSealedInStorage();
            }
            if (segmentId % 5 == 0) {
                sm.markDeleted();
                sm.markDeletedInStorage();
            }
        }
        // Checkpoint 5.
        durableLog.checkpoint(TIMEOUT).join();
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Second DurableLog. We use this for recovery (fresh metadata, same data log and storage).
    val metadata2 = new MetadataBuilder(CONTAINER_ID).build();
    try (ContainerReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, storage, cacheManager, executorService());
        DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Validate metadata matches.
        val expectedSegmentIds = metadata1.getAllStreamSegmentIds();
        val actualSegmentIds = metadata2.getAllStreamSegmentIds();
        AssertExtensions.assertContainsSameElements("Unexpected set of recovered segments. Only Active segments expected to have been recovered.", expectedSegmentIds, actualSegmentIds);
        // Compare per-segment metadata in a deterministic (sorted-by-id) order.
        val expectedSegments = expectedSegmentIds.stream().sorted().map(metadata1::getStreamSegmentMetadata).collect(Collectors.toList());
        val actualSegments = actualSegmentIds.stream().sorted().map(metadata2::getStreamSegmentMetadata).collect(Collectors.toList());
        for (int i = 0; i < expectedSegments.size(); i++) {
            val e = expectedSegments.get(i);
            val a = actualSegments.get(i);
            SegmentMetadataComparer.assertEquals("Recovered segment metadata mismatch", e, a);
        }
        // Validate read index is as it should. Here, we can only check if the read indices for evicted segments are
        // no longer loaded; we do more thorough checks in the ContainerReadIndexTests suite.
        Streams.concat(evictIds.stream(), deletedIds.stream()).forEach(segmentId -> Assert.assertNull("Not expecting a read index for an evicted or deleted segment.", readIndex.getIndex(segmentId)));
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
Also used : Storage(io.pravega.segmentstore.storage.Storage) StreamSegmentInformation(io.pravega.segmentstore.contracts.StreamSegmentInformation) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) OperationPriority(io.pravega.segmentstore.server.logs.operations.OperationPriority) SneakyThrows(lombok.SneakyThrows) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) AssertExtensions(io.pravega.test.common.AssertExtensions) MergeSegmentOperation(io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation) TimeoutException(java.util.concurrent.TimeoutException) Cleanup(lombok.Cleanup) LogAddress(io.pravega.segmentstore.storage.LogAddress) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) DataLogNotAvailableException(io.pravega.segmentstore.storage.DataLogNotAvailableException) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) CheckpointOperationBase(io.pravega.segmentstore.server.logs.operations.CheckpointOperationBase) InMemoryStorageFactory(io.pravega.segmentstore.storage.mocks.InMemoryStorageFactory) Duration(java.time.Duration) CachePolicy(io.pravega.segmentstore.server.CachePolicy) Operation(io.pravega.segmentstore.server.logs.operations.Operation) InMemoryDurableDataLogFactory(io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory) ServiceListeners(io.pravega.segmentstore.server.ServiceListeners) ContainerOfflineException(io.pravega.segmentstore.server.ContainerOfflineException) Predicate(java.util.function.Predicate) Collection(java.util.Collection) Set(java.util.Set) CompletionException(java.util.concurrent.CompletionException) Streams(com.google.common.collect.Streams) 
DataLogWriterNotPrimaryException(io.pravega.segmentstore.storage.DataLogWriterNotPrimaryException) Collectors(java.util.stream.Collectors) SegmentMetadataComparer(io.pravega.segmentstore.server.SegmentMetadataComparer) ErrorInjector(io.pravega.test.common.ErrorInjector) List(java.util.List) ByteArraySegment(io.pravega.common.util.ByteArraySegment) StreamSegmentContainerMetadata(io.pravega.segmentstore.server.containers.StreamSegmentContainerMetadata) DirectMemoryCache(io.pravega.segmentstore.storage.cache.DirectMemoryCache) TestUtils(io.pravega.test.common.TestUtils) Queue(java.util.Queue) Futures(io.pravega.common.concurrent.Futures) CacheManager(io.pravega.segmentstore.server.CacheManager) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) StreamSegmentException(io.pravega.segmentstore.contracts.StreamSegmentException) Exceptions(io.pravega.common.Exceptions) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) AtomicReference(java.util.concurrent.atomic.AtomicReference) Supplier(java.util.function.Supplier) CacheStorage(io.pravega.segmentstore.storage.cache.CacheStorage) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) EvictableMetadata(io.pravega.segmentstore.server.EvictableMetadata) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) ReadIndexConfig(io.pravega.segmentstore.server.reading.ReadIndexConfig) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) Timeout(org.junit.rules.Timeout) DurableDataLogException(io.pravega.segmentstore.storage.DurableDataLogException) OperationComparer(io.pravega.segmentstore.server.logs.operations.OperationComparer) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) 
DataLogDisabledException(io.pravega.segmentstore.storage.DataLogDisabledException) Iterator(java.util.Iterator) IntentionalException(io.pravega.test.common.IntentionalException) lombok.val(lombok.val) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) OperationLog(io.pravega.segmentstore.server.OperationLog) IOException(java.io.IOException) Test(org.junit.Test) TestDurableDataLog(io.pravega.segmentstore.server.TestDurableDataLog) Service(com.google.common.util.concurrent.Service) TestDurableDataLogFactory(io.pravega.segmentstore.server.TestDurableDataLogFactory) TimeUnit(java.util.concurrent.TimeUnit) AbstractMap(java.util.AbstractMap) Rule(org.junit.Rule) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Data(lombok.Data) ReadIndex(io.pravega.segmentstore.server.ReadIndex) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) Assert(org.junit.Assert) Collections(java.util.Collections) CompositeArrayView(io.pravega.common.util.CompositeArrayView) DeleteSegmentOperation(io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) InputStream(java.io.InputStream) DirectMemoryCache(io.pravega.segmentstore.storage.cache.DirectMemoryCache) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) ArrayList(java.util.ArrayList) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Cleanup(lombok.Cleanup) 
DeleteSegmentOperation(io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation) EvictableMetadata(io.pravega.segmentstore.server.EvictableMetadata) CacheManager(io.pravega.segmentstore.server.CacheManager) lombok.val(lombok.val) CacheStorage(io.pravega.segmentstore.storage.cache.CacheStorage) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) ReadIndex(io.pravega.segmentstore.server.ReadIndex) InMemoryDurableDataLogFactory(io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) MergeSegmentOperation(io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) Storage(io.pravega.segmentstore.storage.Storage) CacheStorage(io.pravega.segmentstore.storage.cache.CacheStorage) TestDurableDataLogFactory(io.pravega.segmentstore.server.TestDurableDataLogFactory) Test(org.junit.Test)

Example 10 with DirectMemoryCache

Use of io.pravega.segmentstore.storage.cache.DirectMemoryCache in the pravega project.

From the class DurableLogTests, method testTruncateWithRecovery.

/**
 * Tests the truncate() method while performing recovery: generates a log of operations, then truncates up to each
 * MetadataCheckpointOperation in turn; whenever a truncation actually removes data, the DurableLog is closed and
 * restarted (forcing recovery) and the remaining operations are verified against the original sequence.
 */
@Test
public void testTruncateWithRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Setup a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    @Cleanup ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, storage, cacheManager, executorService());
    Set<Long> streamSegmentIds;
    List<OperationWithCompletion> completionFutures;
    List<Operation> originalOperations;
    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, durableLog);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        completionFutures = processOperations(queuedOperations, durableLog);
        OperationWithCompletion.allOf(completionFutures).join();
        // Get a list of all the operations, before any truncation.
        originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
    // Truncate up to each MetadataCheckpointOperation and:
    // * If the DataLog was truncated:
    // ** Shut down DurableLog, re-start it (recovery) and verify the operations are as they should.
    // At the end, verify all operations and all entries in the DataLog were truncated.
    // NOTE: this variable is deliberately NOT @Cleanup/try-with-resources managed: the loop below
    // re-assigns it after each recovery, so the finally block closes whichever instance is current.
    DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
    try {
        durableLog.startAsync().awaitRunning();
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            if (!(currentOperation instanceof MetadataCheckpointOperation)) {
                // We can only truncate on MetadataCheckpointOperations.
                continue;
            }
            truncationOccurred.set(false);
            durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
            if (truncationOccurred.get()) {
                // Close current DurableLog and start a brand new one, forcing recovery.
                durableLog.close();
                durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService());
                durableLog.startAsync().awaitRunning();
                // The truncate callback must be re-attached: the factory produced a fresh data log on recovery.
                dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
                // Verify all operations up to, and including this one have been removed.
                Queue<Operation> reader = durableLog.read(2, TIMEOUT).join();
                Assert.assertFalse("Not expecting an empty log after truncating an operation (a MetadataCheckpoint must always exist).", reader.isEmpty());
                verifyFirstItemIsMetadataCheckpoint(reader.iterator());
                if (i < originalOperations.size() - 1) {
                    Operation firstOp = reader.poll();
                    OperationComparer.DEFAULT.assertEquals(String.format("Unexpected first operation after truncating SeqNo %d.", currentOperation.getSequenceNumber()), originalOperations.get(i + 1), firstOp);
                }
            }
        }
    } finally {
        // This closes whatever current instance this variable refers to, not necessarily the first one.
        durableLog.close();
    }
}
Also used : DirectMemoryCache(io.pravega.segmentstore.storage.cache.DirectMemoryCache) TestDurableDataLog(io.pravega.segmentstore.server.TestDurableDataLog) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) MergeSegmentOperation(io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentMapOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) DeleteSegmentOperation(io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) Cleanup(lombok.Cleanup) CacheManager(io.pravega.segmentstore.server.CacheManager) CacheStorage(io.pravega.segmentstore.storage.cache.CacheStorage) StorageMetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation) MetadataCheckpointOperation(io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) ReadIndex(io.pravega.segmentstore.server.ReadIndex) AtomicReference(java.util.concurrent.atomic.AtomicReference) InMemoryDurableDataLogFactory(io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory) 
ContainerReadIndex(io.pravega.segmentstore.server.reading.ContainerReadIndex) Storage(io.pravega.segmentstore.storage.Storage) CacheStorage(io.pravega.segmentstore.storage.cache.CacheStorage) TestDurableDataLogFactory(io.pravega.segmentstore.server.TestDurableDataLogFactory) Test(org.junit.Test)

Aggregations

DirectMemoryCache (io.pravega.segmentstore.storage.cache.DirectMemoryCache)12 Cleanup (lombok.Cleanup)12 Test (org.junit.Test)12 lombok.val (lombok.val)9 CacheManager (io.pravega.segmentstore.server.CacheManager)8 MetadataBuilder (io.pravega.segmentstore.server.MetadataBuilder)8 TestDurableDataLogFactory (io.pravega.segmentstore.server.TestDurableDataLogFactory)8 StreamSegmentMapOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation)8 ContainerReadIndex (io.pravega.segmentstore.server.reading.ContainerReadIndex)8 Storage (io.pravega.segmentstore.storage.Storage)8 CacheStorage (io.pravega.segmentstore.storage.cache.CacheStorage)8 InMemoryDurableDataLogFactory (io.pravega.segmentstore.storage.mocks.InMemoryDurableDataLogFactory)8 ReadIndex (io.pravega.segmentstore.server.ReadIndex)7 CachedStreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation)7 DeleteSegmentOperation (io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation)7 MergeSegmentOperation (io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation)7 MetadataCheckpointOperation (io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation)7 Operation (io.pravega.segmentstore.server.logs.operations.Operation)7 StorageMetadataCheckpointOperation (io.pravega.segmentstore.server.logs.operations.StorageMetadataCheckpointOperation)7 StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation)7