Example 11 with CachePolicy

Use of io.pravega.segmentstore.server.CachePolicy in project pravega by pravega.

The class ContainerReadIndexTests, method testConcurrentEvictAppend.

/**
 * Tests a scenario where a call to {@link StreamSegmentReadIndex#append} executes concurrently with a Cache Manager
 * eviction. In particular, this tests the following scenario:
 * - We have a Cache Entry E1 with Generation G1, and its entire contents are in Storage.
 * - E1 maps to the end of the Segment.
 * - We initiate an append A1, which will update the contents of E1.
 * - The Cache Manager executes.
 * - E1 would be eligible for eviction prior to the Cache Manager run, but not after.
 * - We need to validate that E1 is not evicted and that A1 is immediately available for reading, and so is the data
 * prior to it.
 */
@Test
public void testConcurrentEvictAppend() throws Exception {
    val rnd = new Random(0);
    CachePolicy cachePolicy = new CachePolicy(1, Duration.ZERO, Duration.ofMillis(1));
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);
    final int blockSize = context.cacheStorage.getBlockAlignment();
    // Not blocking anything now.
    context.cacheStorage.appendReturnBlocker = null;
    // Create segment and make one append, less than the cache block size.
    long segmentId = createSegment(0, context);
    val segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    createSegmentsInStorage(context);
    val append1 = new ByteArraySegment(new byte[blockSize / 2]);
    rnd.nextBytes(append1.array());
    segmentMetadata.setLength(append1.getLength());
    context.readIndex.append(segmentId, 0, append1);
    segmentMetadata.setStorageLength(append1.getLength());
    // Block further cache appends. This will give us time to execute cache eviction.
    context.cacheStorage.appendReturnBlocker = new ReusableLatch();
    context.cacheStorage.appendComplete = new ReusableLatch();
    // Initiate append 2. The append should be written to the Cache Storage, but its invocation should block until
    // we release the above latch.
    val append2 = new ByteArraySegment(new byte[blockSize - append1.getLength() - 1]);
    rnd.nextBytes(append2.array());
    segmentMetadata.setLength(append1.getLength() + append2.getLength());
    val append2Future = CompletableFuture.runAsync(() -> {
        try {
            context.readIndex.append(segmentId, append1.getLength(), append2);
        } catch (Exception ex) {
            throw new CompletionException(ex);
        }
    }, executorService());
    context.cacheStorage.appendComplete.await();
    // Execute cache eviction. Append 2 is suspended at the point when we return from the cache call. This is the
    // closest we can come to simulating eviction racing with appending.
    val evictionFuture = CompletableFuture.supplyAsync(context.cacheManager::applyCachePolicy, this.executorService());
    // We want to verify that the cache eviction is blocked on the append - they should not run concurrently. The only
    // "elegant" way of verifying this is by waiting a short amount of time and checking that it didn't execute.
    AssertExtensions.assertThrows("Expecting cache eviction to block.", () -> evictionFuture.get(SHORT_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS), ex -> ex instanceof TimeoutException);
    // Release the second append, which should not error out.
    context.cacheStorage.appendReturnBlocker.release();
    append2Future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Verify that no cache eviction happened.
    boolean evicted = evictionFuture.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertFalse("Not expected a cache eviction to happen.", evicted);
    // Validate data read back is as expected.
    // readDirect() should return an InputStream for the second append range.
    val readData = context.readIndex.readDirect(segmentId, append1.getLength(), append2.getLength());
    Assert.assertNotNull("Expected append2 to be read back.", readData);
    AssertExtensions.assertStreamEquals("Unexpected data read back from append2.", append2.getReader(), readData.getReader(), append2.getLength());
    // Reading the whole segment should work well too.
    byte[] allData = new byte[append1.getLength() + append2.getLength()];
    context.readIndex.read(segmentId, 0, allData.length, TIMEOUT).readRemaining(allData, TIMEOUT);
    AssertExtensions.assertArrayEquals("Unexpected data read back from segment.", append1.array(), 0, allData, 0, append1.getLength());
    AssertExtensions.assertArrayEquals("Unexpected data read back from segment.", append2.array(), 0, allData, append1.getLength(), append2.getLength());
}
Also used : lombok.val, lombok.Cleanup, io.pravega.common.ObjectClosedException, io.pravega.common.util.ByteArraySegment, io.pravega.common.util.ReusableLatch, io.pravega.segmentstore.contracts.StreamSegmentNotExistsException, io.pravega.segmentstore.contracts.StreamSegmentSealedException, io.pravega.segmentstore.contracts.StreamSegmentTruncatedException, io.pravega.segmentstore.server.CachePolicy, io.pravega.test.common.IntentionalException, java.io.IOException, java.util.Random, java.util.concurrent.CancellationException, java.util.concurrent.CompletionException, java.util.concurrent.TimeoutException, org.junit.Test
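
The test above drives eviction with a deliberately aggressive CachePolicy. A minimal sketch of that construction is below; the parameter meanings (maximum cache size, maximum entry age, generation duration) are inferred from how the tests use them, so treat them as assumptions rather than documented semantics:

import io.pravega.segmentstore.server.CachePolicy;
import java.time.Duration;

class CachePolicyExample {
    // A minimal sketch, assuming the 3-arg form is (maxSize, maxTime, generationDuration).
    // A 1-byte budget, a zero maximum age and 1ms generations make nearly every cache entry
    // eviction-eligible on each Cache Manager run, which is what these race tests rely on.
    static final CachePolicy AGGRESSIVE = new CachePolicy(1, Duration.ZERO, Duration.ofMillis(1));
}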

Example 12 with CachePolicy

Use of io.pravega.segmentstore.server.CachePolicy in project pravega by pravega.

The class ContainerReadIndexTests, method testMergeFutureReadCancelledOnClose.

/**
 * Verifies that any FutureRead that resulted from a partial merge operation is cancelled when the ReadIndex is closed.
 */
@Test
public void testMergeFutureReadCancelledOnClose() throws Exception {
    CachePolicy cachePolicy = new CachePolicy(1, Duration.ZERO, Duration.ofMillis(1));
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);
    // Create parent segment and one transaction
    long parentId = createSegment(0, context);
    long transactionId = createTransaction(1, context);
    createSegmentsInStorage(context);
    ByteArraySegment writeData = getAppendData(context.metadata.getStreamSegmentMetadata(transactionId).getName(), transactionId, 0, 0);
    RedirectedReadResultEntry entry = (RedirectedReadResultEntry) setupMergeRead(parentId, transactionId, writeData.getCopy(), context);
    // There are a number of async tasks going on here. One of them is in RedirectedReadResultEntry which needs to switch
    // from the first attempt to a second one. Since we have no hook to know when that happens exactly, the only thing
    // we can do is check periodically until that is done.
    TestUtils.await(entry::hasSecondEntrySet, 10, TIMEOUT.toMillis());
    // Close the index.
    context.readIndex.close();
    // Verify the entry is cancelled. Invoke get() since the cancellation is asynchronous so it may not yet have
    // been executed; get() will block until that happens.
    AssertExtensions.assertThrows("Expected entry to have been cancelled upon closing", () -> entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS), ex -> ex instanceof CancellationException);
}
Also used : io.pravega.common.util.ByteArraySegment, io.pravega.segmentstore.server.CachePolicy, java.util.concurrent.CancellationException, lombok.Cleanup, org.junit.Test
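
The contract verified here, that closing the index completes outstanding read futures with a CancellationException, can be shown in isolation. The sketch below is not Pravega code; PendingEntry is a hypothetical stand-in built on plain java.util.concurrent types:

import java.util.concurrent.CompletableFuture;

// Hypothetical stand-in for a read result entry whose content is still pending.
class PendingEntry implements AutoCloseable {
    private final CompletableFuture<byte[]> content = new CompletableFuture<>();

    CompletableFuture<byte[]> getContent() {
        return this.content;
    }

    @Override
    public void close() {
        // cancel() completes the future exceptionally; any caller blocked in get()
        // is released with a CancellationException, as the assertion above expects.
        this.content.cancel(true);
    }
}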

Example 13 with CachePolicy

Use of io.pravega.segmentstore.server.CachePolicy in project pravega by pravega.

The class ContainerReadIndexTests, method testCacheFullDeadlock.

/**
 * Tests a deadlock-prone scenario involving multiple Storage read requests from multiple segments, all hitting a
 * CacheFullException while trying to process.
 *
 * Steps:
 * 1. Segment 1: Storage Read Complete -> Ack -> Insert in Index -> Acquire (ReadIndex1.Lock[Thread1]) -> Insert in Cache [Request1]
 * 2. Segment 2: Storage Read Complete -> Ack -> Insert in Index -> Acquire (ReadIndex2.Lock[Thread2]) -> Insert in Cache [Request2]
 * 3. Cache is full. Deadlock occurs if:
 * 3.1. [Request1] invokes Cache Eviction, which wants to acquire ReadIndex2.Lock, but it is owned by Thread2.
 * 3.2. [Request2] invokes Cache Eviction, which wants to acquire ReadIndex1.Lock, but it is owned by Thread1.
 *
 * This test verifies that no deadlock occurs by simulating this exact scenario. It verifies that all requests eventually
 * complete successfully (as the deadlock victim will back off and retry).
 */
@Test
public void testCacheFullDeadlock() throws Exception {
    // This is the actual cache size, even if we set a lower value than this.
    val maxCacheSize = 2 * 1024 * 1024;
    // Fill up most of the cache - this is also a candidate for eviction.
    val append1Size = (int) (0.75 * maxCacheSize);
    // Dummy append - need to register the read index as a cache client.
    val append2Size = 1;
    val segmentSize = maxCacheSize + 1;
    val config = ReadIndexConfig.builder()
            .with(ReadIndexConfig.MEMORY_READ_MIN_LENGTH, 0) // Default: Off (we have a special test for this).
            .with(ReadIndexConfig.STORAGE_READ_ALIGNMENT, maxCacheSize)
            .build();
    CachePolicy cachePolicy = new CachePolicy(maxCacheSize, Duration.ZERO, Duration.ofMillis(1));
    @Cleanup TestContext context = new TestContext(config, cachePolicy, maxCacheSize);
    // Block the first insert (this will be from Segment 1).
    val append1Address = new AtomicInteger(0);
    context.cacheStorage.insertCallback = a -> append1Address.compareAndSet(0, a);
    val segment1Delete = new ReusableLatch();
    context.cacheStorage.beforeDelete = deleteAddress -> {
        if (deleteAddress == append1Address.get()) {
            // Block eviction of the first segment 1 data (just the first; we want the rest to go through).
            Exceptions.handleInterrupted(segment1Delete::await);
        }
    };
    // Create segments and make each of them slightly bigger than the cache capacity.
    long segment1Id = createSegment(0, context);
    long segment2Id = createSegment(1, context);
    val segment1Metadata = context.metadata.getStreamSegmentMetadata(segment1Id);
    val segment2Metadata = context.metadata.getStreamSegmentMetadata(segment2Id);
    segment1Metadata.setLength(segmentSize);
    segment1Metadata.setStorageLength(segmentSize);
    segment2Metadata.setLength(segmentSize);
    segment2Metadata.setStorageLength(segmentSize);
    createSegmentsInStorage(context);
    context.storage.openWrite(segment1Metadata.getName())
            .thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(new byte[segmentSize]), segmentSize, TIMEOUT))
            .join();
    context.storage.openWrite(segment2Metadata.getName())
            .thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(new byte[segmentSize]), segmentSize, TIMEOUT))
            .join();
    // Write some data into the cache. This will become a candidate for eviction at the next step.
    context.readIndex.append(segment1Id, 0, new ByteArraySegment(new byte[append1Size]));
    // Write some data into Segment 2's index. This will have no effect on the cache, but we will register it with the Cache Manager.
    context.readIndex.append(segment2Id, 0, new ByteArraySegment(new byte[append2Size]));
    // Initiate the first Storage read. This should exceed the max cache size, so it should trigger the cleanup.
    val segment1Read = context.readIndex.read(segment1Id, append1Size, segmentSize - append1Size, TIMEOUT).next();
    Assert.assertEquals(ReadResultEntryType.Storage, segment1Read.getType());
    segment1Read.requestContent(TIMEOUT);
    // This one should complete right away.
    segment1Read.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Wait for the delete callback to be latched.
    TestUtils.await(() -> segment1Delete.getQueueLength() > 0, 10, TIMEOUT.toMillis());
    // Initiate the second Storage read. This should also exceed the max cache size and trigger another cleanup, but
    // (most importantly) on a different thread.
    val segment2Read = context.readIndex.read(segment2Id, append2Size, segmentSize - append2Size, TIMEOUT).next();
    Assert.assertEquals(ReadResultEntryType.Storage, segment2Read.getType());
    segment2Read.requestContent(TIMEOUT);
    // As with the first one, this should complete right away.
    segment2Read.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // We use yet another thread to validate that no deadlock occurs. This should briefly block on Segment 2's Read index's
    // lock, but it should be unblocked when we release that (next step).
    val append2Future = CompletableFuture.runAsync(() -> {
        try {
            context.readIndex.append(segment2Id, append2Size, new ByteArraySegment(new byte[append1Size]));
        } catch (Exception ex) {
            throw new CompletionException(ex);
        }
    }, executorService());
    // Release the delete blocker. If all goes well, all the other operations should be unblocked at this point.
    segment1Delete.release();
    append2Future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
Also used : io.pravega.common.Exceptions, io.pravega.common.ObjectClosedException, io.pravega.common.concurrent.Futures, io.pravega.common.util.BufferView, io.pravega.common.util.ByteArraySegment, io.pravega.common.util.ReusableLatch, io.pravega.segmentstore.contracts.ReadResult, io.pravega.segmentstore.contracts.ReadResultEntry, io.pravega.segmentstore.contracts.ReadResultEntryType, io.pravega.segmentstore.contracts.StreamSegmentNotExistsException, io.pravega.segmentstore.contracts.StreamSegmentSealedException, io.pravega.segmentstore.contracts.StreamSegmentTruncatedException, io.pravega.segmentstore.server.CachePolicy, io.pravega.segmentstore.server.EvictableMetadata, io.pravega.segmentstore.server.MetadataBuilder, io.pravega.segmentstore.server.SegmentMetadata, io.pravega.segmentstore.server.TestCacheManager, io.pravega.segmentstore.server.TestStorage, io.pravega.segmentstore.server.UpdateableContainerMetadata, io.pravega.segmentstore.server.UpdateableSegmentMetadata, io.pravega.segmentstore.server.containers.StreamSegmentMetadata, io.pravega.segmentstore.storage.ReadOnlyStorage, io.pravega.segmentstore.storage.cache.CacheState, io.pravega.segmentstore.storage.cache.CacheStorage, io.pravega.segmentstore.storage.cache.DirectMemoryCache, io.pravega.segmentstore.storage.mocks.InMemoryStorage, io.pravega.shared.NameUtils, io.pravega.test.common.AssertExtensions, io.pravega.test.common.IntentionalException, io.pravega.test.common.TestUtils, io.pravega.test.common.ThreadPooledTestSuite, java.io.ByteArrayInputStream, java.io.ByteArrayOutputStream, java.io.IOException, java.time.Duration, java.util.ArrayList, java.util.Arrays, java.util.Collection, java.util.Collections, java.util.HashMap, java.util.HashSet, java.util.List, java.util.Map, java.util.Random, java.util.UUID, java.util.concurrent.CancellationException, java.util.concurrent.CompletableFuture, java.util.concurrent.CompletionException, java.util.concurrent.ScheduledExecutorService, java.util.concurrent.TimeUnit, java.util.concurrent.TimeoutException, java.util.concurrent.atomic.AtomicInteger, java.util.concurrent.atomic.AtomicLong, java.util.concurrent.atomic.AtomicReference, java.util.function.BiConsumer, java.util.function.Consumer, java.util.stream.Collectors, lombok.Cleanup, lombok.Getter, lombok.RequiredArgsConstructor, lombok.SneakyThrows, lombok.val, org.junit.Assert, org.junit.Rule, org.junit.Test, org.junit.rules.Timeout, org.mockito.Mockito
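
The deadlock avoidance exercised above boils down to one rule: never run cache eviction while holding a read index's lock; on a CacheFullException, back off, evict, and retry. A minimal, self-contained sketch of that shape follows; the names (BackoffInsertSketch, tryInsert, evictOnce, freeBytes) are hypothetical and not Pravega's actual API:

import java.util.concurrent.locks.ReentrantLock;

class BackoffInsertSketch {
    private final ReentrantLock indexLock = new ReentrantLock();
    // Stand-in for the remaining cache capacity.
    private int freeBytes = 8;

    // Retry the insert, but always run eviction *outside* our own lock, so that an
    // eviction triggered from another segment's thread can still acquire it (no cycle).
    void insertWithRetry(byte[] data) {
        while (true) {
            this.indexLock.lock();
            try {
                if (tryInsert(data)) {
                    return;
                }
            } finally {
                this.indexLock.unlock(); // Back off before evicting.
            }
            evictOnce(); // Stand-in for cacheManager.applyCachePolicy().
        }
    }

    private boolean tryInsert(byte[] data) {
        if (data.length > this.freeBytes) {
            return false; // The real cache would throw CacheFullException here.
        }
        this.freeBytes -= data.length;
        return true;
    }

    private void evictOnce() {
        this.freeBytes += 16; // Pretend eviction reclaimed some space.
    }
}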

Aggregations

CachePolicy (io.pravega.segmentstore.server.CachePolicy): 13
Cleanup (lombok.Cleanup): 12
Test (org.junit.Test): 12
ByteArraySegment (io.pravega.common.util.ByteArraySegment): 11
lombok.val (lombok.val): 10
BufferView (io.pravega.common.util.BufferView): 9
ReadResultEntry (io.pravega.segmentstore.contracts.ReadResultEntry): 9
ObjectClosedException (io.pravega.common.ObjectClosedException): 8
ReusableLatch (io.pravega.common.util.ReusableLatch): 8
ReadResult (io.pravega.segmentstore.contracts.ReadResult): 8
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException): 8
StreamSegmentSealedException (io.pravega.segmentstore.contracts.StreamSegmentSealedException): 8
StreamSegmentTruncatedException (io.pravega.segmentstore.contracts.StreamSegmentTruncatedException): 8
UpdateableSegmentMetadata (io.pravega.segmentstore.server.UpdateableSegmentMetadata): 8
IntentionalException (io.pravega.test.common.IntentionalException): 8
Exceptions (io.pravega.common.Exceptions): 7
Futures (io.pravega.common.concurrent.Futures): 7
ReadResultEntryType (io.pravega.segmentstore.contracts.ReadResultEntryType): 7
EvictableMetadata (io.pravega.segmentstore.server.EvictableMetadata): 7
MetadataBuilder (io.pravega.segmentstore.server.MetadataBuilder): 7