use of io.pravega.common.util.ReusableLatch in project pravega by pravega.
the class ContainerReadIndexTests method testConcurrentEvictAppend.
/**
* Tests a scenario where a call to {@link StreamSegmentReadIndex#append} executes concurrently with a Cache Manager
* eviction. In particular, this tests the following scenario:
* - We have a Cache Entry E1 with Generation G1, and its entire contents are in Storage.
* - E1 maps to the end of the Segment.
* - We initiate an append A1, which will update the contents of E1.
* - The Cache Manager executes.
* - E1 would be eligible for eviction prior to the Cache Manager run, but not after.
* - We need to validate that E1 is not evicted and that A1 is immediately available for reading, and so is the data
* prior to it.
*/
@Test
public void testConcurrentEvictAppend() throws Exception {
    val rnd = new Random(0);
    CachePolicy cachePolicy = new CachePolicy(1, Duration.ZERO, Duration.ofMillis(1));
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);
    final int blockSize = context.cacheStorage.getBlockAlignment();

    // Not blocking anything yet.
    context.cacheStorage.appendReturnBlocker = null;

    // Create the segment and make one append, smaller than the cache block size.
    long segmentId = createSegment(0, context);
    val segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    createSegmentsInStorage(context);
    val append1 = new ByteArraySegment(new byte[blockSize / 2]);
    rnd.nextBytes(append1.array());
    segmentMetadata.setLength(append1.getLength());
    context.readIndex.append(segmentId, 0, append1);
    segmentMetadata.setStorageLength(append1.getLength());

    // Block further cache appends. This gives us time to execute the cache eviction.
    context.cacheStorage.appendReturnBlocker = new ReusableLatch();
    context.cacheStorage.appendComplete = new ReusableLatch();

    // Initiate append 2. The append should be written to the Cache Storage, but its invocation should block until
    // we release the latch above.
    val append2 = new ByteArraySegment(new byte[blockSize - append1.getLength() - 1]);
    rnd.nextBytes(append2.array());
    segmentMetadata.setLength(append1.getLength() + append2.getLength());
    val append2Future = CompletableFuture.runAsync(() -> {
        try {
            context.readIndex.append(segmentId, append1.getLength(), append2);
        } catch (Exception ex) {
            throw new CompletionException(ex);
        }
    }, executorService());
    context.cacheStorage.appendComplete.await();

    // Execute the cache eviction. Append 2 is suspended at the point where we return from the cache call. This is
    // the closest we can come to simulating eviction racing with appending.
    val evictionFuture = CompletableFuture.supplyAsync(context.cacheManager::applyCachePolicy, executorService());

    // We want to verify that the cache eviction is blocked on the append - the two must not run concurrently. The
    // only "elegant" way of verifying this is to wait a short amount of time and check that it has not executed.
    AssertExtensions.assertThrows("Expecting cache eviction to block.",
            () -> evictionFuture.get(SHORT_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS),
            ex -> ex instanceof TimeoutException);

    // Release the second append, which should not error out.
    context.cacheStorage.appendReturnBlocker.release();
    append2Future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Verify that no cache eviction happened.
    boolean evicted = evictionFuture.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertFalse("Not expecting a cache eviction to happen.", evicted);

    // Validate that the data read back is as expected.
    // readDirect() should give us back the data for the second append's range.
    val readData = context.readIndex.readDirect(segmentId, append1.getLength(), append2.getLength());
    Assert.assertNotNull("Expected append2 to be read back.", readData);
    AssertExtensions.assertStreamEquals("Unexpected data read back from append2.", append2.getReader(), readData.getReader(), append2.getLength());

    // Reading the whole segment should work as well.
    byte[] allData = new byte[append1.getLength() + append2.getLength()];
    context.readIndex.read(segmentId, 0, allData.length, TIMEOUT).readRemaining(allData, TIMEOUT);
    AssertExtensions.assertArrayEquals("Unexpected data read back from segment.", append1.array(), 0, allData, 0, append1.getLength());
    AssertExtensions.assertArrayEquals("Unexpected data read back from segment.", append2.array(), 0, allData, append1.getLength(), append2.getLength());
}
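The test hinges on a simple gating pattern: one thread parks inside a ReusableLatch await() call while the test decides when to let it proceed. Below is a minimal, self-contained sketch of that pattern; the names are illustrative, and it assumes reset() rearms the latch for reuse (which is what distinguishes this class from a CountDownLatch).

import io.pravega.common.util.ReusableLatch;
import java.util.concurrent.CompletableFuture;

public class LatchGateExample {
    public static void main(String[] args) {
        ReusableLatch gate = new ReusableLatch(); // starts unreleased: await() blocks
        CompletableFuture<Void> racer = CompletableFuture.runAsync(() -> {
            // Stand-in for the append that the test above suspends mid-call.
            gate.awaitUninterruptibly();
        });
        // While the racer is parked, run whatever must race against it
        // (the test runs cacheManager.applyCachePolicy() in this window).
        gate.release(); // open the gate: the suspended operation finishes
        racer.join();
        gate.reset();   // rearm the latch for the next round (hence "Reusable")
    }
}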
use of io.pravega.common.util.ReusableLatch in project pravega by pravega.
the class ContainerReadIndexTests method testCacheFullDeadlock.
/**
* Tests a deadlock-prone scenario involving multiple Storage read requests from multiple segments, all hitting a
* CacheFullException while trying to process.
*
* Steps:
* 1. Segment 1: Storage Read Complete -> Ack -> Insert in Index -> Acquire (ReadIndex1.Lock[Thread1]) -> Insert in Cache [Request1]
* 2. Segment 2: Storage Read Complete -> Ack -> Insert in Index -> Acquire (ReadIndex2.Lock[Thread2]) -> Insert in Cache [Request2]
* 3. Cache is full. Deadlock occurs if:
* 3.1. [Request1] invokes Cache Eviction, which wants to acquire ReadIndex2.Lock, but it is owned by Thread2.
* 3.2. [Request2] invokes Cache Eviction, which wants to acquire ReadIndex1.Lock, but it is owned by Thread1.
*
* This test verifies that no deadlock occurs by simulating this exact scenario. It verifies that all requests eventually
* complete successfully (as the deadlock victim will back off and retry).
*/
@Test
public void testCacheFullDeadlock() throws Exception {
    // This is the actual cache size, even if we set a lower value than this.
    val maxCacheSize = 2 * 1024 * 1024;

    // Fill up most of the cache - this is also a candidate for eviction.
    val append1Size = (int) (0.75 * maxCacheSize);

    // Dummy append - we only need it to register the read index as a cache client.
    val append2Size = 1;
    val segmentSize = maxCacheSize + 1;
    val config = ReadIndexConfig.builder()
            .with(ReadIndexConfig.MEMORY_READ_MIN_LENGTH, 0) // Default: off (we have a special test for this).
            .with(ReadIndexConfig.STORAGE_READ_ALIGNMENT, maxCacheSize)
            .build();
    CachePolicy cachePolicy = new CachePolicy(maxCacheSize, Duration.ZERO, Duration.ofMillis(1));
    @Cleanup TestContext context = new TestContext(config, cachePolicy, maxCacheSize);

    // Block the first insert (this will be from Segment 1).
    val append1Address = new AtomicInteger(0);
    context.cacheStorage.insertCallback = a -> append1Address.compareAndSet(0, a);
    val segment1Delete = new ReusableLatch();
    context.cacheStorage.beforeDelete = deleteAddress -> {
        if (deleteAddress == append1Address.get()) {
            // Block eviction of the first Segment 1 entry (just the first; we want the rest to go through).
            Exceptions.handleInterrupted(segment1Delete::await);
        }
    };

    // Create the segments and make each of them slightly bigger than the cache capacity.
    long segment1Id = createSegment(0, context);
    long segment2Id = createSegment(1, context);
    val segment1Metadata = context.metadata.getStreamSegmentMetadata(segment1Id);
    val segment2Metadata = context.metadata.getStreamSegmentMetadata(segment2Id);
    segment1Metadata.setLength(segmentSize);
    segment1Metadata.setStorageLength(segmentSize);
    segment2Metadata.setLength(segmentSize);
    segment2Metadata.setStorageLength(segmentSize);
    createSegmentsInStorage(context);
    context.storage.openWrite(segment1Metadata.getName())
            .thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(new byte[segmentSize]), segmentSize, TIMEOUT))
            .join();
    context.storage.openWrite(segment2Metadata.getName())
            .thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(new byte[segmentSize]), segmentSize, TIMEOUT))
            .join();

    // Write some data into the cache. This will become a candidate for eviction at the next step.
    context.readIndex.append(segment1Id, 0, new ByteArraySegment(new byte[append1Size]));

    // Write some data into Segment 2's index. This has no effect on the cache, but it registers the index with the Cache Manager.
    context.readIndex.append(segment2Id, 0, new ByteArraySegment(new byte[append2Size]));

    // Initiate the first Storage read. This should exceed the max cache size, so it should trigger a cleanup.
    val segment1Read = context.readIndex.read(segment1Id, append1Size, segmentSize - append1Size, TIMEOUT).next();
    Assert.assertEquals(ReadResultEntryType.Storage, segment1Read.getType());
    segment1Read.requestContent(TIMEOUT);

    // This one should complete right away.
    segment1Read.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Wait for the delete callback to be latched.
    TestUtils.await(() -> segment1Delete.getQueueLength() > 0, 10, TIMEOUT.toMillis());

    // Initiate the second Storage read. This should also exceed the max cache size and trigger another cleanup, but
    // (most importantly) on a different thread.
    val segment2Read = context.readIndex.read(segment2Id, append2Size, segmentSize - append2Size, TIMEOUT).next();
    Assert.assertEquals(ReadResultEntryType.Storage, segment2Read.getType());
    segment2Read.requestContent(TIMEOUT);

    // As with the first one, this should complete right away.
    segment2Read.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // We use yet another thread to validate that no deadlock occurs. This should briefly block on Segment 2's Read
    // Index lock, but it should be unblocked when we release that lock (next step).
    val append2Future = CompletableFuture.runAsync(() -> {
        try {
            context.readIndex.append(segment2Id, append2Size, new ByteArraySegment(new byte[append1Size]));
        } catch (Exception ex) {
            throw new CompletionException(ex);
        }
    }, executorService());

    // Release the delete blocker. If all goes well, all the other operations should be unblocked at this point.
    segment1Delete.release();
    append2Future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
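The TestUtils.await(() -> segment1Delete.getQueueLength() > 0, ...) step is the key synchronization trick here: getQueueLength() reports how many threads are currently blocked in await(), so the test can tell when the eviction thread is genuinely parked inside the delete callback before it issues the second read. A stripped-down sketch of the idiom follows, with illustrative names and a plain polling loop in place of TestUtils.await.

import io.pravega.common.util.ReusableLatch;

public class AwaitParkedExample {
    public static void main(String[] args) throws InterruptedException {
        ReusableLatch blocker = new ReusableLatch();
        Thread worker = new Thread(blocker::awaitUninterruptibly);
        worker.start();
        // Poll until the worker is really parked on the latch.
        while (blocker.getQueueLength() == 0) {
            Thread.sleep(1);
        }
        // The worker is now guaranteed to be inside await(); in the test above, this is
        // the window in which the second Storage read is issued on another thread.
        blocker.release();
        worker.join();
    }
}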
use of io.pravega.common.util.ReusableLatch in project pravega by pravega.
the class StreamSegmentContainerRegistryTests method testContainerFailureOnStartup.
/**
* Tests the ability to detect a container failure and unregister the container in case the container fails on startup.
*/
@Test
public void testContainerFailureOnStartup() throws Exception {
    final int containerId = 123;

    // We insert a ReusableLatch that lets us manually delay the TestContainer's shutdown/closing process, so that
    // we have enough time to verify that calling getContainer() on a container that is currently shutting down
    // throws the appropriate exception.
    ReusableLatch closeReleaseSignal = new ReusableLatch();
    TestContainerFactory factory = new TestContainerFactory(new IntentionalException(), closeReleaseSignal);
    @Cleanup StreamSegmentContainerRegistry registry = new StreamSegmentContainerRegistry(factory, executorService());
    AssertExtensions.assertThrows("Unexpected exception thrown upon failed container startup.",
            registry.startContainer(containerId, TIMEOUT)::join,
            ex -> ex instanceof IntentionalException || (ex instanceof IllegalStateException && ex.getCause() instanceof IntentionalException));
    AssertExtensions.assertThrows("Container is registered even though it failed to start (and is currently shutting down).",
            () -> registry.getContainer(containerId),
            ex -> ex instanceof ContainerNotFoundException);

    // Unblock container closing, which will, in turn, unblock its de-registration.
    closeReleaseSignal.release();
    AssertExtensions.assertThrows("Container is registered even though it failed to start (and has been unregistered).",
            () -> registry.getContainer(containerId),
            ex -> ex instanceof ContainerNotFoundException);
}
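The TestContainerFactory and TestContainer internals are not shown on this page. Below is a plausible sketch, with hypothetical names, of how a latch handed to the factory can hold a Guava Service's shutdown open the way closeReleaseSignal does above; the real TestContainer may be wired differently.

import com.google.common.util.concurrent.AbstractService;
import io.pravega.common.util.ReusableLatch;

// Hypothetical stand-in for TestContainer: a Guava Service whose shutdown
// parks on a ReusableLatch until the test releases it.
public class LatchedService extends AbstractService {
    private final ReusableLatch closeSignal;

    public LatchedService(ReusableLatch closeSignal) {
        this.closeSignal = closeSignal;
    }

    @Override
    protected void doStart() {
        notifyStarted();
    }

    @Override
    protected void doStop() {
        // Hold the shutdown open until the test calls closeSignal.release(),
        // then report the stop as complete.
        new Thread(() -> {
            closeSignal.awaitUninterruptibly();
            notifyStopped();
        }).start();
    }
}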
use of io.pravega.common.util.ReusableLatch in project pravega by pravega.
the class StreamSegmentContainerRegistryTests method testStartAlreadyRunning.
/**
* Tests a scenario where a container startup is requested while the same container is still running, or immediately
* after (or during) its shutdown. This covers both the case where a container auto-shuts down due to a failure and
* where it is shut down in a controlled manner.
*/
@Test
public void testStartAlreadyRunning() throws Exception {
    final int containerId = 1;
    TestContainerFactory factory = new TestContainerFactory();
    @Cleanup StreamSegmentContainerRegistry registry = new StreamSegmentContainerRegistry(factory, executorService());
    registry.startContainer(containerId, TIMEOUT).join();
    TestContainer container1 = (TestContainer) registry.getContainer(containerId);

    // 1. While running.
    AssertExtensions.assertThrows("startContainer() did not throw for already registered container.",
            () -> registry.startContainer(containerId, TIMEOUT),
            ex -> ex instanceof IllegalArgumentException);

    // 2. After a container fails - while shutting down.
    // Manually control when the Container actually shuts down.
    container1.stopSignal = new ReusableLatch();
    container1.fail(new IntentionalException());
    val startContainer2 = registry.startContainer(containerId, TIMEOUT);
    Assert.assertFalse("startContainer() completed before previous container shut down (with failure).", startContainer2.isDone());
    container1.stopSignal.release();
    startContainer2.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    TestContainer container2 = (TestContainer) registry.getContainer(containerId);
    Assert.assertEquals("Container1 was not shut down (with failure).", Service.State.FAILED, container1.state());
    Assert.assertEquals("Container2 was not started properly.", Service.State.RUNNING, container2.state());

    // 3. After a controlled shutdown - while shutting down.
    // Manually control when the Container actually shuts down.
    container2.stopSignal = new ReusableLatch();
    container2.stopAsync();
    val startContainer3 = registry.startContainer(containerId, TIMEOUT);
    Assert.assertFalse("startContainer() completed before previous container shut down (normally).", startContainer3.isDone());
    container2.stopSignal.release();
    startContainer3.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    TestContainer container3 = (TestContainer) registry.getContainer(containerId);
    Assert.assertEquals("Container2 was not shut down (normally).", Service.State.TERMINATED, container2.state());
    Assert.assertEquals("Container3 was not started properly.", Service.State.RUNNING, container3.state());
}
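For contrast between the two end states asserted above (FAILED vs. TERMINATED), here is a brief usage sketch reusing the hypothetical LatchedService from the previous example: a normal stopAsync() that is released through the latch ends in TERMINATED.

import io.pravega.common.util.ReusableLatch;

public class LatchedServiceDemo {
    public static void main(String[] args) {
        ReusableLatch stopSignal = new ReusableLatch();
        LatchedService svc = new LatchedService(stopSignal);
        svc.startAsync().awaitRunning();  // Service.State.RUNNING
        svc.stopAsync();                  // enters STOPPING; doStop() parks on the latch
        stopSignal.release();             // let the shutdown proceed
        svc.awaitTerminated();            // Service.State.TERMINATED - a normal shutdown
        // A service that reports notifyFailed(...) instead would end in Service.State.FAILED,
        // which is the path exercised by container1.fail(new IntentionalException()) above.
    }
}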
use of io.pravega.common.util.ReusableLatch in project pravega by pravega.
the class LatestItemSequentialProcessorTest method testNotCalledInParallel.
@Test
public void testNotCalledInParallel() throws InterruptedException {
    @Cleanup("shutdown") ExecutorService pool = Executors.newFixedThreadPool(2);
    CountDownLatch parCheck = new CountDownLatch(2);
    ReusableLatch latch = new ReusableLatch(false);
    LatestItemSequentialProcessor<String> processor = new LatestItemSequentialProcessor<>(s -> {
        parCheck.countDown();
        latch.awaitUninterruptibly();
    }, pool);
    processor.updateItem("a");
    processor.updateItem("b");
    processor.updateItem("c");
    AssertExtensions.assertBlocks(() -> parCheck.await(), () -> parCheck.countDown());
    latch.release();
    pool.shutdown();
    pool.awaitTermination(5, TimeUnit.SECONDS);
}
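The test above only proves the consumer is never invoked in parallel. The other half of LatestItemSequentialProcessor's contract implied by the setup is that updates arriving while the consumer is busy are coalesced, so only the most recent one is processed next. The sketch below illustrates that behavior; the import path and the exact skipping of "b" are assumptions based on the class's name and this test's structure.

import io.pravega.common.util.LatestItemSequentialProcessor; // package assumed
import io.pravega.common.util.ReusableLatch;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CoalescingExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        CountDownLatch started = new CountDownLatch(1);
        CountDownLatch done = new CountDownLatch(1);
        ReusableLatch busy = new ReusableLatch();
        List<String> seen = Collections.synchronizedList(new ArrayList<>());
        LatestItemSequentialProcessor<String> processor = new LatestItemSequentialProcessor<>(s -> {
            seen.add(s);
            started.countDown();          // signal that processing has begun
            busy.awaitUninterruptibly();  // simulate a slow consumer
            if ("c".equals(s)) {
                done.countDown();
            }
        }, pool);
        processor.updateItem("a");
        started.await();           // ensure "a" is mid-flight before updating again
        processor.updateItem("b"); // superseded while "a" is still being processed...
        processor.updateItem("c"); // ...by this, the latest item
        busy.release();
        done.await();
        System.out.println(seen);  // expected: [a, c] - "b" was coalesced away
        pool.shutdown();
    }
}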