Use of io.pravega.common.util.ReusableLatch in project pravega by pravega.
The class AppendProcessorTest, method testDelayedDataAppended.
/**
 * Test to ensure newer appends are processed only after successfully sending the DataAppended acknowledgement
 * back to the client. This test verifies the following:
 * - If sending the first DataAppended is blocked, ensure future appends are not written to the store.
 * - Once the first DataAppended is sent, ensure the remaining appends are written to the store and a DataAppended
 *   is ack'ed back.
 */
@Test(timeout = 15 * 1000)
public void testDelayedDataAppended() throws Exception {
    ReusableLatch firstStoreAppendInvoked = new ReusableLatch();
    ReusableLatch completeFirstDataAppendedAck = new ReusableLatch();
    ReusableLatch secondStoreAppendInvoked = new ReusableLatch();
    @Cleanup("shutdownNow")
    ScheduledExecutorService nettyExecutor = ExecutorServiceHelpers.newScheduledThreadPool(1, "Netty-threadPool");
    String streamSegmentName = "testDelayedAppend";
    UUID clientId = UUID.randomUUID();
    byte[] data = new byte[] { 1, 2, 3, 4, 6, 7, 8, 9 };
    StreamSegmentStore store = mock(StreamSegmentStore.class);
    ServerConnection connection = mock(ServerConnection.class);
    // Ensure the first DataAppended is hung/delayed.
    doAnswer(invocation -> {
        firstStoreAppendInvoked.release();
        // Wait, simulating a hung/delayed DataAppended acknowledgement.
        completeFirstDataAppendedAck.await();
        return null;
    }).doAnswer(invocation -> {
        secondStoreAppendInvoked.release();
        return null;
    }).when(connection).send(any(DataAppended.class));
    AppendProcessor processor = new AppendProcessor(store, connection, new FailingRequestProcessor(), null);
    CompletableFuture<SegmentProperties> propsFuture = CompletableFuture.completedFuture(StreamSegmentInformation.builder().name(streamSegmentName).build());
    when(store.getStreamSegmentInfo(streamSegmentName, true, AppendProcessor.TIMEOUT)).thenReturn(propsFuture);
    processor.setupAppend(new SetupAppend(1, clientId, streamSegmentName, ""));
    verify(store).getStreamSegmentInfo(streamSegmentName, true, AppendProcessor.TIMEOUT);
    CompletableFuture<Void> result = CompletableFuture.completedFuture(null);
    int eventCount = 100;
    when(store.append(streamSegmentName, data, updateEventNumber(clientId, 100, SegmentMetadata.NULL_ATTRIBUTE_VALUE, eventCount), AppendProcessor.TIMEOUT)).thenReturn(result);
    // Trigger the first append; here the sending of the DataAppended ack will be delayed/hung.
    nettyExecutor.submit(() -> processor.append(new Append(streamSegmentName, clientId, 100, eventCount, Unpooled.wrappedBuffer(data), null)));
    firstStoreAppendInvoked.await();
    verify(store).append(streamSegmentName, data, updateEventNumber(clientId, 100, SegmentMetadata.NULL_ATTRIBUTE_VALUE, eventCount), AppendProcessor.TIMEOUT);
    /* Trigger the next append. This should complete immediately and should not cause a store.append to be
       invoked, as the previous DataAppended ack has not yet been sent. */
    processor.append(new Append(streamSegmentName, clientId, 200, eventCount, Unpooled.wrappedBuffer(data), null));
    // Since the first ack was never sent, the next append should not be written to the store.
    verifyNoMoreInteractions(store);
    // Set up the mock to check behaviour after the delayed/hung DataAppended completes.
    when(store.append(streamSegmentName, data, updateEventNumber(clientId, 200, 100, eventCount), AppendProcessor.TIMEOUT)).thenReturn(result);
    // Now allow the first DataAppended ack to be sent.
    completeFirstDataAppendedAck.release();
    // Wait until the next store append is invoked.
    secondStoreAppendInvoked.await();
    // Verify that the next store append is invoked.
    verify(store).append(streamSegmentName, data, updateEventNumber(clientId, 200, 100, eventCount), AppendProcessor.TIMEOUT);
    // Verify two DataAppended acks are sent out.
    verify(connection, times(2)).send(any(DataAppended.class));
    verify(connection).send(new DataAppended(clientId, 100, Long.MIN_VALUE));
    verify(connection).send(new DataAppended(clientId, 200, 100));
}
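All three latches above implement the same two-way handshake between the test thread and the mocked connection: one side calls release() to signal it has reached a known point, and the other blocks in await() until that signal arrives. A minimal sketch of the pattern in isolation (the thread and latch names here are illustrative, not from the Pravega codebase):

import io.pravega.common.util.ReusableLatch;

public class LatchHandshakeSketch {
    public static void main(String[] args) throws Exception {
        ReusableLatch workerArrived = new ReusableLatch();       // starts unreleased
        ReusableLatch allowWorkerToFinish = new ReusableLatch();
        Thread worker = new Thread(() -> {
            workerArrived.release();                 // signal: worker reached the critical point
            try {
                allowWorkerToFinish.await();         // block until the main thread lets us proceed
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        worker.start();
        workerArrived.await();                       // wait until the worker is parked at the critical point
        // ... assertions about the blocked state would go here ...
        allowWorkerToFinish.release();               // unblock the worker
        worker.join();
    }
}

Unlike a CountDownLatch, a ReusableLatch can be reset() and reused, which is why these tests reach for it.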
Use of io.pravega.common.util.ReusableLatch in project pravega by pravega.
The class ContainerReadIndexTests, method testConcurrentEvictionTransactionStorageMerge.
/**
 * Tests a scenario where a call to {@link StreamSegmentReadIndex#completeMerge} executes concurrently with a
 * CacheManager eviction. The Cache Manager must not evict the data for recently transferred entries, even if they
 * would otherwise be eligible for eviction in the source segment.
 */
@Test
public void testConcurrentEvictionTransactionStorageMerge() throws Exception {
    val mergeOffset = 1;
    val appendLength = 1;
    CachePolicy cachePolicy = new CachePolicy(1, Duration.ZERO, Duration.ofMillis(1));
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);
    // Create the parent segment and one transaction.
    long targetId = createSegment(0, context);
    long sourceId = createTransaction(1, context);
    val targetMetadata = context.metadata.getStreamSegmentMetadata(targetId);
    val sourceMetadata = context.metadata.getStreamSegmentMetadata(sourceId);
    createSegmentsInStorage(context);
    // Write something to the parent segment.
    appendSingleWrite(targetId, new ByteArraySegment(new byte[mergeOffset]), context);
    context.storage.openWrite(targetMetadata.getName())
                   .thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(new byte[mergeOffset]), mergeOffset, TIMEOUT))
                   .join();
    // Write something to the transaction, but do not write anything to Storage - we want to verify we don't even
    // try to reach in there.
    val sourceContents = getAppendData(context.metadata.getStreamSegmentMetadata(sourceId).getName(), sourceId, 0, 0);
    appendSingleWrite(sourceId, sourceContents, context);
    sourceMetadata.setStorageLength(sourceMetadata.getLength());
    // Seal & begin-merge the transaction (do not seal in Storage).
    sourceMetadata.markSealed();
    targetMetadata.setLength(sourceMetadata.getLength() + mergeOffset);
    context.readIndex.beginMerge(targetId, mergeOffset, sourceId);
    sourceMetadata.markMerged();
    sourceMetadata.markDeleted();
    // Trigger a Complete Merge. We want to intercept and pause it immediately before it is unregistered from the
    // Cache Manager.
    @Cleanup("release") val unregisterCalled = new ReusableLatch();
    @Cleanup("release") val unregisterBlocker = new ReusableLatch();
    context.cacheManager.setUnregisterInterceptor(c -> {
        unregisterCalled.release();
        Exceptions.handleInterrupted(unregisterBlocker::await);
    });
    val completeMerge = CompletableFuture.runAsync(() -> {
        try {
            context.readIndex.completeMerge(targetId, sourceId);
        } catch (Exception ex) {
            throw new CompletionException(ex);
        }
    }, executorService());
    // Clear the cache. The source Read Index is still registered in the Cache Manager - we want to ensure that any
    // eviction happening at this point will not delete anything from the Cache that we don't want deleted.
    unregisterCalled.await();
    context.cacheManager.applyCachePolicy();
    // Wait for the operation to complete.
    unregisterBlocker.release();
    completeMerge.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Verify that we can append (appending modifies the last cache entry; if that entry had been evicted or
    // erroneously deleted, this would no longer work).
    val appendOffset = (int) targetMetadata.getLength();
    val appendData = new byte[appendLength];
    appendData[0] = (byte) 23;
    targetMetadata.setLength(appendOffset + appendLength);
    context.readIndex.append(targetId, appendOffset, new ByteArraySegment(appendData));
    // Issue a read and verify we can read everything that we wrote. If anything had been evicted or erroneously
    // deleted from the cache, this would result in an error.
    byte[] expectedData = new byte[appendOffset + appendLength];
    sourceContents.copyTo(expectedData, mergeOffset, sourceContents.getLength());
    System.arraycopy(appendData, 0, expectedData, appendOffset, appendLength);
    ReadResult rr = context.readIndex.read(targetId, 0, expectedData.length, TIMEOUT);
    Assert.assertTrue("Parent Segment read indicates no data available.", rr.hasNext());
    byte[] actualData = new byte[expectedData.length];
    rr.readRemaining(actualData, TIMEOUT);
    Assert.assertArrayEquals("Unexpected data read back.", expectedData, actualData);
}
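Note how the interceptor wraps the blocking await() in Exceptions.handleInterrupted: Consumer#accept cannot throw the checked InterruptedException, so Pravega's helper handles it internally and the lambda does not have to declare it. A minimal sketch of that idiom, assuming only the two Pravega utility classes shown (the interceptor name and argument are illustrative):

import io.pravega.common.Exceptions;
import io.pravega.common.util.ReusableLatch;
import java.util.function.Consumer;

public class InterceptorSketch {
    public static void main(String[] args) {
        ReusableLatch blocker = new ReusableLatch(false);
        // accept() declares no checked exceptions, so the checked await() must be wrapped.
        Consumer<String> interceptor = name -> Exceptions.handleInterrupted(blocker::await);
        blocker.release();               // released up front so this example does not hang
        interceptor.accept("segment-1");
    }
}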
Use of io.pravega.common.util.ReusableLatch in project pravega by pravega.
The class SegmentOutputStreamTest, method testFlushIsBlockedUntilCallBackInvoked.
/**
 * This test ensures that flush() on a segment is released only after the sealed-segment callback is invoked.
 * The callback implemented in EventStreamWriter appends this segment to its sealedSegmentQueue.
 */
@Test(timeout = 10000)
public void testFlushIsBlockedUntilCallBackInvoked() throws Exception {
    // The segment-sealed callback will finish execution only when the latch is released.
    ReusableLatch latch = new ReusableLatch(false);
    final Consumer<Segment> segmentSealedCallback = segment -> Exceptions.handleInterrupted(() -> latch.await());
    UUID cid = UUID.randomUUID();
    PravegaNodeUri uri = new PravegaNodeUri("endpoint", SERVICE_PORT);
    MockConnectionFactoryImpl cf = new MockConnectionFactoryImpl();
    cf.setExecutor(executorService());
    MockController controller = new MockController(uri.getEndpoint(), uri.getPort(), cf, true);
    ClientConnection connection = mock(ClientConnection.class);
    cf.provideConnection(uri, connection);
    InOrder order = Mockito.inOrder(connection);
    @SuppressWarnings("resource")
    SegmentOutputStreamImpl output = new SegmentOutputStreamImpl(SEGMENT, true, controller, cf, cid, segmentSealedCallback, RETRY_SCHEDULE, DelegationTokenProviderFactory.createWithEmptyToken());
    output.reconnect();
    order.verify(connection).send(new SetupAppend(output.getRequestId(), cid, SEGMENT, ""));
    cf.getProcessor(uri).appendSetup(new AppendSetup(output.getRequestId(), SEGMENT, cid, 0));
    ByteBuffer data = getBuffer("test");
    CompletableFuture<Void> ack = new CompletableFuture<>();
    output.write(PendingEvent.withoutHeader(null, data, ack));
    order.verify(connection).send(new Append(SEGMENT, cid, 1, 1, Unpooled.wrappedBuffer(data), null, output.getRequestId()));
    assertFalse(ack.isDone());
    @Cleanup("shutdownNow")
    ScheduledExecutorService executor = ExecutorServiceHelpers.newScheduledThreadPool(1, "netty-callback");
    // Simulate a SegmentIsSealed WireCommand from the SegmentStore.
    executor.submit(() -> cf.getProcessor(uri).segmentIsSealed(new WireCommands.SegmentIsSealed(output.getRequestId(), SEGMENT, "SomeException", 1)));
    AssertExtensions.assertBlocks(() -> {
        AssertExtensions.assertThrows(SegmentSealedException.class, () -> output.flush());
    }, () -> latch.release());
    AssertExtensions.assertThrows(SegmentSealedException.class, () -> output.flush());
}
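The heavy lifting here is done by AssertExtensions.assertBlocks, which asserts that the first block of code stays blocked until the second one runs. If that Pravega test utility were unavailable, the same check could be hand-rolled with plain JDK primitives; a rough sketch under that assumption (timeouts and names are illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class AssertBlocksSketch {
    static void assertBlocks(Runnable blockingCode, Runnable unblocker) throws Exception {
        CompletableFuture<Void> future = CompletableFuture.runAsync(blockingCode);
        try {
            // The code under test must still be blocked here, so this get() should time out.
            future.get(200, TimeUnit.MILLISECONDS);
            throw new AssertionError("Code did not block.");
        } catch (TimeoutException expected) {
            // Expected: blockingCode is still waiting.
        }
        unblocker.run();
        // Once unblocked, the code must complete promptly.
        future.get(10, TimeUnit.SECONDS);
    }
}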
Use of io.pravega.common.util.ReusableLatch in project pravega by pravega.
The class SegmentOutputStreamTest, method testSegmentSealedFollowedbyConnectionDrop.
@Test(timeout = 10000)
public void testSegmentSealedFollowedbyConnectionDrop() throws Exception {
    @Cleanup("shutdownNow")
    ScheduledExecutorService executor = ExecutorServiceHelpers.newScheduledThreadPool(2, "netty-callback");
    // The segment-sealed callback will finish execution only when releaseCallbackLatch is released.
    ReusableLatch releaseCallbackLatch = new ReusableLatch(false);
    ReusableLatch callBackInvokedLatch = new ReusableLatch(false);
    final Consumer<Segment> segmentSealedCallback = segment -> Exceptions.handleInterrupted(() -> {
        callBackInvokedLatch.release();
        releaseCallbackLatch.await();
    });
    // Set up mocks.
    UUID cid = UUID.randomUUID();
    PravegaNodeUri uri = new PravegaNodeUri("endpoint", SERVICE_PORT);
    MockConnectionFactoryImpl cf = new MockConnectionFactoryImpl();
    cf.setExecutor(executorService());
    MockController controller = new MockController(uri.getEndpoint(), uri.getPort(), cf, true);
    // Mock client connection that is returned for every invocation of ConnectionFactory#establishConnection.
    ClientConnection connection = mock(ClientConnection.class);
    cf.provideConnection(uri, connection);
    InOrder order = Mockito.inOrder(connection);
    // Create a Segment writer.
    @SuppressWarnings("resource")
    SegmentOutputStreamImpl output = new SegmentOutputStreamImpl(SEGMENT, true, controller, cf, cid, segmentSealedCallback, RETRY_SCHEDULE, DelegationTokenProviderFactory.createWithEmptyToken());
    // Trigger establishment of a connection.
    output.reconnect();
    // Verify that SetupAppend is sent over the connection.
    order.verify(connection).send(new SetupAppend(output.getRequestId(), cid, SEGMENT, ""));
    cf.getProcessor(uri).appendSetup(new AppendSetup(output.getRequestId(), SEGMENT, cid, 0));
    // Write an event and ensure inflight has an event.
    ByteBuffer data = getBuffer("test");
    CompletableFuture<Void> ack = new CompletableFuture<>();
    output.write(PendingEvent.withoutHeader(null, data, ack));
    order.verify(connection).send(new Append(SEGMENT, cid, 1, 1, Unpooled.wrappedBuffer(data), null, output.getRequestId()));
    assertFalse(ack.isDone());
    // Simulate a SegmentIsSealed WireCommand from the SegmentStore.
    executor.submit(() -> cf.getProcessor(uri).segmentIsSealed(new WireCommands.SegmentIsSealed(output.getRequestId(), SEGMENT, "SomeException", 1)));
    // Wait until the callback invocation has been triggered, but has not completed.
    // If the callback is not invoked, the test will fail due to a timeout.
    callBackInvokedLatch.await();
    // Now trigger a connection-drop Netty callback and wait until it has executed.
    executor.submit(() -> cf.getProcessor(uri).connectionDropped()).get();
    // Verify that close() is invoked on the connection.
    order.verify(connection).close();
    // Verify there are no further reconnect attempts, which would involve sending a SetupAppend wire command.
    order.verifyNoMoreInteractions();
    // Release the latch so the callback can complete.
    releaseCallbackLatch.release();
    // Verify there are still no reconnect attempts (no SetupAppend wire command sent).
    order.verifyNoMoreInteractions();
    // Trigger a reconnect again and verify that no new connections are initiated.
    output.reconnect();
    // The reconnect operation will be executed on the executor service.
    ScheduledExecutorService service = executorService();
    service.shutdown();
    // Wait until all the tasks for the reconnect have completed.
    service.awaitTermination(10, TimeUnit.SECONDS);
    // Verify there are no further reconnect attempts (no SetupAppend wire command sent).
    order.verifyNoMoreInteractions();
}
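The repeated order.verifyNoMoreInteractions() calls work because Mockito's InOrder object remembers which interactions it has already verified; any send() triggered by an unwanted reconnect would make the check fail. A minimal sketch of that verification pattern (the Sender interface is illustrative, not a Pravega type):

import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import org.mockito.InOrder;

public class InOrderSketch {
    interface Sender {
        void send(String msg);
        void close();
    }

    public static void main(String[] args) {
        Sender connection = mock(Sender.class);
        connection.send("SetupAppend");
        connection.close();
        InOrder order = inOrder(connection);
        order.verify(connection).send("SetupAppend");   // consumes the expected interaction
        order.verify(connection).close();
        order.verifyNoMoreInteractions();               // fails if any interaction was left unverified
    }
}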
Use of io.pravega.common.util.ReusableLatch in project pravega by pravega.
The class ThreadPoolScheduledExecutorServiceTest, method testShutdownNow.
@Test(timeout = 10000)
public void testShutdownNow() throws Exception {
    ThreadPoolScheduledExecutorService pool = createPool(1);
    AtomicInteger count = new AtomicInteger(0);
    ReusableLatch latch = new ReusableLatch(false);
    AtomicReference<Exception> error = new AtomicReference<>();
    pool.submit(() -> {
        count.incrementAndGet();
        try {
            latch.await();
        } catch (Exception e) {
            error.set(e);
        }
    });
    pool.submit(() -> count.incrementAndGet());
    assertFalse(pool.isShutdown());
    assertFalse(pool.isTerminated());
    AssertExtensions.assertEventuallyEquals(1, count::get, 5000);
    List<Runnable> remaining = pool.shutdownNow();
    assertEquals(1, remaining.size());
    assertTrue(pool.isShutdown());
    AssertExtensions.assertThrows(RejectedExecutionException.class, () -> pool.submit(() -> count.incrementAndGet()));
    // No need to call latch.release() because the blocked thread should be interrupted.
    assertTrue(pool.awaitTermination(5, SECONDS));
    assertTrue(pool.isTerminated());
    assertNotNull(error.get());
    assertEquals(InterruptedException.class, error.get().getClass());
    assertEquals(1, count.get());
}
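The key point is that shutdownNow() interrupts actively executing tasks, so the task blocked in latch.await() exits with an InterruptedException without the latch ever being released. The same behaviour can be reproduced with plain JDK types; a minimal sketch:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        CountDownLatch neverReleased = new CountDownLatch(1);
        pool.submit(() -> {
            try {
                neverReleased.await();          // blocks forever unless interrupted
            } catch (InterruptedException e) {
                System.out.println("Interrupted by shutdownNow()");
            }
        });
        Thread.sleep(100);                      // give the task time to start blocking
        pool.shutdownNow();                     // interrupts the blocked worker
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}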