Use of java.util.concurrent.CountDownLatch in project camel by apache.
The class Mina2TcpWithInOutTest, method testMinaRouteWithInOut.
@Test
public void testMinaRouteWithInOut() throws Exception {
    latch = new CountDownLatch(1);
    uri = String.format("mina2:tcp://localhost:%1$s?textline=true", getPort());
    Mina2ReverserServer server = new Mina2ReverserServer(getPort());
    server.start();
    context.addRoutes(new RouteBuilder() {
        @Override
        public void configure() throws Exception {
            from("direct:x").to(uri).process(new Processor() {
                public void process(Exchange e) {
                    receivedExchange = e;
                    latch.countDown();
                }
            });
        }
    });
    context.start();
    // now let's fire in a message
    Endpoint endpoint = context.getEndpoint("direct:x");
    Exchange exchange = endpoint.createExchange(ExchangePattern.InOut);
    Message message = exchange.getIn();
    message.setBody("Hello!");
    message.setHeader("cheese", 123);
    Producer producer = endpoint.createProducer();
    producer.start();
    producer.process(exchange);
    // wait (bounded) for the reply instead of sleeping for a fixed time
    assertTrue(latch.await(5, TimeUnit.SECONDS));
    assertNotNull(receivedExchange.getIn());
    assertEquals("!olleH", receivedExchange.getIn().getBody());
    producer.stop();
    context.stop();
    server.stop();
}
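The latch here replaces a fixed sleep: the route's Processor stores the reply and counts down, while the test thread blocks on latch.await(5, TimeUnit.SECONDS) with a bounded timeout, so a broken route fails fast instead of hanging the suite. A minimal self-contained sketch of the same wait-for-async-reply idiom outside Camel (all class and variable names below are illustrative, not taken from the test above):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class AwaitAsyncReply {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        AtomicReference<String> reply = new AtomicReference<>();
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // Stand-in for the asynchronous consumer: store the result, then signal.
        executor.submit(() -> {
            reply.set(new StringBuilder("Hello!").reverse().toString());
            latch.countDown();
        });
        // Block for at most 5 seconds instead of sleeping a fixed amount.
        if (!latch.await(5, TimeUnit.SECONDS)) {
            throw new IllegalStateException("No reply within 5 seconds");
        }
        System.out.println(reply.get()); // prints "!olleH"
        executor.shutdown();
    }
}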
Use of java.util.concurrent.CountDownLatch in project camel by apache.
The class MongoDbTailingProcess, method run.
/**
 * The heart of the tailing process.
 */
@Override
public void run() {
    stoppedLatch = new CountDownLatch(1);
    while (keepRunning) {
        doRun();
        // if the previous call didn't return because we have stopped running, then regenerate the cursor
        if (keepRunning) {
            cursor.close();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Regenerating cursor with lastVal: {}, waiting {}ms first", tailTracking.lastVal, cursorRegenerationDelay);
            }
            if (cursorRegenerationDelayEnabled) {
                try {
                    Thread.sleep(cursorRegenerationDelay);
                } catch (InterruptedException e) {
                    // ignore
                }
            }
            cursor = initializeCursor();
        }
    }
    stopped = true;
    stoppedLatch.countDown();
}
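Here the latch is not a test utility but a shutdown handshake: the loop exits once keepRunning goes false, and whoever requested the stop can block on stoppedLatch until run() has actually finished. A hedged sketch of that handshake, with illustrative names (the countDown-in-finally is an assumption of this sketch, not taken from the Camel code):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class StoppableWorker implements Runnable {
    private volatile boolean keepRunning = true;
    private final CountDownLatch stoppedLatch = new CountDownLatch(1);

    @Override
    public void run() {
        try {
            while (keepRunning) {
                // one unit of work, e.g. draining a cursor
            }
        } finally {
            // Signal that the loop has fully exited, even on failure.
            stoppedLatch.countDown();
        }
    }

    /** Request a stop and wait until the worker has really terminated. */
    public void stop(long timeoutMillis) throws InterruptedException {
        keepRunning = false;
        if (!stoppedLatch.await(timeoutMillis, TimeUnit.MILLISECONDS)) {
            throw new IllegalStateException("Worker did not stop in time");
        }
    }
}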
Use of java.util.concurrent.CountDownLatch in project flink by apache.
The class CancelPartitionRequestTest, method testDuplicateCancel.
@Test
public void testDuplicateCancel() throws Exception {
    NettyServerAndClient serverAndClient = null;
    try {
        final TestPooledBufferProvider outboundBuffers = new TestPooledBufferProvider(16);
        ResultPartitionManager partitions = mock(ResultPartitionManager.class);
        ResultPartitionID pid = new ResultPartitionID();
        final CountDownLatch sync = new CountDownLatch(1);
        final ResultSubpartitionView view = spy(new InfiniteSubpartitionView(outboundBuffers, sync));
        // Return infinite subpartition
        when(partitions.createSubpartitionView(eq(pid), eq(0), any(BufferProvider.class), any(BufferAvailabilityListener.class))).thenAnswer(new Answer<ResultSubpartitionView>() {
            @Override
            public ResultSubpartitionView answer(InvocationOnMock invocationOnMock) throws Throwable {
                BufferAvailabilityListener listener = (BufferAvailabilityListener) invocationOnMock.getArguments()[3];
                listener.notifyBuffersAvailable(Long.MAX_VALUE);
                return view;
            }
        });
        PartitionRequestProtocol protocol = new PartitionRequestProtocol(partitions, mock(TaskEventDispatcher.class), mock(NetworkBufferPool.class));
        serverAndClient = initServerAndClient(protocol);
        Channel ch = connect(serverAndClient);
        // Request for non-existing input channel => results in cancel request
        InputChannelID inputChannelId = new InputChannelID();
        ch.writeAndFlush(new PartitionRequest(pid, 0, inputChannelId)).await();
        // Wait for the notification
        if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
            fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis() + " ms to be notified about cancelled partition.");
        }
        ch.writeAndFlush(new CancelPartitionRequest(inputChannelId)).await();
        ch.close();
        NettyTestUtil.awaitClose(ch);
        verify(view, times(1)).releaseAllResources();
        verify(view, times(0)).notifySubpartitionConsumed();
    } finally {
        shutdown(serverAndClient);
    }
}
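Both this test and the next repeat the same bounded-await idiom: await with a timeout and fail the test with a descriptive message rather than hang forever. Factored out, it could look like the following sketch (awaitOrFail is a hypothetical helper, not part of Flink or JUnit):

import static org.junit.Assert.fail;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

final class LatchAssertions {
    private LatchAssertions() {
    }

    // Wait for the latch, failing the test loudly instead of hanging.
    static void awaitOrFail(CountDownLatch latch, long timeoutMillis, String what) throws InterruptedException {
        if (!latch.await(timeoutMillis, TimeUnit.MILLISECONDS)) {
            fail("Timed out after waiting " + timeoutMillis + " ms for " + what);
        }
    }
}

With it, the check above would collapse to awaitOrFail(sync, TestingUtils.TESTING_DURATION().toMillis(), "the cancelled-partition notification").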
Use of java.util.concurrent.CountDownLatch in project flink by apache.
The class ClientTransportErrorHandlingTest, method testExceptionOnWrite.
/**
 * Verifies that failed client requests via {@link PartitionRequestClient} are correctly
 * attributed to the respective {@link RemoteInputChannel}.
 */
@Test
public void testExceptionOnWrite() throws Exception {
    NettyProtocol protocol = new NettyProtocol() {
        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            return new ChannelHandler[0];
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new PartitionRequestProtocol(mock(ResultPartitionProvider.class), mock(TaskEventDispatcher.class), mock(NetworkBufferPool.class)).getClientChannelHandlers();
        }
    };
    // We need a real server and client in this test, because Netty's EmbeddedChannel is
    // not failing the ChannelPromise of failed writes.
    NettyServerAndClient serverAndClient = initServerAndClient(protocol, createConfig());
    Channel ch = connect(serverAndClient);
    PartitionRequestClientHandler handler = getClientHandler(ch);
    // Last outbound handler throws Exception after the first write
    ch.pipeline().addFirst(new ChannelOutboundHandlerAdapter() {
        int writeNum = 0;

        @Override
        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
            if (writeNum >= 1) {
                throw new RuntimeException("Expected test exception.");
            }
            writeNum++;
            ctx.write(msg, promise);
        }
    });
    PartitionRequestClient requestClient = new PartitionRequestClient(ch, handler, mock(ConnectionID.class), mock(PartitionRequestClientFactory.class));
    // Create input channels
    RemoteInputChannel[] rich = new RemoteInputChannel[] { createRemoteInputChannel(), createRemoteInputChannel() };
    final CountDownLatch sync = new CountDownLatch(1);
    // Do this with explicit synchronization. Otherwise this is not robust against slow timings
    // of the callback (e.g. we cannot just verify that it was called once, because there is
    // a chance that we do this too early).
    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            sync.countDown();
            return null;
        }
    }).when(rich[1]).onError(isA(LocalTransportException.class));
    // First request is successful
    ChannelFuture f = requestClient.requestSubpartition(new ResultPartitionID(), 0, rich[0], 0);
    assertTrue(f.await().isSuccess());
    // Second request is *not* successful
    f = requestClient.requestSubpartition(new ResultPartitionID(), 0, rich[1], 0);
    assertFalse(f.await().isSuccess());
    // Only the second channel should be notified about the error
    verify(rich[0], times(0)).onError(any(LocalTransportException.class));
    // Wait for the notification
    if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
        fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis() + " ms to be notified about the channel error.");
    }
    shutdown(serverAndClient);
}
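The doAnswer block above is the key trick: verifying the callback immediately would race against the I/O thread, so the mock counts the latch down when onError fires and the test blocks until then. A minimal sketch of the same pattern against a hypothetical Listener interface, assuming Mockito 2+ on the classpath (everything here is illustrative, not Flink code):

import static org.mockito.Mockito.*;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class CallbackLatchSketch {
    interface Listener {
        void onError(Throwable t);
    }

    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch sync = new CountDownLatch(1);
        Listener listener = mock(Listener.class);
        // Count down when the callback fires, so the caller can block on it.
        doAnswer(invocation -> {
            sync.countDown();
            return null;
        }).when(listener).onError(any(Throwable.class));
        // Simulate the asynchronous component invoking the callback.
        new Thread(() -> listener.onError(new RuntimeException("boom"))).start();
        if (!sync.await(5, TimeUnit.SECONDS)) {
            throw new AssertionError("Callback was never invoked");
        }
        // Now the verification cannot run before the callback has happened.
        verify(listener, times(1)).onError(any(Throwable.class));
    }
}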
Use of java.util.concurrent.CountDownLatch in project flink by apache.
The class PartitionRequestClientFactoryTest, method testResourceReleaseAfterInterruptedConnect.
@Test
public void testResourceReleaseAfterInterruptedConnect() throws Exception {
    // Latch to synchronize on the connect call.
    final CountDownLatch syncOnConnect = new CountDownLatch(1);
    final Tuple2<NettyServer, NettyClient> netty = createNettyServerAndClient(new NettyProtocol() {
        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            return new ChannelHandler[0];
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new ChannelHandler[] { new CountDownLatchOnConnectHandler(syncOnConnect) };
        }
    });
    final NettyServer server = netty.f0;
    final NettyClient client = netty.f1;
    final UncaughtTestExceptionHandler exceptionHandler = new UncaughtTestExceptionHandler();
    try {
        final PartitionRequestClientFactory factory = new PartitionRequestClientFactory(client);
        final Thread connect = new Thread(new Runnable() {
            @Override
            public void run() {
                ConnectionID serverAddress = null;
                try {
                    serverAddress = createServerConnectionID(0);
                    // This triggers a connect
                    factory.createPartitionRequestClient(serverAddress);
                } catch (Throwable t) {
                    if (serverAddress != null) {
                        factory.closeOpenChannelConnections(serverAddress);
                        Thread.getDefaultUncaughtExceptionHandler().uncaughtException(Thread.currentThread(), t);
                    } else {
                        t.printStackTrace();
                        fail("Could not create RemoteAddress for server.");
                    }
                }
            }
        });
        connect.setUncaughtExceptionHandler(exceptionHandler);
        connect.start();
        // Wait on the connect
        syncOnConnect.await();
        connect.interrupt();
        connect.join();
        // Make sure that after a failed connect all resources are cleared.
        assertEquals(0, factory.getNumberOfActiveClients());
        // Make sure that the interrupt exception is not swallowed
        assertTrue(exceptionHandler.getErrors().size() > 0);
    } finally {
        if (server != null) {
            server.shutdown();
        }
        if (client != null) {
            client.shutdown();
        }
    }
}
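This last test uses the latch the other way around: a handler in the pipeline counts down once the connect attempt is in flight, so the test knows exactly when it is safe to interrupt. A self-contained sketch of "wait until the worker reaches a known point, then interrupt it" (all names illustrative; the inner latch merely stands in for a blocking connect):

import java.util.concurrent.CountDownLatch;

public class InterruptAtKnownPoint {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch reached = new CountDownLatch(1);
        Thread worker = new Thread(() -> {
            reached.countDown(); // signal: about to enter the blocking call
            try {
                new CountDownLatch(1).await(); // blocks until interrupted
            } catch (InterruptedException e) {
                // Expected path: resource cleanup would happen here.
                System.out.println("Interrupted, cleaning up");
            }
        });
        worker.start();
        reached.await();   // only interrupt once the worker is at the known point
        worker.interrupt();
        worker.join();
    }
}

Note there is no race even if the interrupt lands between countDown() and await(): the interrupt flag is already set, so await() throws InterruptedException immediately.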