
Example 31 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project camel by apache.

In the class Mina2TcpWithInOutTest, method testMinaRouteWithInOut.

@Test
public void testMinaRouteWithInOut() throws Exception {
    latch = new CountDownLatch(1);
    uri = String.format("mina2:tcp://localhost:%1$s?textline=true", getPort());
    Mina2ReverserServer server = new Mina2ReverserServer(getPort());
    server.start();
    context.addRoutes(new RouteBuilder() {

        @Override
        public void configure() throws Exception {
            from("direct:x").to(uri).process(new Processor() {

                public void process(Exchange e) {
                    // capture the reply and release the waiting test thread
                    receivedExchange = e;
                    latch.countDown();
                }
            });
        }
    });
    context.start();
    // now let's fire a message into the route
    Endpoint endpoint = context.getEndpoint("direct:x");
    Exchange exchange = endpoint.createExchange(ExchangePattern.InOut);
    Message message = exchange.getIn();
    message.setBody("Hello!");
    message.setHeader("cheese", 123);
    Producer producer = endpoint.createProducer();
    producer.start();
    producer.process(exchange);
    // wait up to 5 seconds for the reply to be processed
    assertTrue(latch.await(5, TimeUnit.SECONDS));
    assertNotNull(receivedExchange.getIn());
    assertEquals("!olleH", receivedExchange.getIn().getBody());
    producer.stop();
    context.stop();
    server.stop();
}
Also used : Exchange(org.apache.camel.Exchange) Processor(org.apache.camel.Processor) RouteBuilder(org.apache.camel.builder.RouteBuilder) Endpoint(org.apache.camel.Endpoint) Message(org.apache.camel.Message) Producer(org.apache.camel.Producer) CountDownLatch(java.util.concurrent.CountDownLatch) Test(org.junit.Test)
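
Stripped of the Camel specifics, the pattern in this test is a one-shot latch released by an asynchronous callback and awaited with a timeout instead of a fixed sleep. A minimal self-contained sketch of that pattern (the class name and the simulated reply are illustrative, not part of the Camel test):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class LatchCallbackSketch {

    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicReference<String> received = new AtomicReference<String>();
        // Simulate the asynchronous reply arriving on another thread.
        new Thread(new Runnable() {

            public void run() {
                received.set("!olleH");
                // Release the waiting test thread once the reply is stored.
                latch.countDown();
            }
        }).start();
        // Block until the callback fires, but never longer than 5 seconds.
        if (!latch.await(5, TimeUnit.SECONDS)) {
            throw new AssertionError("Timed out waiting for the reply");
        }
        System.out.println("received: " + received.get());
    }
}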

Example 32 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project camel by apache.

In the class MongoDbTailingProcess, method run.

/**
 * The heart of the tailing process.
 */
@Override
public void run() {
    stoppedLatch = new CountDownLatch(1);
    while (keepRunning) {
        doRun();
        // doRun() has returned; if we are still meant to be running, regenerate the cursor
        if (keepRunning) {
            cursor.close();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Regenerating cursor with lastVal: {}, waiting {}ms first", tailTracking.lastVal, cursorRegenerationDelay);
            }
            if (cursorRegenerationDelayEnabled) {
                try {
                    Thread.sleep(cursorRegenerationDelay);
                } catch (InterruptedException e) {
                    // ignore
                }
            }
            cursor = initializeCursor();
        }
    }
    stopped = true;
    stoppedLatch.countDown();
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch)
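
The latch in this method implements a shutdown handshake: the polling loop counts down stoppedLatch as its last action, so a caller can block until the loop has genuinely exited rather than merely been asked to. A minimal sketch of that handshake, with illustrative names rather than the real MongoDbTailingProcess API:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class StopLatchSketch {

    private volatile boolean keepRunning = true;

    private final CountDownLatch stoppedLatch = new CountDownLatch(1);

    public void run() {
        while (keepRunning) {
            // one unit of work per iteration (simulated)
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                // ignore and re-check the flag
            }
        }
        // Signal to stop() that the loop has fully exited.
        stoppedLatch.countDown();
    }

    public void stop() throws InterruptedException {
        keepRunning = false;
        // Wait (bounded) for the worker to acknowledge the stop.
        if (!stoppedLatch.await(10, TimeUnit.SECONDS)) {
            throw new IllegalStateException("Worker did not stop in time");
        }
    }

    public static void main(String[] args) throws InterruptedException {
        final StopLatchSketch sketch = new StopLatchSketch();
        Thread worker = new Thread(new Runnable() {

            public void run() {
                sketch.run();
            }
        });
        worker.start();
        sketch.stop();
        worker.join();
        System.out.println("stopped cleanly");
    }
}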

Example 33 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project flink by apache.

In the class CancelPartitionRequestTest, method testDuplicateCancel.

@Test
public void testDuplicateCancel() throws Exception {
    NettyServerAndClient serverAndClient = null;
    try {
        final TestPooledBufferProvider outboundBuffers = new TestPooledBufferProvider(16);
        ResultPartitionManager partitions = mock(ResultPartitionManager.class);
        ResultPartitionID pid = new ResultPartitionID();
        final CountDownLatch sync = new CountDownLatch(1);
        final ResultSubpartitionView view = spy(new InfiniteSubpartitionView(outboundBuffers, sync));
        // Return the infinite subpartition view for every request
        when(partitions.createSubpartitionView(eq(pid), eq(0), any(BufferProvider.class), any(BufferAvailabilityListener.class))).thenAnswer(new Answer<ResultSubpartitionView>() {

            @Override
            public ResultSubpartitionView answer(InvocationOnMock invocationOnMock) throws Throwable {
                BufferAvailabilityListener listener = (BufferAvailabilityListener) invocationOnMock.getArguments()[3];
                listener.notifyBuffersAvailable(Long.MAX_VALUE);
                return view;
            }
        });
        PartitionRequestProtocol protocol = new PartitionRequestProtocol(partitions, mock(TaskEventDispatcher.class), mock(NetworkBufferPool.class));
        serverAndClient = initServerAndClient(protocol);
        Channel ch = connect(serverAndClient);
        // Request for non-existing input channel => results in cancel request
        InputChannelID inputChannelId = new InputChannelID();
        ch.writeAndFlush(new PartitionRequest(pid, 0, inputChannelId)).await();
        // Wait for the notification
        if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
            fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis() + " ms to be notified about cancelled partition.");
        }
        ch.writeAndFlush(new CancelPartitionRequest(inputChannelId)).await();
        ch.close();
        NettyTestUtil.awaitClose(ch);
        verify(view, times(1)).releaseAllResources();
        verify(view, times(0)).notifySubpartitionConsumed();
    } finally {
        shutdown(serverAndClient);
    }
}
Also used : TestPooledBufferProvider(org.apache.flink.runtime.io.network.util.TestPooledBufferProvider) ResultSubpartitionView(org.apache.flink.runtime.io.network.partition.ResultSubpartitionView) Channel(io.netty.channel.Channel) PartitionRequest(org.apache.flink.runtime.io.network.netty.NettyMessage.PartitionRequest) CancelPartitionRequest(org.apache.flink.runtime.io.network.netty.NettyMessage.CancelPartitionRequest) ResultPartitionManager(org.apache.flink.runtime.io.network.partition.ResultPartitionManager) CountDownLatch(java.util.concurrent.CountDownLatch) NetworkBufferPool(org.apache.flink.runtime.io.network.buffer.NetworkBufferPool) InvocationOnMock(org.mockito.invocation.InvocationOnMock) InputChannelID(org.apache.flink.runtime.io.network.partition.consumer.InputChannelID) BufferAvailabilityListener(org.apache.flink.runtime.io.network.partition.BufferAvailabilityListener) BufferProvider(org.apache.flink.runtime.io.network.buffer.BufferProvider) ResultPartitionID(org.apache.flink.runtime.io.network.partition.ResultPartitionID) TaskEventDispatcher(org.apache.flink.runtime.io.network.TaskEventDispatcher) NettyServerAndClient(org.apache.flink.runtime.io.network.netty.NettyTestUtil.NettyServerAndClient) Test(org.junit.Test)
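
The sync latch is handed to the spied InfiniteSubpartitionView so the test can wait for a precise server-side event before asserting, rather than sleeping and hoping. The same idea works without mocks by decorating a collaborator by hand; a minimal sketch with a hypothetical View interface:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class InstrumentedCollaboratorSketch {

    interface View {

        void releaseAllResources();
    }

    // Decorator that trips a latch when the method under test is reached.
    static class LatchedView implements View {

        private final CountDownLatch reached;

        LatchedView(CountDownLatch reached) {
            this.reached = reached;
        }

        public void releaseAllResources() {
            reached.countDown();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch reached = new CountDownLatch(1);
        final View view = new LatchedView(reached);
        // The code under test releases the view from its own thread.
        new Thread(new Runnable() {

            public void run() {
                view.releaseAllResources();
            }
        }).start();
        // Await the call with a timeout instead of sleeping a fixed time.
        if (!reached.await(5, TimeUnit.SECONDS)) {
            throw new AssertionError("releaseAllResources() was never called");
        }
        System.out.println("collaborator was released");
    }
}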

Example 34 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project flink by apache.

In the class ClientTransportErrorHandlingTest, method testExceptionOnWrite.

/**
 * Verifies that failed client requests via {@link PartitionRequestClient} are correctly
 * attributed to the respective {@link RemoteInputChannel}.
 */
@Test
public void testExceptionOnWrite() throws Exception {
    NettyProtocol protocol = new NettyProtocol() {

        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            return new ChannelHandler[0];
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new PartitionRequestProtocol(mock(ResultPartitionProvider.class), mock(TaskEventDispatcher.class), mock(NetworkBufferPool.class)).getClientChannelHandlers();
        }
    };
    // We need a real server and client in this test, because Netty's EmbeddedChannel is
    // not failing the ChannelPromise of failed writes.
    NettyServerAndClient serverAndClient = initServerAndClient(protocol, createConfig());
    Channel ch = connect(serverAndClient);
    PartitionRequestClientHandler handler = getClientHandler(ch);
    // The last outbound handler throws an exception after the first write
    ch.pipeline().addFirst(new ChannelOutboundHandlerAdapter() {

        int writeNum = 0;

        @Override
        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
            if (writeNum >= 1) {
                throw new RuntimeException("Expected test exception.");
            }
            writeNum++;
            ctx.write(msg, promise);
        }
    });
    PartitionRequestClient requestClient = new PartitionRequestClient(ch, handler, mock(ConnectionID.class), mock(PartitionRequestClientFactory.class));
    // Create input channels
    RemoteInputChannel[] rich = new RemoteInputChannel[] { createRemoteInputChannel(), createRemoteInputChannel() };
    final CountDownLatch sync = new CountDownLatch(1);
    // Do this with explicit synchronization. Otherwise this is not robust against slow timings
    // of the callback (e.g. we cannot just verify that it was called once, because there is
    // a chance that we do this too early).
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            sync.countDown();
            return null;
        }
    }).when(rich[1]).onError(isA(LocalTransportException.class));
    // First request is successful
    ChannelFuture f = requestClient.requestSubpartition(new ResultPartitionID(), 0, rich[0], 0);
    assertTrue(f.await().isSuccess());
    // Second request is *not* successful
    f = requestClient.requestSubpartition(new ResultPartitionID(), 0, rich[1], 0);
    assertFalse(f.await().isSuccess());
    // Only the second channel should be notified about the error
    verify(rich[0], times(0)).onError(any(LocalTransportException.class));
    // Wait for the notification
    if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
        fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis() + " ms to be notified about the channel error.");
    }
    shutdown(serverAndClient);
}
Also used : ChannelHandlerContext(io.netty.channel.ChannelHandlerContext) ChannelPromise(io.netty.channel.ChannelPromise) ChannelHandler(io.netty.channel.ChannelHandler) RemoteInputChannel(org.apache.flink.runtime.io.network.partition.consumer.RemoteInputChannel) ResultPartitionID(org.apache.flink.runtime.io.network.partition.ResultPartitionID) ChannelFuture(io.netty.channel.ChannelFuture) EmbeddedChannel(io.netty.channel.embedded.EmbeddedChannel) Channel(io.netty.channel.Channel) ChannelOutboundHandlerAdapter(io.netty.channel.ChannelOutboundHandlerAdapter) LocalTransportException(org.apache.flink.runtime.io.network.netty.exception.LocalTransportException) CountDownLatch(java.util.concurrent.CountDownLatch) RemoteTransportException(org.apache.flink.runtime.io.network.netty.exception.RemoteTransportException) IOException(java.io.IOException) ConnectionID(org.apache.flink.runtime.io.network.ConnectionID) InvocationOnMock(org.mockito.invocation.InvocationOnMock) NettyServerAndClient(org.apache.flink.runtime.io.network.netty.NettyTestUtil.NettyServerAndClient) Test(org.junit.Test)
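
The doAnswer trick above generalizes: trip a latch from the stubbed callback, await it with a timeout, and only then verify. A minimal sketch, assuming Mockito 2+ on the classpath (ErrorListener is a hypothetical interface, not a Flink type):

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class DoAnswerLatchSketch {

    interface ErrorListener {

        void onError(Exception cause);
    }

    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch sync = new CountDownLatch(1);
        final ErrorListener listener = mock(ErrorListener.class);
        // Trip the latch when the callback fires, so the test can await it
        // instead of guessing how long asynchronous delivery takes.
        doAnswer(new Answer<Void>() {

            @Override
            public Void answer(InvocationOnMock invocation) {
                sync.countDown();
                return null;
            }
        }).when(listener).onError(any(Exception.class));
        // The code under test reports the error from another thread.
        new Thread(new Runnable() {

            public void run() {
                listener.onError(new Exception("expected"));
            }
        }).start();
        if (!sync.await(5, TimeUnit.SECONDS)) {
            throw new AssertionError("Timed out waiting for onError");
        }
        // Safe to verify now: the callback has definitely run.
        verify(listener).onError(any(Exception.class));
    }
}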

Example 35 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project flink by apache.

In the class PartitionRequestClientFactoryTest, method testResourceReleaseAfterInterruptedConnect.

@Test
public void testResourceReleaseAfterInterruptedConnect() throws Exception {
    // Latch to synchronize on the connect call.
    final CountDownLatch syncOnConnect = new CountDownLatch(1);
    final Tuple2<NettyServer, NettyClient> netty = createNettyServerAndClient(new NettyProtocol() {

        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            return new ChannelHandler[0];
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new ChannelHandler[] { new CountDownLatchOnConnectHandler(syncOnConnect) };
        }
    });
    final NettyServer server = netty.f0;
    final NettyClient client = netty.f1;
    final UncaughtTestExceptionHandler exceptionHandler = new UncaughtTestExceptionHandler();
    try {
        final PartitionRequestClientFactory factory = new PartitionRequestClientFactory(client);
        final Thread connect = new Thread(new Runnable() {

            @Override
            public void run() {
                ConnectionID serverAddress = null;
                try {
                    serverAddress = createServerConnectionID(0);
                    // This triggers a connect
                    factory.createPartitionRequestClient(serverAddress);
                } catch (Throwable t) {
                    if (serverAddress != null) {
                        factory.closeOpenChannelConnections(serverAddress);
                        Thread.getDefaultUncaughtExceptionHandler().uncaughtException(Thread.currentThread(), t);
                    } else {
                        t.printStackTrace();
                        fail("Could not create RemoteAddress for server.");
                    }
                }
            }
        });
        connect.setUncaughtExceptionHandler(exceptionHandler);
        connect.start();
        // Wait on the connect
        syncOnConnect.await();
        connect.interrupt();
        connect.join();
        // Make sure that after a failed connect all resources are cleared.
        assertEquals(0, factory.getNumberOfActiveClients());
        // Make sure that the interrupt exception is not swallowed
        assertTrue(exceptionHandler.getErrors().size() > 0);
    } finally {
        if (server != null) {
            server.shutdown();
        }
        if (client != null) {
            client.shutdown();
        }
    }
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch) ConnectionID(org.apache.flink.runtime.io.network.ConnectionID) Test(org.junit.Test)
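
The latch lets the test interrupt the connecting thread at a known point: only after CountDownLatchOnConnectHandler has signalled that the connect is actually in flight. A minimal sketch of that choreography, with a sleep standing in for the blocking connect (all names illustrative):

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;

public class InterruptedConnectSketch {

    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch syncOnConnect = new CountDownLatch(1);
        final List<Throwable> errors = new CopyOnWriteArrayList<Throwable>();
        Thread connect = new Thread(new Runnable() {

            public void run() {
                // Announce that the "connect" is in flight, then block in it.
                syncOnConnect.countDown();
                try {
                    // stands in for a blocking connect call
                    Thread.sleep(Long.MAX_VALUE);
                } catch (InterruptedException e) {
                    // Re-throw so the uncaught exception handler sees it.
                    throw new RuntimeException(e);
                }
            }
        });
        // Record rather than swallow anything thrown by the connect thread.
        connect.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {

            @Override
            public void uncaughtException(Thread t, Throwable e) {
                errors.add(e);
            }
        });
        connect.start();
        // Wait until the thread is inside the connect, then interrupt it.
        syncOnConnect.await();
        connect.interrupt();
        connect.join();
        // The interruption must surface, not be silently swallowed.
        if (errors.isEmpty()) {
            throw new AssertionError("Interrupt was swallowed");
        }
        System.out.println("captured: " + errors.get(0));
    }
}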

Aggregations

CountDownLatch (java.util.concurrent.CountDownLatch): 5355
Test (org.junit.Test): 2594
IOException (java.io.IOException): 631
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 550
AtomicReference (java.util.concurrent.atomic.AtomicReference): 501
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 475
ArrayList (java.util.ArrayList): 471
QuickTest (com.hazelcast.test.annotation.QuickTest): 375
ParallelTest (com.hazelcast.test.annotation.ParallelTest): 355
ExecutorService (java.util.concurrent.ExecutorService): 322
Test (org.testng.annotations.Test): 310
HazelcastInstance (com.hazelcast.core.HazelcastInstance): 251
List (java.util.List): 212
HashMap (java.util.HashMap): 207
HttpServletResponse (javax.servlet.http.HttpServletResponse): 207
ExecutionException (java.util.concurrent.ExecutionException): 203
HttpServletRequest (javax.servlet.http.HttpServletRequest): 189
Ignite (org.apache.ignite.Ignite): 188
ServletException (javax.servlet.ServletException): 183
TimeoutException (java.util.concurrent.TimeoutException): 168