
Example 6 with NioEventLoopGroup

Use of io.netty.channel.nio.NioEventLoopGroup in project pinot by linkedin.

In the class ScatterGatherTest, method testMultipleServerTimeout.

@Test
public void testMultipleServerTimeout() throws Exception {
    MetricsRegistry registry = new MetricsRegistry();
    // Server start
    int serverPort1 = 7081;
    int serverPort2 = 7082;
    int serverPort3 = 7083;
    // Server that will time out (its handler delays the response beyond the request timeout)
    int serverPort4 = 7084;
    NettyTCPServer server1 = new NettyTCPServer(serverPort1, new TestRequestHandlerFactory(0, 1), null);
    NettyTCPServer server2 = new NettyTCPServer(serverPort2, new TestRequestHandlerFactory(1, 1), null);
    NettyTCPServer server3 = new NettyTCPServer(serverPort3, new TestRequestHandlerFactory(2, 1), null);
    NettyTCPServer server4 = new NettyTCPServer(serverPort4, new TestRequestHandlerFactory(3, 1, 7000, false), null);
    Thread t1 = new Thread(server1);
    Thread t2 = new Thread(server2);
    Thread t3 = new Thread(server3);
    Thread t4 = new Thread(server4);
    t1.start();
    t2.start();
    t3.start();
    t4.start();
    //Client setup
    ScheduledExecutorService timedExecutor = new ScheduledThreadPoolExecutor(1);
    ExecutorService service = new ThreadPoolExecutor(5, 5, 5, TimeUnit.DAYS, new LinkedBlockingDeque<Runnable>());
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    NettyClientMetrics clientMetrics = new NettyClientMetrics(registry, "client_");
    PooledNettyClientResourceManager rm = new PooledNettyClientResourceManager(eventLoopGroup, new HashedWheelTimer(), clientMetrics);
    KeyedPoolImpl<ServerInstance, NettyClientConnection> pool = new KeyedPoolImpl<ServerInstance, NettyClientConnection>(1, 1, 300000, 1, rm, timedExecutor, service, registry);
    rm.setPool(pool);
    SegmentIdSet pg1 = new SegmentIdSet();
    pg1.addSegment(new SegmentId("0"));
    SegmentIdSet pg2 = new SegmentIdSet();
    pg2.addSegment(new SegmentId("1"));
    SegmentIdSet pg3 = new SegmentIdSet();
    pg3.addSegment(new SegmentId("2"));
    SegmentIdSet pg4 = new SegmentIdSet();
    pg4.addSegment(new SegmentId("3"));
    ServerInstance serverInstance1 = new ServerInstance("localhost", serverPort1);
    ServerInstance serverInstance2 = new ServerInstance("localhost", serverPort2);
    ServerInstance serverInstance3 = new ServerInstance("localhost", serverPort3);
    ServerInstance serverInstance4 = new ServerInstance("localhost", serverPort4);
    Map<ServerInstance, SegmentIdSet> pgMap = new HashMap<ServerInstance, SegmentIdSet>();
    pgMap.put(serverInstance1, pg1);
    pgMap.put(serverInstance2, pg2);
    pgMap.put(serverInstance3, pg3);
    pgMap.put(serverInstance4, pg4);
    String request1 = "request_0";
    String request2 = "request_1";
    String request3 = "request_2";
    String request4 = "request_3";
    Map<SegmentIdSet, String> pgMapStr = new HashMap<SegmentIdSet, String>();
    pgMapStr.put(pg1, request1);
    pgMapStr.put(pg2, request2);
    pgMapStr.put(pg3, request3);
    pgMapStr.put(pg4, request4);
    ScatterGatherRequest req = new TestScatterGatherRequest(pgMap, pgMapStr, new RoundRobinReplicaSelection(), ReplicaSelectionGranularity.SEGMENT_ID_SET, 0, 1000);
    ScatterGatherImpl scImpl = new ScatterGatherImpl(pool, service);
    final ScatterGatherStats scatterGatherStats = new ScatterGatherStats();
    BrokerMetrics brokerMetrics = new BrokerMetrics(new MetricsRegistry());
    CompositeFuture<ServerInstance, ByteBuf> fut = scImpl.scatterGather(req, scatterGatherStats, brokerMetrics);
    Map<ServerInstance, ByteBuf> v = fut.get();
    // Only 3 servers return a value.
    Assert.assertEquals(v.size(), 3);
    ByteBuf b = v.get(serverInstance1);
    byte[] b2 = new byte[b.readableBytes()];
    b.readBytes(b2);
    String response = new String(b2);
    Assert.assertEquals(response, "response_0_0");
    b = v.get(serverInstance2);
    b2 = new byte[b.readableBytes()];
    b.readBytes(b2);
    response = new String(b2);
    Assert.assertEquals(response, "response_1_0");
    b = v.get(serverInstance3);
    b2 = new byte[b.readableBytes()];
    b.readBytes(b2);
    response = new String(b2);
    Assert.assertEquals(response, "response_2_0");
    // No response from the 4th server
    Assert.assertNull(v.get(serverInstance4), "No response from 4th server");
    Map<ServerInstance, Throwable> errorMap = fut.getError();
    Assert.assertEquals(errorMap.size(), 1, "One error");
    Assert.assertNotNull(errorMap.get(serverInstance4), "Server4 returned timeout");
    Thread.sleep(3000);
    pool.getStats().refresh();
    Assert.assertEquals(pool.getStats().getTotalBadDestroyed(), 1, "Total Bad destroyed");
    pool.shutdown();
    service.shutdown();
    eventLoopGroup.shutdownGracefully();
    server1.shutdownGracefully();
    server2.shutdownGracefully();
    server3.shutdownGracefully();
    server4.shutdownGracefully();
}
Also used : HashMap(java.util.HashMap) ScheduledThreadPoolExecutor(java.util.concurrent.ScheduledThreadPoolExecutor) PooledNettyClientResourceManager(com.linkedin.pinot.transport.netty.PooledNettyClientResourceManager) ByteBuf(io.netty.buffer.ByteBuf) SegmentIdSet(com.linkedin.pinot.transport.common.SegmentIdSet) ServerInstance(com.linkedin.pinot.common.response.ServerInstance) RoundRobinReplicaSelection(com.linkedin.pinot.transport.common.RoundRobinReplicaSelection) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) MetricsRegistry(com.yammer.metrics.core.MetricsRegistry) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) NettyClientMetrics(com.linkedin.pinot.transport.metrics.NettyClientMetrics) SegmentId(com.linkedin.pinot.transport.common.SegmentId) HashedWheelTimer(io.netty.util.HashedWheelTimer) NettyClientConnection(com.linkedin.pinot.transport.netty.NettyClientConnection) KeyedPoolImpl(com.linkedin.pinot.transport.pool.KeyedPoolImpl) EventLoopGroup(io.netty.channel.EventLoopGroup) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ExecutorService(java.util.concurrent.ExecutorService) NettyTCPServer(com.linkedin.pinot.transport.netty.NettyTCPServer) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) ScheduledThreadPoolExecutor(java.util.concurrent.ScheduledThreadPoolExecutor) BrokerMetrics(com.linkedin.pinot.common.metrics.BrokerMetrics) Test(org.testng.annotations.Test)
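
The fourth server above never answers within the 1000 ms request budget, so it ends up in the error map instead of the response map. Outside of Pinot's scatter-gather wrapper, the same behaviour can be sketched with plain Netty by placing a ReadTimeoutHandler in the client pipeline. The class below is a minimal, generic illustration; the class name, port, and handler wiring are assumptions for this sketch, not part of the Pinot test:

// Minimal sketch (not from the Pinot test): a Netty client that treats a silent
// server as an error via ReadTimeoutHandler. Names and the port are illustrative,
// and a server is assumed to be listening on that port.
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.*;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.timeout.ReadTimeoutException;
import io.netty.handler.timeout.ReadTimeoutHandler;
import java.util.concurrent.TimeUnit;

public class TimeoutAwareClient {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap b = new Bootstrap()
                    .group(group)
                    .channel(NioSocketChannel.class)
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // Fail the request if nothing is read within 1 second,
                            // mirroring the 1000 ms budget used in the test above.
                            ch.pipeline().addLast(new ReadTimeoutHandler(1, TimeUnit.SECONDS));
                            ch.pipeline().addLast(new SimpleChannelInboundHandler<Object>() {
                                @Override
                                protected void channelRead0(ChannelHandlerContext ctx, Object msg) {
                                    System.out.println("response: " + msg);
                                }
                                @Override
                                public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
                                    if (cause instanceof ReadTimeoutException) {
                                        System.err.println("server timed out");
                                    }
                                    ctx.close();
                                }
                            });
                        }
                    });
            ChannelFuture f = b.connect("localhost", 7084).sync();
            f.channel().closeFuture().sync();
        } finally {
            group.shutdownGracefully();
        }
    }
}

ReadTimeoutHandler fires a ReadTimeoutException through the pipeline when no bytes arrive within the window, which plays the same role as the timed-out server being reported in the error map and its connection being destroyed as bad in the test above.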

Example 7 with NioEventLoopGroup

Use of io.netty.channel.nio.NioEventLoopGroup in project pinot by linkedin.

In the class ScatterGatherTest, method testMultipleServerHappy.

@Test
public void testMultipleServerHappy() throws Exception {
    MetricsRegistry registry = new MetricsRegistry();
    // Server start
    int serverPort1 = 7071;
    int serverPort2 = 7072;
    int serverPort3 = 7073;
    int serverPort4 = 7074;
    NettyTCPServer server1 = new NettyTCPServer(serverPort1, new TestRequestHandlerFactory(0, 1), null);
    NettyTCPServer server2 = new NettyTCPServer(serverPort2, new TestRequestHandlerFactory(1, 1), null);
    NettyTCPServer server3 = new NettyTCPServer(serverPort3, new TestRequestHandlerFactory(2, 1), null);
    NettyTCPServer server4 = new NettyTCPServer(serverPort4, new TestRequestHandlerFactory(3, 1), null);
    Thread t1 = new Thread(server1);
    Thread t2 = new Thread(server2);
    Thread t3 = new Thread(server3);
    Thread t4 = new Thread(server4);
    t1.start();
    t2.start();
    t3.start();
    t4.start();
    //Client setup
    ScheduledExecutorService timedExecutor = new ScheduledThreadPoolExecutor(1);
    ExecutorService poolExecutor = MoreExecutors.sameThreadExecutor();
    ExecutorService service = new ThreadPoolExecutor(1, 1, 1, TimeUnit.DAYS, new LinkedBlockingDeque<Runnable>());
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    NettyClientMetrics clientMetrics = new NettyClientMetrics(registry, "client_");
    PooledNettyClientResourceManager rm = new PooledNettyClientResourceManager(eventLoopGroup, new HashedWheelTimer(), clientMetrics);
    KeyedPoolImpl<ServerInstance, NettyClientConnection> pool = new KeyedPoolImpl<ServerInstance, NettyClientConnection>(1, 1, 300000, 1, rm, timedExecutor, poolExecutor, registry);
    rm.setPool(pool);
    SegmentIdSet pg1 = new SegmentIdSet();
    pg1.addSegment(new SegmentId("0"));
    SegmentIdSet pg2 = new SegmentIdSet();
    pg2.addSegment(new SegmentId("1"));
    SegmentIdSet pg3 = new SegmentIdSet();
    pg3.addSegment(new SegmentId("2"));
    SegmentIdSet pg4 = new SegmentIdSet();
    pg4.addSegment(new SegmentId("3"));
    ServerInstance serverInstance1 = new ServerInstance("localhost", serverPort1);
    ServerInstance serverInstance2 = new ServerInstance("localhost", serverPort2);
    ServerInstance serverInstance3 = new ServerInstance("localhost", serverPort3);
    ServerInstance serverInstance4 = new ServerInstance("localhost", serverPort4);
    Map<ServerInstance, SegmentIdSet> pgMap = new HashMap<ServerInstance, SegmentIdSet>();
    pgMap.put(serverInstance1, pg1);
    pgMap.put(serverInstance2, pg2);
    pgMap.put(serverInstance3, pg3);
    pgMap.put(serverInstance4, pg4);
    String request1 = "request_0";
    String request2 = "request_1";
    String request3 = "request_2";
    String request4 = "request_3";
    Map<SegmentIdSet, String> pgMapStr = new HashMap<SegmentIdSet, String>();
    pgMapStr.put(pg1, request1);
    pgMapStr.put(pg2, request2);
    pgMapStr.put(pg3, request3);
    pgMapStr.put(pg4, request4);
    ScatterGatherRequest req = new TestScatterGatherRequest(pgMap, pgMapStr);
    ScatterGatherImpl scImpl = new ScatterGatherImpl(pool, service);
    final ScatterGatherStats scatterGatherStats = new ScatterGatherStats();
    BrokerMetrics brokerMetrics = new BrokerMetrics(new MetricsRegistry());
    CompositeFuture<ServerInstance, ByteBuf> fut = scImpl.scatterGather(req, scatterGatherStats, brokerMetrics);
    Map<ServerInstance, ByteBuf> v = fut.get();
    Assert.assertEquals(v.size(), 4);
    ByteBuf b = v.get(serverInstance1);
    byte[] b2 = new byte[b.readableBytes()];
    b.readBytes(b2);
    String response = new String(b2);
    Assert.assertEquals(response, "response_0_0");
    b = v.get(serverInstance2);
    b2 = new byte[b.readableBytes()];
    b.readBytes(b2);
    response = new String(b2);
    Assert.assertEquals(response, "response_1_0");
    b = v.get(serverInstance3);
    b2 = new byte[b.readableBytes()];
    b.readBytes(b2);
    response = new String(b2);
    Assert.assertEquals(response, "response_2_0");
    b = v.get(serverInstance4);
    b2 = new byte[b.readableBytes()];
    b.readBytes(b2);
    response = new String(b2);
    Assert.assertEquals(response, "response_3_0");
    server1.shutdownGracefully();
    server2.shutdownGracefully();
    server3.shutdownGracefully();
    server4.shutdownGracefully();
    pool.shutdown();
    service.shutdown();
    eventLoopGroup.shutdownGracefully();
}
Also used : HashMap(java.util.HashMap) ScheduledThreadPoolExecutor(java.util.concurrent.ScheduledThreadPoolExecutor) PooledNettyClientResourceManager(com.linkedin.pinot.transport.netty.PooledNettyClientResourceManager) ByteBuf(io.netty.buffer.ByteBuf) SegmentIdSet(com.linkedin.pinot.transport.common.SegmentIdSet) ServerInstance(com.linkedin.pinot.common.response.ServerInstance) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) MetricsRegistry(com.yammer.metrics.core.MetricsRegistry) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) NettyClientMetrics(com.linkedin.pinot.transport.metrics.NettyClientMetrics) SegmentId(com.linkedin.pinot.transport.common.SegmentId) HashedWheelTimer(io.netty.util.HashedWheelTimer) NettyClientConnection(com.linkedin.pinot.transport.netty.NettyClientConnection) KeyedPoolImpl(com.linkedin.pinot.transport.pool.KeyedPoolImpl) EventLoopGroup(io.netty.channel.EventLoopGroup) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ExecutorService(java.util.concurrent.ExecutorService) NettyTCPServer(com.linkedin.pinot.transport.netty.NettyTCPServer) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) ScheduledThreadPoolExecutor(java.util.concurrent.ScheduledThreadPoolExecutor) BrokerMetrics(com.linkedin.pinot.common.metrics.BrokerMetrics) Test(org.testng.annotations.Test)
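
The happy-path test shares a single NioEventLoopGroup, via the pooled resource manager, across all four server connections. The sketch below shows the same resource-sharing idea with plain Netty only; the class name, ports, and latch-based collection are assumptions for illustration, not Pinot code:

// Minimal sketch (not Pinot code): one shared NioEventLoopGroup fanning a request
// out to several servers and waiting for all replies, roughly what the pooled
// scatter-gather above does. Ports and the handler are illustrative only.
import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.Unpooled;
import io.netty.channel.*;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.CharsetUtil;
import java.util.concurrent.CountDownLatch;

public class FanOutClient {
    public static void main(String[] args) throws InterruptedException {
        int[] ports = {7071, 7072, 7073, 7074};
        CountDownLatch latch = new CountDownLatch(ports.length);
        EventLoopGroup group = new NioEventLoopGroup();   // shared by all connections
        try {
            Bootstrap b = new Bootstrap()
                    .group(group)
                    .channel(NioSocketChannel.class)
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            ch.pipeline().addLast(new SimpleChannelInboundHandler<Object>() {
                                @Override
                                protected void channelRead0(ChannelHandlerContext ctx, Object msg) {
                                    latch.countDown();   // one reply per server
                                    ctx.close();
                                }
                            });
                        }
                    });
            for (int port : ports) {
                b.connect("localhost", port).addListener((ChannelFutureListener) f -> {
                    if (f.isSuccess()) {
                        f.channel().writeAndFlush(Unpooled.copiedBuffer("request", CharsetUtil.UTF_8));
                    } else {
                        latch.countDown();   // count connect failures too, so await() returns
                    }
                });
            }
            latch.await();
        } finally {
            group.shutdownGracefully();   // releases the event loop threads for all channels
        }
    }
}

A single NioEventLoopGroup can drive any number of channels; creating one group per connection wastes threads, which is why both the Pinot pool and this sketch reuse one group and call shutdownGracefully() only once all requests are finished.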

Example 8 with NioEventLoopGroup

Use of io.netty.channel.nio.NioEventLoopGroup in project camel by apache.

In the class LumberjackServer, method start.

/**
     * Starts the server.
     *
     * @throws InterruptedException if interrupted while connecting the socket
     */
public void start() throws InterruptedException {
    LOG.info("Starting the LUMBERJACK server (host={}, port={}).", host, port);
    // Create the group that will listen for incoming connections
    bossGroup = new NioEventLoopGroup(1);
    // Create the group that will process the connections
    workerGroup = new NioEventLoopGroup(WORKER_THREADS);
    // Create the executor service that will process the payloads without blocking netty threads
    executorService = new DefaultEventExecutorGroup(WORKER_THREADS, threadFactory);
    // Create the channel initializer
    ChannelHandler initializer = new LumberjackChannelInitializer(sslContext, executorService, messageProcessor);
    // Bootstrap the netty server
    ServerBootstrap serverBootstrap = new ServerBootstrap()
            .group(bossGroup, workerGroup)
            .channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, 100)
            .childHandler(initializer);
    // Connect the socket
    channel = serverBootstrap.bind(host, port).sync().channel();
    LOG.info("LUMBERJACK server is started (host={}, port={}).", host, port);
}
Also used : DefaultEventExecutorGroup(io.netty.util.concurrent.DefaultEventExecutorGroup) ChannelHandler(io.netty.channel.ChannelHandler) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) ServerBootstrap(io.netty.bootstrap.ServerBootstrap)
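
The excerpt shows only start(); a complete lifecycle also has to release both event loop groups once the server goes down. The following is a minimal, self-contained sketch of the same boss/worker pattern with that shutdown included (class name, port, and the echo handler are illustrative, not Camel's):

// Minimal, self-contained sketch of the boss/worker pattern (not Camel code):
// a tiny echo server plus the graceful shutdown the excerpt above omits.
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.*;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public class MiniBossWorkerServer {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);     // accepts connections
        EventLoopGroup workerGroup = new NioEventLoopGroup();    // handles connection I/O
        try {
            Channel channel = new ServerBootstrap()
                    .group(bossGroup, workerGroup)
                    .channel(NioServerSocketChannel.class)
                    .option(ChannelOption.SO_BACKLOG, 100)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // Echo whatever arrives back to the client.
                            ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
                                @Override
                                public void channelRead(ChannelHandlerContext ctx, Object msg) {
                                    ctx.writeAndFlush(msg);
                                }
                            });
                        }
                    })
                    .bind("localhost", 5044)   // port is illustrative
                    .sync()
                    .channel();
            channel.closeFuture().sync();
        } finally {
            // The counterpart of start(): release both groups when the server stops.
            workerGroup.shutdownGracefully();
            bossGroup.shutdownGracefully();
        }
    }
}

Both groups must eventually be released, or their non-daemon threads keep the JVM alive.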

Example 9 with NioEventLoopGroup

Use of io.netty.channel.nio.NioEventLoopGroup in project neo4j by neo4j.

In the class NettyServer, method start.

@Override
public void start() throws Throwable {
    // The boss thread accepts new incoming connections and chooses a worker thread to be responsible for the
    // IO of the new connection. We expect new connections to be (comparatively) rare, so we allocate a single
    // thread for this.
    // TODO: In fact, dedicating a whole thread to sit and spin in #select for new connections may be a waste of
    // time; we could use the same event loop group both for accepting new connections and for handling events
    // on existing connections.
    bossGroup = new NioEventLoopGroup(1, tf);
    // These threads handle live channels. Each thread has a set of channels it is responsible for, and it will
    // continuously run a #select() loop to react to new events on these channels.
    selectorGroup = new NioEventLoopGroup(NUM_SELECTOR_THREADS, tf);
    for (ProtocolInitializer initializer : bootstrappers) {
        try {
            new ServerBootstrap().option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT).group(bossGroup, selectorGroup).channel(NioServerSocketChannel.class).childHandler(initializer.channelInitializer()).bind(initializer.address().socketAddress()).sync();
        } catch (Throwable e) {
            // All of this exists just so we can throw a more helpful exception when the bind fails.
            if (e instanceof BindException) {
                throw new PortBindException(initializer.address(), (BindException) e);
            }
            throw e;
        }
    }
}
Also used : NioServerSocketChannel(io.netty.channel.socket.nio.NioServerSocketChannel) PortBindException(org.neo4j.helpers.PortBindException) BindException(java.net.BindException) PortBindException(org.neo4j.helpers.PortBindException) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) ServerBootstrap(io.netty.bootstrap.ServerBootstrap)
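
The catch block above exists so that a plain java.net.BindException can be rethrown as a PortBindException that names the offending address. A generic version of the same pattern, without Neo4j's helper types, might look like this (the class name and the wrapping exception are assumptions for the sketch):

// Generic sketch of the "friendlier bind failure" pattern (not Neo4j code).
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import java.net.BindException;

public class BindAwareServer {
    public static void start(EventLoopGroup bossGroup, EventLoopGroup selectorGroup, int port) throws Throwable {
        try {
            new ServerBootstrap()
                    .group(bossGroup, selectorGroup)
                    .channel(NioServerSocketChannel.class)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // Protocol handlers would be added here.
                        }
                    })
                    .bind(port)
                    .sync();                       // bind failures surface here
        } catch (Throwable e) {
            if (e instanceof BindException) {
                // Re-throw with a message that names the port, mirroring PortBindException.
                throw new IllegalStateException("Address already in use: port " + port, e);
            }
            throw e;
        }
    }
}

ChannelFuture.sync() rethrows the bind failure, so the BindException surfaces in the caller's thread rather than inside the event loop.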

Example 10 with NioEventLoopGroup

Use of io.netty.channel.nio.NioEventLoopGroup in project pulsar by yahoo.

In the class DiscoveryServiceTest, method testClientServerConnection.

/**
     * Verifies that the client connects to the Discovery service and receives a discovery response successfully.
     * 
     * @throws Exception
     */
@Test
public void testClientServerConnection() throws Exception {
    addBrokerToZk(2);
    // 1. client connects to DiscoveryService, 2. Client receive service-lookup response
    final int messageTransfer = 2;
    final CountDownLatch latch = new CountDownLatch(messageTransfer);
    NioEventLoopGroup workerGroup = connectToService(service.getServiceUrl(), latch, false);
    try {
        assertTrue(latch.await(1, TimeUnit.SECONDS));
    } catch (InterruptedException e) {
        fail("should have received lookup response message from server", e);
    }
    workerGroup.shutdownGracefully();
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) Test(org.testng.annotations.Test)
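
connectToService is a Pulsar test helper that is not shown in this excerpt. A generic helper with the same shape, counting each inbound message on the latch and returning the NioEventLoopGroup so the caller can shut it down, could be sketched as follows (the class and method names are hypothetical):

// Generic sketch of what a helper like connectToService might do (not the Pulsar helper).
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import java.util.concurrent.CountDownLatch;

public class LatchCountingClient {
    public static NioEventLoopGroup connect(String host, int port, CountDownLatch latch)
            throws InterruptedException {
        NioEventLoopGroup workerGroup = new NioEventLoopGroup();
        new Bootstrap()
                .group(workerGroup)
                .channel(NioSocketChannel.class)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel ch) {
                        ch.pipeline().addLast(new SimpleChannelInboundHandler<Object>() {
                            @Override
                            protected void channelRead0(ChannelHandlerContext ctx, Object msg) {
                                latch.countDown();   // one count per message received
                            }
                        });
                    }
                })
                .connect(host, port)
                .sync();
        return workerGroup;   // caller calls shutdownGracefully() when done
    }
}

The test then awaits the latch with a timeout and shuts the returned group down, exactly as testClientServerConnection does above.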

Aggregations

NioEventLoopGroup (io.netty.channel.nio.NioEventLoopGroup)521 EventLoopGroup (io.netty.channel.EventLoopGroup)256 ServerBootstrap (io.netty.bootstrap.ServerBootstrap)226 Bootstrap (io.netty.bootstrap.Bootstrap)184 NioServerSocketChannel (io.netty.channel.socket.nio.NioServerSocketChannel)178 SocketChannel (io.netty.channel.socket.SocketChannel)169 ChannelFuture (io.netty.channel.ChannelFuture)157 NioSocketChannel (io.netty.channel.socket.nio.NioSocketChannel)157 Channel (io.netty.channel.Channel)138 InetSocketAddress (java.net.InetSocketAddress)118 LoggingHandler (io.netty.handler.logging.LoggingHandler)87 ChannelPipeline (io.netty.channel.ChannelPipeline)84 ChannelHandlerContext (io.netty.channel.ChannelHandlerContext)73 SslContext (io.netty.handler.ssl.SslContext)64 SelfSignedCertificate (io.netty.handler.ssl.util.SelfSignedCertificate)49 IOException (java.io.IOException)49 EpollEventLoopGroup (io.netty.channel.epoll.EpollEventLoopGroup)47 ByteBuf (io.netty.buffer.ByteBuf)44 ChannelInboundHandlerAdapter (io.netty.channel.ChannelInboundHandlerAdapter)43 ChannelInitializer (io.netty.channel.ChannelInitializer)41