Use of org.apache.flink.shaded.netty4.io.netty.channel.nio.NioEventLoopGroup in project elasticsearch by elastic.
The class Netty4HttpServerTransport, method doStart.
@Override
protected void doStart() {
    boolean success = false;
    try {
        this.serverOpenChannels = new Netty4OpenChannelsHandler(logger);
        serverBootstrap = new ServerBootstrap();
        serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)));
        serverBootstrap.channel(NioServerSocketChannel.class);
        serverBootstrap.childHandler(configureServerChannelHandler());
        serverBootstrap.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings));
        serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings));
        final ByteSizeValue tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings);
        if (tcpSendBufferSize.getBytes() > 0) {
            serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes()));
        }
        final ByteSizeValue tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings);
        if (tcpReceiveBufferSize.getBytes() > 0) {
            serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes()));
        }
        serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
        serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
        final boolean reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings);
        serverBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
        serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, reuseAddress);
        this.boundAddress = createBoundHttpAddress();
        if (logger.isInfoEnabled()) {
            logger.info("{}", boundAddress);
        }
        success = true;
    } finally {
        if (success == false) {
            // otherwise we leak threads since we never moved to started
            doStop();
        }
    }
}
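The same NioEventLoopGroup / ServerBootstrap wiring can be shown in isolation. Below is a minimal, self-contained sketch using the plain io.netty package names (the flink-shaded path above is only a repackaging of the same classes); the port, worker count, and thread naming are illustrative assumptions, not Elasticsearch's actual values.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import java.util.concurrent.ThreadFactory;

public final class MinimalNioServer {
    public static void main(String[] args) throws InterruptedException {
        // Daemon worker threads, mirroring the role of daemonThreadFactory(...) above.
        ThreadFactory workerFactory = r -> {
            Thread t = new Thread(r, "http-server-worker");
            t.setDaemon(true);
            return t;
        };
        NioEventLoopGroup workers = new NioEventLoopGroup(4, workerFactory);
        try {
            ServerBootstrap bootstrap = new ServerBootstrap()
                    .group(workers)
                    .channel(NioServerSocketChannel.class)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // Real code installs its protocol handlers here.
                        }
                    })
                    .childOption(ChannelOption.TCP_NODELAY, true)
                    .childOption(ChannelOption.SO_KEEPALIVE, true)
                    .option(ChannelOption.SO_REUSEADDR, true)
                    .childOption(ChannelOption.SO_REUSEADDR, true);
            // Bind, then block until the server channel is closed.
            bootstrap.bind(8080).sync().channel().closeFuture().sync();
        } finally {
            workers.shutdownGracefully();
        }
    }
}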
Use of org.apache.flink.shaded.netty4.io.netty.channel.nio.NioEventLoopGroup in project pinot by linkedin.
The class NettySingleConnectionIntegrationTest, method testServerShutdownLeak.
/*
 * This test attempts to use the connection mechanism the same way as ScatterGatherImpl.SingleRequestHandler does.
 *
 * WARNING: This test has potential failures due to timing.
 */
@Test
public void testServerShutdownLeak() throws Exception {
    final NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
    final Timer timer = new HashedWheelTimer();
    final int minConns = 2;
    final int maxConns = 3;
    // 10M ms, i.e. effectively no idle timeout for this test.
    final int maxIdleTimeoutMs = 10000000;
    final int maxBacklogPerServer = 1;
    MyServer server = new MyServer();
    Thread.sleep(1000);
    // Used as a key to the pool; can be anything.
    final String serverName = "SomeServer";
    final ServerInstance serverInstance = server.getServerInstance();
    final MetricsRegistry metricsRegistry = new MetricsRegistry();
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    PooledNettyClientResourceManager resourceManager = new PooledNettyClientResourceManager(eventLoopGroup, new HashedWheelTimer(), metric);
    ExecutorService executorService = Executors.newCachedThreadPool();
    ScheduledExecutorService timeoutExecutor = new ScheduledThreadPoolExecutor(5);
    AsyncPoolResourceManagerAdapter<ServerInstance, NettyClientConnection> rmAdapter =
        new AsyncPoolResourceManagerAdapter<ServerInstance, NettyClientConnection>(serverInstance, resourceManager, executorService, metricsRegistry);
    KeyedPool<ServerInstance, NettyClientConnection> keyedPool =
        new KeyedPoolImpl<ServerInstance, NettyClientConnection>(minConns, maxConns, maxIdleTimeoutMs, maxBacklogPerServer, resourceManager, timeoutExecutor, executorService, metricsRegistry);
    resourceManager.setPool(keyedPool);
    keyedPool.start();
    Field keyedPoolMap = KeyedPoolImpl.class.getDeclaredField("_keyedPool");
    keyedPoolMap.setAccessible(true);
    KeyedFuture<ServerInstance, NettyClientConnection> keyedFuture = keyedPool.checkoutObject(serverInstance);
    // The connection pool for this server is created on demand, so we can now get a reference to the _keyedPool.
    // The act of calling checkoutObject() creates a new AsyncPoolImpl and places a request for a new connection.
    // Since no new connections are available in the beginning, we always end up creating one more than the min.
    Map<ServerInstance, AsyncPool<NettyClientConnection>> poolMap =
        (Map<ServerInstance, AsyncPool<NettyClientConnection>>) keyedPoolMap.get(keyedPool);
    AsyncPool<NettyClientConnection> asyncPool = poolMap.get(serverInstance);
    Field waiterList = AsyncPoolImpl.class.getDeclaredField("_waiters");
    waiterList.setAccessible(true);
    LinkedDequeue queue = (LinkedDequeue) waiterList.get(asyncPool);
    PoolStats stats;
    // If the number of allowed waiters is 0, we will error out because the min connections may not have been
    // established by the time we check one out. If maxWaiters is > 0, we may end up initiating a fresh connection
    // while the min is still being filled. So it is best to sleep a little to make sure that the min pool size
    // is filled out, so that the stats are correct.
    Thread.sleep(2000L);
    stats = asyncPool.getStats();
    Assert.assertEquals(stats.getIdleCount(), minConns);
    Assert.assertEquals(stats.getPoolSize(), minConns + 1);
    NettyClientConnection conn = keyedFuture.getOne();
    LOGGER.debug("Got connection ID " + conn.getConnId());
    Assert.assertEquals(stats.getIdleCount(), minConns);
    Assert.assertEquals(stats.getPoolSize(), minConns + 1);
    // Now get two more connections to the server; since we have 2 idle, we should get those.
    // And leak them.
    keyedFuture = keyedPool.checkoutObject(serverInstance);
    conn = keyedFuture.getOne();
    LOGGER.debug("Got connection ID " + conn.getConnId());
    keyedFuture = keyedPool.checkoutObject(serverInstance);
    conn = keyedFuture.getOne();
    LOGGER.debug("Got connection ID " + conn.getConnId());
    // Now we should have 0 idle, and a pool size of 3 with no waiters.
    stats = asyncPool.getStats();
    Assert.assertEquals(stats.getIdleCount(), 0);
    Assert.assertEquals(stats.getPoolSize(), minConns + 1);
    Assert.assertEquals(queue.size(), 0);
    // Now we will always get an exception because we don't have a free connection to the server.
    {
        keyedFuture = keyedPool.checkoutObject(serverInstance);
        boolean caughtException = false;
        LOGGER.debug("Will never get a connection here.");
        try {
            conn = keyedFuture.getOne(3, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            caughtException = true;
        }
        Assert.assertTrue(caughtException);
        keyedFuture.cancel(true);
    }
    // Now if the server goes down, we should release all three connections and be able to get a successful new connection.
    LOGGER.info("Shutting down server instance");
    server.shutdown();
    // Give it time to clean up on the client side.
    Thread.sleep(2000L);
    stats = asyncPool.getStats();
    LOGGER.debug(stats.toString());
    // There will be a couple in idleCount in error state.
    Assert.assertEquals(stats.getIdleCount(), minConns);
    Assert.assertEquals(stats.getPoolSize(), minConns);
    LOGGER.debug("Restarting server instance");
    server.restart();
    Thread.sleep(3000);
    LOGGER.debug("Server restart successful\n" + asyncPool.getStats());
    // Now get 3 connections successfully.
    for (int i = 0; i < 3; i++) {
        keyedFuture = keyedPool.checkoutObject(serverInstance);
        conn = keyedFuture.getOne();
        Assert.assertNotNull(conn);
    }
    server.shutdown();
}
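One thing the test above never does is release the client-side resources it allocates: the NioEventLoopGroup, the two HashedWheelTimers, and the two executors. A minimal teardown sketch, assuming only the standard Netty and java.util.concurrent APIs (the class and method names here are invented for illustration):

import io.netty.channel.EventLoopGroup;
import io.netty.util.HashedWheelTimer;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ScheduledExecutorService;

final class ClientTeardown {
    // Frees the resources the test allocates; call from an @AfterMethod/@AfterClass hook.
    static void shutdown(EventLoopGroup eventLoopGroup, HashedWheelTimer timer,
                         ExecutorService executorService, ScheduledExecutorService timeoutExecutor)
            throws InterruptedException {
        eventLoopGroup.shutdownGracefully().sync(); // stop Netty I/O threads
        timer.stop();                               // cancel pending timer tasks
        executorService.shutdown();
        timeoutExecutor.shutdown();
    }
}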
Use of org.apache.flink.shaded.netty4.io.netty.channel.nio.NioEventLoopGroup in project pinot by linkedin.
The class NettySingleConnectionIntegrationTest, method testSingleLargeRequestResponse.
/**
 * Tests a single large (2 MB) request and response.
 * @throws Exception
 */
@Test
public void testSingleLargeRequestResponse() throws Exception {
    NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
    final String response_prefix = "response_";
    final String response = generatePayload(response_prefix, 1024 * 1024 * 2);
    MyServer server = new MyServer(response);
    Thread.sleep(1000);
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    NettyTCPClientConnection clientConn = new NettyTCPClientConnection(server.getServerInstance(), eventLoopGroup, new HashedWheelTimer(), metric);
    try {
        LOGGER.info("About to connect the client !!");
        boolean connected = clientConn.connect();
        LOGGER.info("Client connected !!");
        Assert.assertTrue(connected, "connected");
        Thread.sleep(1000);
        String request_prefix = "request_";
        String request = generatePayload(request_prefix, 1024 * 1024 * 2);
        LOGGER.info("Sending the request !!");
        ResponseFuture serverRespFuture = clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L, 5000L);
        LOGGER.info("Request sent !!");
        ByteBuf serverResp = serverRespFuture.getOne();
        byte[] b2 = new byte[serverResp.readableBytes()];
        serverResp.readBytes(b2);
        String gotResponse = new String(b2);
        Assert.assertEquals(gotResponse, response, "Response Check at client");
        Assert.assertEquals(server.getHandler().getRequest(), request, "Request Check at server");
    } finally {
        if (null != clientConn) {
            clientConn.close();
        }
        server.shutdown();
    }
}
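The generatePayload(prefix, size) helper is not shown in the listing. A plausible reconstruction, based only on how the tests use it (a recognizable prefix padded with filler up to the requested length); the actual Pinot helper may differ:

// Hypothetical reconstruction of the test helper.
private static String generatePayload(String prefix, int size) {
    StringBuilder sb = new StringBuilder(size);
    sb.append(prefix);
    while (sb.length() < size) {
        sb.append('a'); // arbitrary filler character
    }
    return sb.toString();
}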
Use of org.apache.flink.shaded.netty4.io.netty.channel.nio.NioEventLoopGroup in project pinot by linkedin.
The class NettySingleConnectionIntegrationTest, method testConcurrentRequestDispatchError.
@Test
public void testConcurrentRequestDispatchError() throws Exception {
    NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
    CountDownLatch latch = new CountDownLatch(1);
    MyServer server = new MyServer();
    Thread.sleep(1000);
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    NettyTCPClientConnection clientConn = new NettyTCPClientConnection(server.getServerInstance(), eventLoopGroup, new HashedWheelTimer(), metric);
    LOGGER.info("About to connect the client !!");
    boolean connected = clientConn.connect();
    LOGGER.info("Client connected !!");
    Assert.assertTrue(connected, "connected");
    Thread.sleep(1000);
    String request = "dummy request";
    LOGGER.info("Sending the request !!");
    ResponseFuture serverRespFuture = clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L, 5000L);
    boolean gotException = false;
    try {
        clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L, 5000L);
    } catch (IllegalStateException ex) {
        gotException = true;
        // Second request should have failed.
        LOGGER.info("got exception ", ex);
    }
    latch.countDown();
    ByteBuf serverResp = serverRespFuture.getOne();
    byte[] b2 = new byte[serverResp.readableBytes()];
    serverResp.readBytes(b2);
    String gotResponse = new String(b2);
    Assert.assertEquals(gotResponse, server.getResponseStr(), "Response Check at client");
    Assert.assertEquals(server.getHandler().getRequest(), request, "Request Check at server");
    clientConn.close();
    server.shutdown();
    Assert.assertTrue(gotException, "GotException ");
}
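The IllegalStateException in this test implies that NettyTCPClientConnection allows only one outstanding request per connection. A sketch of such a guard, with invented class and field names and not Pinot's actual implementation, might look like this:

import java.util.concurrent.atomic.AtomicLong;

// Illustrative one-request-at-a-time guard.
final class SingleRequestGuard {
    private static final long IDLE = -1L;
    private final AtomicLong inFlightRequestId = new AtomicLong(IDLE);

    // Called at the top of sendRequest(); rejects a second concurrent dispatch.
    void beginRequest(long requestId) {
        if (!inFlightRequestId.compareAndSet(IDLE, requestId)) {
            throw new IllegalStateException(
                    "Request " + inFlightRequestId.get() + " is still in flight");
        }
    }

    // Called when the response future completes, freeing the connection.
    void endRequest() {
        inFlightRequestId.set(IDLE);
    }
}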
Use of org.apache.flink.shaded.netty4.io.netty.channel.nio.NioEventLoopGroup in project pinot by linkedin.
The class NettySingleConnectionIntegrationTest, method test100LargeRequestResponses.
/**
 * Sends 100 large (20 MB) requests in sequence and verifies each request and response.
 * Note: this test is currently disabled; the @Test annotation is commented out.
 * @throws Exception
 */
//@Test
public void test100LargeRequestResponses() throws Exception {
    NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
    MyServer server = new MyServer(null);
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    NettyTCPClientConnection clientConn = new NettyTCPClientConnection(server.getServerInstance(), eventLoopGroup, new HashedWheelTimer(), metric);
    LOGGER.info("About to connect the client !!");
    boolean connected = clientConn.connect();
    LOGGER.info("Client connected !!");
    Assert.assertTrue(connected, "connected");
    Thread.sleep(1000);
    try {
        for (int i = 0; i < 100; i++) {
            String request_prefix = "request_";
            String request = generatePayload(request_prefix, 1024 * 1024 * 20);
            String response_prefix = "response_";
            String response = generatePayload(response_prefix, 1024 * 1024 * 20);
            server.getHandler().setResponse(response);
            ResponseFuture serverRespFuture = clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L, 5000L);
            ByteBuf serverResp = serverRespFuture.getOne();
            byte[] b2 = new byte[serverResp.readableBytes()];
            serverResp.readBytes(b2);
            String gotResponse = new String(b2);
            Assert.assertEquals(gotResponse, response, "Response Check at client");
            Assert.assertEquals(server.getHandler().getRequest(), request, "Request Check at server");
        }
    } finally {
        if (null != clientConn) {
            clientConn.close();
        }
        server.shutdown();
    }
}
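All of the client tests above repeat the same drain-a-ByteBuf-to-String idiom. A small helper would remove the duplication; this is a sketch, not part of the original tests, and it assumes the payloads are UTF-8 text:

import io.netty.buffer.ByteBuf;
import java.nio.charset.StandardCharsets;

// Reads all readable bytes out of the buffer and decodes them as UTF-8.
static String drainToString(ByteBuf serverResp) {
    byte[] bytes = new byte[serverResp.readableBytes()];
    serverResp.readBytes(bytes);
    return new String(bytes, StandardCharsets.UTF_8);
}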