use of io.netty.channel.nio.NioEventLoopGroup in project pinot by linkedin.
the class NettySingleConnectionIntegrationTest method testServerShutdownLeak.
/*
* This test attempts to use the connection mechanism the same way as ScatterGatherImpl.SingleRequestHandler does.
*
* WARNING: This test has potential failures due to timing.
*/
@Test
public void testServerShutdownLeak() throws Exception {
final NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
final Timer timer = new HashedWheelTimer();
final int minConns = 2;
final int maxConns = 3;
// 10M ms.
final int maxIdleTimeoutMs = 10000000;
final int maxBacklogPerServer = 1;
MyServer server = new MyServer();
Thread.sleep(1000);
// Used as a key to the pool; can be anything.
final String serverName = "SomeServer";
final ServerInstance serverInstance = server.getServerInstance();
final MetricsRegistry metricsRegistry = new MetricsRegistry();
EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
PooledNettyClientResourceManager resourceManager = new PooledNettyClientResourceManager(eventLoopGroup, new HashedWheelTimer(), metric);
ExecutorService executorService = Executors.newCachedThreadPool();
ScheduledExecutorService timeoutExecutor = new ScheduledThreadPoolExecutor(5);
AsyncPoolResourceManagerAdapter<ServerInstance, NettyClientConnection> rmAdapter = new AsyncPoolResourceManagerAdapter<ServerInstance, NettyClientConnection>(serverInstance, resourceManager, executorService, metricsRegistry);
KeyedPool<ServerInstance, NettyClientConnection> keyedPool = new KeyedPoolImpl<ServerInstance, NettyClientConnection>(minConns, maxConns, maxIdleTimeoutMs, maxBacklogPerServer, resourceManager, timeoutExecutor, executorService, metricsRegistry);
resourceManager.setPool(keyedPool);
keyedPool.start();
Field keyedPoolMap = KeyedPoolImpl.class.getDeclaredField("_keyedPool");
keyedPoolMap.setAccessible(true);
KeyedFuture<ServerInstance, NettyClientConnection> keyedFuture = keyedPool.checkoutObject(serverInstance);
// The connection pool for this server is created on demand, so we can now get a reference to the _keyedPool.
// The act of calling checkoutObject() creates a new AsyncPoolImpl and places a request for a new connection.
// Since no new connections are available in the beginning, we always end up creating one more than the min.
Map<ServerInstance, AsyncPool<NettyClientConnection>> poolMap = (Map<ServerInstance, AsyncPool<NettyClientConnection>>) keyedPoolMap.get(keyedPool);
AsyncPool<NettyClientConnection> asyncPool = poolMap.get(serverInstance);
Field waiterList = AsyncPoolImpl.class.getDeclaredField("_waiters");
waiterList.setAccessible(true);
LinkedDequeue queue = (LinkedDequeue) waiterList.get(asyncPool);
PoolStats stats;
// If the number of waiters is 0, then we will error out because the min connections may not have completed
// by the time we check one out. If maxWaiters is > 0, then we may end up initiating a fresh connection while the
// min is still being filled. So it is best to sleep a little to make sure that the min pool size is filled out,
// so that the stats are correct.
Thread.sleep(2000L);
stats = asyncPool.getStats();
Assert.assertEquals(stats.getIdleCount(), minConns);
Assert.assertEquals(stats.getPoolSize(), minConns + 1);
NettyClientConnection conn = keyedFuture.getOne();
LOGGER.debug("Got connection ID " + conn.getConnId());
Assert.assertEquals(stats.getIdleCount(), minConns);
Assert.assertEquals(stats.getPoolSize(), minConns + 1);
// Now get two more connections to the server, since we have 2 idle, we should get those.
// And leak them.
keyedFuture = keyedPool.checkoutObject(serverInstance);
conn = keyedFuture.getOne();
LOGGER.debug("Got connection ID " + conn.getConnId());
keyedFuture = keyedPool.checkoutObject(serverInstance);
conn = keyedFuture.getOne();
LOGGER.debug("Got connection ID " + conn.getConnId());
// Now we should have 0 idle, and a pool size of 3 with no waiters.
stats = asyncPool.getStats();
Assert.assertEquals(stats.getIdleCount(), 0);
Assert.assertEquals(stats.getPoolSize(), minConns + 1);
Assert.assertEquals(queue.size(), 0);
// Now, we will always get an exception because we don't have a free connection to the server.
{
keyedFuture = keyedPool.checkoutObject(serverInstance);
boolean caughtException = false;
LOGGER.debug("Will never get a connection here.");
try {
conn = keyedFuture.getOne(3, TimeUnit.SECONDS);
} catch (TimeoutException e) {
caughtException = true;
}
Assert.assertTrue(caughtException);
keyedFuture.cancel(true);
}
// Now, if the server goes down, we should release all three connections and be able to get a new connection successfully.
LOGGER.info("Shutting down server instance");
server.shutdown();
// Give it time to clean up on the client side.
Thread.sleep(2000L);
stats = asyncPool.getStats();
LOGGER.debug(stats.toString());
// A couple of the connections counted in idleCount will be in an error state.
Assert.assertEquals(stats.getIdleCount(), minConns);
Assert.assertEquals(stats.getPoolSize(), minConns);
LOGGER.debug("Restarting server instance");
server.restart();
Thread.sleep(3000);
LOGGER.debug("Server restart successful\n" + asyncPool.getStats());
// Now get 3 connections successfully
for (int i = 0; i < 3; i++) {
keyedFuture = keyedPool.checkoutObject(serverInstance);
conn = keyedFuture.getOne();
Assert.assertNotNull(conn);
}
server.shutdown();
}
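The test above wires a NioEventLoopGroup into Pinot's connection pool through PooledNettyClientResourceManager and then checks connections out of KeyedPoolImpl. The following is a minimal sketch of that same lifecycle, not taken from the Pinot sources: it reuses only calls that appear in the tests on this page, the class and method names are invented for illustration, and the Pinot transport imports (ServerInstance, KeyedPoolImpl, KeyedFuture, NettyClientConnection, PooledNettyClientResourceManager, NettyClientMetrics, MetricsRegistry) are deliberately omitted rather than guessing their package names.

// Minimal sketch, not from the Pinot sources: the pool lifecycle exercised by testServerShutdownLeak,
// reduced to its happy path. Pinot transport imports are omitted on purpose; see the tests above for usage.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.HashedWheelTimer;

public class PooledConnectionSketch {
  public void checkoutOnce(ServerInstance serverInstance) throws Exception {
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    ExecutorService executorService = Executors.newCachedThreadPool();
    ScheduledExecutorService timeoutExecutor = new ScheduledThreadPoolExecutor(1);
    MetricsRegistry metricsRegistry = new MetricsRegistry();
    NettyClientMetrics metrics = new NettyClientMetrics(null, "sketch");
    // The resource manager creates NettyClientConnection objects on demand; the pool keeps
    // between 2 and 3 connections per server, idles them out after 300000 ms, and allows 1 waiter.
    PooledNettyClientResourceManager resourceManager =
        new PooledNettyClientResourceManager(eventLoopGroup, new HashedWheelTimer(), metrics);
    KeyedPoolImpl<ServerInstance, NettyClientConnection> pool =
        new KeyedPoolImpl<ServerInstance, NettyClientConnection>(
            2, 3, 300000, 1, resourceManager, timeoutExecutor, executorService, metricsRegistry);
    resourceManager.setPool(pool);
    pool.start();
    // checkoutObject() lazily creates the per-server AsyncPool and returns a future for a connection.
    KeyedFuture<ServerInstance, NettyClientConnection> future = pool.checkoutObject(serverInstance);
    NettyClientConnection conn = future.getOne();
    // ... send requests over conn ...
    // Teardown mirrors the ScatterGatherTest further down: pool first, then executors, then the event loop group.
    pool.shutdown();
    executorService.shutdown();
    timeoutExecutor.shutdown();
    eventLoopGroup.shutdownGracefully();
  }
}

The reflection against the private _keyedPool and _waiters fields in the test exists only to assert pool statistics; normal callers interact with the pool solely through checkoutObject() and the returned KeyedFuture.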
use of io.netty.channel.nio.NioEventLoopGroup in project pinot by linkedin.
the class NettySingleConnectionIntegrationTest method testSingleLargeRequestResponse.
/**
 * Test single large (2 MB) request response.
 * @throws Exception
 */
@Test
public void testSingleLargeRequestResponse() throws Exception {
NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
final String response_prefix = "response_";
final String response = generatePayload(response_prefix, 1024 * 1024 * 2);
MyServer server = new MyServer(response);
Thread.sleep(1000);
EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
NettyTCPClientConnection clientConn = new NettyTCPClientConnection(server.getServerInstance(), eventLoopGroup, new HashedWheelTimer(), metric);
try {
LOGGER.info("About to connect the client !!");
boolean connected = clientConn.connect();
LOGGER.info("Client connected !!");
Assert.assertTrue(connected, "connected");
Thread.sleep(1000);
String request_prefix = "request_";
String request = generatePayload(request_prefix, 1024 * 1024 * 2);
LOGGER.info("Sending the request !!");
ResponseFuture serverRespFuture = clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L, 5000L);
LOGGER.info("Request sent !!");
ByteBuf serverResp = serverRespFuture.getOne();
byte[] b2 = new byte[serverResp.readableBytes()];
serverResp.readBytes(b2);
String gotResponse = new String(b2);
Assert.assertTrue(gotResponse.equals(response), "Response Check at client");
Assert.assertTrue(server.getHandler().getRequest().equals(request), "Request Check at server");
} finally {
if (null != clientConn) {
clientConn.close();
}
server.shutdown();
}
}
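Distilled from the test above, the following is a minimal sketch, not from the Pinot sources, of one request/response round trip over NettyTCPClientConnection. The class and method names are invented for illustration and the Pinot transport imports are again omitted. Unlike the test, the sketch also shuts the NioEventLoopGroup down in a finally block so the event loop threads are released.

// Minimal sketch, not from the Pinot sources: a single request/response over NettyTCPClientConnection.
// Pinot transport imports (ServerInstance, NettyTCPClientConnection, NettyClientMetrics, ResponseFuture) omitted.
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.HashedWheelTimer;

public class SingleRequestSketch {
  public String sendOnce(ServerInstance serverInstance, String request) throws Exception {
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    NettyTCPClientConnection conn = new NettyTCPClientConnection(
        serverInstance, eventLoopGroup, new HashedWheelTimer(), new NettyClientMetrics(null, "sketch"));
    try {
      if (!conn.connect()) {
        throw new IllegalStateException("Could not connect to " + serverInstance);
      }
      // Request id 1 and a 5 second timeout, matching the values used in the tests on this page.
      ResponseFuture responseFuture =
          conn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L, 5000L);
      ByteBuf serverResp = responseFuture.getOne();
      byte[] bytes = new byte[serverResp.readableBytes()];
      serverResp.readBytes(bytes);
      return new String(bytes);
    } finally {
      conn.close();
      // The test above leaves the group running; shutting it down here releases its threads.
      eventLoopGroup.shutdownGracefully();
    }
  }
}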
use of io.netty.channel.nio.NioEventLoopGroup in project pinot by linkedin.
the class NettySingleConnectionIntegrationTest method testConcurrentRequestDispatchError.
@Test
public void testConcurrentRequestDispatchError() throws Exception {
NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
CountDownLatch latch = new CountDownLatch(1);
MyServer server = new MyServer();
Thread.sleep(1000);
EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
NettyTCPClientConnection clientConn = new NettyTCPClientConnection(server.getServerInstance(), eventLoopGroup, new HashedWheelTimer(), metric);
LOGGER.info("About to connect the client !!");
boolean connected = clientConn.connect();
LOGGER.info("Client connected !!");
Assert.assertTrue(connected, "connected");
Thread.sleep(1000);
String request = "dummy request";
LOGGER.info("Sending the request !!");
ResponseFuture serverRespFuture = clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L, 5000L);
boolean gotException = false;
try {
clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L, 5000L);
} catch (IllegalStateException ex) {
gotException = true;
// Second request should have failed.
LOGGER.info("got exception ", ex);
}
latch.countDown();
ByteBuf serverResp = serverRespFuture.getOne();
byte[] b2 = new byte[serverResp.readableBytes()];
serverResp.readBytes(b2);
String gotResponse = new String(b2);
Assert.assertEquals(gotResponse, server.getResponseStr(), "Response Check at client");
Assert.assertEquals(server.getHandler().getRequest(), request, "Request Check at server");
clientConn.close();
server.shutdown();
Assert.assertTrue(gotException, "GotException ");
}
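The test above shows that a second sendRequest() on the same NettyTCPClientConnection fails with IllegalStateException while the first response is still outstanding. The sketch below, not from the Pinot sources, shows the corresponding usage pattern: wait on each ResponseFuture before dispatching the next request. The class and method names are invented, the fixed request id 1L mirrors the tests, and the Pinot transport imports are omitted as above.

// Sketch, not from the Pinot sources: serialize requests on a single connection.
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class SerializedRequestsSketch {
  public void sendAll(NettyTCPClientConnection conn, String[] requests) throws Exception {
    for (String request : requests) {
      ResponseFuture future = conn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L, 5000L);
      // Block for the response before issuing the next request; sending while one is still
      // in flight raises IllegalStateException, as the test above asserts.
      ByteBuf response = future.getOne();
      byte[] bytes = new byte[response.readableBytes()];
      response.readBytes(bytes);
      // ... use the response ...
    }
  }
}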
use of io.netty.channel.nio.NioEventLoopGroup in project pinot by linkedin.
the class NettySingleConnectionIntegrationTest method test100LargeRequestResponses.
/**
 * Send 100 large (20 MB) requests in sequence and verify each request and response.
 * @throws Exception
 */
// Test is currently disabled (the @Test annotation is commented out).
//@Test
public void test100LargeRequestResponses() throws Exception {
NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
MyServer server = new MyServer(null);
EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
NettyTCPClientConnection clientConn = new NettyTCPClientConnection(server.getServerInstance(), eventLoopGroup, new HashedWheelTimer(), metric);
LOGGER.info("About to connect the client !!");
boolean connected = clientConn.connect();
LOGGER.info("Client connected !!");
Assert.assertTrue(connected, "connected");
Thread.sleep(1000);
try {
for (int i = 0; i < 100; i++) {
String request_prefix = "request_";
String request = generatePayload(request_prefix, 1024 * 1024 * 20);
String response_prefix = "response_";
String response = generatePayload(response_prefix, 1024 * 1024 * 20);
server.getHandler().setResponse(response);
//LOG.info("Sending the request (" + request + ")");
ResponseFuture serverRespFuture = clientConn.sendRequest(Unpooled.wrappedBuffer(request.getBytes()), 1L, 5000L);
//LOG.info("Request sent !!");
ByteBuf serverResp = serverRespFuture.getOne();
byte[] b2 = new byte[serverResp.readableBytes()];
serverResp.readBytes(b2);
String gotResponse = new String(b2);
Assert.assertEquals(gotResponse, response, "Response Check at client");
Assert.assertEquals(server.getHandler().getRequest(), request, "Request Check at server");
}
} finally {
if (null != clientConn) {
clientConn.close();
}
server.shutdown();
}
}
use of io.netty.channel.nio.NioEventLoopGroup in project pinot by linkedin.
the class ScatterGatherTest method testMultipleServerTimeout.
@Test
public void testMultipleServerTimeout() throws Exception {
MetricsRegistry registry = new MetricsRegistry();
// Server start
int serverPort1 = 7081;
int serverPort2 = 7082;
int serverPort3 = 7083;
// Timeout server
int serverPort4 = 7084;
NettyTCPServer server1 = new NettyTCPServer(serverPort1, new TestRequestHandlerFactory(0, 1), null);
NettyTCPServer server2 = new NettyTCPServer(serverPort2, new TestRequestHandlerFactory(1, 1), null);
NettyTCPServer server3 = new NettyTCPServer(serverPort3, new TestRequestHandlerFactory(2, 1), null);
NettyTCPServer server4 = new NettyTCPServer(serverPort4, new TestRequestHandlerFactory(3, 1, 7000, false), null);
Thread t1 = new Thread(server1);
Thread t2 = new Thread(server2);
Thread t3 = new Thread(server3);
Thread t4 = new Thread(server4);
t1.start();
t2.start();
t3.start();
t4.start();
//Client setup
ScheduledExecutorService timedExecutor = new ScheduledThreadPoolExecutor(1);
ExecutorService service = new ThreadPoolExecutor(5, 5, 5, TimeUnit.DAYS, new LinkedBlockingDeque<Runnable>());
EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
NettyClientMetrics clientMetrics = new NettyClientMetrics(registry, "client_");
PooledNettyClientResourceManager rm = new PooledNettyClientResourceManager(eventLoopGroup, new HashedWheelTimer(), clientMetrics);
KeyedPoolImpl<ServerInstance, NettyClientConnection> pool = new KeyedPoolImpl<ServerInstance, NettyClientConnection>(1, 1, 300000, 1, rm, timedExecutor, service, registry);
rm.setPool(pool);
SegmentIdSet pg1 = new SegmentIdSet();
pg1.addSegment(new SegmentId("0"));
SegmentIdSet pg2 = new SegmentIdSet();
pg2.addSegment(new SegmentId("1"));
SegmentIdSet pg3 = new SegmentIdSet();
pg3.addSegment(new SegmentId("2"));
SegmentIdSet pg4 = new SegmentIdSet();
pg4.addSegment(new SegmentId("3"));
ServerInstance serverInstance1 = new ServerInstance("localhost", serverPort1);
ServerInstance serverInstance2 = new ServerInstance("localhost", serverPort2);
ServerInstance serverInstance3 = new ServerInstance("localhost", serverPort3);
ServerInstance serverInstance4 = new ServerInstance("localhost", serverPort4);
Map<ServerInstance, SegmentIdSet> pgMap = new HashMap<ServerInstance, SegmentIdSet>();
pgMap.put(serverInstance1, pg1);
pgMap.put(serverInstance2, pg2);
pgMap.put(serverInstance3, pg3);
pgMap.put(serverInstance4, pg4);
String request1 = "request_0";
String request2 = "request_1";
String request3 = "request_2";
String request4 = "request_3";
Map<SegmentIdSet, String> pgMapStr = new HashMap<SegmentIdSet, String>();
pgMapStr.put(pg1, request1);
pgMapStr.put(pg2, request2);
pgMapStr.put(pg3, request3);
pgMapStr.put(pg4, request4);
ScatterGatherRequest req = new TestScatterGatherRequest(pgMap, pgMapStr, new RoundRobinReplicaSelection(), ReplicaSelectionGranularity.SEGMENT_ID_SET, 0, 1000);
ScatterGatherImpl scImpl = new ScatterGatherImpl(pool, service);
final ScatterGatherStats scatterGatherStats = new ScatterGatherStats();
BrokerMetrics brokerMetrics = new BrokerMetrics(new MetricsRegistry());
CompositeFuture<ServerInstance, ByteBuf> fut = scImpl.scatterGather(req, scatterGatherStats, brokerMetrics);
Map<ServerInstance, ByteBuf> v = fut.get();
//Only 3 servers return value.
Assert.assertEquals(v.size(), 3);
ByteBuf b = v.get(serverInstance1);
byte[] b2 = new byte[b.readableBytes()];
b.readBytes(b2);
String response = new String(b2);
Assert.assertEquals(response, "response_0_0");
b = v.get(serverInstance2);
b2 = new byte[b.readableBytes()];
b.readBytes(b2);
response = new String(b2);
Assert.assertEquals(response, "response_1_0");
b = v.get(serverInstance3);
b2 = new byte[b.readableBytes()];
b.readBytes(b2);
response = new String(b2);
Assert.assertEquals(response, "response_2_0");
//No response from 4th server
Assert.assertNull(v.get(serverInstance4), "No response from 4th server");
Map<ServerInstance, Throwable> errorMap = fut.getError();
Assert.assertEquals(errorMap.size(), 1, "One error");
Assert.assertNotNull(errorMap.get(serverInstance4), "Server4 returned timeout");
Thread.sleep(3000);
pool.getStats().refresh();
Assert.assertEquals(pool.getStats().getTotalBadDestroyed(), 1, "Total Bad destroyed");
pool.shutdown();
service.shutdown();
eventLoopGroup.shutdownGracefully();
server1.shutdownGracefully();
server2.shutdownGracefully();
server3.shutdownGracefully();
server4.shutdownGracefully();
}
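The teardown at the end of this test fires shutdown() and shutdownGracefully() calls but returns without waiting for them to complete. Where a caller needs the worker threads to actually be gone (for example between test classes), the shutdowns can be awaited. The sketch below is an illustration under that assumption, using only standard ExecutorService and Netty EventLoopGroup APIs; the class and method names are invented.

// Sketch, not from the Pinot sources: await completion of the client-side shutdowns.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import io.netty.channel.EventLoopGroup;

public class ShutdownSketch {
  public void shutdownAndAwait(ExecutorService service, EventLoopGroup eventLoopGroup)
      throws InterruptedException {
    service.shutdown();
    // Bounded wait for queued client tasks to drain.
    service.awaitTermination(10, TimeUnit.SECONDS);
    // shutdownGracefully() returns a future; syncUninterruptibly() blocks until the
    // event loop threads have actually terminated.
    eventLoopGroup.shutdownGracefully().syncUninterruptibly();
  }
}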