use of com.linkedin.pinot.common.response.ServerInstance in project pinot by linkedin.
the class NettySingleConnectionIntegrationTest method testServerShutdownLeak.
/*
* This test attempts to use the connection mechanism the same way as ScatterGatherImpl.SingleRequestHandler does.
*
* WARNING: This test has potential failures due to timing.
*/
@Test
public void testServerShutdownLeak() throws Exception {
  final NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
  final Timer timer = new HashedWheelTimer();
  final int minConns = 2;
  final int maxConns = 3;
  // 10M ms, i.e. effectively no idle timeout for this test.
  final int maxIdleTimeoutMs = 10000000;
  final int maxBacklogPerServer = 1;
  MyServer server = new MyServer();
  Thread.sleep(1000);
  // Used as a key into the pool; can be anything.
  final String serverName = "SomeServer";
  final ServerInstance serverInstance = server.getServerInstance();
  final MetricsRegistry metricsRegistry = new MetricsRegistry();
  EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
  PooledNettyClientResourceManager resourceManager =
      new PooledNettyClientResourceManager(eventLoopGroup, new HashedWheelTimer(), metric);
  ExecutorService executorService = Executors.newCachedThreadPool();
  ScheduledExecutorService timeoutExecutor = new ScheduledThreadPoolExecutor(5);
  AsyncPoolResourceManagerAdapter<ServerInstance, NettyClientConnection> rmAdapter =
      new AsyncPoolResourceManagerAdapter<ServerInstance, NettyClientConnection>(serverInstance, resourceManager,
          executorService, metricsRegistry);
  KeyedPool<ServerInstance, NettyClientConnection> keyedPool =
      new KeyedPoolImpl<ServerInstance, NettyClientConnection>(minConns, maxConns, maxIdleTimeoutMs,
          maxBacklogPerServer, resourceManager, timeoutExecutor, executorService, metricsRegistry);
  resourceManager.setPool(keyedPool);
  keyedPool.start();
  Field keyedPoolMap = KeyedPoolImpl.class.getDeclaredField("_keyedPool");
  keyedPoolMap.setAccessible(true);
  KeyedFuture<ServerInstance, NettyClientConnection> keyedFuture = keyedPool.checkoutObject(serverInstance);
  // The connection pool for this server is created on demand, so we can now get a reference to the _keyedPool.
  // The act of calling checkoutObject() creates a new AsyncPoolImpl and places a request for a new connection.
  // Since no connections are available in the beginning, we always end up creating one more than the min.
  Map<ServerInstance, AsyncPool<NettyClientConnection>> poolMap =
      (Map<ServerInstance, AsyncPool<NettyClientConnection>>) keyedPoolMap.get(keyedPool);
  AsyncPool<NettyClientConnection> asyncPool = poolMap.get(serverInstance);
  Field waiterList = AsyncPoolImpl.class.getDeclaredField("_waiters");
  waiterList.setAccessible(true);
  LinkedDequeue queue = (LinkedDequeue) waiterList.get(asyncPool);
  PoolStats stats;
  // If the pool allowed no waiters (maxBacklogPerServer = 0), the checkout above would error out because the min
  // connections may not have completed by the time we check one out. With maxBacklogPerServer > 0, we may instead
  // end up initiating a fresh connection while the min is still being filled. Either way, it is best to sleep a
  // little to make sure that the min pool size is filled out, so that the stats below are correct.
  Thread.sleep(2000L);
  stats = asyncPool.getStats();
  Assert.assertEquals(stats.getIdleCount(), minConns);
  Assert.assertEquals(stats.getPoolSize(), minConns + 1);
  NettyClientConnection conn = keyedFuture.getOne();
  LOGGER.debug("Got connection ID " + conn.getConnId());
  Assert.assertEquals(stats.getIdleCount(), minConns);
  Assert.assertEquals(stats.getPoolSize(), minConns + 1);
  // Now check out two more connections to the server; since we have 2 idle, we should get those.
  // And leak them.
  keyedFuture = keyedPool.checkoutObject(serverInstance);
  conn = keyedFuture.getOne();
  LOGGER.debug("Got connection ID " + conn.getConnId());
  keyedFuture = keyedPool.checkoutObject(serverInstance);
  conn = keyedFuture.getOne();
  LOGGER.debug("Got connection ID " + conn.getConnId());
  // Now we should have 0 idle, a pool size of 3 (minConns + 1), and no waiters.
  stats = asyncPool.getStats();
  Assert.assertEquals(stats.getIdleCount(), 0);
  Assert.assertEquals(stats.getPoolSize(), minConns + 1);
  Assert.assertEquals(queue.size(), 0);
  // From here on, checkouts will always time out because there is no free connection to the server.
  {
    keyedFuture = keyedPool.checkoutObject(serverInstance);
    boolean caughtException = false;
    LOGGER.debug("Will never get a connection here.");
    try {
      conn = keyedFuture.getOne(3, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
      caughtException = true;
    }
    Assert.assertTrue(caughtException);
    keyedFuture.cancel(true);
  }
  // Now if the server goes down, we should release all three connections and be able to get a
  // successful new connection.
  LOGGER.info("Shutting down server instance");
  server.shutdown();
  // Give it time to clean up on the client side.
  Thread.sleep(2000L);
  stats = asyncPool.getStats();
  LOGGER.debug(stats.toString());
  // There will be a couple of entries in idleCount that are in error state.
  Assert.assertEquals(stats.getIdleCount(), minConns);
  Assert.assertEquals(stats.getPoolSize(), minConns);
  LOGGER.debug("Restarting server instance");
  server.restart();
  Thread.sleep(3000);
  LOGGER.debug("Server restart successful\n" + asyncPool.getStats());
  // Now get 3 connections successfully.
  for (int i = 0; i < 3; i++) {
    keyedFuture = keyedPool.checkoutObject(serverInstance);
    conn = keyedFuture.getOne();
    Assert.assertNotNull(conn);
  }
  server.shutdown();
}
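The MyServer helper is defined elsewhere in the test class and is not shown on this page. Below is a minimal sketch of the contract the test relies on, assuming a plain blocking ServerSocket is enough for the client pool to establish and lose TCP connections; the class name FakeServer, the port, and all method bodies are assumptions, not Pinot's actual implementation.
// Hypothetical stand-in for the MyServer helper used above (requires java.io.*, java.net.*, java.util.*).
// It only needs to accept TCP connections, and to drop them on shutdown so the client pool sees the failure.
static class FakeServer {
  private final int _port = 9089; // assumed port
  private volatile ServerSocket _listener;
  private final List<Socket> _accepted = Collections.synchronizedList(new ArrayList<Socket>());

  FakeServer() throws IOException {
    restart();
  }

  ServerInstance getServerInstance() {
    return new ServerInstance("localhost", _port);
  }

  void restart() throws IOException {
    _listener = new ServerSocket(_port);
    Thread acceptor = new Thread(() -> {
      try {
        while (true) {
          _accepted.add(_listener.accept()); // hold accepted connections open until shutdown
        }
      } catch (IOException e) {
        // Listener was closed by shutdown(); exit the accept loop.
      }
    });
    acceptor.setDaemon(true);
    acceptor.start();
  }

  void shutdown() throws IOException {
    _listener.close();
    synchronized (_accepted) {
      for (Socket s : _accepted) {
        s.close(); // closing established connections lets the client pool observe the error
      }
      _accepted.clear();
    }
  }
}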
use of com.linkedin.pinot.common.response.ServerInstance in project pinot by linkedin.
the class ServerToSegmentSetMap method toString.
@Override
public String toString() {
  try {
    JSONObject ret = new JSONObject();
    for (ServerInstance i : _routingTable.keySet()) {
      JSONArray serverInstanceSegmentList = new JSONArray();
      List<String> sortedSegmentIds = new ArrayList<>();
      for (SegmentId segmentId : _routingTable.get(i).getSegments()) {
        sortedSegmentIds.add(segmentId.getSegmentId());
      }
      Collections.sort(sortedSegmentIds);
      for (String sortedSegmentId : sortedSegmentIds) {
        serverInstanceSegmentList.put(sortedSegmentId);
      }
      ret.put(i.toString(), serverInstanceSegmentList);
    }
    return ret.toString();
  } catch (Exception e) {
    logger.error("error toString()", e);
    return "routing table : [ " + _routingTable + " ] ";
  }
}
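For reference, this renders the routing table as a JSON object keyed by server, with each value a sorted array of segment names. An illustrative, made-up example follows; the exact key format depends on ServerInstance.toString():
// e.g. {"Server_localhost_8080":["myTable_0","myTable_1"],"Server_localhost_8081":["myTable_2"]}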
use of com.linkedin.pinot.common.response.ServerInstance in project pinot by linkedin.
the class ScatterGatherImpl method sendRequest.
/**
 * Helper function to send the scatter request. This method should be called after the servers have been selected.
 *
 * @param ctxt scatter-gather request context with selected servers for each request.
 * @param scatterGatherStats scatter-gather statistics.
 * @param isOfflineTable whether the scatter-gather target is an OFFLINE table (may be null if not applicable).
 * @param brokerMetrics broker metrics to track execution statistics.
 * @return a composite future representing the gather process.
 * @throws InterruptedException if interrupted while waiting for the requests to be dispatched.
 */
protected CompositeFuture<ServerInstance, ByteBuf> sendRequest(ScatterGatherRequestContext ctxt,
    ScatterGatherStats scatterGatherStats, Boolean isOfflineTable, BrokerMetrics brokerMetrics)
    throws InterruptedException {
  TimerContext t = MetricsHelper.startTimer();
  // Servers are expected to be selected at this stage.
  Map<ServerInstance, SegmentIdSet> mp = ctxt.getSelectedServers();
  CountDownLatch requestDispatchLatch = new CountDownLatch(mp.size());
  // Asynchronously check out connections, then dispatch the requests.
  List<SingleRequestHandler> handlers = new ArrayList<SingleRequestHandler>(mp.size());
  for (Entry<ServerInstance, SegmentIdSet> e : mp.entrySet()) {
    ServerInstance server = e.getKey();
    String serverName = server.toString();
    if (isOfflineTable != null) {
      if (isOfflineTable) {
        serverName += ScatterGatherStats.OFFLINE_TABLE_SUFFIX;
      } else {
        serverName += ScatterGatherStats.REALTIME_TABLE_SUFFIX;
      }
    }
    scatterGatherStats.initServer(serverName);
    SingleRequestHandler handler = new SingleRequestHandler(_connPool, server, ctxt.getRequest(), e.getValue(),
        ctxt.getTimeRemaining(), requestDispatchLatch, brokerMetrics);
    // Submit to the thread pool for checking out a connection and sending the request.
    _executorService.submit(handler);
    handlers.add(handler);
  }
  // Create the composite future to return.
  CompositeFuture<ServerInstance, ByteBuf> response =
      new CompositeFuture<ServerInstance, ByteBuf>("scatterRequest", GatherModeOnError.SHORTCIRCUIT_AND);
  // Wait for the requests to be sent.
  long timeRemaining = ctxt.getTimeRemaining();
  boolean sentSuccessfully = requestDispatchLatch.await(timeRemaining, TimeUnit.MILLISECONDS);
  if (sentSuccessfully) {
    List<KeyedFuture<ServerInstance, ByteBuf>> responseFutures =
        new ArrayList<KeyedFuture<ServerInstance, ByteBuf>>();
    for (SingleRequestHandler h : handlers) {
      responseFutures.add(h.getResponseFuture());
      String serverName = h.getServer().toString();
      if (isOfflineTable != null) {
        if (isOfflineTable) {
          serverName += ScatterGatherStats.OFFLINE_TABLE_SUFFIX;
        } else {
          serverName += ScatterGatherStats.REALTIME_TABLE_SUFFIX;
        }
      }
      scatterGatherStats.setSendStartTimeMillis(serverName, h.getConnStartTimeMillis());
      scatterGatherStats.setConnStartTimeMillis(serverName, h.getStartDelayMillis());
      scatterGatherStats.setSendCompletionTimeMillis(serverName, h.getSendCompletionTimeMillis());
    }
    response.start(responseFutures);
  } else {
    LOGGER.error("Request (" + ctxt.getRequest().getRequestId() + ") not sent completely within time ("
        + timeRemaining + " ms) !! Cancelling !!. NumSentFailed:" + requestDispatchLatch.getCount());
    response.start(null);
    // Some requests may already have been sent, so we cancel all of them here.
    for (SingleRequestHandler h : handlers) {
      LOGGER.info("Request to {} was sent successfully:{}, cancelling.", h.getServer(), h.isSent());
      h.cancel();
    }
  }
  t.stop();
  _latency.update(t.getLatencyMs());
  return response;
}
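A typical caller then blocks on the returned composite future to gather the per-server responses and errors. A minimal usage sketch follows; the variable names are assumptions, and it presumes the CompositeFuture exposes Future-style get() plus a getError() accessor as used elsewhere in the broker:
// Sketch: gather the scattered responses after dispatch (names are assumptions).
CompositeFuture<ServerInstance, ByteBuf> compositeFuture =
    sendRequest(ctxt, scatterGatherStats, isOfflineTable, brokerMetrics);
Map<ServerInstance, ByteBuf> responseMap = compositeFuture.get(); // blocks until gathered or short-circuited
Map<ServerInstance, Throwable> errorMap = compositeFuture.getError(); // per-server errors, if any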
use of com.linkedin.pinot.common.response.ServerInstance in project pinot by linkedin.
the class ScatterGatherImpl method selectServicesPerPartition.
/**
 * For each segmentId in the instanceToSegmentMap, we select one server (or more, for speculative execution).
 *
 * @param requestContext scatter-gather request context holding the inverted (instances to segments) map.
 */
private void selectServicesPerPartition(ScatterGatherRequestContext requestContext) {
  Map<ServerInstance, SegmentIdSet> selectedServers = new HashMap<ServerInstance, SegmentIdSet>();
  ScatterGatherRequest request = requestContext.getRequest();
  Map<List<ServerInstance>, SegmentIdSet> instanceToSegmentMap = requestContext.getInvertedMap();
  ReplicaSelection selection = request.getReplicaSelection();
  for (Entry<List<ServerInstance>, SegmentIdSet> e : instanceToSegmentMap.entrySet()) {
    SegmentId firstPartition = null;
    for (SegmentId p : e.getValue().getSegments()) {
      /**
       * For selecting the server, we always use the first segmentId in the group. This provides
       * a better chance of fanning out the query.
       */
      if (null == firstPartition) {
        firstPartition = p;
      }
      ServerInstance s = selection.selectServer(firstPartition, e.getKey(), request.getHashKey());
      mergePartitionGroup(selectedServers, s, p);
    }
  }
  requestContext.setSelectedServers(selectedServers);
}
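The mergePartitionGroup helper is not shown on this page. A plausible sketch of its behavior, inferred purely from the call site above (the body is an assumption, not the verified source): add the segment to the chosen server's SegmentIdSet, creating the set on first use.
// Assumed behavior of mergePartitionGroup, inferred from the call above.
private void mergePartitionGroup(Map<ServerInstance, SegmentIdSet> selectedServers, ServerInstance server,
    SegmentId segment) {
  SegmentIdSet segmentIds = selectedServers.get(server);
  if (segmentIds == null) {
    segmentIds = new SegmentIdSet();
    selectedServers.put(server, segmentIds);
  }
  segmentIds.addSegment(segment); // assumes SegmentIdSet exposes an addSegment(SegmentId) mutator
}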
use of com.linkedin.pinot.common.response.ServerInstance in project pinot by linkedin.
the class ReplicaSelectionTest method testRandomSelection.
@Test
public void testRandomSelection() {
  // Create 2 random selections with the same seed, and ensure they return the same values.
  RandomReplicaSelection sel1 = new RandomReplicaSelection(0);
  RandomReplicaSelection sel2 = new RandomReplicaSelection(0);
  ServerInstance s1 = new ServerInstance("localhost", 8080);
  ServerInstance s2 = new ServerInstance("localhost", 8081);
  ServerInstance s3 = new ServerInstance("localhost", 8082);
  ServerInstance[] servers = { s1, s2, s3 };
  // Verify that for an empty candidate list, selectServer returns null.
  List<ServerInstance> candidates = new ArrayList<ServerInstance>();
  Assert.assertNull(sel1.selectServer(new SegmentId("1"), candidates, null));
}
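The snippet above only exercises the empty-candidate case; the same-seed determinism promised by the opening comment could be checked along these lines (a sketch; the selectServer signature is taken from the call above, and it assumes each call consumes the selection's pseudo-random state):
// Sketch of the determinism check: two selections seeded identically should pick the same servers.
candidates.addAll(Arrays.asList(servers)); // requires java.util.Arrays
for (int i = 0; i < 10; i++) {
  ServerInstance pick1 = sel1.selectServer(new SegmentId("1"), candidates, null);
  ServerInstance pick2 = sel2.selectServer(new SegmentId("1"), candidates, null);
  Assert.assertEquals(pick1, pick2); // same seed implies the same pseudo-random sequence
}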