
Example 1 with ServerInstance

Use of com.linkedin.pinot.common.response.ServerInstance in project pinot by linkedin.

The class NettySingleConnectionIntegrationTest, method testServerShutdownLeak.

/*
   * This test attempts to use the connection mechanism the same way as ScatterGatherImpl.SingleRequestHandler does.
   *
   * WARNING: This test can fail intermittently due to timing.
   */
@Test
public void testServerShutdownLeak() throws Exception {
    final NettyClientMetrics metric = new NettyClientMetrics(null, "abc");
    final Timer timer = new HashedWheelTimer();
    final int minConns = 2;
    final int maxConns = 3;
    // 10M ms, i.e. effectively no idle timeout for the duration of this test.
    final int maxIdleTimeoutMs = 10000000;
    final int maxBacklogPerServer = 1;
    MyServer server = new MyServer();
    // Give the server a moment to start up.
    Thread.sleep(1000);
    // used as a key to pool. Can be anything.
    final String serverName = "SomeServer";
    final ServerInstance serverInstance = server.getServerInstance();
    final MetricsRegistry metricsRegistry = new MetricsRegistry();
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    // Reuse the timer created above rather than allocating a second one.
    PooledNettyClientResourceManager resourceManager = new PooledNettyClientResourceManager(eventLoopGroup, timer, metric);
    ExecutorService executorService = Executors.newCachedThreadPool();
    ScheduledExecutorService timeoutExecutor = new ScheduledThreadPoolExecutor(5);
    AsyncPoolResourceManagerAdapter<ServerInstance, NettyClientConnection> rmAdapter = new AsyncPoolResourceManagerAdapter<ServerInstance, NettyClientConnection>(serverInstance, resourceManager, executorService, metricsRegistry);
    KeyedPool<ServerInstance, NettyClientConnection> keyedPool = new KeyedPoolImpl<ServerInstance, NettyClientConnection>(minConns, maxConns, maxIdleTimeoutMs, maxBacklogPerServer, resourceManager, timeoutExecutor, executorService, metricsRegistry);
    resourceManager.setPool(keyedPool);
    keyedPool.start();
    Field keyedPoolMap = KeyedPoolImpl.class.getDeclaredField("_keyedPool");
    keyedPoolMap.setAccessible(true);
    KeyedFuture<ServerInstance, NettyClientConnection> keyedFuture = keyedPool.checkoutObject(serverInstance);
    // The connection pool for this server is created on demand, so we can now get a reference to the _keyedPool.
    // The act of calling checkoutObject() creates a new AsyncPoolImpl and places a request for a new connection.
    // Since no new connections are available in the beginning, we always end up creating one more than the min.
    Map<ServerInstance, AsyncPool<NettyClientConnection>> poolMap = (Map<ServerInstance, AsyncPool<NettyClientConnection>>) keyedPoolMap.get(keyedPool);
    AsyncPool<NettyClientConnection> asyncPool = poolMap.get(serverInstance);
    Field waiterList = AsyncPoolImpl.class.getDeclaredField("_waiters");
    waiterList.setAccessible(true);
    LinkedDequeue queue = (LinkedDequeue) waiterList.get(asyncPool);
    PoolStats stats;
    // If the number of waiters were 0, we would error out because the min connections may not have
    // completed by the time we check one out. If maxWaiters is > 0, we may end up initiating a fresh
    // connection while the min is still being filled. So it is best to sleep a little to make sure
    // the min pool size is filled out, so that the stats checked below are correct.
    Thread.sleep(2000L);
    stats = asyncPool.getStats();
    Assert.assertEquals(stats.getIdleCount(), minConns);
    Assert.assertEquals(stats.getPoolSize(), minConns + 1);
    NettyClientConnection conn = keyedFuture.getOne();
    LOGGER.debug("Got connection ID " + conn.getConnId());
    Assert.assertEquals(stats.getIdleCount(), minConns);
    Assert.assertEquals(stats.getPoolSize(), minConns + 1);
    // Now get two more connections to the server; since we have 2 idle, we should get those.
    // And leak them (they are never checked back in).
    keyedFuture = keyedPool.checkoutObject(serverInstance);
    conn = keyedFuture.getOne();
    LOGGER.debug("Got connection ID " + conn.getConnId());
    keyedFuture = keyedPool.checkoutObject(serverInstance);
    conn = keyedFuture.getOne();
    LOGGER.debug("Got connection ID " + conn.getConnId());
    // Now we should have 0 idle, and a pool size of 3 with no waiters.
    stats = asyncPool.getStats();
    Assert.assertEquals(stats.getIdleCount(), 0);
    Assert.assertEquals(stats.getPoolSize(), minConns + 1);
    Assert.assertEquals(queue.size(), 0);
    // Now, we will always get an exception because we don't have a free connection to the server.
    {
        keyedFuture = keyedPool.checkoutObject(serverInstance);
        boolean caughtException = false;
        LOGGER.debug("Will never get a connection here.");
        try {
            conn = keyedFuture.getOne(3, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            caughtException = true;
        }
        Assert.assertTrue(caughtException);
        keyedFuture.cancel(true);
    }
    // Now if the server goes down, we should release all three connections and be able to get a new connection successfully.
    LOGGER.info("Shutting down server instance");
    server.shutdown();
    // Give it time to clean up on the client side.
    Thread.sleep(2000L);
    stats = asyncPool.getStats();
    LOGGER.debug(stats.toString());
    // A couple of the connections counted in idleCount will be in an error state.
    Assert.assertEquals(stats.getIdleCount(), minConns);
    Assert.assertEquals(stats.getPoolSize(), minConns);
    LOGGER.debug("Restarting server instance");
    server.restart();
    Thread.sleep(3000);
    LOGGER.debug("Server restart successful\n" + asyncPool.getStats());
    // Now get 3 connections successfully
    for (int i = 0; i < 3; i++) {
        keyedFuture = keyedPool.checkoutObject(serverInstance);
        conn = keyedFuture.getOne();
        Assert.assertNotNull(conn);
    }
    server.shutdown();
}
Also used : ScheduledThreadPoolExecutor(java.util.concurrent.ScheduledThreadPoolExecutor) Field(java.lang.reflect.Field) ServerInstance(com.linkedin.pinot.common.response.ServerInstance) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) TimeoutException(java.util.concurrent.TimeoutException) MetricsRegistry(com.yammer.metrics.core.MetricsRegistry) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) NettyClientMetrics(com.linkedin.pinot.transport.metrics.NettyClientMetrics) AsyncPoolResourceManagerAdapter(com.linkedin.pinot.transport.pool.AsyncPoolResourceManagerAdapter) LinkedDequeue(com.linkedin.pinot.transport.common.LinkedDequeue) HashedWheelTimer(io.netty.util.HashedWheelTimer) KeyedPoolImpl(com.linkedin.pinot.transport.pool.KeyedPoolImpl) PoolStats(com.linkedin.pinot.transport.metrics.PoolStats) EventLoopGroup(io.netty.channel.EventLoopGroup) Timer(io.netty.util.Timer) ExecutorService(java.util.concurrent.ExecutorService) Map(java.util.Map) AsyncPool(com.linkedin.pinot.transport.pool.AsyncPool) Test(org.testng.annotations.Test)
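
For contrast with the deliberate leak above, a minimal happy-path checkout against the same pool would look like the sketch below (checked-exception handling elided). It reuses the wiring and calls from the test; the checkinObject(key, conn) counterpart for returning a connection is an assumption, since the test above never returns one.

// Minimal sketch, assuming the pool set up in the test above and a
// checkinObject(key, conn) method for returning connections (an assumption;
// the test above leaks its connections on purpose and never calls it).
KeyedFuture<ServerInstance, NettyClientConnection> future = keyedPool.checkoutObject(serverInstance);
NettyClientConnection conn = future.getOne(3, TimeUnit.SECONDS);
try {
    // ... send the request and read the response on conn ...
} finally {
    // Return the connection so the pool can reuse it.
    keyedPool.checkinObject(serverInstance, conn);
}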

Example 2 with ServerInstance

Use of com.linkedin.pinot.common.response.ServerInstance in project pinot by linkedin.

The class ServerToSegmentSetMap, method toString.

@Override
public String toString() {
    try {
        JSONObject ret = new JSONObject();
        for (ServerInstance i : _routingTable.keySet()) {
            JSONArray serverInstanceSegmentList = new JSONArray();
            List<String> sortedSegmentIds = new ArrayList<>();
            for (SegmentId segmentId : _routingTable.get(i).getSegments()) {
                sortedSegmentIds.add(segmentId.getSegmentId());
            }
            Collections.sort(sortedSegmentIds);
            for (String sortedSegmentId : sortedSegmentIds) {
                serverInstanceSegmentList.put(sortedSegmentId);
            }
            ret.put(i.toString(), serverInstanceSegmentList);
        }
        return ret.toString();
    } catch (Exception e) {
        logger.error("Error while rendering routing table as JSON", e);
        return "routing table : [ " + _routingTable + " ] ";
    }
}
Also used : JSONObject(org.json.JSONObject) SegmentId(com.linkedin.pinot.transport.common.SegmentId) JSONArray(org.json.JSONArray) ArrayList(java.util.ArrayList) ServerInstance(com.linkedin.pinot.common.response.ServerInstance)
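
Because the segment ids are sorted before serialization, the rendered routing table is stable across runs. The standalone sketch below shows the same sort-then-serialize pattern with hypothetical server and segment names (it assumes the org.json and java.util imports listed above, plus java.util.Arrays).

// Sort-then-serialize sketch; server and segment names are hypothetical.
JSONObject ret = new JSONObject();
List<String> sortedSegmentIds = new ArrayList<>(Arrays.asList("seg_2", "seg_0", "seg_1"));
// Sorting first makes the JSON deterministic regardless of map iteration order.
Collections.sort(sortedSegmentIds);
JSONArray serverInstanceSegmentList = new JSONArray();
for (String segmentId : sortedSegmentIds) {
    serverInstanceSegmentList.put(segmentId);
}
ret.put("Server_localhost_8080", serverInstanceSegmentList);
// ret.toString() -> {"Server_localhost_8080":["seg_0","seg_1","seg_2"]}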

Example 3 with ServerInstance

Use of com.linkedin.pinot.common.response.ServerInstance in project pinot by linkedin.

The class ScatterGatherImpl, method sendRequest.

/**
   * Helper function to send a scatter request. This method should be called after the servers are selected.
   *
   * @param ctxt scatter-gather request context with selected servers for each request.
   * @param scatterGatherStats scatter-gather statistics.
   * @param isOfflineTable whether the scatter-gather target is an OFFLINE table.
   * @param brokerMetrics broker metrics to track execution statistics.
   * @return a composite future representing the gather process.
   * @throws InterruptedException if interrupted while waiting for requests to be dispatched.
   */
protected CompositeFuture<ServerInstance, ByteBuf> sendRequest(ScatterGatherRequestContext ctxt, ScatterGatherStats scatterGatherStats, Boolean isOfflineTable, BrokerMetrics brokerMetrics) throws InterruptedException {
    TimerContext t = MetricsHelper.startTimer();
    // Servers are expected to be selected at this stage
    Map<ServerInstance, SegmentIdSet> mp = ctxt.getSelectedServers();
    CountDownLatch requestDispatchLatch = new CountDownLatch(mp.size());
    // Asynchronously check out connections, then dispatch the requests.
    List<SingleRequestHandler> handlers = new ArrayList<SingleRequestHandler>(mp.size());
    for (Entry<ServerInstance, SegmentIdSet> e : mp.entrySet()) {
        ServerInstance server = e.getKey();
        String serverName = server.toString();
        if (isOfflineTable != null) {
            if (isOfflineTable) {
                serverName += ScatterGatherStats.OFFLINE_TABLE_SUFFIX;
            } else {
                serverName += ScatterGatherStats.REALTIME_TABLE_SUFFIX;
            }
        }
        scatterGatherStats.initServer(serverName);
        SingleRequestHandler handler = new SingleRequestHandler(_connPool, server, ctxt.getRequest(), e.getValue(), ctxt.getTimeRemaining(), requestDispatchLatch, brokerMetrics);
        // Submit to thread-pool for checking-out and sending request
        _executorService.submit(handler);
        handlers.add(handler);
    }
    // Create the composite future for returning
    CompositeFuture<ServerInstance, ByteBuf> response = new CompositeFuture<ServerInstance, ByteBuf>("scatterRequest", GatherModeOnError.SHORTCIRCUIT_AND);
    // Wait for requests to be sent
    long timeRemaining = ctxt.getTimeRemaining();
    boolean sentSuccessfully = requestDispatchLatch.await(timeRemaining, TimeUnit.MILLISECONDS);
    if (sentSuccessfully) {
        List<KeyedFuture<ServerInstance, ByteBuf>> responseFutures = new ArrayList<KeyedFuture<ServerInstance, ByteBuf>>();
        for (SingleRequestHandler h : handlers) {
            responseFutures.add(h.getResponseFuture());
            String serverName = h.getServer().toString();
            if (isOfflineTable != null) {
                if (isOfflineTable) {
                    serverName += ScatterGatherStats.OFFLINE_TABLE_SUFFIX;
                } else {
                    serverName += ScatterGatherStats.REALTIME_TABLE_SUFFIX;
                }
            }
            scatterGatherStats.setSendStartTimeMillis(serverName, h.getConnStartTimeMillis());
            scatterGatherStats.setConnStartTimeMillis(serverName, h.getStartDelayMillis());
            scatterGatherStats.setSendCompletionTimeMillis(serverName, h.getSendCompletionTimeMillis());
        }
        response.start(responseFutures);
    } else {
        LOGGER.error("Request (" + ctxt.getRequest().getRequestId() + ") not sent completely within time (" + timeRemaining + " ms) !! Cancelling !!. NumSentFailed:" + requestDispatchLatch.getCount());
        response.start(null);
        // Some requests were not dispatched within the timeout, so we cancel all of them here.
        for (SingleRequestHandler h : handlers) {
            LOGGER.info("Request to {} was sent successfully:{}, cancelling.", h.getServer(), h.isSent());
            h.cancel();
        }
    }
    t.stop();
    _latency.update(t.getLatencyMs());
    return response;
}
Also used : CompositeFuture(com.linkedin.pinot.transport.common.CompositeFuture) ArrayList(java.util.ArrayList) CountDownLatch(java.util.concurrent.CountDownLatch) ByteBuf(io.netty.buffer.ByteBuf) KeyedFuture(com.linkedin.pinot.transport.common.KeyedFuture) TimerContext(com.linkedin.pinot.common.metrics.MetricsHelper.TimerContext) SegmentIdSet(com.linkedin.pinot.transport.common.SegmentIdSet) ServerInstance(com.linkedin.pinot.common.response.ServerInstance)
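
Stripped of Pinot's transport types, the dispatch pattern above reduces to: size a latch to the fan-out, submit one handler per server, await the latch with a bound, then either gather the response futures or cancel the stragglers. A self-contained sketch of that pattern (all names hypothetical):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class ScatterSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        int fanOut = 3;                                  // one handler per selected server
        CountDownLatch dispatched = new CountDownLatch(fanOut);
        List<Future<String>> responses = new ArrayList<>();
        for (int i = 0; i < fanOut; i++) {
            final int server = i;
            responses.add(pool.submit(() -> {
                dispatched.countDown();                  // the "request sent" signal
                return "response-from-server-" + server; // stand-in for the ByteBuf payload
            }));
        }
        // Bounded wait, mirroring requestDispatchLatch.await(timeRemaining, MILLISECONDS).
        if (dispatched.await(1, TimeUnit.SECONDS)) {
            for (Future<String> f : responses) {
                System.out.println(f.get());             // gather the responses
            }
        } else {
            for (Future<String> f : responses) {
                f.cancel(true);                          // cancel whatever did not dispatch
            }
        }
        pool.shutdown();
    }
}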

Example 4 with ServerInstance

Use of com.linkedin.pinot.common.response.ServerInstance in project pinot by linkedin.

The class ScatterGatherImpl, method selectServicesPerPartition.

/**
   * For each segmentId group in the instanceToSegmentMap, selects one server (or more, for
   * speculative requests) to query.
   *
   * @param requestContext scatter-gather request context holding the inverted map and the replica-selection strategy.
   */
private void selectServicesPerPartition(ScatterGatherRequestContext requestContext) {
    Map<ServerInstance, SegmentIdSet> selectedServers = new HashMap<ServerInstance, SegmentIdSet>();
    ScatterGatherRequest request = requestContext.getRequest();
    Map<List<ServerInstance>, SegmentIdSet> instanceToSegmentMap = requestContext.getInvertedMap();
    ReplicaSelection selection = request.getReplicaSelection();
    for (Entry<List<ServerInstance>, SegmentIdSet> e : instanceToSegmentMap.entrySet()) {
        SegmentId firstPartition = null;
        for (SegmentId p : e.getValue().getSegments()) {
            // For selecting the server, we always use the first segmentId in the group.
            // This gives a better chance of fanning out the query.
            if (null == firstPartition) {
                firstPartition = p;
            }
            ServerInstance s = selection.selectServer(firstPartition, e.getKey(), request.getHashKey());
            mergePartitionGroup(selectedServers, s, p);
        }
    }
    requestContext.setSelectedServers(selectedServers);
}
Also used : ReplicaSelection(com.linkedin.pinot.transport.common.ReplicaSelection) HashMap(java.util.HashMap) SegmentId(com.linkedin.pinot.transport.common.SegmentId) SegmentIdSet(com.linkedin.pinot.transport.common.SegmentIdSet) ArrayList(java.util.ArrayList) List(java.util.List) ServerInstance(com.linkedin.pinot.common.response.ServerInstance)
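
The replica-selection strategy is pluggable: anything honoring the selectServer(segmentId, orderedServers, hashKey) call shape seen above can be dropped in. Below is a hash-based sketch; it is not Pinot's actual implementation, and it is shown without the real ReplicaSelection base type, whose other members are not visible here.

// Hash-based replica selection sketch, matching the selectServer(...) usage above.
public class HashBasedReplicaSelection {

    /** Picks a replica deterministically from the hash key; returns null when there are no candidates. */
    public ServerInstance selectServer(SegmentId p, List<ServerInstance> orderedServers, Object hashKey) {
        if (orderedServers == null || orderedServers.isEmpty()) {
            return null; // matches the empty-candidate contract verified in Example 5
        }
        // Reduce the hash key (or 0 when absent) onto an index into the candidate list.
        int idx = (hashKey == null) ? 0 : Math.abs(hashKey.hashCode() % orderedServers.size());
        return orderedServers.get(idx);
    }
}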

Example 5 with ServerInstance

Use of com.linkedin.pinot.common.response.ServerInstance in project pinot by linkedin.

The class ReplicaSelectionTest, method testRandomSelection.

@Test
public void testRandomSelection() {
    // Create 2 random selections with the same seed; they should return the same values.
    RandomReplicaSelection sel1 = new RandomReplicaSelection(0);
    RandomReplicaSelection sel2 = new RandomReplicaSelection(0);
    ServerInstance s1 = new ServerInstance("localhost", 8080);
    ServerInstance s2 = new ServerInstance("localhost", 8081);
    ServerInstance s3 = new ServerInstance("localhost", 8082);
    ServerInstance[] servers = { s1, s2, s3 };
    // Verify for an empty list, selectServer returns null
    List<ServerInstance> candidates = new ArrayList<ServerInstance>();
    Assert.assertNull(sel1.selectServer(new SegmentId("1"), candidates, null));
}
Also used : ArrayList(java.util.ArrayList) ServerInstance(com.linkedin.pinot.common.response.ServerInstance) Test(org.testng.annotations.Test)
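
As excerpted, the test only exercises the empty-candidate contract, leaving sel2 and the three servers unused. The same-seed determinism promised in the comment could be checked along the lines below, using only the selectServer signature already shown (and assuming ServerInstance defines value equality):

// Sketch: two selections seeded identically should agree on every pick.
candidates.add(s1);
candidates.add(s2);
candidates.add(s3);
SegmentId segment = new SegmentId("1");
for (int i = 0; i < 100; i++) {
    Assert.assertEquals(sel1.selectServer(segment, candidates, null),
                        sel2.selectServer(segment, candidates, null));
}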

Aggregations

ServerInstance (com.linkedin.pinot.common.response.ServerInstance): 55
HashMap (java.util.HashMap): 35
Test (org.testng.annotations.Test): 35
DataTable (com.linkedin.pinot.common.utils.DataTable): 26
BrokerRequest (com.linkedin.pinot.common.request.BrokerRequest): 23
BrokerResponseNative (com.linkedin.pinot.common.response.broker.BrokerResponseNative): 23
QueryRequest (com.linkedin.pinot.common.query.QueryRequest): 22
InstanceRequest (com.linkedin.pinot.common.request.InstanceRequest): 22
ArrayList (java.util.ArrayList): 18
SegmentIdSet (com.linkedin.pinot.transport.common.SegmentIdSet): 14
ByteBuf (io.netty.buffer.ByteBuf): 11
NettyClientMetrics (com.linkedin.pinot.transport.metrics.NettyClientMetrics): 10
NioEventLoopGroup (io.netty.channel.nio.NioEventLoopGroup): 10
HashedWheelTimer (io.netty.util.HashedWheelTimer): 10
MetricsRegistry (com.yammer.metrics.core.MetricsRegistry): 9
QuerySource (com.linkedin.pinot.common.request.QuerySource): 8
IndexSegment (com.linkedin.pinot.core.indexsegment.IndexSegment): 8
SegmentId (com.linkedin.pinot.transport.common.SegmentId): 8
EventLoopGroup (io.netty.channel.EventLoopGroup): 8
ScheduledThreadPoolExecutor (java.util.concurrent.ScheduledThreadPoolExecutor): 8