
Example 31 with DegraderTrackerClient

use of com.linkedin.d2.balancer.clients.DegraderTrackerClient in project rest.li by linkedin.

the class DegraderLoadBalancerTest method testDegraderLoadBalancerSimulator.

private void testDegraderLoadBalancerSimulator(DegraderLoadBalancerStrategyAdapter adapter, TestClock clock, long timeInterval, List<DegraderTrackerClient> clients, double qps, DegraderImpl.Config degraderConfig) {
    long clusterGenerationId = 1;
    double overrideDropRate = 0.0;
    // simulate a latency of 4000 ms
    // 1st round: we use the LOAD_BALANCING strategy. Since latency is high, we will decrease the number of points
    // from 100 to 80 (transmissionRate * points per weight).
    TrackerClient resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 80, true, 0.0, 4000, false, false);
    assertNotNull(resultTC);
    // 2nd round drop rate should be increased by DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 80, false, overrideDropRate, 4000, false, false);
    // 3rd round. We alternate back to LOAD_BALANCING strategy and we drop the points even more
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 39, true, overrideDropRate, 4000, false, false);
    // 4th round. The drop rate should be increased again like 2nd round
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 39, false, overrideDropRate, 4000, false, false);
    // 5th round. Alternate to changing hash ring again.
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, true, overrideDropRate, 4000, false, false);
    // 6th round. Like the 2nd and 4th rounds, we'll increase the drop rate again
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, false, overrideDropRate, 4000, false, false);
    // 7th round. The number of points in the hash ring is at the minimum, so we can't decrease it further.
    // At this point the client is in recovery mode. But since we can't change the hash ring anymore, we'll
    // always be in CALL_DROPPING mode, so the next strategy is expected to be LOAD_BALANCING mode.
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, false, overrideDropRate, 4000, false, false);
    // 8th round. We'll increase the drop rate to the max.
    overrideDropRate += DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_UP;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, false, overrideDropRate, 4000, false, false);
    // 9th round. Now we'll simulate as if there's still a call even though we drop 100% of all requests to get
    // a tracker client. The assumption is that some thread still holds a tracker client, and we want
    // to make sure we can handle the request and that we can't degrade the cluster any further.
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, false, overrideDropRate, 4000, false, false);
    // 10th round. Now we'll simulate as if there are no calls because we dropped all requests.
    // Even though we are in LOAD_BALANCING mode and this tracker client is in recovery mode, there are no calls,
    // so the hash ring doesn't change. Instead we go back to reducing the drop rate, down to 0.8, which means
    // the next strategy is LOAD_BALANCING
    overrideDropRate -= DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_DOWN;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, 0.0, clients, adapter, clusterGenerationId, 1, false, overrideDropRate, 4000, false, false);
    // 11th round. This time we'll simulate that the latency is now 1000 ms (so it's between the low and high
    // watermarks). The drop rate and everything else should stay the same.
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, false, overrideDropRate, 1000, false, false);
    // we'll simulate the clients dying one by one until all of them are gone
    int numberOfClients = clients.size();
    HashSet<URI> uris = new HashSet<>();
    HashSet<URI> removedUris = new HashSet<>();
    for (TrackerClient client : clients) {
        uris.add(client.getUri());
    }
    LinkedList<TrackerClient> removedClients = new LinkedList<>();
    // the LOAD_BALANCING strategy will always be picked because there are no hash ring changes
    boolean isLoadBalancingStrategyTurn = true;
    for (int i = numberOfClients; i > 0; i--) {
        TrackerClient removed = clients.remove(0);
        uris.remove(removed.getUri());
        removedClients.addLast(removed);
        removedUris.add(removed.getUri());
        clusterGenerationId++;
        resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, isLoadBalancingStrategyTurn, overrideDropRate, 1000, false, false);
        if (i == 1) {
            assertNull(resultTC);
        } else {
            // resultTC may be null because the override drop rate is 0.8; only check the URI when a client is returned
            if (resultTC != null) {
                assertTrue(uris.contains(resultTC.getUri()));
                assertFalse(removedUris.contains(resultTC.getUri()));
            }
        }
    }
    assertTrue(uris.isEmpty());
    assertTrue(clients.isEmpty());
    assertEquals(removedUris.size(), numberOfClients);
    assertEquals(removedClients.size(), numberOfClients);
    // we'll simulate the clients reviving one by one until all of them are back up again
    for (int i = numberOfClients; i > 0; i--) {
        TrackerClient added = removedClients.remove(0);
        // we have to create a new client. The old client has a degraded DegraderImpl, and in a production
        // environment, when a new client joins a cluster it should be in a good state. This means there should
        // be 100 points in the hash ring for this client.
        DegraderTrackerClient newClient = new DegraderTrackerClientImpl(added.getUri(), getDefaultPartitionData(1d), new TestLoadBalancerClient(added.getUri()), clock, degraderConfig);
        clients.add(newClient);
        uris.add(added.getUri());
        removedUris.remove(added.getUri());
        clusterGenerationId++;
        resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 100, isLoadBalancingStrategyTurn, overrideDropRate, 1000, false, false);
        if (resultTC != null) {
            assertTrue(uris.contains(resultTC.getUri()));
            assertFalse(removedUris.contains(resultTC.getUri()));
        }
    }
    // all clients are back; now lower the latency to 300 ms and step the override drop rate back down.
    // The strategy stays in CALL_DROPPING and the number of points stays at 100 because there are no hash ring changes
    for (overrideDropRate -= DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_DOWN; overrideDropRate >= 0; overrideDropRate -= DegraderLoadBalancerStrategyConfig.DEFAULT_GLOBAL_STEP_DOWN) {
        resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 100, false, overrideDropRate, 300, false, false);
    }
    // we should have recovered fully by this time
    overrideDropRate = 0.0;
    resultTC = simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 100, false, overrideDropRate, 300, false, false);
    assertNotNull(resultTC);
    clusterGenerationId++;
    // simulate that an increased rate of certain errors (ConnectException, ClosedChannelException) causes degradation.
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 80, true, 0.0, 300, false, true);
    // switching to call dropping strategy
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 80, false, 0.0, 300, false, true);
    // continue the degradation
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 39, true, 0.0, 300, false, true);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 39, false, 0.0, 300, false, true);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, true, 0.0, 300, false, true);
    // now let's remove all the errors and see how the cluster recovers, but we have to wait until the next round
    // because this round uses the CALL_DROPPING strategy
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 1, false, 0.0, 300, false, false);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 39, true, 0.0, 300, false, false);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 39, false, 0.0, 300, false, false);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 80, true, 0.0, 300, false, false);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 80, false, 0.0, 300, false, false);
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 100, true, 0.0, 300, false, false);
    // make sure that if we have errors that are not CONNECT_EXCEPTION or CLOSED_CHANNEL_EXCEPTION, we don't degrade
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 100, false, 0.0, 300, true, false);
    // since the errors are NOT CONNECT_EXCEPTION or CLOSED_CHANNEL_EXCEPTION, there is no change in the hash ring,
    // so the strategy won't switch to CALL_DROPPING
    simulateAndTestOneInterval(timeInterval, clock, qps, clients, adapter, clusterGenerationId, 100, false, 0.0, 300, true, false);
}
Also used : DegraderTrackerClient(com.linkedin.d2.balancer.clients.DegraderTrackerClient) TrackerClient(com.linkedin.d2.balancer.clients.TrackerClient) DegraderTrackerClientImpl(com.linkedin.d2.balancer.clients.DegraderTrackerClientImpl) URI(java.net.URI) LinkedList(java.util.LinkedList) HashSet(java.util.HashSet)
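
The drop-rate arithmetic above can be reconstructed from the comments alone: the override drop rate reaches its maximum of 1.0 in the 8th round and steps back to 0.8 in the 10th, which implies that DEFAULT_GLOBAL_STEP_UP and DEFAULT_GLOBAL_STEP_DOWN are both 0.2 for this configuration. A minimal sketch of the schedule, with the 0.2 step size inferred from those comments rather than read from DegraderLoadBalancerStrategyConfig:

// Hypothetical helper, not part of the test: prints the override drop rate implied by the
// comments in testDegraderLoadBalancerSimulator. The 0.2 step size is an assumption inferred
// from "increase the drop rate to the max" (8th round) and "reducing the drop rate to 0.8" (10th round).
private static void printDropRateSchedule() {
    double step = 0.2;
    double overrideDropRate = 0.0;
    for (int round = 1; round <= 8; round++) {
        // CALL_DROPPING rounds (2, 4, 6, 7 and 8) raise the drop rate by one step;
        // LOAD_BALANCING rounds change the hash ring instead and leave the drop rate alone.
        if (round % 2 == 0 || round >= 7) {
            overrideDropRate = Math.min(1.0, overrideDropRate + step);
        }
        System.out.println("round " + round + " -> overrideDropRate " + overrideDropRate);
    }
    // The 10th round then steps back down by one step: 1.0 - 0.2 = 0.8.
}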

Example 32 with DegraderTrackerClient

use of com.linkedin.d2.balancer.clients.DegraderTrackerClient in project rest.li by linkedin.

the class DegraderLoadBalancerTest method testOneTrackerClient.

@Test(groups = { "small", "back-end" })
public void testOneTrackerClient() throws URISyntaxException {
    DegraderLoadBalancerStrategyV3 strategy = getStrategy();
    List<DegraderTrackerClient> clients = new ArrayList<>();
    URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
    clients.add(getClient(uri1, new TestClock()));
    // should always get the only client in the list
    for (int i = 0; i < 1000; ++i) {
        assertEquals(getTrackerClient(strategy, null, new RequestContext(), 0, clients), clients.get(0));
    }
}
Also used : DegraderTrackerClient(com.linkedin.d2.balancer.clients.DegraderTrackerClient) ArrayList(java.util.ArrayList) RequestContext(com.linkedin.r2.message.RequestContext) URI(java.net.URI) Test(org.testng.annotations.Test) DegraderTrackerClientTest(com.linkedin.d2.balancer.clients.DegraderTrackerClientTest)
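
The getClient(uri, clock) helper is defined elsewhere in DegraderLoadBalancerTest and is not shown on this page. Judging from the DegraderTrackerClientImpl constructor used in Example 31, it plausibly looks like the sketch below; the null degrader config (falling back to defaults) and the weight of 1.0 are assumptions.

// Assumed shape of the getClient helper, mirroring the constructor call in Example 31.
// Passing null for the DegraderImpl.Config is an assumption; a default config could be passed instead.
private DegraderTrackerClient getClient(URI uri, TestClock clock) {
    return new DegraderTrackerClientImpl(uri, getDefaultPartitionData(1d),
            new TestLoadBalancerClient(uri), clock, null);
}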

Example 33 with DegraderTrackerClient

use of com.linkedin.d2.balancer.clients.DegraderTrackerClient in project rest.li by linkedin.

the class DegraderLoadBalancerTest method testMediumTrafficHighLatency100Clients.

@Test(groups = { "small", "back-end" })
public void testMediumTrafficHighLatency100Clients() {
    Map<String, Object> myMap = lbDefaultConfig();
    Long timeInterval = 5000L;
    TestClock clock = new TestClock();
    myMap.put(PropertyKeys.CLOCK, clock);
    myMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_UPDATE_INTERVAL_MS, timeInterval);
    Map<String, String> degraderProperties = degraderDefaultConfig();
    degraderProperties.put(PropertyKeys.DEGRADER_HIGH_ERROR_RATE, "0.5");
    degraderProperties.put(PropertyKeys.DEGRADER_LOW_ERROR_RATE, "0.2");
    DegraderImpl.Config degraderConfig = DegraderConfigFactory.toDegraderConfig(degraderProperties);
    double qps = 7.3;
    // test Strategy V3
    List<DegraderTrackerClient> clients = createTrackerClient(100, clock, degraderConfig);
    DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
    DegraderLoadBalancerStrategyV3 strategyV3 = new DegraderLoadBalancerStrategyV3(config, "DegraderLoadBalancerTest", null, DEGRADER_STATE_LISTENER_FACTORIES);
    DegraderLoadBalancerStrategyAdapter strategy = new DegraderLoadBalancerStrategyAdapter(strategyV3);
    testDegraderLoadBalancerSimulator(strategy, clock, timeInterval, clients, qps, degraderConfig);
}
Also used : DegraderTrackerClient(com.linkedin.d2.balancer.clients.DegraderTrackerClient) DegraderImpl(com.linkedin.util.degrader.DegraderImpl) AtomicLong(java.util.concurrent.atomic.AtomicLong) Test(org.testng.annotations.Test) DegraderTrackerClientTest(com.linkedin.d2.balancer.clients.DegraderTrackerClientTest)
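
Similarly, createTrackerClient(count, clock, degraderConfig) is a helper from the same test class that this page does not show. Based on the construction in Example 31 it most likely builds count clients with distinct URIs and the supplied degrader config; the host/port pattern below is purely illustrative.

// Assumed shape of the createTrackerClient helper; only the DegraderTrackerClientImpl
// constructor call mirrors Example 31, the URI pattern is made up for illustration.
private List<DegraderTrackerClient> createTrackerClient(int count, TestClock clock, DegraderImpl.Config degraderConfig) {
    List<DegraderTrackerClient> clients = new ArrayList<>(count);
    for (int i = 0; i < count; i++) {
        URI uri = URI.create("http://test.linkedin.com:" + (3242 + i) + "/fdsaf");
        clients.add(new DegraderTrackerClientImpl(uri, getDefaultPartitionData(1d),
                new TestLoadBalancerClient(uri), clock, degraderConfig));
    }
    return clients;
}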

Example 34 with DegraderTrackerClient

use of com.linkedin.d2.balancer.clients.DegraderTrackerClient in project rest.li by linkedin.

the class DegraderLoadBalancerTest method testHighTrafficHighLatency1Client.

@Test(groups = { "small", "back-end" })
public void testHighTrafficHighLatency1Client() {
    Map<String, Object> myMap = lbDefaultConfig();
    Long timeInterval = 5000L;
    TestClock clock = new TestClock();
    myMap.put(PropertyKeys.CLOCK, clock);
    myMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_UPDATE_INTERVAL_MS, timeInterval);
    Map<String, String> degraderProperties = degraderDefaultConfig();
    degraderProperties.put(PropertyKeys.DEGRADER_HIGH_ERROR_RATE, "0.5");
    degraderProperties.put(PropertyKeys.DEGRADER_LOW_ERROR_RATE, "0.2");
    DegraderImpl.Config degraderConfig = DegraderConfigFactory.toDegraderConfig(degraderProperties);
    double qps = 121;
    // test Strategy V3
    List<DegraderTrackerClient> clients = createTrackerClient(1, clock, degraderConfig);
    DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
    DegraderLoadBalancerStrategyV3 strategyV3 = new DegraderLoadBalancerStrategyV3(config, "DegraderLoadBalancerTest", null, DEGRADER_STATE_LISTENER_FACTORIES);
    DegraderLoadBalancerStrategyAdapter strategy = new DegraderLoadBalancerStrategyAdapter(strategyV3);
    testDegraderLoadBalancerSimulator(strategy, clock, timeInterval, clients, qps, degraderConfig);
}
Also used : DegraderTrackerClient(com.linkedin.d2.balancer.clients.DegraderTrackerClient) DegraderImpl(com.linkedin.util.degrader.DegraderImpl) AtomicLong(java.util.concurrent.atomic.AtomicLong) Test(org.testng.annotations.Test) DegraderTrackerClientTest(com.linkedin.d2.balancer.clients.DegraderTrackerClientTest)

Example 35 with DegraderTrackerClient

use of com.linkedin.d2.balancer.clients.DegraderTrackerClient in project rest.li by linkedin.

the class SimpleLoadBalancerTest method testLoadBalancerDropRate.

/**
 * This test simulates dropping requests by playing with OverrideDropRate in config
 */
@Test(groups = { "small", "back-end" })
public void testLoadBalancerDropRate() throws ServiceUnavailableException, ExecutionException, InterruptedException {
    final int RETRY = 10;
    for (int tryAgain = 0; tryAgain < RETRY; ++tryAgain) {
        Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories = new HashMap<>();
        Map<String, TransportClientFactory> clientFactories = new HashMap<>();
        List<String> prioritizedSchemes = new ArrayList<>();
        MockStore<ServiceProperties> serviceRegistry = new MockStore<>();
        MockStore<ClusterProperties> clusterRegistry = new MockStore<>();
        MockStore<UriProperties> uriRegistry = new MockStore<>();
        ScheduledExecutorService executorService = new SynchronousExecutorService();
        // loadBalancerStrategyFactories.put("rr", new RandomLoadBalancerStrategyFactory());
        loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
        // a DoNothingClientFactory stands in for a real factory such as new HttpClientFactory() or new PrpcClientFactory()
        clientFactories.put(PropertyKeys.HTTP_SCHEME, new DoNothingClientFactory());
        SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriRegistry, clusterRegistry, serviceRegistry, clientFactories, loadBalancerStrategyFactories);
        SimpleLoadBalancer loadBalancer = new SimpleLoadBalancer(state, 5, TimeUnit.SECONDS, _d2Executor);
        FutureCallback<None> balancerCallback = new FutureCallback<>();
        loadBalancer.start(balancerCallback);
        balancerCallback.get();
        URI uri1 = URI.create("http://test.qa1.com:1234");
        URI uri2 = URI.create("http://test.qa2.com:2345");
        URI uri3 = URI.create("http://test.qa3.com:6789");
        Map<Integer, PartitionData> partitionData = new HashMap<>(1);
        partitionData.put(DEFAULT_PARTITION_ID, new PartitionData(1d));
        Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<>(3);
        uriData.put(uri1, partitionData);
        uriData.put(uri2, partitionData);
        uriData.put(uri3, partitionData);
        prioritizedSchemes.add(PropertyKeys.HTTP_SCHEME);
        clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
        serviceRegistry.put("foo", new ServiceProperties("foo", "cluster-1", "/foo", Arrays.asList("degrader"), Collections.<String, Object>emptyMap(), null, null, prioritizedSchemes, null));
        uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));
        URI expectedUri1 = URI.create("http://test.qa1.com:1234/foo");
        URI expectedUri2 = URI.create("http://test.qa2.com:2345/foo");
        URI expectedUri3 = URI.create("http://test.qa3.com:6789/foo");
        Set<URI> expectedUris = new HashSet<>();
        expectedUris.add(expectedUri1);
        expectedUris.add(expectedUri2);
        expectedUris.add(expectedUri3);
        Random random = new Random();
        for (int i = 0; i < 100; ++i) {
            try {
                RewriteLoadBalancerClient client = (RewriteLoadBalancerClient) loadBalancer.getClient(new URIRequest("d2://foo/52"), new RequestContext());
                assertTrue(client.getDecoratedClient() instanceof RewriteClient);
                RewriteClient rewriteClient = (RewriteClient) client.getDecoratedClient();
                assertTrue(rewriteClient.getDecoratedClient() instanceof TrackerClient);
                DegraderTrackerClient tClient = (DegraderTrackerClient) rewriteClient.getDecoratedClient();
                DegraderImpl degrader = (DegraderImpl) tClient.getDegrader(DEFAULT_PARTITION_ID);
                DegraderImpl.Config cfg = new DegraderImpl.Config(degrader.getConfig());
                // randomly set the override drop rate to 1.0 or 0.0 (roughly half the time each)
                cfg.setOverrideDropRate((random.nextInt(2) == 0) ? 1.0 : 0.0);
                degrader.setConfig(cfg);
                assertTrue(expectedUris.contains(client.getUri()));
                assertEquals(client.getUri().getScheme(), PropertyKeys.HTTP_SCHEME);
            } catch (ServiceUnavailableException e) {
                assertTrue(e.toString().contains("in a bad state (high latency/high error)"));
            }
        }
        final CountDownLatch latch = new CountDownLatch(1);
        PropertyEventShutdownCallback callback = new PropertyEventShutdownCallback() {

            @Override
            public void done() {
                latch.countDown();
            }
        };
        state.shutdown(callback);
        if (!latch.await(60, TimeUnit.SECONDS)) {
            fail("unable to shutdown state");
        }
        executorService.shutdownNow();
        assertTrue(executorService.isShutdown(), "ExecutorService should have shut down!");
    }
}
Also used : PropertyEventShutdownCallback(com.linkedin.d2.discovery.event.PropertyEventThread.PropertyEventShutdownCallback) HashMap(java.util.HashMap) DegraderLoadBalancerStrategyFactoryV3(com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyFactoryV3) DarkClusterConfig(com.linkedin.d2.DarkClusterConfig) ArrayList(java.util.ArrayList) MockStore(com.linkedin.d2.discovery.stores.mock.MockStore) ServiceUnavailableException(com.linkedin.d2.balancer.ServiceUnavailableException) URI(java.net.URI) Random(java.util.Random) DegraderTrackerClient(com.linkedin.d2.balancer.clients.DegraderTrackerClient) TrackerClient(com.linkedin.d2.balancer.clients.TrackerClient) PartitionData(com.linkedin.d2.balancer.properties.PartitionData) UriProperties(com.linkedin.d2.balancer.properties.UriProperties) RequestContext(com.linkedin.r2.message.RequestContext) TransportClientFactory(com.linkedin.r2.transport.common.TransportClientFactory) FutureCallback(com.linkedin.common.callback.FutureCallback) HashSet(java.util.HashSet) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) SynchronousExecutorService(com.linkedin.d2.discovery.event.SynchronousExecutorService) RandomLoadBalancerStrategyFactory(com.linkedin.d2.balancer.strategies.random.RandomLoadBalancerStrategyFactory) LoadBalancerStrategyFactory(com.linkedin.d2.balancer.strategies.LoadBalancerStrategyFactory) RewriteLoadBalancerClient(com.linkedin.d2.balancer.clients.RewriteLoadBalancerClient) URIRequest(com.linkedin.d2.balancer.util.URIRequest) DegraderImpl(com.linkedin.util.degrader.DegraderImpl) LoadBalancerStrategy(com.linkedin.d2.balancer.strategies.LoadBalancerStrategy) CountDownLatch(java.util.concurrent.CountDownLatch) RewriteClient(com.linkedin.d2.balancer.clients.RewriteClient) ServiceProperties(com.linkedin.d2.balancer.properties.ServiceProperties) ClusterProperties(com.linkedin.d2.balancer.properties.ClusterProperties) None(com.linkedin.common.util.None) Map(java.util.Map) DarkClusterConfigMap(com.linkedin.d2.DarkClusterConfigMap) Test(org.testng.annotations.Test)
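
The part of the loop worth calling out is how it reaches the DegraderImpl behind the client returned by the load balancer and flips its override drop rate. Extracted as a helper for readability, this is just a restatement of the calls already shown above (DEFAULT_PARTITION_ID is the same constant the test uses):

// Restatement of the drop-rate toggle from the loop above: take a mutable copy of the
// degrader's config, change only the override drop rate, and apply it back.
private static void applyOverrideDropRate(DegraderTrackerClient tClient, double overrideDropRate) {
    DegraderImpl degrader = (DegraderImpl) tClient.getDegrader(DEFAULT_PARTITION_ID);
    DegraderImpl.Config cfg = new DegraderImpl.Config(degrader.getConfig());
    cfg.setOverrideDropRate(overrideDropRate);
    degrader.setConfig(cfg);
}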

Aggregations

DegraderTrackerClient (com.linkedin.d2.balancer.clients.DegraderTrackerClient): 58
Test (org.testng.annotations.Test): 46
DegraderTrackerClientTest (com.linkedin.d2.balancer.clients.DegraderTrackerClientTest): 42
ArrayList (java.util.ArrayList): 34
URI (java.net.URI): 31
RequestContext (com.linkedin.r2.message.RequestContext): 30
TrackerClient (com.linkedin.d2.balancer.clients.TrackerClient): 20
DegraderImpl (com.linkedin.util.degrader.DegraderImpl): 19
AtomicLong (java.util.concurrent.atomic.AtomicLong): 17
DegraderControl (com.linkedin.util.degrader.DegraderControl): 16
HashMap (java.util.HashMap): 16
DegraderTrackerClientImpl (com.linkedin.d2.balancer.clients.DegraderTrackerClientImpl): 14
CallCompletion (com.linkedin.util.degrader.CallCompletion): 11
URIRequest (com.linkedin.d2.balancer.util.URIRequest): 9
PartitionData (com.linkedin.d2.balancer.properties.PartitionData): 7
SettableClock (com.linkedin.util.clock.SettableClock): 6
LoadBalancerQuarantine (com.linkedin.d2.balancer.strategies.LoadBalancerQuarantine): 4
RestRequest (com.linkedin.r2.message.rest.RestRequest): 4
CountDownLatch (java.util.concurrent.CountDownLatch): 4
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 4