
Example 71 with Server

Use of com.linkedin.r2.transport.common.Server in project rest.li by linkedin.

From the class ZKFSTest, the method testZKDown:

@Test
public void testZKDown() throws Exception {
    final String TEST_SERVICE_NAME = "testingService";
    final String TEST_CLUSTER_NAME = "someCluster";
    startServer();
    try {
        ZKFSLoadBalancer balancer = getBalancer();
        FutureCallback<None> callback = new FutureCallback<>();
        balancer.start(callback);
        callback.get(30, TimeUnit.SECONDS);
        ZKConnection conn = new ZKConnection("localhost:" + PORT, 30000);
        conn.start();
        // Publish the service properties that the balancer will resolve for TEST_SERVICE_NAME
        ZooKeeperPermanentStore<ServiceProperties> store = new ZooKeeperPermanentStore<>(conn, new ServicePropertiesJsonSerializer(), ZKFSUtil.servicePath(BASE_PATH));
        callback = new FutureCallback<>();
        store.start(callback);
        callback.get(30, TimeUnit.SECONDS);
        ServiceProperties props = new ServiceProperties(TEST_SERVICE_NAME, TEST_CLUSTER_NAME, "/somePath", Arrays.asList("degrader"), Collections.<String, Object>emptyMap(), null, null, Arrays.asList("http"), null);
        store.put(TEST_SERVICE_NAME, props);
        // Publish cluster properties for TEST_CLUSTER_NAME
        ZooKeeperPermanentStore<ClusterProperties> clusterStore = new ZooKeeperPermanentStore<>(conn, new ClusterPropertiesJsonSerializer(), ZKFSUtil.clusterPath(BASE_PATH));
        callback = new FutureCallback<>();
        clusterStore.start(callback);
        callback.get(30, TimeUnit.SECONDS);
        ClusterProperties clusterProps = new ClusterProperties(TEST_CLUSTER_NAME);
        clusterStore.put(TEST_CLUSTER_NAME, clusterProps);
        ZKConnection serverConn = new ZKConnection("localhost:" + PORT, 30000);
        serverConn.start();
        // Ephemeral store through which the server announces its URIs
        ZooKeeperEphemeralStore<UriProperties> uriStore = new ZooKeeperEphemeralStore<>(serverConn, new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), ZKFSUtil.uriPath(BASE_PATH));
        callback = new FutureCallback<>();
        uriStore.start(callback);
        callback.get(30, TimeUnit.SECONDS);
        // d2's ZooKeeperServer announcer (not the ZooKeeper server process started by startServer())
        ZooKeeperServer server = new ZooKeeperServer(uriStore);
        callback = new FutureCallback<>();
        Map<Integer, PartitionData> partitionDataMap = new HashMap<>();
        partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1.0));
        // Mark the test URI up for the cluster in the default partition
        server.markUp(TEST_CLUSTER_NAME, URI.create("http://test.uri"), partitionDataMap, callback);
        callback.get(30, TimeUnit.SECONDS);
        URIRequest request = new URIRequest("d2://" + TEST_SERVICE_NAME + "/foo");
        TransportClient client = balancer.getClient(request, new RequestContext());
        // Stop the server to cause a disconnect event
        stopServer();
        // Sleep to ensure the disconnect has propagated; ideally the Toggle should expose
        // some interface to allow detection that the toggle occurred
        Thread.sleep(1000);
        // Now see if it still works
        client = balancer.getClient(request, new RequestContext());
    } finally {
        stopServer();
    }
}
Also used : ServicePropertiesJsonSerializer(com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer) HashMap(java.util.HashMap) ZooKeeperPermanentStore(com.linkedin.d2.discovery.stores.zk.ZooKeeperPermanentStore) UriPropertiesMerger(com.linkedin.d2.balancer.properties.UriPropertiesMerger) UriPropertiesJsonSerializer(com.linkedin.d2.balancer.properties.UriPropertiesJsonSerializer) ClusterPropertiesJsonSerializer(com.linkedin.d2.balancer.properties.ClusterPropertiesJsonSerializer) PartitionData(com.linkedin.d2.balancer.properties.PartitionData) UriProperties(com.linkedin.d2.balancer.properties.UriProperties) RequestContext(com.linkedin.r2.message.RequestContext) FutureCallback(com.linkedin.common.callback.FutureCallback) TransportClient(com.linkedin.r2.transport.common.bridge.client.TransportClient) ZKConnection(com.linkedin.d2.discovery.stores.zk.ZKConnection) URIRequest(com.linkedin.d2.balancer.util.URIRequest) ZooKeeperEphemeralStore(com.linkedin.d2.discovery.stores.zk.ZooKeeperEphemeralStore) ServiceProperties(com.linkedin.d2.balancer.properties.ServiceProperties) ClusterProperties(com.linkedin.d2.balancer.properties.ClusterProperties) None(com.linkedin.common.util.None) ZooKeeperServer(com.linkedin.d2.balancer.servers.ZooKeeperServer) Test(org.testng.annotations.Test)
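
The test above only resolves a TransportClient after ZooKeeper goes down; it never sends a request through it. As a rough sketch of how that resolved client could be exercised, reusing the restRequest call shape that appears in the SimpleLoadBalancerStateTest example further below (the d2 URI and the helper name sendProbe are illustrative, not part of the test):

private static void sendProbe(TransportClient client) {
    // Build a simple GET against the service registered by the test
    RestRequest probe = new RestRequestBuilder(URI.create("d2://testingService/foo")).setMethod("get").build();
    // Fire-and-forget: no wire attributes and an empty callback, mirroring the TrackerClient calls below
    client.restRequest(probe, new RequestContext(), Collections.<String, String>emptyMap(), new TransportCallbackAdapter<>(Callbacks.empty()));
}

This assumes the usual r2 helpers (RestRequestBuilder, TransportCallbackAdapter, Callbacks) are available; whether the request succeeds after the ZooKeeper outage depends on the toggled ZKFS state, which is exactly what the test leaves unasserted.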

Example 72 with Server

Use of com.linkedin.r2.transport.common.Server in project rest.li by linkedin.

From the class CacheClientExample, the method sendTraffic:

private static void sendTraffic(Map<String, Long> trafficProportion, D2Client d2Client, List<String> keys) throws Exception {
    for (Map.Entry<String, Long> proportion : trafficProportion.entrySet()) {
        long queryPerSecond = proportion.getValue();
        String serviceName = proportion.getKey();
        Random random = new Random();
        for (int i = 0; i < queryPerSecond; i++) {
            final URI uri = new URI("d2://" + serviceName + "?key=" + keys.get(random.nextInt(keys.size())));
            RestRequestBuilder requestBuilder = new RestRequestBuilder(uri).setMethod("get");
            RestRequest request = requestBuilder.build();
            // We don't act on the server's response here beyond logging it;
            // stdout shows which echo server handled each request.
            d2Client.restRequest(request, new Callback<RestResponse>() {

                @Override
                public void onError(Throwable e) {
                    System.err.println("URI = " + uri.toString() + " didn't get any response");
                }

                @Override
                public void onSuccess(RestResponse result) {
                    System.out.println("URI = " + uri.toString() + " was served by " + result.getEntity().asString("UTF-8"));
                }
            });
        }
    }
}
Also used : RestResponse(com.linkedin.r2.message.rest.RestResponse) URI(java.net.URI) RestRequest(com.linkedin.r2.message.rest.RestRequest) Random(java.util.Random) RestRequestBuilder(com.linkedin.r2.message.rest.RestRequestBuilder) Map(java.util.Map)
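
The queryPerSecond name suggests this burst is meant to be repeated once per second. A minimal driver sketch for that, using only the JDK scheduler (runForSeconds and its parameters are illustrative; the example's real main method is not shown here):

private static void runForSeconds(int seconds, Map<String, Long> trafficProportion, D2Client d2Client, List<String> keys) throws Exception {
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    // Fire one burst of queryPerSecond requests per service, once per second
    ScheduledFuture<?> task = scheduler.scheduleAtFixedRate(() -> {
        try {
            sendTraffic(trafficProportion, d2Client, keys);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }, 0, 1, TimeUnit.SECONDS);
    Thread.sleep(TimeUnit.SECONDS.toMillis(seconds));
    task.cancel(false);
    scheduler.shutdown();
}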

Example 73 with Server

Use of com.linkedin.r2.transport.common.Server in project rest.li by linkedin.

From the class BigDataClientExample, the method sendTraffic:

private static void sendTraffic(Map<String, Long> trafficProportion, D2Client d2Client, final String name) throws Exception {
    for (Map.Entry<String, Long> proportion : trafficProportion.entrySet()) {
        long queryPerSecond = proportion.getValue();
        String serviceName = proportion.getKey();
        for (int i = 0; i < queryPerSecond; i++) {
            final URI uri = new URI("d2://" + serviceName);
            final long sent = System.currentTimeMillis();
            RestRequestBuilder requestBuilder = new RestRequestBuilder(uri).setMethod("get");
            RestRequest request = requestBuilder.build();
            // We don't act on the server's response here beyond logging it;
            // stdout shows which echo server handled each request and how long it took.
            d2Client.restRequest(request, new Callback<RestResponse>() {

                @Override
                public void onError(Throwable e) {
                    System.err.println(name + " sending URI = " + uri.toString() + " didn't get any response after " + (System.currentTimeMillis() - sent) + " ms");
                }

                @Override
                public void onSuccess(RestResponse result) {
                    System.out.println(name + " sending URI = " + uri.toString() + " was served by " + result.getEntity().asString("UTF-8") + " after " + (System.currentTimeMillis() - sent) + " ms");
                }
            });
        }
    }
}
Also used : RestResponse(com.linkedin.r2.message.rest.RestResponse) URI(java.net.URI) RestRequest(com.linkedin.r2.message.rest.RestRequest) RestRequestBuilder(com.linkedin.r2.message.rest.RestRequestBuilder) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Map(java.util.Map)
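
Both sendTraffic variants expect a trafficProportion map from service name to the number of requests per burst, prepared by the caller. A hypothetical setup (the service names, counts, and keys below are made up for illustration):

Map<String, Long> trafficProportion = new HashMap<>();
// hypothetical d2 service names and per-burst request counts
trafficProportion.put("exampleEchoService", 95L);
trafficProportion.put("exampleBackupService", 5L);
// keys are only needed by the CacheClientExample variant, which appends them as ?key= query parameters
List<String> keys = Arrays.asList("alpha", "beta", "gamma");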

Example 74 with Server

Use of com.linkedin.r2.transport.common.Server in project rest.li by linkedin.

From the class SimpleLoadBalancerTest, the method testLoadBalancerWithPartitionsSmoke:

// load balancer working with partitioned cluster
@Test(groups = { "small", "back-end" })
public void testLoadBalancerWithPartitionsSmoke() throws URISyntaxException, ServiceUnavailableException, InterruptedException, ExecutionException {
    for (int tryAgain = 0; tryAgain < 12; ++tryAgain) {
        Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories = new HashMap<>();
        Map<String, TransportClientFactory> clientFactories = new HashMap<>();
        List<String> prioritizedSchemes = new ArrayList<>();
        MockStore<ServiceProperties> serviceRegistry = new MockStore<>();
        MockStore<ClusterProperties> clusterRegistry = new MockStore<>();
        MockStore<UriProperties> uriRegistry = new MockStore<>();
        ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
        loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
        clientFactories.put(PropertyKeys.HTTP_SCHEME, new DoNothingClientFactory());
        SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriRegistry, clusterRegistry, serviceRegistry, clientFactories, loadBalancerStrategyFactories);
        SimpleLoadBalancer loadBalancer = new SimpleLoadBalancer(state, 5, TimeUnit.SECONDS, executorService);
        FutureCallback<None> balancerCallback = new FutureCallback<>();
        loadBalancer.start(balancerCallback);
        balancerCallback.get();
        URI uri1 = URI.create("http://test.qa1.com:1234");
        URI uri2 = URI.create("http://test.qa2.com:2345");
        URI uri3 = URI.create("http://test.qa3.com:6789");
        Map<URI, Double> uris = new HashMap<>();
        uris.put(uri1, 1d);
        uris.put(uri2, 1d);
        uris.put(uri3, 1d);
        Map<URI, Map<Integer, PartitionData>> partitionDesc = new HashMap<>();
        Map<Integer, PartitionData> server1 = new HashMap<>();
        server1.put(0, new PartitionData(1d));
        server1.put(1, new PartitionData(1d));
        Map<Integer, PartitionData> server2 = new HashMap<>();
        server2.put(0, new PartitionData(1d));
        Map<Integer, PartitionData> server3 = new HashMap<>();
        server3.put(1, new PartitionData(1d));
        partitionDesc.put(uri1, server1);
        partitionDesc.put(uri2, server2);
        partitionDesc.put(uri3, server3);
        prioritizedSchemes.add(PropertyKeys.HTTP_SCHEME);
        int partitionMethod = tryAgain % 4;
        switch(partitionMethod) {
            case 0:
                clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null, new HashMap<>(), new HashSet<>(), new RangeBasedPartitionProperties("id=(\\d+)", 0, 50, 2)));
                break;
            case 1:
                clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null, new HashMap<>(), new HashSet<>(), new HashBasedPartitionProperties("id=(\\d+)", 2, HashBasedPartitionProperties.HashAlgorithm.valueOf("MODULO"))));
                break;
            case 2:
                clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null, new HashMap<>(), new HashSet<>(), new HashBasedPartitionProperties("id=(\\d+)", 2, HashBasedPartitionProperties.HashAlgorithm.valueOf("MD5"))));
                break;
            case 3:
                // Test getRings when there is a gap: no server serves partition 2 in this case
                clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null, new HashMap<>(), new HashSet<>(), new RangeBasedPartitionProperties("id=(\\d+)", 0, 50, 4)));
                server3.put(3, new PartitionData(1d));
                partitionDesc.put(uri3, server3);
                break;
            default:
                break;
        }
        serviceRegistry.put("foo", new ServiceProperties("foo", "cluster-1", "/foo", Arrays.asList("degrader"), Collections.singletonMap(PropertyKeys.HTTP_LB_CONSISTENT_HASH_ALGORITHM, "pointBased"), null, null, prioritizedSchemes, null));
        uriRegistry.put("cluster-1", new UriProperties("cluster-1", partitionDesc));
        if (partitionMethod == 3) {
            Map<Integer, Ring<URI>> ringMap = loadBalancer.getRings(URI.create("d2://foo"));
            assertEquals(ringMap.size(), 4);
            // the ring for partition 2 should be empty
            assertEquals(ringMap.get(2).toString(), new ConsistentHashRing<>(Collections.emptyList()).toString());
            continue;
        }
        URI expectedUri1 = URI.create("http://test.qa1.com:1234/foo");
        URI expectedUri2 = URI.create("http://test.qa2.com:2345/foo");
        URI expectedUri3 = URI.create("http://test.qa3.com:6789/foo");
        Set<URI> expectedUris = new HashSet<>();
        expectedUris.add(expectedUri1);
        expectedUris.add(expectedUri2);
        expectedUris.add(expectedUri3);
        for (int i = 0; i < 1000; ++i) {
            int ii = i % 100;
            RewriteLoadBalancerClient client = (RewriteLoadBalancerClient) loadBalancer.getClient(new URIRequest("d2://foo/id=" + ii), new RequestContext());
            String clientUri = client.getUri().toString();
            HashFunction<String[]> hashFunction = null;
            String[] str = new String[1];
            // test KeyMapper target host hint: request is always to target host regardless of what's in d2 URI and whether it's hash-based or range-based partitions
            RequestContext requestContextWithHint = new RequestContext();
            KeyMapper.TargetHostHints.setRequestContextTargetHost(requestContextWithHint, uri1);
            RewriteLoadBalancerClient hintedClient1 = (RewriteLoadBalancerClient) loadBalancer.getClient(new URIRequest("d2://foo/id=" + ii), requestContextWithHint);
            String hintedUri1 = hintedClient1.getUri().toString();
            Assert.assertEquals(hintedUri1, uri1.toString() + "/foo");
            RewriteLoadBalancerClient hintedClient2 = (RewriteLoadBalancerClient) loadBalancer.getClient(new URIRequest("d2://foo/action=purge-all"), requestContextWithHint);
            String hintedUri2 = hintedClient2.getUri().toString();
            Assert.assertEquals(hintedUri2, uri1.toString() + "/foo");
            if (partitionMethod == 2) {
                hashFunction = new MD5Hash();
            }
            for (URI uri : expectedUris) {
                if (clientUri.contains(uri.toString())) {
                    // check that only keys belonging to partition 0 get uri2
                    if (uri.equals(uri2)) {
                        if (partitionMethod == 0) {
                            assertTrue(ii < 50);
                        } else if (partitionMethod == 1) {
                            assertTrue(ii % 2 == 0);
                        } else {
                            str[0] = ii + "";
                            assertTrue(hashFunction.hash(str) % 2 == 0);
                        }
                    }
                    // check that only keys belonging to partition 1 get uri3
                    if (uri.equals(uri3)) {
                        if (partitionMethod == 0) {
                            assertTrue(ii >= 50);
                        } else if (partitionMethod == 1) {
                            assertTrue(ii % 2 == 1);
                        } else {
                            str[0] = ii + "";
                            assertTrue(hashFunction.hash(str) % 2 == 1);
                        }
                    }
                }
            }
        }
        // two rings for two partitions
        Map<Integer, Ring<URI>> ringMap = loadBalancer.getRings(URI.create("d2://foo"));
        assertEquals(ringMap.size(), 2);
        if (partitionMethod != 2) {
            Set<String> keys = new HashSet<>();
            for (int j = 0; j < 50; j++) {
                if (partitionMethod == 0) {
                    keys.add(j + "");
                } else {
                    keys.add(j * 2 + "");
                }
            }
            // For range-based partitioning, all keys 0-49 belong to partition 0 according to the range definition;
            // for modulo-based partitioning, all even keys belong to partition 0 because the partition count is 2.
            // Either way, these keys map only to partition 0.
            MapKeyResult<Ring<URI>, String> mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
            Map<Ring<URI>, Collection<String>> keyToPartition = mapKeyResult.getMapResult();
            assertEquals(keyToPartition.size(), 1);
            for (Ring<URI> ring : keyToPartition.keySet()) {
                assertEquals(ring, ringMap.get(0));
            }
            // now also from partition 1
            keys.add("51");
            mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
            assertEquals(mapKeyResult.getMapResult().size(), 2);
            assertEquals(mapKeyResult.getUnmappedKeys().size(), 0);
            // now only from partition 1
            keys.clear();
            keys.add("99");
            mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
            keyToPartition = mapKeyResult.getMapResult();
            assertEquals(keyToPartition.size(), 1);
            assertEquals(mapKeyResult.getUnmappedKeys().size(), 0);
            for (Ring<URI> ring : keyToPartition.keySet()) {
                assertEquals(ring, ringMap.get(1));
            }
            keys.add("100");
            mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
            if (partitionMethod == 0) {
                // key out of range
                Collection<MapKeyResult.UnmappedKey<String>> unmappedKeys = mapKeyResult.getUnmappedKeys();
                assertEquals(unmappedKeys.size(), 1);
            }
            try {
                loadBalancer.getClient(new URIRequest("d2://foo/id=100"), new RequestContext());
                if (partitionMethod == 0) {
                    // key out of range
                    fail("Should throw ServiceUnavailableException caused by PartitionAccessException");
                }
            } catch (ServiceUnavailableException e) {
                // Expected for the range-based case: key 100 is outside the defined partition ranges
            }
        }
        final CountDownLatch latch = new CountDownLatch(1);
        PropertyEventShutdownCallback callback = new PropertyEventShutdownCallback() {

            @Override
            public void done() {
                latch.countDown();
            }
        };
        state.shutdown(callback);
        if (!latch.await(60, TimeUnit.SECONDS)) {
            fail("unable to shutdown state");
        }
        executorService.shutdownNow();
        assertTrue(executorService.isShutdown(), "ExecutorService should have shut down!");
    }
}
Also used : HashMap(java.util.HashMap) DegraderLoadBalancerStrategyFactoryV3(com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyFactoryV3) ArrayList(java.util.ArrayList) PartitionData(com.linkedin.d2.balancer.properties.PartitionData) UriProperties(com.linkedin.d2.balancer.properties.UriProperties) RangeBasedPartitionProperties(com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties) RequestContext(com.linkedin.r2.message.RequestContext) FutureCallback(com.linkedin.common.callback.FutureCallback) HashSet(java.util.HashSet) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) RandomLoadBalancerStrategyFactory(com.linkedin.d2.balancer.strategies.random.RandomLoadBalancerStrategyFactory) LoadBalancerStrategyFactory(com.linkedin.d2.balancer.strategies.LoadBalancerStrategyFactory) HashBasedPartitionProperties(com.linkedin.d2.balancer.properties.HashBasedPartitionProperties) RewriteLoadBalancerClient(com.linkedin.d2.balancer.clients.RewriteLoadBalancerClient) URIRequest(com.linkedin.d2.balancer.util.URIRequest) Collection(java.util.Collection) Map(java.util.Map) DarkClusterConfigMap(com.linkedin.d2.DarkClusterConfigMap) HashMap(java.util.HashMap) PropertyEventShutdownCallback(com.linkedin.d2.discovery.event.PropertyEventThread.PropertyEventShutdownCallback) MockStore(com.linkedin.d2.discovery.stores.mock.MockStore) ServiceUnavailableException(com.linkedin.d2.balancer.ServiceUnavailableException) URI(java.net.URI) ConsistentHashRing(com.linkedin.d2.balancer.util.hashing.ConsistentHashRing) TransportClientFactory(com.linkedin.r2.transport.common.TransportClientFactory) LoadBalancerStrategy(com.linkedin.d2.balancer.strategies.LoadBalancerStrategy) CountDownLatch(java.util.concurrent.CountDownLatch) ServiceProperties(com.linkedin.d2.balancer.properties.ServiceProperties) ConsistentHashRing(com.linkedin.d2.balancer.util.hashing.ConsistentHashRing) Ring(com.linkedin.d2.balancer.util.hashing.Ring) ClusterProperties(com.linkedin.d2.balancer.properties.ClusterProperties) MD5Hash(com.linkedin.d2.balancer.util.hashing.MD5Hash) None(com.linkedin.common.util.None) Test(org.testng.annotations.Test)
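
The assertions in this test lean on how keys map to partitions: with RangeBasedPartitionProperties("id=(\\d+)", 0, 50, 2) a key k lands in partition (k - 0) / 50, and with the MODULO hash it lands in k % 2. The helpers below illustrate that arithmetic; they are a simplified stand-in for the real d2 partition accessors, not the library code:

// Simplified stand-in for range-based partition assignment; the arguments correspond to the
// RangeBasedPartitionProperties values used above (keyRangeStart, partitionSize, partitionCount).
static int rangeBasedPartition(long key, long keyRangeStart, long partitionSize, int partitionCount) {
    long offset = key - keyRangeStart;
    if (offset < 0 || offset >= partitionSize * partitionCount) {
        throw new IllegalArgumentException("key " + key + " is outside the defined ranges");
    }
    return (int) (offset / partitionSize);
}

// Simplified stand-in for modulo-based partition assignment.
static int moduloPartition(long key, int partitionCount) {
    return (int) (key % partitionCount);
}

With keyRangeStart = 0, partitionSize = 50 and partitionCount = 2, keys 0-49 land in partition 0 and keys 50-99 in partition 1, while key 100 is rejected; this matches the unmapped-key count and the expected ServiceUnavailableException in the range-based branch above.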

Example 75 with Server

Use of com.linkedin.r2.transport.common.Server in project rest.li by linkedin.

From the class SimpleLoadBalancerStateTest, the method testClientsShutdownAfterPropertyUpdatesRestRequest:

@Test(groups = { "small", "back-end" })
public void testClientsShutdownAfterPropertyUpdatesRestRequest() throws URISyntaxException, InterruptedException {
    reset();
    URI uri = URI.create("http://cluster-1/test");
    List<String> schemes = new ArrayList<>();
    Map<Integer, PartitionData> partitionData = new HashMap<>(1);
    partitionData.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
    Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<>();
    uriData.put(uri, partitionData);
    schemes.add("http");
    // set up state
    _state.listenToService("service-1", new NullStateListenerCallback());
    _state.listenToCluster("cluster-1", new NullStateListenerCallback());
    _state.setDelayedExecution(0);
    _serviceRegistry.put("service-1", new ServiceProperties("service-1", "cluster-1", "/test", Arrays.asList("random"), Collections.<String, Object>emptyMap(), Collections.<String, Object>emptyMap(), Collections.<String, String>emptyMap(), schemes, Collections.<URI>emptySet()));
    _clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
    _uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));
    URI uri1 = URI.create("http://partition-cluster-1/test1");
    URI uri2 = URI.create("http://partition-cluster-1/test2");
    _state.listenToCluster("partition-cluster-1", new NullStateListenerCallback());
    _clusterRegistry.put("partition-cluster-1", new ClusterProperties("partition-cluster-1", null, new HashMap<>(), new HashSet<>(), new RangeBasedPartitionProperties("id=(\\d+)", 0, 100, 2)));
    _state.listenToService("partition-service-1", new NullStateListenerCallback());
    _serviceRegistry.put("partition-service-1", new ServiceProperties("partition-service-1", "partition-cluster-1", "/partition-test", Arrays.asList("degraderV3"), Collections.<String, Object>emptyMap(), Collections.<String, Object>emptyMap(), Collections.<String, String>emptyMap(), schemes, Collections.<URI>emptySet()));
    Map<Integer, PartitionData> partitionWeight = new HashMap<>();
    partitionWeight.put(0, new PartitionData(1d));
    partitionWeight.put(1, new PartitionData(2d));
    Map<URI, Map<Integer, PartitionData>> partitionDesc = new HashMap<>();
    partitionDesc.put(uri1, partitionWeight);
    // uri1 and uri2 share the same partitionWeight map instance, so removing partition 0 and
    // adding partition 2 here affects both entries; the assertions below rely on this.
    partitionWeight.remove(0);
    partitionWeight.put(2, new PartitionData(1d));
    partitionDesc.put(uri2, partitionWeight);
    _uriRegistry.put("partition-cluster-1", new UriProperties("partition-cluster-1", partitionDesc));
    TrackerClient client1 = _state.getClient("partition-service-1", uri1);
    TrackerClient client2 = _state.getClient("partition-service-1", uri2);
    assertEquals(client2.getPartitionWeight(1), 2d);
    assertEquals(client2.getPartitionWeight(2), 1d);
    assertEquals(client1.getPartitionWeight(1), 2d);
    // Get client, then refresh cluster
    TrackerClient client = _state.getClient("service-1", uri);
    client.restRequest(new RestRequestBuilder(URI.create("d2://service-1/foo")).build(), new RequestContext(), Collections.emptyMap(), new TransportCallbackAdapter<>(Callbacks.empty()));
    // now force a refresh by adding cluster
    _clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
    // Get client, then refresh service
    client = _state.getClient("service-1", uri);
    client.restRequest(new RestRequestBuilder(URI.create("d2://service-1/foo")).build(), new RequestContext(), Collections.emptyMap(), new TransportCallbackAdapter<>(Callbacks.empty()));
    // refresh by adding service
    _serviceRegistry.put("service-1", new ServiceProperties("service-1", "cluster-1", "/test", Arrays.asList("random"), Collections.<String, Object>emptyMap(), null, null, schemes, null));
    // Get client, then mark server up/down
    client = _state.getClient("service-1", uri);
    client.restRequest(new RestRequestBuilder(URI.create("d2://service-1/foo")).build(), new RequestContext(), Collections.emptyMap(), new TransportCallbackAdapter<>(Callbacks.empty()));
    _uriRegistry.put("cluster-1", new UriProperties("cluster-1", Collections.<URI, Map<Integer, PartitionData>>emptyMap()));
    _uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));
    // Get the client one last time
    client = _state.getClient("service-1", uri);
    client.restRequest(new RestRequestBuilder(URI.create("d2://service-1/foo")).build(), new RequestContext(), Collections.emptyMap(), new TransportCallbackAdapter<>(Callbacks.empty()));
    TestShutdownCallback callback = new TestShutdownCallback();
    _state.shutdown(callback);
    assertTrue(callback.await(10, TimeUnit.SECONDS), "Failed to shut down state");
    for (TransportClientFactory factory : _clientFactories.values()) {
        SimpleLoadBalancerTest.DoNothingClientFactory f = (SimpleLoadBalancerTest.DoNothingClientFactory) factory;
        assertEquals(f.getRunningClientCount(), 0, "not all clients were shut down");
    }
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) URI(java.net.URI) TrackerClient(com.linkedin.d2.balancer.clients.TrackerClient) PartitionData(com.linkedin.d2.balancer.properties.PartitionData) UriProperties(com.linkedin.d2.balancer.properties.UriProperties) RangeBasedPartitionProperties(com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties) RequestContext(com.linkedin.r2.message.RequestContext) TransportClientFactory(com.linkedin.r2.transport.common.TransportClientFactory) HashSet(java.util.HashSet) NullStateListenerCallback(com.linkedin.d2.balancer.LoadBalancerState.NullStateListenerCallback) ServiceProperties(com.linkedin.d2.balancer.properties.ServiceProperties) ClusterProperties(com.linkedin.d2.balancer.properties.ClusterProperties) RestRequestBuilder(com.linkedin.r2.message.rest.RestRequestBuilder) Map(java.util.Map) HashMap(java.util.HashMap) Test(org.testng.annotations.Test) DegraderLoadBalancerTest(com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerTest)
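
TestShutdownCallback is a test helper whose source is not included in this snippet. A plausible minimal version, mirroring the latch-based PropertyEventShutdownCallback used in the SimpleLoadBalancerTest example above (a guess at the helper, not its actual implementation):

private static class TestShutdownCallback implements PropertyEventShutdownCallback {
    private final CountDownLatch _latch = new CountDownLatch(1);

    @Override
    public void done() {
        // shutdown of the load balancer state has completed
        _latch.countDown();
    }

    public boolean await(long timeout, TimeUnit unit) throws InterruptedException {
        return _latch.await(timeout, unit);
    }
}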

Aggregations

Test (org.testng.annotations.Test): 82
RestRequest (com.linkedin.r2.message.rest.RestRequest): 52
RestRequestBuilder (com.linkedin.r2.message.rest.RestRequestBuilder): 50
RequestContext (com.linkedin.r2.message.RequestContext): 49
URI (java.net.URI): 43
RestResponse (com.linkedin.r2.message.rest.RestResponse): 41
StreamRequest (com.linkedin.r2.message.stream.StreamRequest): 32
FutureCallback (com.linkedin.common.callback.FutureCallback): 30
StreamResponse (com.linkedin.r2.message.stream.StreamResponse): 30
CountDownLatch (java.util.concurrent.CountDownLatch): 24
StreamRequestBuilder (com.linkedin.r2.message.stream.StreamRequestBuilder): 23
RestException (com.linkedin.r2.message.rest.RestException): 21
ExecutionException (java.util.concurrent.ExecutionException): 21
None (com.linkedin.common.util.None): 20
Server (org.eclipse.jetty.server.Server): 20
ByteString (com.linkedin.data.ByteString): 19
HttpServerBuilder (com.linkedin.r2.testutils.server.HttpServerBuilder): 19
HashMap (java.util.HashMap): 15
RemoteInvocationException (com.linkedin.r2.RemoteInvocationException): 13
ArrayList (java.util.ArrayList): 13