Example 1 with PropertyEventThread

Use of com.linkedin.d2.discovery.event.PropertyEventThread in the rest.li project by LinkedIn.

From the class PropertyEventThreadTest, method testUnstartedThread:

@Test(groups = { "small", "back-end" })
public void testUnstartedThread() {
    PropertyEventThread thread = new PropertyEventThread("test", 1001);
    assertEquals(thread.getRemainingCapacity(), 1001);
    assertEquals(thread.getQueuedMessageCount(), 0);
    assertFalse(thread.isAlive());
    assertTrue(thread.isDaemon());
// assertFalse(thread.send(null));
// assertFalse(thread.send(new PropertyTestEvent("donothing")));
}
Also used : PropertyEventThread(com.linkedin.d2.discovery.event.PropertyEventThread) Test(org.testng.annotations.Test)

Example 2 with PropertyEventThread

Use of com.linkedin.d2.discovery.event.PropertyEventThread in the rest.li project by LinkedIn.

From the class PropertyEventThreadTest, method testThread:

@Test(groups = { "small", "back-end" })
public void testThread() throws InterruptedException {
    PropertyEventThread thread = new PropertyEventThread("test");
    PropertyTestEvent testEvent = new PropertyTestEvent("counter");
    // Test doesn't make sense with hacked PropertyEventThread
    // assertFalse(thread.send(testEvent));
    thread.start();
    assertTrue(thread.send(testEvent));
    assertTrue(thread.send(testEvent));
    thread.interrupt();
    thread.join(0);
    // Also doesn't make sense with hack
    // assertFalse(thread.send(testEvent));
    assertEquals(testEvent.getCount(), 2);
}
Also used : PropertyEventThread(com.linkedin.d2.discovery.event.PropertyEventThread) Test(org.testng.annotations.Test)
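
The PropertyTestEvent helper used in Examples 1 and 2 is not shown on this page. Below is a minimal sketch of what such a counting event could look like, assuming PropertyEventThread.PropertyEvent is the base event type and that subclasses supply their work through an innerRun() hook; both the base-class details and the helper itself are assumptions for illustration, not code copied from rest.li.

import java.util.concurrent.atomic.AtomicInteger;

import com.linkedin.d2.discovery.event.PropertyEventThread;

// Hypothetical counting event, modeled on how PropertyTestEvent is used above.
// Assumption: PropertyEventThread.PropertyEvent takes a description string and
// subclasses override innerRun() to perform the actual work.
public class PropertyTestEvent extends PropertyEventThread.PropertyEvent {
    private final AtomicInteger _count = new AtomicInteger();

    public PropertyTestEvent(String description) {
        super(description);
    }

    @Override
    public void innerRun() {
        // incremented once each time the event thread processes this event,
        // which is what testThread's getCount() assertion relies on
        _count.incrementAndGet();
    }

    public int getCount() {
        return _count.get();
    }
}

With an event like this, testThread's final assertion follows: each successfully sent event is processed once by the started thread, so two sends yield getCount() == 2.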

Example 3 with PropertyEventThread

Use of com.linkedin.d2.discovery.event.PropertyEventThread in the rest.li project by LinkedIn.

From the class LoadBalancerEchoClient, method getLoadBalancer:

public static SimpleLoadBalancer getLoadBalancer(String hostPort) throws IOException, PropertyStoreException {
    // zk stores
    ZooKeeperPermanentStore<ClusterProperties> zkClusterRegistry = null;
    ZooKeeperPermanentStore<ServiceProperties> zkServiceRegistry = null;
    ZooKeeperEphemeralStore<UriProperties> zkUriRegistry = null;
    ZKConnection zkClient = new ZKConnection(hostPort, 10000);
    zkClusterRegistry = new ZooKeeperPermanentStore<>(zkClient, new ClusterPropertiesJsonSerializer(), _basePath + "/clusters");
    zkServiceRegistry = new ZooKeeperPermanentStore<>(zkClient, new ServicePropertiesJsonSerializer(), _basePath + "/services");
    zkUriRegistry = new ZooKeeperEphemeralStore<>(zkClient, new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), _basePath + "/uris", false, true);
    // fs stores
    File testDirectory = LoadBalancerUtil.createTempDirectory("lb-degrader-witih-file-store-large");
    testDirectory.deleteOnExit();
    new File(testDirectory + File.separator + "cluster").mkdir();
    new File(testDirectory + File.separator + "service").mkdir();
    new File(testDirectory + File.separator + "uri").mkdir();
    FileStore<ClusterProperties> fsClusterStore = new FileStore<>(testDirectory + File.separator + "cluster", FileSystemDirectory.FILE_STORE_EXTENSION, new ClusterPropertiesJsonSerializer());
    FileStore<ServiceProperties> fsServiceStore = new FileStore<>(testDirectory + File.separator + "service", FileSystemDirectory.FILE_STORE_EXTENSION, new ServicePropertiesJsonSerializer());
    FileStore<UriProperties> fsUriStore = new FileStore<>(testDirectory + File.separator + "uri", FileSystemDirectory.FILE_STORE_EXTENSION, new UriPropertiesJsonSerializer());
    // chains
    PropertyEventThread thread = new PropertyEventThread("echo client event thread");
    ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("echo client event thread"));
    // start up the world
    thread.start();
    PropertyEventBus<ServiceProperties> serviceBus = new PropertyEventBusImpl<>(executorService, zkServiceRegistry);
    serviceBus.register(fsServiceStore);
    new ZooKeeperTogglingStore<>(zkServiceRegistry, fsServiceStore, serviceBus, true);
    PropertyEventBus<UriProperties> uriBus = new PropertyEventBusImpl<>(executorService, zkUriRegistry);
    uriBus.register(fsUriStore);
    new ZooKeeperTogglingStore<>(zkUriRegistry, fsUriStore, uriBus, true);
    PropertyEventBus<ClusterProperties> clusterBus = new PropertyEventBusImpl<>(executorService, zkClusterRegistry);
    clusterBus.register(fsClusterStore);
    new ZooKeeperTogglingStore<>(zkClusterRegistry, fsClusterStore, clusterBus, true);
    Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories = new HashMap<>();
    // strategy and scheme factories
    loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
    Map<String, TransportClientFactory> clientFactories = new HashMap<>();
    clientFactories.put("http", new HttpClientFactory.Builder().build());
    // create the state
    SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriBus, clusterBus, serviceBus, clientFactories, loadBalancerStrategyFactories, null, null, false);
    SimpleLoadBalancer balancer = new SimpleLoadBalancer(state, 5, TimeUnit.SECONDS, executorService);
    new JmxManager().registerLoadBalancer("balancer", balancer).registerLoadBalancerState("state", state);
    return balancer;
}
Also used :
ServicePropertiesJsonSerializer (com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer)
HashMap (java.util.HashMap)
DegraderLoadBalancerStrategyFactoryV3 (com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyFactoryV3)
RestRequestBuilder (com.linkedin.r2.message.rest.RestRequestBuilder)
UriPropertiesMerger (com.linkedin.d2.balancer.properties.UriPropertiesMerger)
ZooKeeperTogglingStore (com.linkedin.d2.discovery.stores.zk.ZooKeeperTogglingStore)
UriPropertiesJsonSerializer (com.linkedin.d2.balancer.properties.UriPropertiesJsonSerializer)
JmxManager (com.linkedin.d2.jmx.JmxManager)
ClusterPropertiesJsonSerializer (com.linkedin.d2.balancer.properties.ClusterPropertiesJsonSerializer)
UriProperties (com.linkedin.d2.balancer.properties.UriProperties)
TransportClientFactory (com.linkedin.r2.transport.common.TransportClientFactory)
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService)
SimpleLoadBalancerState (com.linkedin.d2.balancer.simple.SimpleLoadBalancerState)
SimpleLoadBalancer (com.linkedin.d2.balancer.simple.SimpleLoadBalancer)
ZKConnection (com.linkedin.d2.discovery.stores.zk.ZKConnection)
LoadBalancerStrategyFactory (com.linkedin.d2.balancer.strategies.LoadBalancerStrategyFactory)
NamedThreadFactory (com.linkedin.r2.util.NamedThreadFactory)
LoadBalancerStrategy (com.linkedin.d2.balancer.strategies.LoadBalancerStrategy)
FileStore (com.linkedin.d2.discovery.stores.file.FileStore)
ServiceProperties (com.linkedin.d2.balancer.properties.ServiceProperties)
PropertyEventBusImpl (com.linkedin.d2.discovery.event.PropertyEventBusImpl)
ClusterProperties (com.linkedin.d2.balancer.properties.ClusterProperties)
File (java.io.File)
PropertyEventThread (com.linkedin.d2.discovery.event.PropertyEventThread)
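
For context, a short usage sketch of the balancer returned by getLoadBalancer. The ZooKeeper address "localhost:2181" and the service name "echo" are placeholders, and the sketch assumes such a service has already been announced under _basePath; the getClient call mirrors the one in Example 4.

import com.linkedin.d2.balancer.ServiceUnavailableException;
import com.linkedin.d2.balancer.simple.SimpleLoadBalancer;
import com.linkedin.d2.balancer.util.URIRequest;
import com.linkedin.r2.message.RequestContext;
import com.linkedin.r2.transport.common.bridge.client.TransportClient;

// Hypothetical caller; assumes LoadBalancerEchoClient is on the classpath
// (its package is not shown on this page, so that import is omitted).
public class EchoClientUsage {
    public static void main(String[] args) throws Exception {
        SimpleLoadBalancer balancer = LoadBalancerEchoClient.getLoadBalancer("localhost:2181");
        try {
            // resolve a transport client for a d2 URI, as in Example 4's simulation
            TransportClient client =
                balancer.getClient(new URIRequest("d2://echo"), new RequestContext());
            // ... issue requests through the client ...
        } catch (ServiceUnavailableException e) {
            // thrown when no URI matching the cluster's prioritized schemes is registered
        }
    }
}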

Example 4 with PropertyEventThread

Use of com.linkedin.d2.discovery.event.PropertyEventThread in the rest.li project by LinkedIn.

From the class SimpleLoadBalancerSimulation, method verifyState:

/**
 * Compare the simulator's view of reality with the load balancer's. This method should
 * be called after every step is performed and all threads have finished.
 */
public void verifyState() {
    // verify that we consumed all messages before we do anything
    for (int i = 0; i < _queues.length; ++i) {
        if (_queues[i].size() > 0) {
            fail("there were messages left in the queue. all messages should have been consumed during this simulation step.");
        }
    }
    // verify that all clients have been shut down
    for (Map.Entry<String, TransportClientFactory> e : _clientFactories.entrySet()) {
        DoNothingClientFactory factory = (DoNothingClientFactory) e.getValue();
        if (factory.getRunningClientCount() != 0) {
            fail("Not all clients were shut down from factory " + e.getKey());
        }
    }
    try {
        final CountDownLatch latch = new CountDownLatch(1);
        PropertyEventShutdownCallback callback = new PropertyEventShutdownCallback() {

            @Override
            public void done() {
                latch.countDown();
            }
        };
        _state.shutdown(callback);
        if (!latch.await(60, TimeUnit.SECONDS)) {
            fail("unable to shutdown state");
        }
    } catch (InterruptedException e) {
        fail("unable to shutdown state in verifyState.");
    }
    // New load balancer with no timeout; the code below checks for services that don't
    // exist, and a load balancer with a non-zero timeout would just time out waiting for
    // them to be registered, which will never happen because the PropertyEventThread is
    // shut down.
    _loadBalancer = new SimpleLoadBalancer(_state, 0, TimeUnit.SECONDS, _executorService);
    // verify services are as we expect
    for (String possibleService : _possibleServices) {
        // if the service isn't expected here, or the state isn't listening to it,
        // the load balancer shouldn't know about it
        if (!_expectedServiceProperties.containsKey(possibleService) || !_state.isListeningToService(possibleService)) {
            LoadBalancerStateItem<ServiceProperties> serviceItem = _state.getServiceProperties(possibleService);
            assertTrue(serviceItem == null || serviceItem.getProperty() == null);
        } else {
            ServiceProperties serviceProperties = _expectedServiceProperties.get(possibleService);
            ClusterProperties clusterProperties = _expectedClusterProperties.get(serviceProperties.getClusterName());
            UriProperties uriProperties = _expectedUriProperties.get(serviceProperties.getClusterName());
            assertEquals(_state.getServiceProperties(possibleService).getProperty(), serviceProperties);
            // verify round robin'ing of the hosts for this service
            for (int i = 0; i < 100; ++i) {
                try {
                    // this call will queue up messages if we're not listening to the service, but
                    // it's ok, because all of the messengers have been stopped.
                    final TransportClient client = _loadBalancer.getClient(new URIRequest("d2://" + possibleService + random(_possiblePaths)), new RequestContext());
                    // if we didn't receive service unavailable, we should
                    // get a client back
                    assertNotNull(client, "Not found client for: d2://" + possibleService + random(_possiblePaths));
                } catch (ServiceUnavailableException e) {
                    if (uriProperties != null && clusterProperties != null) {
                        // only way to get here is if the prioritized
                        // schemes could find no available uris in the
                        // cluster. let's see if we can find a URI that
                        // matches a prioritized scheme in the cluster.
                        Set<String> schemes = new HashSet<>();
                        for (URI uri : uriProperties.Uris()) {
                            schemes.add(uri.getScheme());
                        }
                        for (String scheme : clusterProperties.getPrioritizedSchemes()) {
                            // if a prioritized scheme has both a URI in the cluster and a
                            // registered client factory, we should have gotten a client from
                            // the code.
                            if (schemes.contains(scheme) && _clientFactories.containsKey(scheme)) {
                                break;
                            }
                            assertFalse(schemes.contains(scheme) && _clientFactories.containsKey(scheme), "why couldn't a client be found for schemes " + clusterProperties.getPrioritizedSchemes() + " with URIs: " + uriProperties.Uris());
                        }
                    }
                }
            }
        }
    }
    // verify clusters are as we expect
    for (String possibleCluster : _possibleClusters) {
        LoadBalancerStateItem<ClusterProperties> clusterItem = _state.getClusterProperties(possibleCluster);
        if (!_expectedClusterProperties.containsKey(possibleCluster) || !_state.isListeningToCluster(possibleCluster)) {
            assertTrue(clusterItem == null || clusterItem.getProperty() == null, "cluster item for " + possibleCluster + " is not null: " + clusterItem);
        } else {
            assertNotNull(clusterItem, "Item for cluster " + possibleCluster + " should not be null, listening: " + _state.isListeningToCluster(possibleCluster) + ", keys: " + _expectedClusterProperties.keySet());
            assertEquals(clusterItem.getProperty(), _expectedClusterProperties.get(possibleCluster));
        }
    }
    // verify uris are as we expect
    for (String possibleCluster : _possibleClusters) {
        LoadBalancerStateItem<UriProperties> uriItem = _state.getUriProperties(possibleCluster);
        if (!_expectedUriProperties.containsKey(possibleCluster) || !_state.isListeningToCluster(possibleCluster)) {
            assertTrue(uriItem == null || uriItem.getProperty() == null);
        } else {
            assertNotNull(uriItem);
            assertEquals(uriItem.getProperty(), _expectedUriProperties.get(possibleCluster));
        }
    }
}
Also used :
PropertyEventShutdownCallback (com.linkedin.d2.discovery.event.PropertyEventThread.PropertyEventShutdownCallback)
TransportClient (com.linkedin.r2.transport.common.bridge.client.TransportClient)
Set (java.util.Set)
HashSet (java.util.HashSet)
SimpleLoadBalancer (com.linkedin.d2.balancer.simple.SimpleLoadBalancer)
URIRequest (com.linkedin.d2.balancer.util.URIRequest)
ServiceUnavailableException (com.linkedin.d2.balancer.ServiceUnavailableException)
CountDownLatch (java.util.concurrent.CountDownLatch)
URI (java.net.URI)
DoNothingClientFactory (com.linkedin.d2.balancer.simple.SimpleLoadBalancerTest.DoNothingClientFactory)
ServiceProperties (com.linkedin.d2.balancer.properties.ServiceProperties)
UriProperties (com.linkedin.d2.balancer.properties.UriProperties)
ClusterProperties (com.linkedin.d2.balancer.properties.ClusterProperties)
RequestContext (com.linkedin.r2.message.RequestContext)
Map (java.util.Map)
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)
HashMap (java.util.HashMap)
TransportClientFactory (com.linkedin.r2.transport.common.TransportClientFactory)
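
Since the anonymous PropertyEventShutdownCallback above only implements done(), the latch-based shutdown can also be written with a lambda or method reference, assuming the callback is a functional interface (which its single-method anonymous usage suggests). A minimal sketch, written as a helper inside the same test class so the existing imports and fail() apply, and assuming _state is a SimpleLoadBalancerState as in Example 3:

// Minimal variant of the shutdown idiom in verifyState(); assumes
// PropertyEventShutdownCallback has the single abstract method done().
private void shutdownAndAwait(SimpleLoadBalancerState state) throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    // done() and CountDownLatch.countDown() are both no-arg void methods,
    // so a method reference satisfies the callback
    state.shutdown(latch::countDown);
    if (!latch.await(60, TimeUnit.SECONDS)) {
        fail("unable to shutdown state");
    }
}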

Aggregations

PropertyEventThread (com.linkedin.d2.discovery.event.PropertyEventThread): 3
ClusterProperties (com.linkedin.d2.balancer.properties.ClusterProperties): 2
ServiceProperties (com.linkedin.d2.balancer.properties.ServiceProperties): 2
UriProperties (com.linkedin.d2.balancer.properties.UriProperties): 2
SimpleLoadBalancer (com.linkedin.d2.balancer.simple.SimpleLoadBalancer): 2
TransportClientFactory (com.linkedin.r2.transport.common.TransportClientFactory): 2
HashMap (java.util.HashMap): 2
Test (org.testng.annotations.Test): 2
ServiceUnavailableException (com.linkedin.d2.balancer.ServiceUnavailableException): 1
ClusterPropertiesJsonSerializer (com.linkedin.d2.balancer.properties.ClusterPropertiesJsonSerializer): 1
ServicePropertiesJsonSerializer (com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer): 1
UriPropertiesJsonSerializer (com.linkedin.d2.balancer.properties.UriPropertiesJsonSerializer): 1
UriPropertiesMerger (com.linkedin.d2.balancer.properties.UriPropertiesMerger): 1
SimpleLoadBalancerState (com.linkedin.d2.balancer.simple.SimpleLoadBalancerState): 1
DoNothingClientFactory (com.linkedin.d2.balancer.simple.SimpleLoadBalancerTest.DoNothingClientFactory): 1
LoadBalancerStrategy (com.linkedin.d2.balancer.strategies.LoadBalancerStrategy): 1
LoadBalancerStrategyFactory (com.linkedin.d2.balancer.strategies.LoadBalancerStrategyFactory): 1
DegraderLoadBalancerStrategyFactoryV3 (com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyFactoryV3): 1
URIRequest (com.linkedin.d2.balancer.util.URIRequest): 1
PropertyEventBusImpl (com.linkedin.d2.discovery.event.PropertyEventBusImpl): 1