
Example 6 with ZooKeeperEphemeralStore

Use of com.linkedin.d2.discovery.stores.zk.ZooKeeperEphemeralStore in project rest.li by LinkedIn.

The class ZKFSTogglingLoadBalancerFactoryImpl, method createLoadBalancer.

@Override
public TogglingLoadBalancer createLoadBalancer(ZKConnection zkConnection, ScheduledExecutorService executorService) {
    _log.info("Using d2ServicePath: " + _d2ServicePath);
    ZooKeeperPermanentStore<ClusterProperties> zkClusterRegistry = createPermanentStore(zkConnection, ZKFSUtil.clusterPath(_baseZKPath), new ClusterPropertiesJsonSerializer());
    ZooKeeperPermanentStore<ServiceProperties> zkServiceRegistry = createPermanentStore(zkConnection, ZKFSUtil.servicePath(_baseZKPath, _d2ServicePath), new ServicePropertiesJsonSerializer());
    ZooKeeperEphemeralStore<UriProperties> zkUriRegistry = createEphemeralStore(zkConnection, ZKFSUtil.uriPath(_baseZKPath), new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), _useNewEphemeralStoreWatcher);
    FileStore<ClusterProperties> fsClusterStore = createFileStore("clusters", new ClusterPropertiesJsonSerializer());
    FileStore<ServiceProperties> fsServiceStore = createFileStore(_d2ServicePath, new ServicePropertiesJsonSerializer());
    FileStore<UriProperties> fsUriStore = createFileStore("uris", new UriPropertiesJsonSerializer());
    PropertyEventBus<ClusterProperties> clusterBus = new PropertyEventBusImpl<ClusterProperties>(executorService);
    PropertyEventBus<ServiceProperties> serviceBus = new PropertyEventBusImpl<ServiceProperties>(executorService);
    PropertyEventBus<UriProperties> uriBus = new PropertyEventBusImpl<UriProperties>(executorService);
    // This ensures the filesystem store receives the events from the event bus so that
    // it can keep a local backup.
    clusterBus.register(fsClusterStore);
    serviceBus.register(fsServiceStore);
    uriBus.register(fsUriStore);
    TogglingPublisher<ClusterProperties> clusterToggle = _factory.createClusterToggle(zkClusterRegistry, fsClusterStore, clusterBus);
    TogglingPublisher<ServiceProperties> serviceToggle = _factory.createServiceToggle(zkServiceRegistry, fsServiceStore, serviceBus);
    TogglingPublisher<UriProperties> uriToggle = _factory.createUriToggle(zkUriRegistry, fsUriStore, uriBus);
    SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriBus, clusterBus, serviceBus, _clientFactories, _loadBalancerStrategyFactories, _sslContext, _sslParameters, _isSSLEnabled, _clientServicesConfig);
    SimpleLoadBalancer balancer = new SimpleLoadBalancer(state, _lbTimeout, _lbTimeoutUnit);
    TogglingLoadBalancer togLB = _factory.createBalancer(balancer, state, clusterToggle, serviceToggle, uriToggle);
    togLB.start(new Callback<None>() {

        @Override
        public void onError(Throwable e) {
            _log.warn("Failed to run start on the TogglingLoadBalancer, may not have registered " + "SimpleLoadBalancer and State with JMX.");
        }

        @Override
        public void onSuccess(None result) {
            _log.info("Registered SimpleLoadBalancer and State with JMX.");
        }
    });
    return togLB;
}
Also used : ServicePropertiesJsonSerializer(com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer) UriPropertiesMerger(com.linkedin.d2.balancer.properties.UriPropertiesMerger) TogglingLoadBalancer(com.linkedin.d2.balancer.util.TogglingLoadBalancer) UriPropertiesJsonSerializer(com.linkedin.d2.balancer.properties.UriPropertiesJsonSerializer) ClusterPropertiesJsonSerializer(com.linkedin.d2.balancer.properties.ClusterPropertiesJsonSerializer) UriProperties(com.linkedin.d2.balancer.properties.UriProperties) SimpleLoadBalancerState(com.linkedin.d2.balancer.simple.SimpleLoadBalancerState) SimpleLoadBalancer(com.linkedin.d2.balancer.simple.SimpleLoadBalancer) ServiceProperties(com.linkedin.d2.balancer.properties.ServiceProperties) PropertyEventBusImpl(com.linkedin.d2.discovery.event.PropertyEventBusImpl) ClusterProperties(com.linkedin.d2.balancer.properties.ClusterProperties) None(com.linkedin.common.util.None)
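
For context, here is a minimal usage sketch of this factory method. It is not code from the rest.li sources: the `factory` instance, the "localhost:2181" connect string, the 5000 ms timeout, and the single-threaded executor are all placeholder assumptions; only the createLoadBalancer signature comes from the example above.

// Hypothetical usage sketch; `factory`, the connect string, timeout, and executor choice are assumptions.
ZKConnection zkConnection = new ZKConnection("localhost:2181", 5000);
zkConnection.start();
ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
// createLoadBalancer wires the ZooKeeper and file-system stores to the property event buses
// and returns a TogglingLoadBalancer on which start() has already been invoked (see the method above).
TogglingLoadBalancer loadBalancer = factory.createLoadBalancer(zkConnection, executor);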

Example 7 with ZooKeeperEphemeralStore

Use of com.linkedin.d2.discovery.stores.zk.ZooKeeperEphemeralStore in project rest.li by LinkedIn.

The class ZookeeperConnectionManagerTest, method testMarkUpDuringDisconnection.

@Test
public void testMarkUpDuringDisconnection() throws ExecutionException, InterruptedException, IOException, PropertyStoreException {
    final String uri = "http://cluster-3/test";
    final String cluster = "cluster-3";
    final double weight = 0.5d;
    ZooKeeperAnnouncer announcer = new ZooKeeperAnnouncer(new ZooKeeperServer());
    announcer.setCluster(cluster);
    announcer.setUri(uri);
    Map<Integer, PartitionData> partitionWeight = new HashMap<Integer, PartitionData>();
    partitionWeight.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(weight));
    announcer.setPartitionData(partitionWeight);
    ZooKeeperConnectionManager manager = createManager(announcer);
    // Shut down ZooKeeper before starting the manager so the markUp is attempted
    // while disconnected; the announcement should complete once ZooKeeper restarts.
    _zkServer.shutdown(false);
    FutureCallback<None> managerStartCallback = new FutureCallback<None>();
    manager.start(managerStartCallback);
    _zkServer.restart();
    managerStartCallback.get();
    ZooKeeperEphemeralStore<UriProperties> store = createAndStartUriStore();
    UriProperties properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.getPartitionDataMap(URI.create(uri)).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), weight);
    assertEquals(properties.Uris().size(), 1);
}
Also used : HashMap(java.util.HashMap) PartitionData(com.linkedin.d2.balancer.properties.PartitionData) UriProperties(com.linkedin.d2.balancer.properties.UriProperties) None(com.linkedin.common.util.None) FutureCallback(com.linkedin.common.callback.FutureCallback) Test(org.testng.annotations.Test)
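
The createAndStartUriStore() helper called by these tests is not shown on this page. Below is a minimal sketch of what it plausibly does, modeled on the store construction in Example 10 further down; the PORT constant and the "/d2" base path are assumptions, not values from this page.

// Hypothetical helper sketch; PORT and the base path are assumptions.
private ZooKeeperEphemeralStore<UriProperties> createAndStartUriStore()
        throws IOException, InterruptedException, ExecutionException {
    ZKConnection zkClient = new ZKConnection("localhost:" + PORT, 5000);
    zkClient.start();
    ZooKeeperEphemeralStore<UriProperties> store =
        new ZooKeeperEphemeralStore<UriProperties>(zkClient, new UriPropertiesJsonSerializer(),
            new UriPropertiesMerger(), ZKFSUtil.uriPath("/d2"));
    // Block until the store has connected and loaded its initial state.
    FutureCallback<None> callback = new FutureCallback<None>();
    store.start(callback);
    callback.get();
    return store;
}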

Example 8 with ZooKeeperEphemeralStore

Use of com.linkedin.d2.discovery.stores.zk.ZooKeeperEphemeralStore in project rest.li by LinkedIn.

The class ZookeeperConnectionManagerTest, method testMarkDownDuringDisconnection.

@Test
public void testMarkDownDuringDisconnection() throws IOException, ExecutionException, InterruptedException, PropertyStoreException {
    final String uri = "http://cluster-4/test";
    final String cluster = "cluster-4";
    final double weight = 0.5d;
    ZooKeeperAnnouncer announcer = new ZooKeeperAnnouncer(new ZooKeeperServer());
    announcer.setCluster(cluster);
    announcer.setUri(uri);
    Map<Integer, PartitionData> partitionWeight = new HashMap<Integer, PartitionData>();
    partitionWeight.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(weight));
    announcer.setPartitionData(partitionWeight);
    ZooKeeperConnectionManager manager = createManager(announcer);
    FutureCallback<None> managerStartCallback = new FutureCallback<None>();
    manager.start(managerStartCallback);
    managerStartCallback.get();
    ZooKeeperEphemeralStore<UriProperties> store = createAndStartUriStore();
    UriProperties properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.getPartitionDataMap(URI.create(uri)).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), weight);
    assertEquals(properties.Uris().size(), 1);
    _zkServer.shutdown(false);
    FutureCallback<None> markDownCallback = new FutureCallback<None>();
    announcer.markDown(markDownCallback);
    // Ugly, but we need to wait a while so that the Disconnected event is propagated
    // to the caller before we restart the ZK server.
    Thread.sleep(1000);
    _zkServer.restart();
    markDownCallback.get();
    properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.Uris().size(), 0);
}
Also used : HashMap(java.util.HashMap) PartitionData(com.linkedin.d2.balancer.properties.PartitionData) UriProperties(com.linkedin.d2.balancer.properties.UriProperties) None(com.linkedin.common.util.None) FutureCallback(com.linkedin.common.callback.FutureCallback) Test(org.testng.annotations.Test)
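
The createManager(announcer) helper is also not reproduced on this page. The rough sketch below only illustrates the kind of wiring such a helper performs; the exact ZooKeeperConnectionManager constructor arguments, the ZKUriStoreFactory name, and the PORT and "/d2" values are assumptions, not code from this page.

// Hypothetical helper sketch; the constructor signature and ZKUriStoreFactory are assumptions.
private ZooKeeperConnectionManager createManager(ZooKeeperAnnouncer announcer) {
    return new ZooKeeperConnectionManager("localhost:" + PORT, 5000, "/d2",
            new ZooKeeperConnectionManager.ZKUriStoreFactory(),
            announcer);
}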

Example 9 with ZooKeeperEphemeralStore

Use of com.linkedin.d2.discovery.stores.zk.ZooKeeperEphemeralStore in project rest.li by LinkedIn.

The class ZookeeperConnectionManagerTest, method testMarkUpAndMarkDown.

@Test
public void testMarkUpAndMarkDown() throws IOException, ExecutionException, InterruptedException, PropertyStoreException {
    final String uri = "http://cluster-2/test";
    final String cluster = "cluster-2";
    final double weight = 0.5d;
    ZooKeeperAnnouncer announcer = new ZooKeeperAnnouncer(new ZooKeeperServer());
    announcer.setCluster(cluster);
    announcer.setUri(uri);
    Map<Integer, PartitionData> partitionWeight = new HashMap<Integer, PartitionData>();
    partitionWeight.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(weight));
    announcer.setPartitionData(partitionWeight);
    ZooKeeperConnectionManager manager = createManager(announcer);
    FutureCallback<None> managerStartCallback = new FutureCallback<None>();
    manager.start(managerStartCallback);
    managerStartCallback.get();
    ZooKeeperEphemeralStore<UriProperties> store = createAndStartUriStore();
    UriProperties properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.getPartitionDataMap(URI.create(uri)).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), weight);
    assertEquals(properties.Uris().size(), 1);
    FutureCallback<None> markDownCallback = new FutureCallback<None>();
    announcer.markDown(markDownCallback);
    markDownCallback.get();
    properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.Uris().size(), 0);
}
Also used : HashMap(java.util.HashMap) PartitionData(com.linkedin.d2.balancer.properties.PartitionData) UriProperties(com.linkedin.d2.balancer.properties.UriProperties) None(com.linkedin.common.util.None) FutureCallback(com.linkedin.common.callback.FutureCallback) Test(org.testng.annotations.Test)

Example 10 with ZooKeeperEphemeralStore

Use of com.linkedin.d2.discovery.stores.zk.ZooKeeperEphemeralStore in project rest.li by LinkedIn.

The class ZooKeeperServerTest, method testZkServer.

@Test(groups = { "small", "back-end" })
public void testZkServer() throws InterruptedException, URISyntaxException, IOException, PropertyStoreException, ExecutionException {
    URI uri1 = URI.create("http://cluster-1/test");
    URI uri2 = URI.create("http://cluster-1-again/test");
    ZKConnection zkClient = new ZKConnection("localhost:" + PORT, 5000);
    zkClient.start();
    ZooKeeperEphemeralStore<UriProperties> store = new ZooKeeperEphemeralStore<UriProperties>(zkClient, new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), "/echo/lb/uris");
    FutureCallback<None> callback = new FutureCallback<None>();
    store.start(callback);
    callback.get();
    ZooKeeperServer server = new ZooKeeperServer(store);
    final String cluster = "cluster-1";
    assertNull(store.get(cluster));
    assertNull(store.get("cluster-2"));
    // bring up uri1
    markUp(server, cluster, uri1, 0.5d);
    UriProperties properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.getPartitionDataMap(uri1).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 0.5d);
    assertEquals(properties.Uris().size(), 1);
    // test marking up a URI that is already up
    markUp(server, cluster, uri1, 2d);
    properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.getPartitionDataMap(uri1).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 2d);
    assertEquals(properties.Uris().size(), 1);
    // bring up uri 2
    markUp(server, cluster, uri2, 1.5d);
    properties = store.get(cluster);
    assertEquals(properties.getPartitionDataMap(uri1).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 2d);
    assertEquals(properties.getPartitionDataMap(uri2).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 1.5d);
    assertEquals(properties.Uris().size(), 2);
    // bring down uri 1
    markDown(server, cluster, uri1);
    properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.getPartitionDataMap(uri2).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 1.5d);
    assertEquals(properties.Uris().size(), 1);
    // test marking down a URI that is already down
    markDown(server, cluster, uri1);
    properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.getPartitionDataMap(uri2).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 1.5d);
    assertEquals(properties.Uris().size(), 1);
    // bring down uri 2
    markDown(server, cluster, uri2);
    properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.Uris().size(), 0);
    // test marking down a URI in a cluster that doesn't exist
    markDown(server, "BAD CLUSTER", uri1);
    properties = store.get("BAD CLUSTER");
    assertNull(properties);
    // bring up uri1
    Map<Integer, PartitionData> partitionWeight = new HashMap<Integer, PartitionData>();
    partitionWeight.put(5, new PartitionData(0.3d));
    partitionWeight.put(15, new PartitionData(0.7d));
    markUp(server, cluster, uri1, partitionWeight, null);
    properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.getPartitionDataMap(uri1), partitionWeight);
    Map<String, Object> uri2SpecificProperties = new HashMap<String, Object>();
    uri2SpecificProperties.put("foo", "fooValue");
    uri2SpecificProperties.put("bar", 1);
    partitionWeight.put(10, new PartitionData(1d));
    // bring up uri2 with uri specific properties
    markUp(server, cluster, uri2, partitionWeight, uri2SpecificProperties);
    properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.Uris().size(), 2);
    assertEquals(properties.getPartitionDataMap(uri2), partitionWeight);
    assertNotNull(properties.getUriSpecificProperties());
    assertEquals(properties.getUriSpecificProperties().size(), 1);
    assertEquals(properties.getUriSpecificProperties().get(uri2), uri2SpecificProperties);
    // bring down uri1 and bring it back up again with properties
    markDown(server, cluster, uri1);
    Map<String, Object> uri1SpecificProperties = new HashMap<String, Object>();
    uri1SpecificProperties.put("baz", "bazValue");
    // use new partition data so that we can test the mapping later on
    Map<Integer, PartitionData> newUri1PartitionWeights = new HashMap<Integer, PartitionData>(partitionWeight);
    newUri1PartitionWeights.remove(10);
    markUp(server, cluster, uri1, newUri1PartitionWeights, uri1SpecificProperties);
    properties = store.get(cluster);
    assertNotNull(properties);
    assertEquals(properties.Uris().size(), 2);
    assertEquals(properties.getPartitionDataMap(uri1), newUri1PartitionWeights);
    assertEquals(properties.getPartitionDataMap(uri2), partitionWeight);
    assertNotNull(properties.getUriSpecificProperties());
    assertEquals(properties.getUriSpecificProperties().size(), 2);
    assertEquals(properties.getUriSpecificProperties().get(uri1), uri1SpecificProperties);
    assertEquals(properties.getUriSpecificProperties().get(uri2), uri2SpecificProperties);
    Set<URI> uriSet = new HashSet<URI>();
    uriSet.add(uri1);
    uriSet.add(uri2);
    assertEquals(properties.getUriBySchemeAndPartition("http", 5), uriSet);
    uriSet.remove(uri1);
    assertEquals(properties.getUriBySchemeAndPartition("http", 10), uriSet);
}
Also used : ZKConnection(com.linkedin.d2.discovery.stores.zk.ZKConnection) HashMap(java.util.HashMap) UriPropertiesMerger(com.linkedin.d2.balancer.properties.UriPropertiesMerger) ZooKeeperEphemeralStore(com.linkedin.d2.discovery.stores.zk.ZooKeeperEphemeralStore) URI(java.net.URI) UriPropertiesJsonSerializer(com.linkedin.d2.balancer.properties.UriPropertiesJsonSerializer) PartitionData(com.linkedin.d2.balancer.properties.PartitionData) UriProperties(com.linkedin.d2.balancer.properties.UriProperties) None(com.linkedin.common.util.None) FutureCallback(com.linkedin.common.callback.FutureCallback) HashSet(java.util.HashSet) Test(org.testng.annotations.Test)
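
The markUp and markDown helpers used throughout this test are not reproduced on this page. Below is a minimal sketch of what they plausibly wrap, assuming ZooKeeperServer exposes asynchronous markUp/markDown methods that take a callback; the exact overloads are assumptions inferred from how the helpers are called above.

// Hypothetical helper sketches; the ZooKeeperServer method signatures are assumptions.
private void markUp(ZooKeeperServer server, String cluster, URI uri, double weight)
        throws InterruptedException, ExecutionException {
    Map<Integer, PartitionData> partitionWeight = new HashMap<Integer, PartitionData>();
    partitionWeight.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(weight));
    markUp(server, cluster, uri, partitionWeight, null);
}

private void markUp(ZooKeeperServer server, String cluster, URI uri,
        Map<Integer, PartitionData> partitionWeight, Map<String, Object> uriSpecificProperties)
        throws InterruptedException, ExecutionException {
    FutureCallback<None> callback = new FutureCallback<None>();
    if (uriSpecificProperties == null) {
        // Assumed overload without URI-specific properties.
        server.markUp(cluster, uri, partitionWeight, callback);
    } else {
        // Assumed overload that also publishes URI-specific properties.
        server.markUp(cluster, uri, partitionWeight, uriSpecificProperties, callback);
    }
    // Wait for the ephemeral node to be written before the test continues.
    callback.get();
}

private void markDown(ZooKeeperServer server, String cluster, URI uri)
        throws InterruptedException, ExecutionException {
    FutureCallback<None> callback = new FutureCallback<None>();
    server.markDown(cluster, uri, callback);
    callback.get();
}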

Aggregations

UriProperties (com.linkedin.d2.balancer.properties.UriProperties) 17
None (com.linkedin.common.util.None) 14
FutureCallback (com.linkedin.common.callback.FutureCallback) 13
HashMap (java.util.HashMap) 13
UriPropertiesJsonSerializer (com.linkedin.d2.balancer.properties.UriPropertiesJsonSerializer) 11
UriPropertiesMerger (com.linkedin.d2.balancer.properties.UriPropertiesMerger) 11
ZooKeeperEphemeralStore (com.linkedin.d2.discovery.stores.zk.ZooKeeperEphemeralStore) 10
Test (org.testng.annotations.Test) 10
PartitionData (com.linkedin.d2.balancer.properties.PartitionData) 9
ServiceProperties (com.linkedin.d2.balancer.properties.ServiceProperties) 9
ServicePropertiesJsonSerializer (com.linkedin.d2.balancer.properties.ServicePropertiesJsonSerializer) 9
ClusterProperties (com.linkedin.d2.balancer.properties.ClusterProperties) 7
ClusterPropertiesJsonSerializer (com.linkedin.d2.balancer.properties.ClusterPropertiesJsonSerializer) 7
ZooKeeperPermanentStore (com.linkedin.d2.discovery.stores.zk.ZooKeeperPermanentStore) 6
ZKConnection (com.linkedin.d2.discovery.stores.zk.ZKConnection) 5
PropertyEventBusImpl (com.linkedin.d2.discovery.event.PropertyEventBusImpl) 4
HashSet (java.util.HashSet) 4
SimpleLoadBalancer (com.linkedin.d2.balancer.simple.SimpleLoadBalancer) 3
SimpleLoadBalancerState (com.linkedin.d2.balancer.simple.SimpleLoadBalancerState) 3
PropertyStringSerializer (com.linkedin.d2.discovery.stores.PropertyStringSerializer) 3