Use of com.linkedin.d2.balancer.servers.ZooKeeperServer in the rest.li project by LinkedIn: class ZookeeperConnectionManagerTest, method testMarkUpDuringDisconnection.
@Test
public void testMarkUpDuringDisconnection() throws ExecutionException, InterruptedException, IOException, PropertyStoreException {
  final String announcedUri = "http://cluster-3/test";
  final String clusterName = "cluster-3";
  final double announcedWeight = 0.5d;

  // Configure an announcer for a single entry in the default partition.
  ZooKeeperAnnouncer announcer = new ZooKeeperAnnouncer(new ZooKeeperServer());
  announcer.setCluster(clusterName);
  announcer.setUri(announcedUri);
  Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>();
  partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(announcedWeight));
  announcer.setPartitionData(partitionDataMap);

  ZooKeeperConnectionManager manager = createManager(announcer);

  // Start the manager while ZooKeeper is down; the start (and its mark-up)
  // should complete once the server comes back.
  _zkServer.shutdown(false);
  FutureCallback<None> startCallback = new FutureCallback<None>();
  manager.start(startCallback);
  _zkServer.restart();
  startCallback.get();

  // After reconnection, the URI must be announced with the expected weight.
  ZooKeeperEphemeralStore<UriProperties> store = createAndStartUriStore();
  UriProperties properties = store.get(clusterName);
  assertNotNull(properties);
  assertEquals(properties.getPartitionDataMap(URI.create(announcedUri)).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), announcedWeight);
  assertEquals(properties.Uris().size(), 1);
}
Use of com.linkedin.d2.balancer.servers.ZooKeeperServer in the rest.li project by LinkedIn: class ZookeeperConnectionManagerTest, method testMarkDownDuringDisconnection.
@Test
public void testMarkDownDuringDisconnection() throws IOException, ExecutionException, InterruptedException, PropertyStoreException {
  final String announcedUri = "http://cluster-4/test";
  final String clusterName = "cluster-4";
  final double announcedWeight = 0.5d;

  // Configure an announcer for a single entry in the default partition.
  ZooKeeperAnnouncer announcer = new ZooKeeperAnnouncer(new ZooKeeperServer());
  announcer.setCluster(clusterName);
  announcer.setUri(announcedUri);
  Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>();
  partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(announcedWeight));
  announcer.setPartitionData(partitionDataMap);

  ZooKeeperConnectionManager manager = createManager(announcer);
  FutureCallback<None> startCallback = new FutureCallback<None>();
  manager.start(startCallback);
  startCallback.get();

  // Sanity check: the URI is announced before we attempt the mark-down.
  ZooKeeperEphemeralStore<UriProperties> store = createAndStartUriStore();
  UriProperties properties = store.get(clusterName);
  assertNotNull(properties);
  assertEquals(properties.getPartitionDataMap(URI.create(announcedUri)).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), announcedWeight);
  assertEquals(properties.Uris().size(), 1);

  // Issue the mark-down while ZooKeeper is unreachable.
  _zkServer.shutdown(false);
  FutureCallback<None> markDownCallback = new FutureCallback<None>();
  announcer.markDown(markDownCallback);
  // Ugly, but we need to wait for a while just so that the Disconnected event
  // is propagated to the caller before we restart the zk server.
  Thread.sleep(1000);
  _zkServer.restart();
  markDownCallback.get();

  // Once reconnected, the deferred mark-down should have removed the URI.
  properties = store.get(clusterName);
  assertNotNull(properties);
  assertEquals(properties.Uris().size(), 0);
}
Use of com.linkedin.d2.balancer.servers.ZooKeeperServer in the rest.li project by LinkedIn: class ZookeeperConnectionManagerTest, method testMarkUpAndMarkDown.
@Test
public void testMarkUpAndMarkDown() throws IOException, ExecutionException, InterruptedException, PropertyStoreException {
  final String announcedUri = "http://cluster-2/test";
  final String clusterName = "cluster-2";
  final double announcedWeight = 0.5d;

  // Configure an announcer for a single entry in the default partition.
  ZooKeeperAnnouncer announcer = new ZooKeeperAnnouncer(new ZooKeeperServer());
  announcer.setCluster(clusterName);
  announcer.setUri(announcedUri);
  Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>();
  partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(announcedWeight));
  announcer.setPartitionData(partitionDataMap);

  // Starting the manager performs the initial mark-up.
  ZooKeeperConnectionManager manager = createManager(announcer);
  FutureCallback<None> startCallback = new FutureCallback<None>();
  manager.start(startCallback);
  startCallback.get();

  // The URI should now be visible in the store with the announced weight.
  ZooKeeperEphemeralStore<UriProperties> store = createAndStartUriStore();
  UriProperties properties = store.get(clusterName);
  assertNotNull(properties);
  assertEquals(properties.getPartitionDataMap(URI.create(announcedUri)).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), announcedWeight);
  assertEquals(properties.Uris().size(), 1);

  // Marking down removes the URI from the cluster's properties.
  FutureCallback<None> markDownCallback = new FutureCallback<None>();
  announcer.markDown(markDownCallback);
  markDownCallback.get();
  properties = store.get(clusterName);
  assertNotNull(properties);
  assertEquals(properties.Uris().size(), 0);
}
Use of com.linkedin.d2.balancer.servers.ZooKeeperServer in the rest.li project by LinkedIn: class ZooKeeperServerTest, method testZkServer.
@Test(groups = { "small", "back-end" })
public void testZkServer() throws InterruptedException, URISyntaxException, IOException, PropertyStoreException, ExecutionException {
// End-to-end exercise of ZooKeeperServer mark-up/mark-down against a live ephemeral
// URI store: announce, re-announce, and retire URIs, verifying the merged
// UriProperties after each transition. Statement order is significant throughout.
URI uri1 = URI.create("http://cluster-1/test");
URI uri2 = URI.create("http://cluster-1-again/test");
// Connect to the test ZooKeeper instance (PORT is defined elsewhere in this class;
// 5000 is the session timeout in milliseconds — TODO confirm units against ZKConnection).
ZKConnection zkClient = new ZKConnection("localhost:" + PORT, 5000);
zkClient.start();
ZooKeeperEphemeralStore<UriProperties> store = new ZooKeeperEphemeralStore<UriProperties>(zkClient, new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), "/echo/lb/uris");
FutureCallback<None> callback = new FutureCallback<None>();
store.start(callback);
callback.get();
ZooKeeperServer server = new ZooKeeperServer(store);
final String cluster = "cluster-1";
// Nothing has been announced yet, so neither cluster has any properties.
assertNull(store.get(cluster));
assertNull(store.get("cluster-2"));
// bring up uri1
markUp(server, cluster, uri1, 0.5d);
UriProperties properties = store.get(cluster);
assertNotNull(properties);
assertEquals(properties.getPartitionDataMap(uri1).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 0.5d);
assertEquals(properties.Uris().size(), 1);
// marking up an already-up URI should overwrite its weight, not add a duplicate
markUp(server, cluster, uri1, 2d);
properties = store.get(cluster);
assertNotNull(properties);
assertEquals(properties.getPartitionDataMap(uri1).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 2d);
assertEquals(properties.Uris().size(), 1);
// bring up uri 2; both URIs should now coexist with their own weights
markUp(server, cluster, uri2, 1.5d);
properties = store.get(cluster);
assertEquals(properties.getPartitionDataMap(uri1).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 2d);
assertEquals(properties.getPartitionDataMap(uri2).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 1.5d);
assertEquals(properties.Uris().size(), 2);
// bring down uri 1; only uri2 should remain
markDown(server, cluster, uri1);
properties = store.get(cluster);
assertNotNull(properties);
assertEquals(properties.getPartitionDataMap(uri2).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 1.5d);
assertEquals(properties.Uris().size(), 1);
// marking down an already-down URI must be a harmless no-op
markDown(server, cluster, uri1);
properties = store.get(cluster);
assertNotNull(properties);
assertEquals(properties.getPartitionDataMap(uri2).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 1.5d);
assertEquals(properties.Uris().size(), 1);
// bring down uri 2; the cluster's URI set becomes empty (but properties still exist)
markDown(server, cluster, uri2);
properties = store.get(cluster);
assertNotNull(properties);
assertEquals(properties.Uris().size(), 0);
// marking down in a nonexistent cluster must not create it
markDown(server, "BAD CLUSTER", uri1);
properties = store.get("BAD CLUSTER");
assertNull(properties);
// bring up uri1 again, this time with explicit multi-partition weights
Map<Integer, PartitionData> partitionWeight = new HashMap<Integer, PartitionData>();
partitionWeight.put(5, new PartitionData(0.3d));
partitionWeight.put(15, new PartitionData(0.7d));
markUp(server, cluster, uri1, partitionWeight, null);
properties = store.get(cluster);
assertNotNull(properties);
assertEquals(properties.getPartitionDataMap(uri1), partitionWeight);
Map<String, Object> uri2SpecificProperties = new HashMap<String, Object>();
uri2SpecificProperties.put("foo", "fooValue");
uri2SpecificProperties.put("bar", 1);
// NOTE: this mutates the shared partitionWeight map AFTER uri1's mark-up,
// so uri2 is announced with partitions {5, 10, 15} while uri1 keeps {5, 15}.
partitionWeight.put(10, new PartitionData(1d));
// bring up uri2 with uri-specific properties attached
markUp(server, cluster, uri2, partitionWeight, uri2SpecificProperties);
properties = store.get(cluster);
assertNotNull(properties);
assertEquals(properties.Uris().size(), 2);
assertEquals(properties.getPartitionDataMap(uri2), partitionWeight);
assertNotNull(properties.getUriSpecificProperties());
assertEquals(properties.getUriSpecificProperties().size(), 1);
assertEquals(properties.getUriSpecificProperties().get(uri2), uri2SpecificProperties);
// bring down uri1 and bring it back up again with uri-specific properties
markDown(server, cluster, uri1);
Map<String, Object> uri1SpecificProperties = new HashMap<String, Object>();
uri1SpecificProperties.put("baz", "bazValue");
// use new partition data so that we can test the partition-to-URI mapping later on
Map<Integer, PartitionData> newUri1PartitionWeights = new HashMap<Integer, PartitionData>(partitionWeight);
newUri1PartitionWeights.remove(10);
markUp(server, cluster, uri1, newUri1PartitionWeights, uri1SpecificProperties);
properties = store.get(cluster);
assertNotNull(properties);
assertEquals(properties.Uris().size(), 2);
assertEquals(properties.getPartitionDataMap(uri1), newUri1PartitionWeights);
assertEquals(properties.getPartitionDataMap(uri2), partitionWeight);
assertNotNull(properties.getUriSpecificProperties());
assertEquals(properties.getUriSpecificProperties().size(), 2);
assertEquals(properties.getUriSpecificProperties().get(uri1), uri1SpecificProperties);
assertEquals(properties.getUriSpecificProperties().get(uri2), uri2SpecificProperties);
// partition 5 is served by both URIs; partition 10 only by uri2 (uri1 dropped it above)
Set<URI> uriSet = new HashSet<URI>();
uriSet.add(uri1);
uriSet.add(uri2);
assertEquals(properties.getUriBySchemeAndPartition("http", 5), uriSet);
uriSet.remove(uri1);
assertEquals(properties.getUriBySchemeAndPartition("http", 10), uriSet);
}
Use of com.linkedin.d2.balancer.servers.ZooKeeperServer in the rest.li project by LinkedIn: class ZooKeeperServerTest, helper method markUp.
private void markUp(ZooKeeperServer server, String cluster, URI uri, double weight) {
  // Convenience overload: announce a single weight for the default partition,
  // with no URI-specific properties.
  final Map<Integer, PartitionData> defaultPartitionOnly = new HashMap<Integer, PartitionData>();
  defaultPartitionOnly.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(weight));
  markUp(server, cluster, uri, defaultPartitionOnly, null);
}
Aggregations