Use of com.linkedin.d2.balancer.zkfs.ZKFSLoadBalancer in project rest.li by linkedin.
The class ZKFSTest, method testClusterDirectory.
@Test
public void testClusterDirectory() throws Exception {
  final String TEST_CLUSTER_NAME = "testingService";
  startServer();
  try {
    ZKFSLoadBalancer balancer = getBalancer();
    FutureCallback<None> callback = new FutureCallback<None>();
    balancer.start(callback);
    callback.get(30, TimeUnit.SECONDS);
    Directory dir = balancer.getDirectory();
    // Register a cluster directly in ZooKeeper through a separate connection.
    ZKConnection conn = new ZKConnection("localhost:" + PORT, 30000);
    conn.start();
    ZooKeeperPermanentStore<ClusterProperties> store =
        new ZooKeeperPermanentStore<ClusterProperties>(conn, new ClusterPropertiesJsonSerializer(), ZKFSUtil.clusterPath(BASE_PATH));
    callback = new FutureCallback<None>();
    store.start(callback);
    callback.get(30, TimeUnit.SECONDS);
    ClusterProperties props = new ClusterProperties(TEST_CLUSTER_NAME);
    store.put(TEST_CLUSTER_NAME, props);
    // The balancer's Directory should now report the newly registered cluster.
    FutureCallback<List<String>> clusterCallback = new FutureCallback<List<String>>();
    dir.getClusterNames(clusterCallback);
    Assert.assertEquals(clusterCallback.get(30, TimeUnit.SECONDS), Collections.singletonList(TEST_CLUSTER_NAME));
  } finally {
    stopServer();
  }
}
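The start-and-await idiom above (start(callback) followed by callback.get with a timeout) repeats throughout these examples. A minimal helper sketch of that pattern is shown below; the helper name and the 30-second timeout are illustrative, not part of rest.li.

// Sketch only: convenience wrapper around the repeated start/await pattern above.
// startAndAwait and the timeout value are assumptions, not rest.li API.
private static void startAndAwait(ZKFSLoadBalancer balancer) throws Exception {
  FutureCallback<None> callback = new FutureCallback<None>();
  balancer.start(callback);
  // Blocks until the balancer reports that it has started, or throws on timeout.
  callback.get(30, TimeUnit.SECONDS);
}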
Use of com.linkedin.d2.balancer.zkfs.ZKFSLoadBalancer in project rest.li by linkedin.
The class LoadBalancerClientCli, method shutdown.
public void shutdown() throws Exception {
  if (_zkClusterRegistry != null) {
    try {
      shutdownZKRegistry(_zkClusterRegistry);
    } catch (Exception e) {
      _log.error("Failed to shutdown ZooKeeperPermanentStore<ClusterProperties> zkClusterRegistry.");
    }
  }
  if (_zkServiceRegistry != null) {
    try {
      shutdownZKRegistry(_zkServiceRegistry);
    } catch (Exception e) {
      _log.error("Failed to shutdown ZooKeeperPermanentStore<ServiceProperties> zkServiceRegistry.");
    }
  }
  if (_zkUriRegistry != null) {
    try {
      shutdownZKRegistry(_zkUriRegistry);
    } catch (Exception e) {
      _log.error("Failed to shutdown ZooKeeperEphemeralStore<UriProperties> zkUriRegistry.");
    }
  }
  try {
    if (_client != null) {
      LoadBalancerUtil.syncShutdownClient(_client, _log);
    }
  } catch (Exception e) {
    _log.error("Failed to shutdown dynamic client.");
  }
  if (_zkfsLoadBalancer != null) {
    try {
      // The load balancer signals completion through a PropertyEventShutdownCallback;
      // a latch converts that into a bounded, blocking wait.
      final CountDownLatch latch = new CountDownLatch(1);
      _zkfsLoadBalancer.shutdown(new PropertyEventShutdownCallback() {
        @Override
        public void done() {
          latch.countDown();
        }
      });
      if (!latch.await(5, TimeUnit.SECONDS)) {
        _log.error("unable to shut down store");
      }
    } catch (Exception e) {
      _log.error("Failed to shutdown zkfsLoadBalancer.");
    }
  }
  try {
    deleteTempDir();
  } catch (Exception e) {
    _log.error("Failed to delete directory " + _tmpDir);
  }
  try {
    _zkclient.shutdown();
  } catch (Exception e) {
    _log.error("Failed to shutdown zk client.");
  }
}
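The CountDownLatch idiom above turns the asynchronous PropertyEventShutdownCallback into a bounded blocking call. Factored into a standalone helper it might look like the sketch below; the helper name is an assumption for illustration, not part of rest.li.

// Sketch only: blocks until the balancer finishes shutting down or the timeout expires.
// Returns true if shutdown completed within the given timeout.
private static boolean shutdownAndAwait(ZKFSLoadBalancer balancer, long timeout, TimeUnit unit)
    throws InterruptedException {
  final CountDownLatch latch = new CountDownLatch(1);
  balancer.shutdown(new PropertyEventShutdownCallback() {
    @Override
    public void done() {
      latch.countDown();
    }
  });
  return latch.await(timeout, unit);
}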
Use of com.linkedin.d2.balancer.zkfs.ZKFSLoadBalancer in project rest.li by linkedin.
The class ZKFSTest, method getBalancer.
private ZKFSLoadBalancer getBalancer() {
  ZKFSComponentFactory f = new ZKFSComponentFactory();
  Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories =
      new HashMap<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>>();
  loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
  Map<String, TransportClientFactory> clientFactories = new HashMap<String, TransportClientFactory>();
  clientFactories.put("http", new HttpClientFactory());
  // We rely on _tmpdir below being fresh for each test case. Otherwise, leftover files in
  // _tmpdir from a previous test could affect another test. This is accomplished with the
  // @BeforeMethod and @AfterMethod annotations.
  ZKFSTogglingLoadBalancerFactoryImpl f2 = new ZKFSTogglingLoadBalancerFactoryImpl(
      f, 5, TimeUnit.SECONDS, BASE_PATH, _tmpdir.getAbsolutePath(), clientFactories, loadBalancerStrategyFactories);
  ZKFSLoadBalancer balancer = new ZKFSLoadBalancer("localhost:" + PORT, 60000, 5000, f2, null, BASE_PATH);
  return balancer;
}
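A caller would then start the balancer returned by getBalancer() and resolve transport clients for d2 URIs. The sketch below mirrors the calls made in testZKDown further down; the service name "someService" is a placeholder and would have to match a service registered under BASE_PATH.

// Sketch only: typical use of the balancer produced by getBalancer().
ZKFSLoadBalancer balancer = getBalancer();
FutureCallback<None> startCallback = new FutureCallback<None>();
balancer.start(startCallback);
startCallback.get(30, TimeUnit.SECONDS);
// "someService" is a placeholder service name, not one registered by these examples.
URIRequest request = new URIRequest("d2://someService/foo");
TransportClient client = balancer.getClient(request, new RequestContext());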
Use of com.linkedin.d2.balancer.zkfs.ZKFSLoadBalancer in project rest.li by linkedin.
The class ZKFSTest, method testZKDown.
@Test
public void testZKDown() throws Exception {
  final String TEST_SERVICE_NAME = "testingService";
  final String TEST_CLUSTER_NAME = "someCluster";
  startServer();
  try {
    ZKFSLoadBalancer balancer = getBalancer();
    FutureCallback<None> callback = new FutureCallback<None>();
    balancer.start(callback);
    callback.get(30, TimeUnit.SECONDS);
    // Register a service and its cluster directly in ZooKeeper.
    ZKConnection conn = new ZKConnection("localhost:" + PORT, 30000);
    conn.start();
    ZooKeeperPermanentStore<ServiceProperties> store =
        new ZooKeeperPermanentStore<ServiceProperties>(conn, new ServicePropertiesJsonSerializer(), ZKFSUtil.servicePath(BASE_PATH));
    callback = new FutureCallback<None>();
    store.start(callback);
    callback.get(30, TimeUnit.SECONDS);
    ServiceProperties props = new ServiceProperties(TEST_SERVICE_NAME, TEST_CLUSTER_NAME, "/somePath",
        Arrays.asList("degrader"), Collections.<String, Object>emptyMap(), null, null, Arrays.asList("http"), null);
    store.put(TEST_SERVICE_NAME, props);
    ZooKeeperPermanentStore<ClusterProperties> clusterStore =
        new ZooKeeperPermanentStore<ClusterProperties>(conn, new ClusterPropertiesJsonSerializer(), ZKFSUtil.clusterPath(BASE_PATH));
    callback = new FutureCallback<None>();
    clusterStore.start(callback);
    callback.get(30, TimeUnit.SECONDS);
    ClusterProperties clusterProps = new ClusterProperties(TEST_CLUSTER_NAME);
    clusterStore.put(TEST_CLUSTER_NAME, clusterProps);
    // Announce a URI for the cluster so the service has at least one live endpoint.
    ZKConnection serverConn = new ZKConnection("localhost:" + PORT, 30000);
    serverConn.start();
    ZooKeeperEphemeralStore<UriProperties> uriStore =
        new ZooKeeperEphemeralStore<UriProperties>(serverConn, new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), ZKFSUtil.uriPath(BASE_PATH));
    callback = new FutureCallback<None>();
    uriStore.start(callback);
    callback.get(30, TimeUnit.SECONDS);
    ZooKeeperServer server = new ZooKeeperServer(uriStore);
    callback = new FutureCallback<None>();
    Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>();
    partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1.0));
    server.markUp(TEST_CLUSTER_NAME, URI.create("http://test.uri"), partitionDataMap, callback);
    callback.get(30, TimeUnit.SECONDS);
    URIRequest request = new URIRequest("d2://" + TEST_SERVICE_NAME + "/foo");
    TransportClient client = balancer.getClient(request, new RequestContext());
    // Stop the server to cause a disconnect event.
    stopServer();
    // Sleep to ensure the disconnect has propagated; ideally the Toggle should expose
    // some interface to allow detection that the toggle occurred.
    Thread.sleep(1000);
    // Now see whether the balancer still resolves a client while ZooKeeper is down.
    client = balancer.getClient(request, new RequestContext());
  } finally {
    stopServer();
  }
}
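The test's own comment acknowledges that the fixed Thread.sleep is a stopgap. One alternative, sketched below for use inside testZKDown (reusing its balancer and request variables), is to poll getClient until a client can be obtained. The retry count, delay, and the decision to treat any exception as "not toggled yet" are assumptions for illustration, not rest.li APIs or guarantees.

// Sketch only: poll instead of sleeping for a fixed interval after stopServer().
// Retry count, delay, and exception handling are assumptions, not rest.li behavior.
TransportClient resolved = null;
for (int i = 0; i < 10 && resolved == null; i++) {
  try {
    resolved = balancer.getClient(request, new RequestContext());
  } catch (Exception e) {
    // Lookup not yet served after the disconnect; wait briefly and retry.
    Thread.sleep(100);
  }
}
Assert.assertNotNull(resolved);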