Use of com.linkedin.d2.discovery.stores.zk.ZKConnection in project rest.li by linkedin.
From the class ZKFSLoadBalancer, the method start:
@Override
public void start(final Callback<None> callback) {
  LOG.info("Starting ZKFSLoadBalancer");
  LOG.info("ZK connect string: {}", _connectString);
  LOG.info("ZK session timeout: {}ms", _sessionTimeout);
  LOG.info("ZK initial connect timeout: {}ms", _initialZKTimeout);
  if (_connectString == null || _connectString.isEmpty()) {
    callback.onError(new IllegalArgumentException("ZooKeeper connection string is null or empty"));
    return;
  }
  if (_zkFlagFile == null) {
    LOG.info("ZK flag file not specified");
  } else {
    LOG.info("ZK flag file: {}", _zkFlagFile.getAbsolutePath());
    LOG.info("ZK currently suppressed by flag file: {}", suppressZK());
  }
  _zkConnection = new ZKConnection(_connectString, _sessionTimeout, _shutdownAsynchronously, _isSymlinkAware);
  final TogglingLoadBalancer balancer = _loadBalancerFactory.createLoadBalancer(_zkConnection, _executor);
  // If no LoadBalancer is set yet, serve from the new one immediately; in all other
  // cases, we service requests from the old LoadBalancer until the new one is started.
  if (_currentLoadBalancer == null) {
    _currentLoadBalancer = balancer;
  }
  Callback<None> wrapped = new Callback<None>() {
    @Override
    public void onSuccess(None none) {
      _currentLoadBalancer = balancer;
      callback.onSuccess(none);
    }

    @Override
    public void onError(Throwable e) {
      callback.onError(e);
    }
  };
  if (!_startupCallback.compareAndSet(null, wrapped)) {
    throw new IllegalStateException("Startup already in progress");
  }
  _executor.execute(new PropertyEventThread.PropertyEvent("startup") {
    @Override
    public void innerRun() {
      _zkConnection.addStateListener(new ZKListener(balancer));
      try {
        _zkConnection.start();
      } catch (Exception e) {
        LOG.error("Failed to start ZooKeeper (bad configuration?), enabling backup stores", e);
        Callback<None> startupCallback = _startupCallback.getAndSet(null);
        // TODO this should never be null
        balancer.enableBackup(startupCallback);
        return;
      }
      LOG.info("Started ZooKeeper");
      _executor.schedule(new Runnable() {
        @Override
        public void run() {
          Callback<None> startupCallback = _startupCallback.getAndSet(null);
          if (startupCallback != null) {
            // No one has enabled the stores yet either way
            LOG.error("No response from ZooKeeper within {}ms, enabling backup stores", _initialZKTimeout);
            balancer.enableBackup(startupCallback);
          }
        }
      }, _initialZKTimeout, TimeUnit.MILLISECONDS);
    }
  });
}
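For context, startup here is fully asynchronous: the caller hands in a Callback<None> and learns through it whether the ZooKeeper-backed stores came up or the balancer fell back to the backup stores. Below is a minimal caller sketch using the same FutureCallback bridge that appears in the test further down; the variable name and the 30-second timeout are illustrative, not from the source.

import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.None;
import java.util.concurrent.TimeUnit;

// 'balancer' is assumed to be a fully configured ZKFSLoadBalancer.
FutureCallback<None> startupCallback = new FutureCallback<None>();
balancer.start(startupCallback);
// Blocks until either the ZK stores or the backup stores have been enabled;
// throws if startup failed outright.
startupCallback.get(30, TimeUnit.SECONDS);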
Use of com.linkedin.d2.discovery.stores.zk.ZKConnection in project rest.li by linkedin.
From the class ZKFSTogglingLoadBalancerFactoryImpl, the method createLoadBalancer:
@Override
public TogglingLoadBalancer createLoadBalancer(ZKConnection zkConnection, ScheduledExecutorService executorService) {
  _log.info("Using d2ServicePath: " + _d2ServicePath);
  // ZooKeeper-backed registries for cluster, service, and uri properties
  ZooKeeperPermanentStore<ClusterProperties> zkClusterRegistry = createPermanentStore(zkConnection, ZKFSUtil.clusterPath(_baseZKPath), new ClusterPropertiesJsonSerializer());
  ZooKeeperPermanentStore<ServiceProperties> zkServiceRegistry = createPermanentStore(zkConnection, ZKFSUtil.servicePath(_baseZKPath, _d2ServicePath), new ServicePropertiesJsonSerializer());
  ZooKeeperEphemeralStore<UriProperties> zkUriRegistry = createEphemeralStore(zkConnection, ZKFSUtil.uriPath(_baseZKPath), new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), _useNewEphemeralStoreWatcher);
  // Filesystem stores that hold the local backup copies
  FileStore<ClusterProperties> fsClusterStore = createFileStore("clusters", new ClusterPropertiesJsonSerializer());
  FileStore<ServiceProperties> fsServiceStore = createFileStore(_d2ServicePath, new ServicePropertiesJsonSerializer());
  FileStore<UriProperties> fsUriStore = createFileStore("uris", new UriPropertiesJsonSerializer());
  PropertyEventBus<ClusterProperties> clusterBus = new PropertyEventBusImpl<ClusterProperties>(executorService);
  PropertyEventBus<ServiceProperties> serviceBus = new PropertyEventBusImpl<ServiceProperties>(executorService);
  PropertyEventBus<UriProperties> uriBus = new PropertyEventBusImpl<UriProperties>(executorService);
  // This ensures the filesystem store receives the events from the event bus so that
  // it can keep a local backup.
  clusterBus.register(fsClusterStore);
  serviceBus.register(fsServiceStore);
  uriBus.register(fsUriStore);
  TogglingPublisher<ClusterProperties> clusterToggle = _factory.createClusterToggle(zkClusterRegistry, fsClusterStore, clusterBus);
  TogglingPublisher<ServiceProperties> serviceToggle = _factory.createServiceToggle(zkServiceRegistry, fsServiceStore, serviceBus);
  TogglingPublisher<UriProperties> uriToggle = _factory.createUriToggle(zkUriRegistry, fsUriStore, uriBus);
  SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriBus, clusterBus, serviceBus, _clientFactories, _loadBalancerStrategyFactories, _sslContext, _sslParameters, _isSSLEnabled, _clientServicesConfig);
  SimpleLoadBalancer balancer = new SimpleLoadBalancer(state, _lbTimeout, _lbTimeoutUnit);
  TogglingLoadBalancer togLB = _factory.createBalancer(balancer, state, clusterToggle, serviceToggle, uriToggle);
  togLB.start(new Callback<None>() {
    @Override
    public void onError(Throwable e) {
      _log.warn("Failed to run start on the TogglingLoadBalancer, may not have registered SimpleLoadBalancer and State with JMX.");
    }

    @Override
    public void onSuccess(None result) {
      _log.info("Registered SimpleLoadBalancer and State with JMX.");
    }
  });
  return togLB;
}
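The TogglingPublisher instances returned by the factory are what let the balancer switch every property type between its ZooKeeper registry and its filesystem backup as one unit. A hedged sketch of that switch, assuming TogglingLoadBalancer exposes enablePrimary as the counterpart of the enableBackup call seen in start() above; the condition variable is hypothetical:

import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.None;

// 'togLB' is the TogglingLoadBalancer returned by createLoadBalancer.
FutureCallback<None> toggled = new FutureCallback<None>();
if (zkConnectionHealthy) {
  // serve cluster/service/uri properties from the ZooKeeper registries
  togLB.enablePrimary(toggled);
} else {
  // serve from the FileStore copies that the event bus keeps up to date
  togLB.enableBackup(toggled);
}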
Use of com.linkedin.d2.discovery.stores.zk.ZKConnection in project rest.li by linkedin.
From the class R2D2Server, the method putService:
private void putService(ServiceProperties serviceProperties) throws Exception {
  System.err.println("put: " + serviceProperties);
  ZKConnection client = new ZKConnection(_zookeeperHost + ":" + _zookeeperPort, 30000);
  PropertyStore<ServiceProperties> store = new ZooKeeperPermanentStore<ServiceProperties>(client, new ServicePropertiesJsonSerializer(), _basePath + "/services");
  store.put(serviceProperties.getServiceName(), serviceProperties);
  client.getZooKeeper().close();
}
Use of com.linkedin.d2.discovery.stores.zk.ZKConnection in project rest.li by linkedin.
From the class R2D2Server, the method putCluster:
private void putCluster(ClusterProperties clusterProperties) throws Exception {
  System.err.println("put: " + clusterProperties);
  ZKConnection client = new ZKConnection(_zookeeperHost + ":" + _zookeeperPort, 30000);
  PropertyStore<ClusterProperties> store = new ZooKeeperPermanentStore<ClusterProperties>(client, new ClusterPropertiesJsonSerializer(), _basePath + "/clusters");
  store.put(clusterProperties.getClusterName(), clusterProperties);
  client.getZooKeeper().close();
}
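Both putters follow the same one-shot pattern: open a ZKConnection, write a single property through a ZooKeeperPermanentStore, and close the underlying ZooKeeper handle. A hypothetical driver is shown below, with the caveat that ServiceProperties and ClusterProperties constructor signatures vary across rest.li versions; the names "echo", "cluster-1", and "degraderV3" are illustrative only.

import java.util.Collections;

// Register a cluster, then a service that routes to it (names are made up).
putCluster(new ClusterProperties("cluster-1"));
putService(new ServiceProperties("echo", "cluster-1", "/echo",
    Collections.singletonList("degraderV3")));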
Use of com.linkedin.d2.discovery.stores.zk.ZKConnection in project rest.li by linkedin.
From the class ZooKeeperServerTest, the method testZkServer:
@Test(groups = { "small", "back-end" })
public void testZkServer() throws InterruptedException, URISyntaxException, IOException, PropertyStoreException, ExecutionException {
  URI uri1 = URI.create("http://cluster-1/test");
  URI uri2 = URI.create("http://cluster-1-again/test");
  ZKConnection zkClient = new ZKConnection("localhost:" + PORT, 5000);
  zkClient.start();
  ZooKeeperEphemeralStore<UriProperties> store = new ZooKeeperEphemeralStore<UriProperties>(zkClient, new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), "/echo/lb/uris");
  FutureCallback<None> callback = new FutureCallback<None>();
  store.start(callback);
  callback.get();
  ZooKeeperServer server = new ZooKeeperServer(store);
  final String cluster = "cluster-1";
  assertNull(store.get(cluster));
  assertNull(store.get("cluster-2"));
  // bring up uri1
  markUp(server, cluster, uri1, 0.5d);
  UriProperties properties = store.get(cluster);
  assertNotNull(properties);
  assertEquals(properties.getPartitionDataMap(uri1).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 0.5d);
  assertEquals(properties.Uris().size(), 1);
  // marking up a uri that is already up should update its weight
  markUp(server, cluster, uri1, 2d);
  properties = store.get(cluster);
  assertNotNull(properties);
  assertEquals(properties.getPartitionDataMap(uri1).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 2d);
  assertEquals(properties.Uris().size(), 1);
  // bring up uri2
  markUp(server, cluster, uri2, 1.5d);
  properties = store.get(cluster);
  assertEquals(properties.getPartitionDataMap(uri1).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 2d);
  assertEquals(properties.getPartitionDataMap(uri2).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 1.5d);
  assertEquals(properties.Uris().size(), 2);
  // bring down uri1
  markDown(server, cluster, uri1);
  properties = store.get(cluster);
  assertNotNull(properties);
  assertEquals(properties.getPartitionDataMap(uri2).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 1.5d);
  assertEquals(properties.Uris().size(), 1);
  // marking down a uri that is already down should be a no-op
  markDown(server, cluster, uri1);
  properties = store.get(cluster);
  assertNotNull(properties);
  assertEquals(properties.getPartitionDataMap(uri2).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), 1.5d);
  assertEquals(properties.Uris().size(), 1);
  // bring down uri2
  markDown(server, cluster, uri2);
  properties = store.get(cluster);
  assertNotNull(properties);
  assertEquals(properties.Uris().size(), 0);
  // marking down a uri in a nonexistent cluster should leave that cluster absent
  markDown(server, "BAD CLUSTER", uri1);
  properties = store.get("BAD CLUSTER");
  assertNull(properties);
  // bring up uri1 again, this time with explicit partition weights
  Map<Integer, PartitionData> partitionWeight = new HashMap<Integer, PartitionData>();
  partitionWeight.put(5, new PartitionData(0.3d));
  partitionWeight.put(15, new PartitionData(0.7d));
  markUp(server, cluster, uri1, partitionWeight, null);
  properties = store.get(cluster);
  assertNotNull(properties);
  assertEquals(properties.getPartitionDataMap(uri1), partitionWeight);
  Map<String, Object> uri2SpecificProperties = new HashMap<String, Object>();
  uri2SpecificProperties.put("foo", "fooValue");
  uri2SpecificProperties.put("bar", 1);
  partitionWeight.put(10, new PartitionData(1d));
  // bring up uri2 with uri-specific properties
  markUp(server, cluster, uri2, partitionWeight, uri2SpecificProperties);
  properties = store.get(cluster);
  assertNotNull(properties);
  assertEquals(properties.Uris().size(), 2);
  assertEquals(properties.getPartitionDataMap(uri2), partitionWeight);
  assertNotNull(properties.getUriSpecificProperties());
  assertEquals(properties.getUriSpecificProperties().size(), 1);
  assertEquals(properties.getUriSpecificProperties().get(uri2), uri2SpecificProperties);
  // bring down uri1 and bring it back up again with properties
  markDown(server, cluster, uri1);
  Map<String, Object> uri1SpecificProperties = new HashMap<String, Object>();
  uri1SpecificProperties.put("baz", "bazValue");
  // use new partition data so that we can test the mapping later on
  Map<Integer, PartitionData> newUri1PartitionWeights = new HashMap<Integer, PartitionData>(partitionWeight);
  newUri1PartitionWeights.remove(10);
  markUp(server, cluster, uri1, newUri1PartitionWeights, uri1SpecificProperties);
  properties = store.get(cluster);
  assertNotNull(properties);
  assertEquals(properties.Uris().size(), 2);
  assertEquals(properties.getPartitionDataMap(uri1), newUri1PartitionWeights);
  assertEquals(properties.getPartitionDataMap(uri2), partitionWeight);
  assertNotNull(properties.getUriSpecificProperties());
  assertEquals(properties.getUriSpecificProperties().size(), 2);
  assertEquals(properties.getUriSpecificProperties().get(uri1), uri1SpecificProperties);
  assertEquals(properties.getUriSpecificProperties().get(uri2), uri2SpecificProperties);
  // both uris serve partition 5; after removing uri1, only uri2 serves partition 10
  Set<URI> uriSet = new HashSet<URI>();
  uriSet.add(uri1);
  uriSet.add(uri2);
  assertEquals(properties.getUriBySchemeAndPartition("http", 5), uriSet);
  uriSet.remove(uri1);
  assertEquals(properties.getUriBySchemeAndPartition("http", 10), uriSet);
}
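The markUp and markDown helpers the test relies on are not shown in this snippet. A plausible reconstruction follows, assuming ZooKeeperServer's asynchronous markUp/markDown overloads and blocking on a FutureCallback so each subsequent store.get sees the completed write:

private void markUp(ZooKeeperServer server, String cluster, URI uri, double weight)
    throws InterruptedException, ExecutionException {
  // Single default partition carrying the given weight.
  Map<Integer, PartitionData> partitionWeight = new HashMap<Integer, PartitionData>();
  partitionWeight.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(weight));
  markUp(server, cluster, uri, partitionWeight, null);
}

private void markUp(ZooKeeperServer server, String cluster, URI uri,
    Map<Integer, PartitionData> partitionDataMap, Map<String, Object> uriSpecificProperties)
    throws InterruptedException, ExecutionException {
  FutureCallback<None> callback = new FutureCallback<None>();
  if (uriSpecificProperties == null) {
    server.markUp(cluster, uri, partitionDataMap, callback);
  } else {
    server.markUp(cluster, uri, partitionDataMap, uriSpecificProperties, callback);
  }
  // Wait for the ephemeral node write to complete before asserting.
  callback.get();
}

private void markDown(ZooKeeperServer server, String cluster, URI uri)
    throws InterruptedException, ExecutionException {
  FutureCallback<None> callback = new FutureCallback<None>();
  server.markDown(cluster, uri, callback);
  callback.get();
}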