Use of com.linkedin.d2.balancer.properties.ClusterProperties in project rest.li by linkedin.
The class SimpleLoadBalancerStateTest, method testUnregisterClusterListener.
@Test
public void testUnregisterClusterListener() {
reset();
MockClusterListener clusterListener = new MockClusterListener();
_state.registerClusterListener(clusterListener);
assertEquals(clusterListener.getClusterAddedCount(CLUSTER1_CLUSTER_NAME), 0, "expected zero count");
// first add a cluster
_state.listenToCluster(CLUSTER1_CLUSTER_NAME, new NullStateListenerCallback());
_clusterRegistry.put(CLUSTER1_CLUSTER_NAME, new ClusterProperties(CLUSTER1_CLUSTER_NAME));
assertEquals(clusterListener.getClusterAddedCount(CLUSTER1_CLUSTER_NAME), 1, "expected 1 call after put");
_state.unregisterClusterListener(clusterListener);
_clusterRegistry.put(CLUSTER1_CLUSTER_NAME, new ClusterProperties(CLUSTER1_CLUSTER_NAME));
assertEquals(clusterListener.getClusterAddedCount(CLUSTER1_CLUSTER_NAME), 1, "expected 1 call, since we shouldn't have seen the latest put");
}
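The test relies on MockClusterListener, a test helper that counts how many add notifications each cluster has received. A minimal sketch of such a counting listener follows; the LoadBalancerClusterListener interface name and its onClusterAdded/onClusterRemoved callbacks are assumptions inferred from how registerClusterListener is used here, so they may not match the actual helper.
// A hypothetical counting listener; the interface and callback names are assumed, not taken from the real helper.
public class CountingClusterListener implements LoadBalancerClusterListener {
    private final Map<String, Integer> _addedCounts = new ConcurrentHashMap<>();
    private final Map<String, Integer> _removedCounts = new ConcurrentHashMap<>();

    @Override
    public void onClusterAdded(String clusterName) {
        // count every add notification per cluster
        _addedCounts.merge(clusterName, 1, Integer::sum);
    }

    @Override
    public void onClusterRemoved(String clusterName) {
        _removedCounts.merge(clusterName, 1, Integer::sum);
    }

    public int getClusterAddedCount(String clusterName) {
        return _addedCounts.getOrDefault(clusterName, 0);
    }

    public int getClusterRemovedCount(String clusterName) {
        return _removedCounts.getOrDefault(clusterName, 0);
    }
}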
Use of com.linkedin.d2.balancer.properties.ClusterProperties in project rest.li by linkedin.
The class LoadBalancerEchoClient, method getLoadBalancer.
public static SimpleLoadBalancer getLoadBalancer(String hostPort) throws IOException, PropertyStoreException {
// zk stores
ZooKeeperPermanentStore<ClusterProperties> zkClusterRegistry = null;
ZooKeeperPermanentStore<ServiceProperties> zkServiceRegistry = null;
ZooKeeperEphemeralStore<UriProperties> zkUriRegistry = null;
ZKConnection zkClient = new ZKConnection(hostPort, 10000);
zkClusterRegistry = new ZooKeeperPermanentStore<>(zkClient, new ClusterPropertiesJsonSerializer(), _basePath + "/clusters");
zkServiceRegistry = new ZooKeeperPermanentStore<>(zkClient, new ServicePropertiesJsonSerializer(), _basePath + "/services");
zkUriRegistry = new ZooKeeperEphemeralStore<>(zkClient, new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), _basePath + "/uris", false, true);
// fs stores
File testDirectory = LoadBalancerUtil.createTempDirectory("lb-degrader-witih-file-store-large");
testDirectory.deleteOnExit();
new File(testDirectory + File.separator + "cluster").mkdir();
new File(testDirectory + File.separator + "service").mkdir();
new File(testDirectory + File.separator + "uri").mkdir();
FileStore<ClusterProperties> fsClusterStore = new FileStore<>(testDirectory + File.separator + "cluster", FileSystemDirectory.FILE_STORE_EXTENSION, new ClusterPropertiesJsonSerializer());
FileStore<ServiceProperties> fsServiceStore = new FileStore<>(testDirectory + File.separator + "service", FileSystemDirectory.FILE_STORE_EXTENSION, new ServicePropertiesJsonSerializer());
FileStore<UriProperties> fsUriStore = new FileStore<>(testDirectory + File.separator + "uri", FileSystemDirectory.FILE_STORE_EXTENSION, new UriPropertiesJsonSerializer());
// chains
PropertyEventThread thread = new PropertyEventThread("echo client event thread");
ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("echo client event thread"));
// start up the world
thread.start();
PropertyEventBus<ServiceProperties> serviceBus = new PropertyEventBusImpl<>(executorService, zkServiceRegistry);
serviceBus.register(fsServiceStore);
new ZooKeeperTogglingStore<>(zkServiceRegistry, fsServiceStore, serviceBus, true);
PropertyEventBus<UriProperties> uriBus = new PropertyEventBusImpl<>(executorService, zkUriRegistry);
uriBus.register(fsUriStore);
new ZooKeeperTogglingStore<>(zkUriRegistry, fsUriStore, uriBus, true);
PropertyEventBus<ClusterProperties> clusterBus = new PropertyEventBusImpl<>(executorService, zkClusterRegistry);
clusterBus.register(fsClusterStore);
new ZooKeeperTogglingStore<>(zkClusterRegistry, fsClusterStore, clusterBus, true);
Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories = new HashMap<>();
// strategy and scheme factories
loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
Map<String, TransportClientFactory> clientFactories = new HashMap<>();
clientFactories.put("http", new HttpClientFactory.Builder().build());
// create the state
SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriBus, clusterBus, serviceBus, clientFactories, loadBalancerStrategyFactories, null, null, false);
SimpleLoadBalancer balancer = new SimpleLoadBalancer(state, 5, TimeUnit.SECONDS, executorService);
new JmxManager().registerLoadBalancer("balancer", balancer).registerLoadBalancerState("state", state);
return balancer;
}
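A rough usage sketch of the returned balancer follows; the ZooKeeper address and the service name are placeholders, and the getClient call mirrors the one used in the simulation's verifyState below.
// Illustrative only: "localhost:2181" and "exampleService" are placeholders.
SimpleLoadBalancer balancer = LoadBalancerEchoClient.getLoadBalancer("localhost:2181");
try {
    // resolve a concrete transport client for a d2 URI
    TransportClient client = balancer.getClient(new URIRequest("d2://exampleService"), new RequestContext());
} catch (ServiceUnavailableException e) {
    // thrown when no URI matching one of the cluster's prioritized schemes is available
}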
Use of com.linkedin.d2.balancer.properties.ClusterProperties in project rest.li by linkedin.
The class SimpleLoadBalancerSimulation, method addCluster.
// cluster simulation
public void addCluster(String clusterName, List<String> prioritizedSchemes, List<URI> uris) {
ClusterProperties clusterProperties = new ClusterProperties(clusterName, prioritizedSchemes);
// weight the uris randomly between 1 and 2
Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<>();
for (URI uri : uris) {
Map<Integer, PartitionData> partitionData = new HashMap<>(1);
partitionData.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d + _random.nextDouble()));
uriData.put(uri, partitionData);
}
UriProperties uriProperties = new UriProperties(clusterName, uriData);
_expectedClusterProperties.put(clusterName, clusterProperties);
_expectedUriProperties.put(clusterName, uriProperties);
_clusterRegistry.put(clusterName, clusterProperties);
_uriRegistry.put(clusterName, uriProperties);
}
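As a minimal sketch, a simulation step could register a two-host cluster as shown below; the cluster name, scheme, and URIs are placeholders, and simulation stands for the SimpleLoadBalancerSimulation instance.
// Placeholder inputs for illustration; the simulation normally generates these values.
List<String> prioritizedSchemes = Collections.singletonList("http");
List<URI> uris = Arrays.asList(
    URI.create("http://host-1.example.com:8080/echo"),
    URI.create("http://host-2.example.com:8080/echo"));
simulation.addCluster("cluster-1", prioritizedSchemes, uris);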
Use of com.linkedin.d2.balancer.properties.ClusterProperties in project rest.li by linkedin.
The class SimpleLoadBalancerSimulation, method verifyState.
/**
* Compare the simulator's view of reality with the load balancer's. This method should
* be called after every step is performed and all threads have finished.
*/
public void verifyState() {
// verify that we consumed all messages before we do anything
for (int i = 0; i < _queues.length; ++i) {
if (_queues[i].size() > 0) {
fail("there were messages left in the queue. all messages should have been consumed during this simulation step.");
}
}
// verify that all clients have been shut down
for (Map.Entry<String, TransportClientFactory> e : _clientFactories.entrySet()) {
DoNothingClientFactory factory = (DoNothingClientFactory) e.getValue();
if (factory.getRunningClientCount() != 0) {
fail("Not all clients were shut down from factory " + e.getKey());
}
}
try {
final CountDownLatch latch = new CountDownLatch(1);
PropertyEventShutdownCallback callback = new PropertyEventShutdownCallback() {
@Override
public void done() {
latch.countDown();
}
};
_state.shutdown(callback);
if (!latch.await(60, TimeUnit.SECONDS)) {
fail("unable to shutdown state");
}
} catch (InterruptedException e) {
fail("unable to shutdown state in verifyState.");
}
// Create a new load balancer with no timeout; the code below checks for services that
// don't exist, and a load balancer with a non-zero timeout would just time out waiting
// for them to be registered, which will never happen because the PropertyEventThread is shut down.
_loadBalancer = new SimpleLoadBalancer(_state, 0, TimeUnit.SECONDS, _executorService);
// verify services are as we expect
for (String possibleService : _possibleServices) {
// if we don't expect this service or we're not listening to it, the state should know nothing about it
if (!_expectedServiceProperties.containsKey(possibleService) || !_state.isListeningToService(possibleService)) {
LoadBalancerStateItem<ServiceProperties> serviceItem = _state.getServiceProperties(possibleService);
assertTrue(serviceItem == null || serviceItem.getProperty() == null);
} else {
ServiceProperties serviceProperties = _expectedServiceProperties.get(possibleService);
ClusterProperties clusterProperties = _expectedClusterProperties.get(serviceProperties.getClusterName());
UriProperties uriProperties = _expectedUriProperties.get(serviceProperties.getClusterName());
assertEquals(_state.getServiceProperties(possibleService).getProperty(), serviceProperties);
// verify round robin'ing of the hosts for this service
for (int i = 0; i < 100; ++i) {
try {
// this call will queue up messages if we're not listening to the service, but
// it's ok, because all of the messengers have been stopped.
final TransportClient client = _loadBalancer.getClient(new URIRequest("d2://" + possibleService + random(_possiblePaths)), new RequestContext());
// if we didn't receive service unavailable, we should
// get a client back
assertNotNull(client, "no client found for: d2://" + possibleService + random(_possiblePaths));
} catch (ServiceUnavailableException e) {
if (uriProperties != null && clusterProperties != null) {
// only way to get here is if the prioritized
// schemes could find no available uris in the
// cluster. let's see if we can find a URI that
// matches a prioritized scheme in the cluster.
Set<String> schemes = new HashSet<>();
for (URI uri : uriProperties.Uris()) {
schemes.add(uri.getScheme());
}
for (String scheme : clusterProperties.getPrioritizedSchemes()) {
// a prioritized scheme with both live URIs and a registered client factory should have yielded a client; anything else is a bug in the code.
if (schemes.contains(scheme) && _clientFactories.containsKey(scheme)) {
break;
}
assertFalse(schemes.contains(scheme) && _clientFactories.containsKey(scheme), "why couldn't a client be found for schemes " + clusterProperties.getPrioritizedSchemes() + " with URIs: " + uriProperties.Uris());
}
}
}
}
}
}
// verify clusters are as we expect
for (String possibleCluster : _possibleClusters) {
LoadBalancerStateItem<ClusterProperties> clusterItem = _state.getClusterProperties(possibleCluster);
if (!_expectedClusterProperties.containsKey(possibleCluster) || !_state.isListeningToCluster(possibleCluster)) {
assertTrue(clusterItem == null || clusterItem.getProperty() == null, "cluster item for " + possibleCluster + " is not null: " + clusterItem);
} else {
assertNotNull(clusterItem, "Item for cluster " + possibleCluster + " should not be null, listening: " + _state.isListeningToCluster(possibleCluster) + ", keys: " + _expectedClusterProperties.keySet());
assertEquals(clusterItem.getProperty(), _expectedClusterProperties.get(possibleCluster));
}
}
// verify uris are as we expect
for (String possibleCluster : _possibleClusters) {
LoadBalancerStateItem<UriProperties> uriItem = _state.getUriProperties(possibleCluster);
if (!_expectedUriProperties.containsKey(possibleCluster) || !_state.isListeningToCluster(possibleCluster)) {
assertTrue(uriItem == null || uriItem.getProperty() == null);
} else {
assertNotNull(uriItem);
assertEquals(uriItem.getProperty(), _expectedUriProperties.get(possibleCluster));
}
}
}
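Since PropertyEventShutdownCallback exposes a single done() method, the shutdown wait above could also be written with a lambda, assuming the interface qualifies as a functional interface in the d2 version in use.
final CountDownLatch latch = new CountDownLatch(1);
_state.shutdown(() -> latch.countDown());
if (!latch.await(60, TimeUnit.SECONDS)) {
    fail("unable to shutdown state");
}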
Use of com.linkedin.d2.balancer.properties.ClusterProperties in project rest.li by linkedin.
The class SimpleLoadBalancerStateTest, method testListenToCluster.
@Test(groups = { "small", "back-end" })
public void testListenToCluster() throws URISyntaxException, InterruptedException {
reset();
List<String> schemes = new ArrayList<>();
schemes.add("http");
assertFalse(_state.isListeningToCluster("cluster-1"));
assertNull(_state.getClusterProperties("cluster-1"));
final CountDownLatch latch = new CountDownLatch(1);
LoadBalancerStateListenerCallback callback = new LoadBalancerStateListenerCallback() {
@Override
public void done(int type, String name) {
latch.countDown();
}
};
_state.listenToCluster("cluster-1", callback);
if (!latch.await(5, TimeUnit.SECONDS)) {
fail("didn't get callback when listenToCluster was called");
}
assertTrue(_state.isListeningToCluster("cluster-1"));
assertNotNull(_state.getClusterProperties("cluster-1"));
assertNull(_state.getClusterProperties("cluster-1").getProperty());
ClusterProperties property = new ClusterProperties("cluster-1", schemes);
_clusterRegistry.put("cluster-1", property);
assertTrue(_state.isListeningToCluster("cluster-1"));
assertNotNull(_state.getClusterProperties("cluster-1"));
assertEquals(_state.getClusterProperties("cluster-1").getProperty(), property);
}
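LoadBalancerStateListenerCallback likewise declares a single done(type, name) method, so the anonymous class above can probably be replaced with a lambda; this assumes the interface has no other abstract methods in the d2 version in use.
final CountDownLatch latch = new CountDownLatch(1);
_state.listenToCluster("cluster-1", (type, name) -> latch.countDown());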