Use of com.linkedin.d2.discovery.event.PropertyEventThread in project rest.li by LinkedIn.
The class PropertyEventThreadTest, method testUnstartedThread.
@Test(groups = { "small", "back-end" })
public void testUnstartedThread() {
  PropertyEventThread thread = new PropertyEventThread("test", 1001);
  assertEquals(thread.getRemainingCapacity(), 1001);
  assertEquals(thread.getQueuedMessageCount(), 0);
  assertFalse(thread.isAlive());
  assertTrue(thread.isDaemon());
  // Disabled, presumably for the same hacked-PropertyEventThread reason noted in testThread below:
  // assertFalse(thread.send(null));
  // assertFalse(thread.send(new PropertyTestEvent("donothing")));
}
Use of com.linkedin.d2.discovery.event.PropertyEventThread in project rest.li by LinkedIn.
The class PropertyEventThreadTest, method testThread.
@Test(groups = { "small", "back-end" })
public void testThread() throws InterruptedException {
  PropertyEventThread thread = new PropertyEventThread("test");
  PropertyTestEvent testEvent = new PropertyTestEvent("counter");
  // Test doesn't make sense with hacked PropertyEventThread
  // assertFalse(thread.send(testEvent));
  thread.start();
  assertTrue(thread.send(testEvent));
  assertTrue(thread.send(testEvent));
  thread.interrupt();
  thread.join(0);
  // Also doesn't make sense with hack
  // assertFalse(thread.send(testEvent));
  assertEquals(testEvent.getCount(), 2);
}
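Both tests rely on a PropertyTestEvent helper that is not shown on this page. A minimal sketch of such a counting event, assuming (as elsewhere in d2) that PropertyEventThread.PropertyEvent takes a description string and exposes an innerRun() hook that the thread invokes when it processes the event:

import java.util.concurrent.atomic.AtomicInteger;
import com.linkedin.d2.discovery.event.PropertyEventThread;

public class PropertyTestEvent extends PropertyEventThread.PropertyEvent {
  private final AtomicInteger _count = new AtomicInteger();

  public PropertyTestEvent(String description) {
    super(description);
  }

  @Override
  public void innerRun() {
    _count.incrementAndGet(); // one increment per processed send(), hence getCount() == 2 above
  }

  public int getCount() {
    return _count.get();
  }
}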
Use of com.linkedin.d2.discovery.event.PropertyEventThread in project rest.li by LinkedIn.
The class LoadBalancerEchoClient, method getLoadBalancer.
public static SimpleLoadBalancer getLoadBalancer(String hostPort) throws IOException, PropertyStoreException {
  // zk stores
  ZooKeeperPermanentStore<ClusterProperties> zkClusterRegistry = null;
  ZooKeeperPermanentStore<ServiceProperties> zkServiceRegistry = null;
  ZooKeeperEphemeralStore<UriProperties> zkUriRegistry = null;
  ZKConnection zkClient = new ZKConnection(hostPort, 10000);
  zkClusterRegistry = new ZooKeeperPermanentStore<>(zkClient, new ClusterPropertiesJsonSerializer(),
      _basePath + "/clusters");
  zkServiceRegistry = new ZooKeeperPermanentStore<>(zkClient, new ServicePropertiesJsonSerializer(),
      _basePath + "/services");
  zkUriRegistry = new ZooKeeperEphemeralStore<>(zkClient, new UriPropertiesJsonSerializer(),
      new UriPropertiesMerger(), _basePath + "/uris", false, true);
  // fs stores
  File testDirectory = LoadBalancerUtil.createTempDirectory("lb-degrader-witih-file-store-large");
  testDirectory.deleteOnExit();
  new File(testDirectory + File.separator + "cluster").mkdir();
  new File(testDirectory + File.separator + "service").mkdir();
  new File(testDirectory + File.separator + "uri").mkdir();
  FileStore<ClusterProperties> fsClusterStore = new FileStore<>(testDirectory + File.separator + "cluster",
      FileSystemDirectory.FILE_STORE_EXTENSION, new ClusterPropertiesJsonSerializer());
  FileStore<ServiceProperties> fsServiceStore = new FileStore<>(testDirectory + File.separator + "service",
      FileSystemDirectory.FILE_STORE_EXTENSION, new ServicePropertiesJsonSerializer());
  FileStore<UriProperties> fsUriStore = new FileStore<>(testDirectory + File.separator + "uri",
      FileSystemDirectory.FILE_STORE_EXTENSION, new UriPropertiesJsonSerializer());
  // chains
  PropertyEventThread thread = new PropertyEventThread("echo client event thread");
  ScheduledExecutorService executorService =
      Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("echo client event thread"));
  // start up the world; note that the PropertyEventThread is started here, but the
  // event buses below are driven by executorService
  thread.start();
  PropertyEventBus<ServiceProperties> serviceBus = new PropertyEventBusImpl<>(executorService, zkServiceRegistry);
  serviceBus.register(fsServiceStore);
  new ZooKeeperTogglingStore<>(zkServiceRegistry, fsServiceStore, serviceBus, true);
  PropertyEventBus<UriProperties> uriBus = new PropertyEventBusImpl<>(executorService, zkUriRegistry);
  uriBus.register(fsUriStore);
  new ZooKeeperTogglingStore<>(zkUriRegistry, fsUriStore, uriBus, true);
  PropertyEventBus<ClusterProperties> clusterBus = new PropertyEventBusImpl<>(executorService, zkClusterRegistry);
  clusterBus.register(fsClusterStore);
  new ZooKeeperTogglingStore<>(zkClusterRegistry, fsClusterStore, clusterBus, true);
  Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories = new HashMap<>();
  // strategy and scheme factories
  loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
  Map<String, TransportClientFactory> clientFactories = new HashMap<>();
  clientFactories.put("http", new HttpClientFactory.Builder().build());
  // create the state
  SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriBus, clusterBus, serviceBus,
      clientFactories, loadBalancerStrategyFactories, null, null, false);
  SimpleLoadBalancer balancer = new SimpleLoadBalancer(state, 5, TimeUnit.SECONDS, executorService);
  new JmxManager().registerLoadBalancer("balancer", balancer).registerLoadBalancerState("state", state);
  return balancer;
}
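For orientation, a hypothetical caller of this factory method might look like the sketch below. The ZooKeeper address and the "echo" service name are illustrative assumptions, not taken from the source:

public static void main(String[] args) throws Exception {
  // assumes a ZooKeeper ensemble at localhost:2181 with a d2 service "echo" registered
  SimpleLoadBalancer balancer = LoadBalancerEchoClient.getLoadBalancer("localhost:2181");
  // resolves the d2 URI against the service registry wired up above;
  // throws ServiceUnavailableException if nothing suitable is registered
  TransportClient client = balancer.getClient(new URIRequest("d2://echo"), new RequestContext());
}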
Use of com.linkedin.d2.discovery.event.PropertyEventThread in project rest.li by LinkedIn.
The class SimpleLoadBalancerSimulation, method verifyState.
/**
* Compare the simulator's view of reality with the load balancer's. This method should
* be called after every step is performed and all threads have finished.
*/
public void verifyState() {
  // verify that we consumed all messages before we do anything
  for (int i = 0; i < _queues.length; ++i) {
    if (_queues[i].size() > 0) {
      fail("there were messages left in the queue. all messages should have been consumed during this simulation step.");
    }
  }
  // verify that all clients have been shut down
  for (Map.Entry<String, TransportClientFactory> e : _clientFactories.entrySet()) {
    DoNothingClientFactory factory = (DoNothingClientFactory) e.getValue();
    if (factory.getRunningClientCount() != 0) {
      fail("Not all clients were shut down from factory " + e.getKey());
    }
  }
  try {
    final CountDownLatch latch = new CountDownLatch(1);
    PropertyEventShutdownCallback callback = new PropertyEventShutdownCallback() {
      @Override
      public void done() {
        latch.countDown();
      }
    };
    _state.shutdown(callback);
    if (!latch.await(60, TimeUnit.SECONDS)) {
      fail("unable to shutdown state");
    }
  } catch (InterruptedException e) {
    fail("unable to shutdown state in verifyState.");
  }
  // New load balancer with no timeout; the code below checks for services that don't
  // exist, and a load balancer with a non-zero timeout would just time out waiting for
  // them to be registered, which will never happen because the PropertyEventThread is
  // shut down.
  _loadBalancer = new SimpleLoadBalancer(_state, 0, TimeUnit.SECONDS, _executorService);
  // verify services are as we expect
  for (String possibleService : _possibleServices) {
    // if the service isn't expected, or the state isn't listening to it, the state
    // shouldn't know about it
    if (!_expectedServiceProperties.containsKey(possibleService) || !_state.isListeningToService(possibleService)) {
      LoadBalancerStateItem<ServiceProperties> serviceItem = _state.getServiceProperties(possibleService);
      assertTrue(serviceItem == null || serviceItem.getProperty() == null);
    } else {
      ServiceProperties serviceProperties = _expectedServiceProperties.get(possibleService);
      ClusterProperties clusterProperties = _expectedClusterProperties.get(serviceProperties.getClusterName());
      UriProperties uriProperties = _expectedUriProperties.get(serviceProperties.getClusterName());
      assertEquals(_state.getServiceProperties(possibleService).getProperty(), serviceProperties);
      // verify round robin'ing of the hosts for this service
      for (int i = 0; i < 100; ++i) {
        try {
          // this call will queue up messages if we're not listening to the service, but
          // it's ok, because all of the messengers have been stopped.
          final TransportClient client = _loadBalancer.getClient(
              new URIRequest("d2://" + possibleService + random(_possiblePaths)), new RequestContext());
          // if we didn't receive service unavailable, we should get a client back
          assertNotNull(client, "Not found client for: d2://" + possibleService + random(_possiblePaths));
        } catch (ServiceUnavailableException e) {
          if (uriProperties != null && clusterProperties != null) {
            // only way to get here is if the prioritized schemes could find no available
            // uris in the cluster. let's see if we can find a URI that matches a
            // prioritized scheme in the cluster.
            Set<String> schemes = new HashSet<>();
            for (URI uri : uriProperties.Uris()) {
              schemes.add(uri.getScheme());
            }
            for (String scheme : clusterProperties.getPrioritizedSchemes()) {
              // if a URI with this scheme exists and we have a client factory for it,
              // a client should have been found by the code above.
              if (schemes.contains(scheme) && _clientFactories.containsKey(scheme)) {
                break;
              }
              assertFalse(schemes.contains(scheme) && _clientFactories.containsKey(scheme),
                  "why couldn't a client be found for schemes " + clusterProperties.getPrioritizedSchemes()
                      + " with URIs: " + uriProperties.Uris());
            }
          }
        }
      }
    }
  }
  // verify clusters are as we expect
  for (String possibleCluster : _possibleClusters) {
    LoadBalancerStateItem<ClusterProperties> clusterItem = _state.getClusterProperties(possibleCluster);
    if (!_expectedClusterProperties.containsKey(possibleCluster) || !_state.isListeningToCluster(possibleCluster)) {
      assertTrue(clusterItem == null || clusterItem.getProperty() == null,
          "cluster item for " + possibleCluster + " is not null: " + clusterItem);
    } else {
      assertNotNull(clusterItem, "Item for cluster " + possibleCluster + " should not be null, listening: "
          + _state.isListeningToCluster(possibleCluster) + ", keys: " + _expectedClusterProperties.keySet());
      assertEquals(clusterItem.getProperty(), _expectedClusterProperties.get(possibleCluster));
    }
  }
  // verify uris are as we expect
  for (String possibleCluster : _possibleClusters) {
    LoadBalancerStateItem<UriProperties> uriItem = _state.getUriProperties(possibleCluster);
    if (!_expectedUriProperties.containsKey(possibleCluster) || !_state.isListeningToCluster(possibleCluster)) {
      assertTrue(uriItem == null || uriItem.getProperty() == null);
    } else {
      assertNotNull(uriItem);
      assertEquals(uriItem.getProperty(), _expectedUriProperties.get(possibleCluster));
    }
  }
}
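A side note on the shutdown block above: since PropertyEventShutdownCallback declares only the single method done(), the anonymous class can be collapsed to a method reference on Java 8 and later:

final CountDownLatch latch = new CountDownLatch(1);
_state.shutdown(latch::countDown); // done() and countDown() are both zero-arg void methods
if (!latch.await(60, TimeUnit.SECONDS)) {
  fail("unable to shutdown state");
}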