Use of com.linkedin.d2.balancer.simple.SimpleLoadBalancerState in project rest.li by LinkedIn.
The class SimpleLoadBalancerTest, method testLoadBalancerWithPartitionsSmoke:
// load balancer working with partitioned cluster
@Test(groups = { "small", "back-end" })
public void testLoadBalancerWithPartitionsSmoke() throws URISyntaxException, ServiceUnavailableException, InterruptedException, ExecutionException {
for (int tryAgain = 0; tryAgain < 12; ++tryAgain) {
Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories = new HashMap<>();
Map<String, TransportClientFactory> clientFactories = new HashMap<>();
List<String> prioritizedSchemes = new ArrayList<>();
MockStore<ServiceProperties> serviceRegistry = new MockStore<>();
MockStore<ClusterProperties> clusterRegistry = new MockStore<>();
MockStore<UriProperties> uriRegistry = new MockStore<>();
ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
clientFactories.put(PropertyKeys.HTTP_SCHEME, new DoNothingClientFactory());
SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriRegistry, clusterRegistry, serviceRegistry, clientFactories, loadBalancerStrategyFactories);
SimpleLoadBalancer loadBalancer = new SimpleLoadBalancer(state, 5, TimeUnit.SECONDS, executorService);
FutureCallback<None> balancerCallback = new FutureCallback<>();
loadBalancer.start(balancerCallback);
balancerCallback.get();
URI uri1 = URI.create("http://test.qa1.com:1234");
URI uri2 = URI.create("http://test.qa2.com:2345");
URI uri3 = URI.create("http://test.qa3.com:6789");
Map<URI, Double> uris = new HashMap<>();
uris.put(uri1, 1d);
uris.put(uri2, 1d);
uris.put(uri3, 1d);
Map<URI, Map<Integer, PartitionData>> partitionDesc = new HashMap<>();
Map<Integer, PartitionData> server1 = new HashMap<>();
server1.put(0, new PartitionData(1d));
server1.put(1, new PartitionData(1d));
Map<Integer, PartitionData> server2 = new HashMap<>();
server2.put(0, new PartitionData(1d));
Map<Integer, PartitionData> server3 = new HashMap<>();
server3.put(1, new PartitionData(1d));
partitionDesc.put(uri1, server1);
partitionDesc.put(uri2, server2);
partitionDesc.put(uri3, server3);
prioritizedSchemes.add(PropertyKeys.HTTP_SCHEME);
int partitionMethod = tryAgain % 4;
switch(partitionMethod) {
case 0:
clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null, new HashMap<>(), new HashSet<>(), new RangeBasedPartitionProperties("id=(\\d+)", 0, 50, 2)));
break;
case 1:
clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null, new HashMap<>(), new HashSet<>(), new HashBasedPartitionProperties("id=(\\d+)", 2, HashBasedPartitionProperties.HashAlgorithm.valueOf("MODULO"))));
break;
case 2:
clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null, new HashMap<>(), new HashSet<>(), new HashBasedPartitionProperties("id=(\\d+)", 2, HashBasedPartitionProperties.HashAlgorithm.valueOf("MD5"))));
break;
case 3:
// test getRings with gap. here, no server serves partition 2
clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null, new HashMap<>(), new HashSet<>(), new RangeBasedPartitionProperties("id=(\\d+)", 0, 50, 4)));
server3.put(3, new PartitionData(1d));
partitionDesc.put(uri3, server3);
break;
default:
break;
}
serviceRegistry.put("foo", new ServiceProperties("foo", "cluster-1", "/foo", Arrays.asList("degrader"), Collections.singletonMap(PropertyKeys.HTTP_LB_CONSISTENT_HASH_ALGORITHM, "pointBased"), null, null, prioritizedSchemes, null));
uriRegistry.put("cluster-1", new UriProperties("cluster-1", partitionDesc));
if (partitionMethod == 3) {
Map<Integer, Ring<URI>> ringMap = loadBalancer.getRings(URI.create("d2://foo"));
assertEquals(ringMap.size(), 4);
// the ring for partition 2 should be empty
assertEquals(ringMap.get(2).toString(), new ConsistentHashRing<>(Collections.emptyList()).toString());
continue;
}
URI expectedUri1 = URI.create("http://test.qa1.com:1234/foo");
URI expectedUri2 = URI.create("http://test.qa2.com:2345/foo");
URI expectedUri3 = URI.create("http://test.qa3.com:6789/foo");
Set<URI> expectedUris = new HashSet<>();
expectedUris.add(expectedUri1);
expectedUris.add(expectedUri2);
expectedUris.add(expectedUri3);
for (int i = 0; i < 1000; ++i) {
int ii = i % 100;
RewriteLoadBalancerClient client = (RewriteLoadBalancerClient) loadBalancer.getClient(new URIRequest("d2://foo/id=" + ii), new RequestContext());
String clientUri = client.getUri().toString();
HashFunction<String[]> hashFunction = null;
String[] str = new String[1];
// test KeyMapper target host hint: request is always to target host regardless of what's in d2 URI and whether it's hash-based or range-based partitions
RequestContext requestContextWithHint = new RequestContext();
KeyMapper.TargetHostHints.setRequestContextTargetHost(requestContextWithHint, uri1);
RewriteLoadBalancerClient hintedClient1 = (RewriteLoadBalancerClient) loadBalancer.getClient(new URIRequest("d2://foo/id=" + ii), requestContextWithHint);
String hintedUri1 = hintedClient1.getUri().toString();
Assert.assertEquals(hintedUri1, uri1.toString() + "/foo");
RewriteLoadBalancerClient hintedClient2 = (RewriteLoadBalancerClient) loadBalancer.getClient(new URIRequest("d2://foo/action=purge-all"), requestContextWithHint);
String hintedUri2 = hintedClient2.getUri().toString();
Assert.assertEquals(hintedUri2, uri1.toString() + "/foo");
if (partitionMethod == 2) {
hashFunction = new MD5Hash();
}
for (URI uri : expectedUris) {
if (clientUri.contains(uri.toString())) {
// check if only key belonging to partition 0 gets uri2
if (uri.equals(uri2)) {
if (partitionMethod == 0) {
assertTrue(ii < 50);
} else if (partitionMethod == 1) {
assertTrue(ii % 2 == 0);
} else {
str[0] = ii + "";
assertTrue(hashFunction.hash(str) % 2 == 0);
}
}
// check if only key belonging to partition 1 gets uri3
if (uri.equals(uri3)) {
if (partitionMethod == 0) {
assertTrue(ii >= 50);
} else if (partitionMethod == 1) {
assertTrue(ii % 2 == 1);
} else {
str[0] = ii + "";
assertTrue(hashFunction.hash(str) % 2 == 1);
}
}
}
}
}
// two rings for two partitions
Map<Integer, Ring<URI>> ringMap = loadBalancer.getRings(URI.create("d2://foo"));
assertEquals(ringMap.size(), 2);
if (partitionMethod != 2) {
Set<String> keys = new HashSet<>();
for (int j = 0; j < 50; j++) {
if (partitionMethod == 0) {
keys.add(j + "");
} else {
keys.add(j * 2 + "");
}
}
// if it is range based partition, all keys from 0 ~ 49 belong to partition 0 according to the range definition
// if it is modulo based partition, all even keys belong to partition 0 because the partition count is 2
// only from partition 0
MapKeyResult<Ring<URI>, String> mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
Map<Ring<URI>, Collection<String>> keyToPartition = mapKeyResult.getMapResult();
assertEquals(keyToPartition.size(), 1);
for (Ring<URI> ring : keyToPartition.keySet()) {
assertEquals(ring, ringMap.get(0));
}
// now also from partition 1
keys.add("51");
mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
assertEquals(mapKeyResult.getMapResult().size(), 2);
assertEquals(mapKeyResult.getUnmappedKeys().size(), 0);
// now only from partition 1
keys.clear();
keys.add("99");
mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
keyToPartition = mapKeyResult.getMapResult();
assertEquals(keyToPartition.size(), 1);
assertEquals(mapKeyResult.getUnmappedKeys().size(), 0);
for (Ring<URI> ring : keyToPartition.keySet()) {
assertEquals(ring, ringMap.get(1));
}
keys.add("100");
mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
if (partitionMethod == 0) {
// key out of range
Collection<MapKeyResult.UnmappedKey<String>> unmappedKeys = mapKeyResult.getUnmappedKeys();
assertEquals(unmappedKeys.size(), 1);
}
try {
loadBalancer.getClient(new URIRequest("d2://foo/id=100"), new RequestContext());
if (partitionMethod == 0) {
// key out of range
fail("Should throw ServiceUnavailableException caused by PartitionAccessException");
}
} catch (ServiceUnavailableException e) {
// Expected for the range-based case (partitionMethod == 0): key 100 is outside the declared key range [0, 100).
}
}
final CountDownLatch latch = new CountDownLatch(1);
PropertyEventShutdownCallback callback = new PropertyEventShutdownCallback() {
@Override
public void done() {
latch.countDown();
}
};
state.shutdown(callback);
if (!latch.await(60, TimeUnit.SECONDS)) {
fail("unable to shutdown state");
}
executorService.shutdownNow();
assertTrue(executorService.isShutdown(), "ExecutorService should have shut down!");
}
}
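The assertions above rely on how D2 maps integer keys to partitions. The following is a minimal standalone sketch of that arithmetic, using the same settings as the test (key range starting at 0, partition size 50, partition count 2); it mirrors the range-based and modulo-based behavior rather than calling the D2 library itself.
public final class PartitionMathSketch {
    // Range-based: partition = (key - keyRangeStart) / partitionSize.
    static int rangeBasedPartition(long key, long keyRangeStart, long partitionSize) {
        return (int) ((key - keyRangeStart) / partitionSize);
    }

    // Modulo-based: partition = key % partitionCount.
    static int moduloPartition(long key, int partitionCount) {
        return (int) (key % partitionCount);
    }

    public static void main(String[] args) {
        // Keys 0..49 map to partition 0 and keys 50..99 to partition 1 (range-based),
        // which is why the test asserts ii < 50 for uri2 and ii >= 50 for uri3.
        assert rangeBasedPartition(42, 0, 50) == 0;
        assert rangeBasedPartition(99, 0, 50) == 1;
        // Even keys map to partition 0 and odd keys to partition 1 (modulo-based),
        // which is why the test asserts ii % 2 == 0 for uri2 and ii % 2 == 1 for uri3.
        assert moduloPartition(42, 2) == 0;
        assert moduloPartition(99, 2) == 1;
    }
}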
Use of com.linkedin.d2.balancer.simple.SimpleLoadBalancerState in project rest.li by LinkedIn.
The class SimpleLoadBalancerStateTest, method reset:
public void reset(boolean useSSL, boolean enableRelativeLoadBalancer) {
_executorService = new SynchronousExecutorService();
_uriRegistry = new MockStore<>();
_clusterRegistry = new MockStore<>();
_serviceRegistry = new MockStore<>();
_clientFactories = new HashMap<>();
_loadBalancerStrategyFactories = new HashMap<>();
if (enableRelativeLoadBalancer) {
_loadBalancerStrategyFactories.put(RelativeLoadBalancerStrategy.RELATIVE_LOAD_BALANCER_STRATEGY_NAME, new RelativeLoadBalancerStrategyFactory(new ClockedExecutor(), null, Collections.emptyList(), new NoopEventEmitter(), SystemClock.instance()));
}
_loadBalancerStrategyFactories.put("random", new RandomLoadBalancerStrategyFactory());
_loadBalancerStrategyFactories.put("degraderV3", new DegraderLoadBalancerStrategyFactoryV3());
_loadBalancerStrategyFactories.put(DegraderLoadBalancerStrategyV3.DEGRADER_STRATEGY_NAME, new DegraderLoadBalancerStrategyFactoryV3());
try {
_sslContext = SSLContext.getDefault();
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
_sslParameters = new SSLParameters();
if (useSSL) {
_clientFactories.put("https", new SimpleLoadBalancerTest.DoNothingClientFactory());
_state = new SimpleLoadBalancerState(_executorService, new PropertyEventBusImpl<>(_executorService, _uriRegistry), new PropertyEventBusImpl<>(_executorService, _clusterRegistry), new PropertyEventBusImpl<>(_executorService, _serviceRegistry), _clientFactories, _loadBalancerStrategyFactories, _sslContext, _sslParameters, true, null, SSL_SESSION_VALIDATOR_FACTORY);
} else {
_clientFactories.put("http", new SimpleLoadBalancerTest.DoNothingClientFactory());
_state = new SimpleLoadBalancerState(_executorService, _uriRegistry, _clusterRegistry, _serviceRegistry, _clientFactories, _loadBalancerStrategyFactories);
}
FutureCallback<None> callback = new FutureCallback<>();
_state.start(callback);
try {
callback.get();
} catch (Exception e) {
Assert.fail("State start failed", e);
}
}
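After reset() returns, a test typically publishes properties to the mock registries and subscribes the state to them. The following is a hedged sketch, not taken from the source: the service, cluster, and path names are illustrative, the ServiceProperties constructor mirrors the nine-argument form used elsewhere on this page, and the listen calls mirror those in MockLBFactory.createLoadBalancer() below.
// Illustrative only: "cluster-1", "service-1", and "/service-1" are made-up names.
_clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
_serviceRegistry.put("service-1", new ServiceProperties("service-1", "cluster-1", "/service-1", Arrays.asList("degraderV3"), Collections.<String, Object>emptyMap(), null, null, Collections.singletonList("http"), null));
// Subscribe the state so it builds strategies and tracker clients for the new service.
_state.listenToCluster("cluster-1", new LoadBalancerState.NullStateListenerCallback());
_state.listenToService("service-1", new LoadBalancerState.NullStateListenerCallback());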
Use of com.linkedin.d2.balancer.simple.SimpleLoadBalancerState in project rest.li by LinkedIn.
The class LoadBalancerEchoClient, method getLoadBalancer:
public static SimpleLoadBalancer getLoadBalancer(String hostPort) throws IOException, PropertyStoreException {
// zk stores
ZooKeeperPermanentStore<ClusterProperties> zkClusterRegistry = null;
ZooKeeperPermanentStore<ServiceProperties> zkServiceRegistry = null;
ZooKeeperEphemeralStore<UriProperties> zkUriRegistry = null;
ZKConnection zkClient = new ZKConnection(hostPort, 10000);
zkClusterRegistry = new ZooKeeperPermanentStore<>(zkClient, new ClusterPropertiesJsonSerializer(), _basePath + "/clusters");
zkServiceRegistry = new ZooKeeperPermanentStore<>(zkClient, new ServicePropertiesJsonSerializer(), _basePath + "/services");
zkUriRegistry = new ZooKeeperEphemeralStore<>(zkClient, new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), _basePath + "/uris", false, true);
// fs stores
File testDirectory = LoadBalancerUtil.createTempDirectory("lb-degrader-with-file-store-large");
testDirectory.deleteOnExit();
new File(testDirectory + File.separator + "cluster").mkdir();
new File(testDirectory + File.separator + "service").mkdir();
new File(testDirectory + File.separator + "uri").mkdir();
FileStore<ClusterProperties> fsClusterStore = new FileStore<>(testDirectory + File.separator + "cluster", FileSystemDirectory.FILE_STORE_EXTENSION, new ClusterPropertiesJsonSerializer());
FileStore<ServiceProperties> fsServiceStore = new FileStore<>(testDirectory + File.separator + "service", FileSystemDirectory.FILE_STORE_EXTENSION, new ServicePropertiesJsonSerializer());
FileStore<UriProperties> fsUriStore = new FileStore<>(testDirectory + File.separator + "uri", FileSystemDirectory.FILE_STORE_EXTENSION, new UriPropertiesJsonSerializer());
// chains
PropertyEventThread thread = new PropertyEventThread("echo client event thread");
ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("echo client event thread"));
// start up the world
thread.start();
PropertyEventBus<ServiceProperties> serviceBus = new PropertyEventBusImpl<>(executorService, zkServiceRegistry);
serviceBus.register(fsServiceStore);
new ZooKeeperTogglingStore<>(zkServiceRegistry, fsServiceStore, serviceBus, true);
PropertyEventBus<UriProperties> uriBus = new PropertyEventBusImpl<>(executorService, zkUriRegistry);
uriBus.register(fsUriStore);
new ZooKeeperTogglingStore<>(zkUriRegistry, fsUriStore, uriBus, true);
PropertyEventBus<ClusterProperties> clusterBus = new PropertyEventBusImpl<>(executorService, zkClusterRegistry);
clusterBus.register(fsClusterStore);
new ZooKeeperTogglingStore<>(zkClusterRegistry, fsClusterStore, clusterBus, true);
Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories = new HashMap<>();
// strategy and scheme factories
loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
Map<String, TransportClientFactory> clientFactories = new HashMap<>();
clientFactories.put("http", new HttpClientFactory.Builder().build());
// create the state
SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriBus, clusterBus, serviceBus, clientFactories, loadBalancerStrategyFactories, null, null, false);
SimpleLoadBalancer balancer = new SimpleLoadBalancer(state, 5, TimeUnit.SECONDS, executorService);
new JmxManager().registerLoadBalancer("balancer", balancer).registerLoadBalancerState("state", state);
return balancer;
}
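A hedged usage sketch for the returned balancer follows. The ZooKeeper host:port and the "echo" service name are placeholders, and the synchronous getClient call mirrors the one used in the partition test above; newer rest.li versions may require the callback-based overload instead.
// Placeholders only: "localhost:2181" and "d2://echo" are not taken from the source.
SimpleLoadBalancer balancer = LoadBalancerEchoClient.getLoadBalancer("localhost:2181");
// Resolve a transport client for one request; the degrader strategy picks the host.
TransportClient client = balancer.getClient(new URIRequest("d2://echo"), new RequestContext());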
Use of com.linkedin.d2.balancer.simple.SimpleLoadBalancerState in project rest.li by LinkedIn.
The class SimpleLoadBalancerSimulation, method reset:
/**
* Reset the entire state of the simulation.
*/
public void reset() {
// simulation state
_random = new Random();
_possibleServices = Collections.synchronizedList(new ArrayList<>());
_possibleClusters = Collections.synchronizedList(new ArrayList<>());
_possiblePaths = Collections.synchronizedList(new ArrayList<>());
_possibleSchemes = Collections.synchronizedList(new ArrayList<>());
_possibleStrategies = Collections.synchronizedList(new ArrayList<>());
_possibleUris = Collections.synchronizedList(new ArrayList<>());
// load balancer state
_executorService = Executors.newSingleThreadScheduledExecutor();
// pretend that these are zk stores
_serviceRegistry = new MockStore<>();
_uriRegistry = new MockStore<>();
_clusterRegistry = new MockStore<>();
_loadBalancerStrategyFactories = new HashMap<>();
_clientFactories = new HashMap<>();
_state = new SimpleLoadBalancerState(_executorService, _uriRegistry, _clusterRegistry, _serviceRegistry, _clientFactories, _loadBalancerStrategyFactories);
_loadBalancer = new SimpleLoadBalancer(_state, 10, TimeUnit.SECONDS, _executorService);
FutureCallback<None> callback = new FutureCallback<>();
_loadBalancer.start(callback);
try {
callback.get();
} catch (Exception e) {
throw new RuntimeException("Balancer start failed", e);
}
// verification state
_expectedServiceProperties = new ConcurrentHashMap<>();
_expectedClusterProperties = new ConcurrentHashMap<>();
_expectedUriProperties = new ConcurrentHashMap<>();
_totalMessages = 0;
// TODO parameterize this
for (int i = 0; i < 10; ++i) {
_possibleServices.add("service-" + i);
_possibleClusters.add("cluster-" + i);
_possiblePaths.add("/some/path/" + i);
_possibleSchemes.add("scheme" + i % 3);
_possibleStrategies.add("strategy-" + i);
_clientFactories.put("scheme" + i % 2, new DoNothingClientFactory());
_loadBalancerStrategyFactories.put("strategy-" + i, _loadBalancerStrategyFactoryToTest);
}
for (int i = 0; i < 1000; ++i) {
_possibleUris.add(URI.create(random(_possibleSchemes) + "://host" + i % 100 + ":" + (1000 + _random.nextInt(1000)) + random(_possiblePaths)));
}
// add bad stuff
// add a bad scheme to prioritized schemes
_possibleSchemes.add("BAD_PRIORITIZED_SCHEME");
// add a bad scheme to possible uris
_possibleUris.add(URI.create("BADSCHEME://host1001:" + (1000 + _random.nextInt(1000)) + random(_possiblePaths)));
// register jmx goodies
new JmxManager().registerLoadBalancer("SimpleLoadBalancer", _loadBalancer).registerLoadBalancerState("SimpleLoadBalancerState", _state);
}
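The three _expected*Properties maps exist so the simulation can later compare what it published against what the state observed. Below is a hedged sketch of that round trip; the cluster name is made up, and getClusterProperties()/getProperty() are assumed to be the accessors exposed by the rest.li version this simulation targets.
// Illustrative round trip: publish, remember, then verify the state saw the same value.
ClusterProperties published = new ClusterProperties("cluster-sim");
_state.listenToCluster("cluster-sim", new LoadBalancerState.NullStateListenerCallback());
_clusterRegistry.put("cluster-sim", published);
_expectedClusterProperties.put("cluster-sim", published);
assertEquals(_state.getClusterProperties("cluster-sim").getProperty(), published);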
Use of com.linkedin.d2.balancer.simple.SimpleLoadBalancerState in project rest.li by LinkedIn.
The class MockLBFactory, method createLoadBalancer:
static SimpleLoadBalancer createLoadBalancer() {
// define the load balancing strategies that we support (round robin, etc)
Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories = new HashMap<>();
loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
Map<String, TransportClientFactory> clientFactories = new HashMap<>();
clientFactories.put("http", new HttpClientFactory.Builder().build());
SynchronousExecutorService executorService = new SynchronousExecutorService();
MockStore<ServiceProperties> serviceRegistry = new MockStore<>();
MockStore<ClusterProperties> clusterRegistry = new MockStore<>();
MockStore<UriProperties> uriRegistry = new MockStore<>();
SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriRegistry, clusterRegistry, serviceRegistry, clientFactories, loadBalancerStrategyFactories);
state.listenToService("greetings", new LoadBalancerState.NullStateListenerCallback());
state.listenToService("groups", new LoadBalancerState.NullStateListenerCallback());
state.listenToCluster("testcluster", new LoadBalancerState.NullStateListenerCallback());
state.listenToCluster("badcluster", new LoadBalancerState.NullStateListenerCallback());
List<String> schemes = new ArrayList<>();
schemes.add("http");
Map<String, Object> metadataProperties = new HashMap<>();
metadataProperties.put(RestConstants.RESTLI_PROTOCOL_VERSION_PROPERTY, AllProtocolVersions.BASELINE_PROTOCOL_VERSION.toString());
serviceRegistry.put("greetings", new ServiceProperties("greetings", "testcluster", "/greetings", Arrays.asList("degrader"), Collections.<String, Object>emptyMap(), null, null, schemes, null, metadataProperties));
serviceRegistry.put("groups", new ServiceProperties("groups", "badcluster", "/groups", Arrays.asList("degrader"), Collections.<String, Object>emptyMap(), null, null, schemes, null, metadataProperties));
clusterRegistry.put("testcluster", new ClusterProperties("testcluster"));
clusterRegistry.put("badcluster", new ClusterProperties("badcluster"));
uriRegistry.put("testcluster", new UriProperties("testcluster", createUriData("http://localhost:1338")));
uriRegistry.put("badcluster", new UriProperties("badcluster", createUriData("http://localhost:1337")));
// create the load balancer
return new SimpleLoadBalancer(state, executorService);
}
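A hedged usage sketch of the factory: the "1" key in the d2 URI is arbitrary, and the synchronous getClient call follows the same pattern as the tests above.
// Illustrative only: resolve a client for the "greetings" service registered above.
SimpleLoadBalancer loadBalancer = MockLBFactory.createLoadBalancer();
TransportClient greetingsClient = loadBalancer.getClient(new URIRequest("d2://greetings/1"), new RequestContext());
// Per the RewriteLoadBalancerClient behavior shown earlier, requests through this client target http://localhost:1338/greetings.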