Use of com.linkedin.d2.balancer.util.partitions.PartitionAccessException in project rest.li by LinkedIn.
The class SimpleLoadBalancer, method chooseTrackerClient.
private TrackerClient chooseTrackerClient(Request request, RequestContext requestContext, String serviceName, String clusterName, ClusterProperties cluster, LoadBalancerStateItem<UriProperties> uriItem, UriProperties uris, List<LoadBalancerState.SchemeStrategyPair> orderedStrategies, ServiceProperties serviceProperties) throws ServiceUnavailableException {
// now try to find a tracker client for the URI
TrackerClient trackerClient = null;
URI targetHost = KeyMapper.TargetHostHints.getRequestContextTargetHost(requestContext);
int partitionId = -1;
URI requestUri = request.getURI();
if (targetHost == null) {
PartitionAccessor accessor = getPartitionAccessor(serviceName, clusterName);
try {
partitionId = accessor.getPartitionId(requestUri);
} catch (PartitionAccessException e) {
die(serviceName, "Error in finding the partition for URI: " + requestUri + ", " + e.getMessage());
}
} else {
// This is the scatter/gather or search case, where the target host may have been chosen to serve
// more than one partition (the target host was picked from a consistent hash ring, so load balancing is already in effect).
// We randomly pick one partition to check for call dropping, for two reasons:
// 1. Currently there is no way to know for which subset of partitions the target host was chosen
// if it is serving more than one partition. This could be added, but it would require changing public interfaces (KeyMapper)
// so that more hints about the partitions concerned can be added to the request context.
// 2. More importantly, there is no good way to check for call dropping even if the above problem were solved.
// For example, if a target host is chosen for partitions 1, 5, and 7, with call drop rates of 0, 0.2, and 0.4 respectively,
// a reasonable way to proceed would be to use the highest drop rate and do the check once for the target host,
// but currently the check can only be done per partition, and only with a boolean result (no access to the drop rate).
// The partition to check is picked at random to be conservative:
// e.g. in the above example, we don't want to always use the drop rate of partition 1.
// (The random pick is sketched after this method.)
Map<Integer, PartitionData> partitionDataMap = uris.getPartitionDataMap(targetHost);
if (partitionDataMap == null || partitionDataMap.isEmpty()) {
die(serviceName, "There is no partition data for server host: " + targetHost + ". URI: " + requestUri);
}
Set<Integer> partitions = partitionDataMap.keySet();
Iterator<Integer> iterator = partitions.iterator();
int index = _random.nextInt(partitions.size());
for (int i = 0; i <= index; i++) {
partitionId = iterator.next();
}
}
List<TrackerClient> clientsToLoadBalance = null;
for (LoadBalancerState.SchemeStrategyPair pair : orderedStrategies) {
LoadBalancerStrategy strategy = pair.getStrategy();
String scheme = pair.getScheme();
clientsToLoadBalance = getPotentialClients(serviceName, serviceProperties, uris, scheme, partitionId);
trackerClient = strategy.getTrackerClient(request, requestContext, uriItem.getVersion(), partitionId, clientsToLoadBalance);
debug(_log, "load balancer strategy for ", serviceName, " returned: ", trackerClient);
// break as soon as we find an available cluster client
if (trackerClient != null) {
break;
}
}
if (trackerClient == null) {
if (clientsToLoadBalance == null || clientsToLoadBalance.isEmpty()) {
die(serviceName, "Service: " + serviceName + " unable to find a host to route the request" + " in partition: " + partitionId + " cluster: " + clusterName + ". Check what cluster your servers are announcing to.");
} else {
die(serviceName, "Service: " + serviceName + " is in a bad state (high latency/high error). " + "Dropping request. Cluster: " + clusterName + ", partitionId:" + partitionId + " (" + clientsToLoadBalance.size() + " hosts)");
}
}
return trackerClient;
}
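The random partition pick in the target-host branch above deserves a closer look: a Set has no positional access, so the code advances the iterator index + 1 times to land on a uniformly random element. A minimal standalone sketch of the same idea (the helper class and method names are illustrative, not rest.li API):

import java.util.Iterator;
import java.util.Random;
import java.util.Set;

final class RandomPartitionPick {
    private static final Random RANDOM = new Random();

    // Pick one partition id uniformly at random from the partitions a
    // target host serves; mirrors the iterator walk in chooseTrackerClient.
    static int pick(Set<Integer> partitions) {
        if (partitions.isEmpty()) {
            throw new IllegalArgumentException("host serves no partitions");
        }
        int index = RANDOM.nextInt(partitions.size());
        Iterator<Integer> iterator = partitions.iterator();
        int partitionId = -1;
        // advance index + 1 times; stops on the element at position index
        for (int i = 0; i <= index; i++) {
            partitionId = iterator.next();
        }
        return partitionId;
    }
}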
Use of com.linkedin.d2.balancer.util.partitions.PartitionAccessException in project rest.li by LinkedIn.
The class SimpleLoadBalancer, method getRings.
@Override
public <K> MapKeyResult<Ring<URI>, K> getRings(URI serviceUri, Iterable<K> keys) throws ServiceUnavailableException {
ServiceProperties service = listenToServiceAndCluster(serviceUri);
String serviceName = service.getServiceName();
String clusterName = service.getClusterName();
ClusterProperties cluster = getClusterProperties(serviceName, clusterName);
LoadBalancerStateItem<UriProperties> uriItem = getUriItem(serviceName, clusterName, cluster);
UriProperties uris = uriItem.getProperty();
List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = _state.getStrategiesForService(serviceName, service.getPrioritizedSchemes());
if (!orderedStrategies.isEmpty()) {
LoadBalancerState.SchemeStrategyPair pair = orderedStrategies.get(0);
PartitionAccessor accessor = getPartitionAccessor(serviceName, clusterName);
// first distribute keys to partitions
Map<Integer, Set<K>> partitionSet = new HashMap<Integer, Set<K>>();
List<MapKeyResult.UnmappedKey<K>> unmappedKeys = new ArrayList<MapKeyResult.UnmappedKey<K>>();
for (final K key : keys) {
int partitionId;
try {
partitionId = accessor.getPartitionId(key.toString());
} catch (PartitionAccessException e) {
unmappedKeys.add(new MapKeyResult.UnmappedKey<K>(key, MapKeyResult.ErrorType.FAIL_TO_FIND_PARTITION));
continue;
}
Set<K> set = partitionSet.get(partitionId);
if (set == null) {
set = new HashSet<K>();
partitionSet.put(partitionId, set);
}
set.add(key);
}
// then we find the ring for each partition and create a map of Ring<URI> to Set<K>
final Map<Ring<URI>, Collection<K>> ringMap = new IdentityHashMap<Ring<URI>, Collection<K>>(partitionSet.size() * 2);
for (Map.Entry<Integer, Set<K>> entry : partitionSet.entrySet()) {
int partitionId = entry.getKey();
List<TrackerClient> clients = getPotentialClients(serviceName, service, uris, pair.getScheme(), partitionId);
Ring<URI> ring = pair.getStrategy().getRing(uriItem.getVersion(), partitionId, clients);
// make sure the same ring is not reused for another partition
Object oldValue = ringMap.put(ring, entry.getValue());
assert (oldValue == null);
}
return new MapKeyResult<Ring<URI>, K>(ringMap, unmappedKeys);
} else {
throw new ServiceUnavailableException(serviceName, "Unable to find a load balancer strategy");
}
}
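A typical caller uses getRings to group its keys by partition ring, routes each group through its ring, and handles unmapped keys separately. A minimal sketch, assuming a SimpleLoadBalancer instance and an illustrative d2 URI and key set; Ring.get takes an int hash, and hashing with key.hashCode() here is a simplification:

// Hypothetical caller of SimpleLoadBalancer.getRings; the d2 URI and keys are illustrative.
void routeByPartition(SimpleLoadBalancer balancer) throws ServiceUnavailableException {
    URI serviceUri = URI.create("d2://foo");
    List<String> keys = Arrays.asList("7", "42", "176");
    MapKeyResult<Ring<URI>, String> result = balancer.getRings(serviceUri, keys);
    // each ring corresponds to one partition; route each key subset through its ring
    for (Map.Entry<Ring<URI>, Collection<String>> entry : result.getMapResult().entrySet()) {
        Ring<URI> ring = entry.getKey();
        for (String key : entry.getValue()) {
            URI host = ring.get(key.hashCode());
            // issue the request for this key against 'host'
        }
    }
    // keys whose partition lookup failed with PartitionAccessException land here
    for (MapKeyResult.UnmappedKey<String> unmapped : result.getUnmappedKeys()) {
        // log or retry; their error type is FAIL_TO_FIND_PARTITION
    }
}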
Use of com.linkedin.d2.balancer.util.partitions.PartitionAccessException in project rest.li by LinkedIn.
The class TestD2Config, method testSingleClusterRangePartitions.
// preliminary test for a partitioned cluster
@Test
public static void testSingleClusterRangePartitions() throws IOException, InterruptedException, URISyntaxException, Exception {
@SuppressWarnings("serial") final Map<String, List<String>> clustersData = new HashMap<String, List<String>>() {
{
put("partitioned-cluster", Arrays.asList(new String[] { "partitioned-service-1", "partitioned-service-2" }));
}
};
final Map<String, Object> partitionProperties = new HashMap<String, Object>();
Map<String, Object> rangeBased = new HashMap<String, Object>();
rangeBased.put("partitionKeyRegex", "\\bid\\b=(\\d+)");
rangeBased.put("keyRangeStart", "0");
rangeBased.put("partitionCount", "10");
rangeBased.put("partitionSize", "100");
rangeBased.put("partitionType", "RANGE");
partitionProperties.put("partitionProperties", rangeBased);
D2ConfigTestUtil d2Conf = new D2ConfigTestUtil(clustersData, partitionProperties);
assertEquals(d2Conf.runDiscovery(_zkHosts), 0);
verifyPartitionProperties("partitioned-cluster", partitionProperties);
final ClusterProperties clusterprops = getClusterProperties(_zkclient, "partitioned-cluster");
final PartitionAccessor accessor = PartitionAccessorFactory.getPartitionAccessor(clusterprops.getPartitionProperties());
try {
accessor.getPartitionId(-1 + "");
fail("Exception expected");
} catch (PartitionAccessException e) {
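// expected: -1 is below keyRangeStart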
}
try {
accessor.getPartitionId(1000 + "");
fail("Exception expected");
} catch (PartitionAccessException e) {
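// expected: 1000 is beyond partitionCount * partitionSize (valid keys are 0..999)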
}
assertEquals(0, accessor.getPartitionId(0 + ""));
assertEquals(0, accessor.getPartitionId(99 + ""));
assertEquals(1, accessor.getPartitionId(176 + ""));
assertEquals(8, accessor.getPartitionId(833 + ""));
final String legalUri1 = "/profiles?field=position&id=100";
final String legalUri2 = "/profiles?wid=99&id=176&randid=301";
final String illegalUri1 = "/profiles?wid=99";
final String illegalUri2 = "/profiles?id=1000000000000000000000000000000000000000000000111111111";
try {
accessor.getPartitionId(URI.create(illegalUri1));
fail("Exception expected");
} catch (PartitionAccessException e) {
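// expected: the URI has no id parameter matching the partition key regex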
}
try {
accessor.getPartitionId(URI.create(illegalUri2));
fail("Exception expected");
} catch (PartitionAccessException e) {
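// expected: the id value is too large to parse as a key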
}
assertEquals(1, accessor.getPartitionId(URI.create(legalUri1)));
assertEquals(1, accessor.getPartitionId(URI.create(legalUri2)));
// Start echo servers on the partitioned cluster
Map<Integer, Double> serverConfig1 = new HashMap<Integer, Double>();
serverConfig1.put(0, 0.5d);
serverConfig1.put(3, 0.5d);
Map<Integer, Double> serverConfig2 = new HashMap<Integer, Double>();
serverConfig2.put(0, 0.25d);
serverConfig2.put(1, 0.5d);
serverConfig2.put(2, 0.5d);
final int echoServerPort1 = 2346;
final int echoServerPort2 = 2347;
_echoServerList.add(startEchoServer(echoServerPort1, "partitioned-cluster", serverConfig1));
_echoServerList.add(startEchoServer(echoServerPort2, "partitioned-cluster", serverConfig2));
Map<URI, Map<Integer, Double>> partitionWeights = new HashMap<URI, Map<Integer, Double>>();
partitionWeights.put(URI.create("http://127.0.0.1:" + echoServerPort1 + "/partitioned-cluster"), serverConfig1);
partitionWeights.put(URI.create("http://127.0.0.1:" + echoServerPort2 + "/partitioned-cluster"), serverConfig2);
verifyPartitionedUriProperties("partitioned-cluster", partitionWeights);
}
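The expected partition ids above follow directly from range arithmetic: with keyRangeStart = 0 and partitionSize = 100, partitionId = (key - keyRangeStart) / partitionSize, so 99 maps to partition 0, 176 to partition 1, and 833 to partition 8, while -1 and 1000 fall outside the 10 * 100 key range and throw. A minimal re-derivation of that mapping, sketching the semantics the assertions imply rather than the rest.li implementation (the message constructor for PartitionAccessException is assumed):

// Range-based mapping implied by the assertions above: valid while
// keyRangeStart <= key < keyRangeStart + partitionCount * partitionSize.
static int rangePartitionId(long key, long keyRangeStart, long partitionSize, int partitionCount) throws PartitionAccessException {
    long rangeEnd = keyRangeStart + partitionSize * partitionCount;
    if (key < keyRangeStart || key >= rangeEnd) {
        // mirrors the out-of-range failures the test expects for -1 and 1000
        throw new PartitionAccessException("key out of range: " + key);
    }
    return (int) ((key - keyRangeStart) / partitionSize);
}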
Use of com.linkedin.d2.balancer.util.partitions.PartitionAccessException in project rest.li by LinkedIn.
The class SimpleLoadBalancerTest, method testLoadBalancerWithPartitionsSmoke.
// load balancer working with a partitioned cluster
@Test(groups = { "small", "back-end" })
public void testLoadBalancerWithPartitionsSmoke() throws URISyntaxException, ServiceUnavailableException, InterruptedException, ExecutionException {
for (int tryAgain = 0; tryAgain < 12; ++tryAgain) {
Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories = new HashMap<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>>();
Map<String, TransportClientFactory> clientFactories = new HashMap<String, TransportClientFactory>();
List<String> prioritizedSchemes = new ArrayList<String>();
MockStore<ServiceProperties> serviceRegistry = new MockStore<ServiceProperties>();
MockStore<ClusterProperties> clusterRegistry = new MockStore<ClusterProperties>();
MockStore<UriProperties> uriRegistry = new MockStore<UriProperties>();
ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
clientFactories.put("http", new DoNothingClientFactory());
SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriRegistry, clusterRegistry, serviceRegistry, clientFactories, loadBalancerStrategyFactories);
SimpleLoadBalancer loadBalancer = new SimpleLoadBalancer(state, 5, TimeUnit.SECONDS);
FutureCallback<None> balancerCallback = new FutureCallback<None>();
loadBalancer.start(balancerCallback);
balancerCallback.get();
URI uri1 = URI.create("http://test.qa1.com:1234");
URI uri2 = URI.create("http://test.qa2.com:2345");
URI uri3 = URI.create("http://test.qa3.com:6789");
Map<URI, Double> uris = new HashMap<URI, Double>();
uris.put(uri1, 1d);
uris.put(uri2, 1d);
uris.put(uri3, 1d);
Map<URI, Map<Integer, PartitionData>> partitionDesc = new HashMap<URI, Map<Integer, PartitionData>>();
Map<Integer, PartitionData> server1 = new HashMap<Integer, PartitionData>();
server1.put(0, new PartitionData(1d));
server1.put(1, new PartitionData(1d));
Map<Integer, PartitionData> server2 = new HashMap<Integer, PartitionData>();
server2.put(0, new PartitionData(1d));
Map<Integer, PartitionData> server3 = new HashMap<Integer, PartitionData>();
server3.put(1, new PartitionData(1d));
partitionDesc.put(uri1, server1);
partitionDesc.put(uri2, server2);
partitionDesc.put(uri3, server3);
prioritizedSchemes.add("http");
int partitionMethod = tryAgain % 4;
switch(partitionMethod) {
case 0:
clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null, new HashMap<String, String>(), new HashSet<URI>(), new RangeBasedPartitionProperties("id=(\\d+)", 0, 50, 2)));
break;
case 1:
clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null, new HashMap<String, String>(), new HashSet<URI>(), new HashBasedPartitionProperties("id=(\\d+)", 2, HashBasedPartitionProperties.HashAlgorithm.valueOf("MODULO"))));
break;
case 2:
clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null, new HashMap<String, String>(), new HashSet<URI>(), new HashBasedPartitionProperties("id=(\\d+)", 2, HashBasedPartitionProperties.HashAlgorithm.valueOf("MD5"))));
break;
case 3:
// test getRings with a gap: here, no server serves partition 2
clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", null, new HashMap<String, String>(), new HashSet<URI>(), new RangeBasedPartitionProperties("id=(\\d+)", 0, 50, 4)));
server3.put(3, new PartitionData(1d));
partitionDesc.put(uri3, server3);
break;
default:
break;
}
serviceRegistry.put("foo", new ServiceProperties("foo", "cluster-1", "/foo", Arrays.asList("degrader"), Collections.<String, Object>emptyMap(), null, null, prioritizedSchemes, null));
uriRegistry.put("cluster-1", new UriProperties("cluster-1", partitionDesc));
if (partitionMethod == 3) {
Map<Integer, Ring<URI>> ringMap = loadBalancer.getRings(URI.create("d2://foo"));
assertEquals(ringMap.size(), 4);
// the ring for partition 2 should be empty
assertEquals(ringMap.get(2).toString(), new ConsistentHashRing<URI>(Collections.emptyList()).toString());
continue;
}
URI expectedUri1 = URI.create("http://test.qa1.com:1234/foo");
URI expectedUri2 = URI.create("http://test.qa2.com:2345/foo");
URI expectedUri3 = URI.create("http://test.qa3.com:6789/foo");
Set<URI> expectedUris = new HashSet<URI>();
expectedUris.add(expectedUri1);
expectedUris.add(expectedUri2);
expectedUris.add(expectedUri3);
for (int i = 0; i < 1000; ++i) {
int ii = i % 100;
RewriteClient client = (RewriteClient) loadBalancer.getClient(new URIRequest("d2://foo/id=" + ii), new RequestContext());
String clientUri = client.getUri().toString();
HashFunction<String[]> hashFunction = null;
String[] str = new String[1];
// test the KeyMapper target host hint: the request always goes to the hinted target host, regardless of the key in the d2 URI and of whether partitioning is hash-based or range-based
RequestContext requestContextWithHint = new RequestContext();
KeyMapper.TargetHostHints.setRequestContextTargetHost(requestContextWithHint, uri1);
RewriteClient hintedClient1 = (RewriteClient) loadBalancer.getClient(new URIRequest("d2://foo/id=" + ii), requestContextWithHint);
String hintedUri1 = hintedClient1.getUri().toString();
Assert.assertEquals(hintedUri1, uri1.toString() + "/foo");
RewriteClient hintedClient2 = (RewriteClient) loadBalancer.getClient(new URIRequest("d2://foo/action=purge-all"), requestContextWithHint);
String hintedUri2 = hintedClient2.getUri().toString();
Assert.assertEquals(hintedUri2, uri1.toString() + "/foo");
if (partitionMethod == 2) {
hashFunction = new MD5Hash();
}
for (URI uri : expectedUris) {
if (clientUri.contains(uri.toString())) {
// check if only key belonging to partition 0 gets uri2
if (uri.equals(uri2)) {
if (partitionMethod == 0) {
assertTrue(ii < 50);
} else if (partitionMethod == 1) {
assertTrue(ii % 2 == 0);
} else {
str[0] = ii + "";
assertTrue(hashFunction.hash(str) % 2 == 0);
}
}
// check if only key belonging to partition 1 gets uri3
if (uri.equals(uri3)) {
if (partitionMethod == 0) {
assertTrue(ii >= 50);
} else if (partitionMethod == 1) {
assertTrue(ii % 2 == 1);
} else {
str[0] = ii + "";
assertTrue(hashFunction.hash(str) % 2 == 1);
}
}
}
}
}
// two rings for two partitions
Map<Integer, Ring<URI>> ringMap = loadBalancer.getRings(URI.create("d2://foo"));
assertEquals(ringMap.size(), 2);
if (partitionMethod != 2) {
Set<String> keys = new HashSet<String>();
for (int j = 0; j < 50; j++) {
if (partitionMethod == 0) {
keys.add(j + "");
} else {
keys.add(j * 2 + "");
}
}
// if it is range-based partitioning, all keys from 0 to 49 belong to partition 0 according to the range definition
// if it is modulo-based partitioning, all even keys belong to partition 0 because the partition count is 2
// either way, the keys map only to partition 0
MapKeyResult<Ring<URI>, String> mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
Map<Ring<URI>, Collection<String>> keyToPartition = mapKeyResult.getMapResult();
assertEquals(keyToPartition.size(), 1);
for (Ring<URI> ring : keyToPartition.keySet()) {
assertEquals(ring, ringMap.get(0));
}
// now also from partition 1
keys.add("51");
mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
assertEquals(mapKeyResult.getMapResult().size(), 2);
assertEquals(mapKeyResult.getUnmappedKeys().size(), 0);
// now only from partition 1
keys.clear();
keys.add("99");
mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
keyToPartition = mapKeyResult.getMapResult();
assertEquals(keyToPartition.size(), 1);
assertEquals(mapKeyResult.getUnmappedKeys().size(), 0);
for (Ring<URI> ring : keyToPartition.keySet()) {
assertEquals(ring, ringMap.get(1));
}
keys.add("100");
mapKeyResult = loadBalancer.getRings(URI.create("d2://foo"), keys);
if (partitionMethod == 0) {
// key out of range
Collection<MapKeyResult.UnmappedKey<String>> unmappedKeys = mapKeyResult.getUnmappedKeys();
assertEquals(unmappedKeys.size(), 1);
}
try {
loadBalancer.getClient(new URIRequest("d2://foo/id=100"), new RequestContext());
if (partitionMethod == 0) {
// key out of range
fail("Should throw ServiceUnavailableException caused by PartitionAccessException");
}
} catch (ServiceUnavailableException e) {
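// expected only for range-based partitioning (partitionMethod == 0), where key 100 is out of range;
// with modulo partitioning the call succeeds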
}
}
final CountDownLatch latch = new CountDownLatch(1);
PropertyEventShutdownCallback callback = new PropertyEventShutdownCallback() {
@Override
public void done() {
latch.countDown();
}
};
state.shutdown(callback);
if (!latch.await(60, TimeUnit.SECONDS)) {
fail("unable to shutdown state");
}
executorService.shutdownNow();
assertTrue(executorService.isShutdown(), "ExecutorService should have shut down!");
}
}
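For reference, the partition membership the assertion loop above checks can be reproduced with simple arithmetic. A sketch of the three mappings exercised by the switch, assuming partition = hash % partitionCount with a non-negative hash, which is how the test's assertions treat MD5Hash:

// partitionMethod == 0: range-based, partition size 50 -> keys 0..49 map to partition 0
static int rangePartition(int key) { return key / 50; }

// partitionMethod == 1: modulo hashing -> even keys map to partition 0
static int moduloPartition(int key) { return key % 2; }

// partitionMethod == 2: MD5 hashing over the stringified key, as in the test
static int md5Partition(int key) {
    HashFunction<String[]> md5 = new MD5Hash();
    return md5.hash(new String[] { key + "" }) % 2;
}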