Use of com.linkedin.d2.balancer.util.partitions.PartitionAccessor in project rest.li by LinkedIn.
The class SimpleLoadBalancer, method chooseTrackerClient.
private TrackerClient chooseTrackerClient(Request request, RequestContext requestContext, String serviceName, String clusterName, ClusterProperties cluster, LoadBalancerStateItem<UriProperties> uriItem, UriProperties uris, List<LoadBalancerState.SchemeStrategyPair> orderedStrategies, ServiceProperties serviceProperties) throws ServiceUnavailableException {
// Now try to find a tracker client for the URI.
TrackerClient trackerClient = null;
URI targetHost = KeyMapper.TargetHostHints.getRequestContextTargetHost(requestContext);
int partitionId = -1;
URI requestUri = request.getURI();
if (targetHost == null) {
PartitionAccessor accessor = getPartitionAccessor(serviceName, clusterName);
try {
partitionId = accessor.getPartitionId(requestUri);
} catch (PartitionAccessException e) {
die(serviceName, "Error in finding the partition for URI: " + requestUri + ", " + e.getMessage());
}
} else {
// This is the case of scatter/gather or search, where the target host may be chosen to be responsible for
// more than one partition (the target host was picked from a consistent hash ring, so load balancing is already in effect).
// We randomly pick one partition to check for call dropping.
// This is done for two reasons:
// 1. Currently there is no way to know which subset of partitions the target host was chosen for
// if it is serving more than one partition. This could be added, but it requires changing public interfaces (KeyMapper) so that
// more hints can be added to the request context for the partitions concerned.
// 2. More importantly, there is no good way to check for call dropping even if the above problem is solved.
// For example, if a target host is chosen for partitions 1, 5, and 7, with call drop rates of 0, 0.2, and 0.4 respectively,
// a reasonable way to proceed would be to use the highest drop rate and do the check once for the target host,
// but currently the check can only be done per partition and only with a boolean result (no access to the drop rate).
// The partition to check is picked at random to be conservative:
// e.g. in the above example, we don't want to always use the drop rate of partition 1.
Map<Integer, PartitionData> partitionDataMap = uris.getPartitionDataMap(targetHost);
if (partitionDataMap == null || partitionDataMap.isEmpty()) {
die(serviceName, "There is no partition data for server host: " + targetHost + ". URI: " + requestUri);
}
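// Pick a uniformly random partition id by advancing the key-set iterator a random number of steps.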
Set<Integer> partitions = partitionDataMap.keySet();
Iterator<Integer> iterator = partitions.iterator();
int index = _random.nextInt(partitions.size());
for (int i = 0; i <= index; i++) {
partitionId = iterator.next();
}
}
List<TrackerClient> clientsToLoadBalance = null;
for (LoadBalancerState.SchemeStrategyPair pair : orderedStrategies) {
LoadBalancerStrategy strategy = pair.getStrategy();
String scheme = pair.getScheme();
clientsToLoadBalance = getPotentialClients(serviceName, serviceProperties, uris, scheme, partitionId);
trackerClient = strategy.getTrackerClient(request, requestContext, uriItem.getVersion(), partitionId, clientsToLoadBalance);
debug(_log, "load balancer strategy for ", serviceName, " returned: ", trackerClient);
// break as soon as we find an available cluster client
if (trackerClient != null) {
break;
}
}
if (trackerClient == null) {
if (clientsToLoadBalance == null || clientsToLoadBalance.isEmpty()) {
die(serviceName, "Service: " + serviceName + " unable to find a host to route the request" + " in partition: " + partitionId + " cluster: " + clusterName + ". Check what cluster your servers are announcing to.");
} else {
die(serviceName, "Service: " + serviceName + " is in a bad state (high latency/high error). " + "Dropping request. Cluster: " + clusterName + ", partitionId:" + partitionId + " (" + clientsToLoadBalance.size() + " hosts)");
}
}
return trackerClient;
}
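The PartitionAccessor contract exercised above has three entry points: getPartitionId(URI) for routing a request, getPartitionId(String) for mapping a raw key, and getMaxPartitionId(). The sketch below illustrates that contract with a hash-modulo scheme; the class name and bucketing strategy are illustrative assumptions, not one of rest.li's actual accessor implementations, which are configured from cluster properties.

// Hypothetical ModuloPartitionAccessor: buckets keys by hash modulo a fixed
// partition count. Only a sketch of the contract used above.
public class ModuloPartitionAccessor implements PartitionAccessor {
    private final int _partitionCount;

    public ModuloPartitionAccessor(int partitionCount) {
        _partitionCount = partitionCount;
    }

    @Override
    public int getPartitionId(URI uri) throws PartitionAccessException {
        // For illustration, treat the full URI string as the partition key.
        return getPartitionId(uri.toString());
    }

    @Override
    public int getPartitionId(String key) throws PartitionAccessException {
        if (key == null) {
            throw new PartitionAccessException("null partition key");
        }
        // Mask the sign bit rather than using Math.abs, which stays negative for Integer.MIN_VALUE.
        return (key.hashCode() & Integer.MAX_VALUE) % _partitionCount;
    }

    @Override
    public int getMaxPartitionId() {
        return _partitionCount - 1;
    }
}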
Use of com.linkedin.d2.balancer.util.partitions.PartitionAccessor in project rest.li by LinkedIn.
The class SimpleLoadBalancer, method getPartitionInformation.
/**
* If given a collection of keys, this method maps the keys to partitions and
* returns the servers that belong to each partition, up to limitHostPerPartition.
*
* If no keys are specified, the method returns hosts in all partitions.
*
* @param serviceUri for example d2://articles
* @param keys all the keys we want to find the partitions for
* @param limitHostPerPartition the maximum number of hosts to return per partition. Must be larger than 0.
* @param hash used to create the iterator over the hosts in the hash ring
* @return the hosts in the requested partitions. See {@link com.linkedin.d2.balancer.util.HostToKeyMapper} for details.
* @throws ServiceUnavailableException
*/
@Override
public <K> HostToKeyMapper<K> getPartitionInformation(URI serviceUri, Collection<K> keys, int limitHostPerPartition, int hash) throws ServiceUnavailableException {
if (limitHostPerPartition <= 0) {
throw new IllegalArgumentException("limitHostPerPartition cannot be 0 or less");
}
ServiceProperties service = listenToServiceAndCluster(serviceUri);
String serviceName = service.getServiceName();
String clusterName = service.getClusterName();
ClusterProperties cluster = getClusterProperties(serviceName, clusterName);
LoadBalancerStateItem<UriProperties> uriItem = getUriItem(serviceName, clusterName, cluster);
UriProperties uris = uriItem.getProperty();
List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = _state.getStrategiesForService(serviceName, service.getPrioritizedSchemes());
Map<Integer, Integer> partitionWithoutEnoughHost = new HashMap<Integer, Integer>();
if (!orderedStrategies.isEmpty()) {
// get the partitionId -> keys mapping
final PartitionAccessor accessor = getPartitionAccessor(serviceName, clusterName);
int maxPartitionId = accessor.getMaxPartitionId();
List<K> unmappedKeys = new ArrayList<K>();
Map<Integer, Set<K>> partitionSet = getPartitionSet(keys, accessor, unmappedKeys);
final LoadBalancerState.SchemeStrategyPair pair = orderedStrategies.get(0);
//get the partitionId -> host URIs list
Map<Integer, KeysAndHosts<K>> partitionDataMap = new HashMap<Integer, KeysAndHosts<K>>();
for (Integer partitionId : partitionSet.keySet()) {
Set<URI> possibleUris = uris.getUriBySchemeAndPartition(pair.getScheme(), partitionId);
List<TrackerClient> trackerClients = getPotentialClients(serviceName, service, possibleUris);
int size = Math.min(trackerClients.size(), limitHostPerPartition);
List<URI> rankedUri = new ArrayList<URI>(size);
Ring<URI> ring = pair.getStrategy().getRing(uriItem.getVersion(), partitionId, trackerClients);
Iterator<URI> iterator = ring.getIterator(hash);
while (iterator.hasNext() && rankedUri.size() < size) {
URI uri = iterator.next();
if (!rankedUri.contains(uri)) {
rankedUri.add(uri);
}
}
if (rankedUri.size() < limitHostPerPartition) {
partitionWithoutEnoughHost.put(partitionId, limitHostPerPartition - rankedUri.size());
}
KeysAndHosts<K> keysAndHosts = new KeysAndHosts<K>(partitionSet.get(partitionId), rankedUri);
partitionDataMap.put(partitionId, keysAndHosts);
}
return new HostToKeyMapper<K>(unmappedKeys, partitionDataMap, limitHostPerPartition, maxPartitionId + 1, partitionWithoutEnoughHost);
} else {
throw new ServiceUnavailableException(serviceName, "Unable to find a load balancer strategy");
}
}
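A caller usually reaches this method through KeyMapper, but it can also be invoked directly. The fragment below is an illustrative sketch: the d2 URI, key values, and limits are assumptions, and balancer stands for a SimpleLoadBalancer like the ones constructed in the tests further down.

// Illustrative usage (names and values are assumptions, not from the source above).
URI serviceUri = URI.create("d2://articles");
List<Long> keys = Arrays.asList(1L, 42L, 1000L);
HostToKeyMapper<Long> mapper = balancer.getPartitionInformation(serviceUri, keys, 2, 0);
for (Map.Entry<Integer, KeysAndHosts<Long>> e : mapper.getPartitionInfoMap().entrySet()) {
    // Each entry pairs a partition id with the keys routed to it and up to
    // limitHostPerPartition ranked hosts taken from that partition's hash ring.
    System.out.println("partition " + e.getKey()
        + ": keys=" + e.getValue().getKeys()
        + ", hosts=" + e.getValue().getHosts());
}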
Use of com.linkedin.d2.balancer.util.partitions.PartitionAccessor in project rest.li by LinkedIn.
The class SimpleLoadBalancer, method getRings.
@Override
public <K> MapKeyResult<Ring<URI>, K> getRings(URI serviceUri, Iterable<K> keys) throws ServiceUnavailableException {
ServiceProperties service = listenToServiceAndCluster(serviceUri);
String serviceName = service.getServiceName();
String clusterName = service.getClusterName();
ClusterProperties cluster = getClusterProperties(serviceName, clusterName);
LoadBalancerStateItem<UriProperties> uriItem = getUriItem(serviceName, clusterName, cluster);
UriProperties uris = uriItem.getProperty();
List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = _state.getStrategiesForService(serviceName, service.getPrioritizedSchemes());
if (!orderedStrategies.isEmpty()) {
LoadBalancerState.SchemeStrategyPair pair = orderedStrategies.get(0);
PartitionAccessor accessor = getPartitionAccessor(serviceName, clusterName);
// first distribute keys to partitions
Map<Integer, Set<K>> partitionSet = new HashMap<Integer, Set<K>>();
List<MapKeyResult.UnmappedKey<K>> unmappedKeys = new ArrayList<MapKeyResult.UnmappedKey<K>>();
for (final K key : keys) {
int partitionId;
try {
partitionId = accessor.getPartitionId(key.toString());
} catch (PartitionAccessException e) {
unmappedKeys.add(new MapKeyResult.UnmappedKey<K>(key, MapKeyResult.ErrorType.FAIL_TO_FIND_PARTITION));
continue;
}
Set<K> set = partitionSet.get(partitionId);
if (set == null) {
set = new HashSet<K>();
partitionSet.put(partitionId, set);
}
set.add(key);
}
// then we find the ring for each partition and create a map of Ring<URI> to Set<K>
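// An IdentityHashMap keys on ring instance identity, so two partitions whose rings
// happened to compare equal would still get separate entries; the assert below
// verifies that no ring instance is reused across partitions.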
final Map<Ring<URI>, Collection<K>> ringMap = new IdentityHashMap<Ring<URI>, Collection<K>>(partitionSet.size() * 2);
for (Map.Entry<Integer, Set<K>> entry : partitionSet.entrySet()) {
int partitionId = entry.getKey();
List<TrackerClient> clients = getPotentialClients(serviceName, service, uris, pair.getScheme(), partitionId);
Ring<URI> ring = pair.getStrategy().getRing(uriItem.getVersion(), partitionId, clients);
// make sure the same ring is not used for another partition
Object oldValue = ringMap.put(ring, entry.getValue());
assert (oldValue == null);
}
return new MapKeyResult<Ring<URI>, K>(ringMap, unmappedKeys);
} else {
throw new ServiceUnavailableException(serviceName, "Unable to find a load balancer strategy");
}
}
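The returned MapKeyResult pairs each partition's ring with the keys that hash to it, so a caller can resolve a host per key. A hedged sketch follows; the d2 URI and the use of key.hashCode() as the ring key are illustrative assumptions.

// Illustrative consumption of getRings (assumptions noted above).
MapKeyResult<Ring<URI>, Long> result = balancer.getRings(URI.create("d2://articles"), keys);
for (Map.Entry<Ring<URI>, Collection<Long>> entry : result.getMapResult().entrySet()) {
    Ring<URI> ring = entry.getKey();
    for (Long key : entry.getValue()) {
        // Ring.get(int) walks the consistent hash ring to a host for this key.
        URI host = ring.get(key.hashCode());
        System.out.println(key + " -> " + host);
    }
}
for (MapKeyResult.UnmappedKey<Long> unmapped : result.getUnmappedKeys()) {
    // Keys whose partition could not be determined are reported with an error type.
    System.out.println("unmapped: " + unmapped.getKey() + " (" + unmapped.getErrorType() + ")");
}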
Use of com.linkedin.d2.balancer.util.partitions.PartitionAccessor in project rest.li by LinkedIn.
The class RetryClientTest, method prepareLoadBalancer.
public SimpleLoadBalancer prepareLoadBalancer(List<String> uris) throws URISyntaxException {
String serviceName = "retryService";
String clusterName = "cluster";
String path = "";
String strategyName = "degrader";
// set up the partition data
Map<URI, Map<Integer, PartitionData>> partitionDescriptions = new HashMap<URI, Map<Integer, PartitionData>>();
for (String uri : uris) {
final URI foo = URI.create(uri);
Map<Integer, PartitionData> foo1Data = new HashMap<Integer, PartitionData>();
foo1Data.put(0, new PartitionData(1.0));
partitionDescriptions.put(foo, foo1Data);
}
DegraderLoadBalancerStrategyV3 strategy = new DegraderLoadBalancerStrategyV3(new DegraderLoadBalancerStrategyConfig(5000), serviceName, null);
List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = new ArrayList<LoadBalancerState.SchemeStrategyPair>();
orderedStrategies.add(new LoadBalancerState.SchemeStrategyPair("http", strategy));
PartitionAccessor accessor = new TestRetryPartitionAccessor();
SimpleLoadBalancer balancer = new SimpleLoadBalancer(new PartitionedLoadBalancerTestState(clusterName, serviceName, path, strategyName, partitionDescriptions, orderedStrategies, accessor));
return balancer;
}
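TestRetryPartitionAccessor itself is not shown in this excerpt. Given that every URI above is registered only under partition 0, a minimal implementation consistent with the test would map everything to partition 0; the sketch below is an assumption, not the actual class.

// Plausible sketch of TestRetryPartitionAccessor (the real class is not shown above):
// every URI and key lands in partition 0, matching the partitionDescriptions built above.
private static class TestRetryPartitionAccessor implements PartitionAccessor {
    @Override
    public int getPartitionId(URI uri) {
        return 0;
    }

    @Override
    public int getPartitionId(String key) {
        return 0;
    }

    @Override
    public int getMaxPartitionId() {
        return 0;
    }
}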
Use of com.linkedin.d2.balancer.util.partitions.PartitionAccessor in project rest.li by LinkedIn.
The class SimpleLoadBalancerTest, method testGetPartitionInfoOrdering.
/**
* This tests getPartitionInformation() when given a collection of keys (effectively a test for KeyMapper.mapKeysV3()).
*/
@Test
public void testGetPartitionInfoOrdering() throws Exception {
String serviceName = "articles";
String clusterName = "cluster";
String path = "path";
String strategyName = "degrader";
// setup 3 partitions. Partition 1 and Partition 2 both have server1 - server3. Partition 3 only has server1.
Map<URI, Map<Integer, PartitionData>> partitionDescriptions = new HashMap<URI, Map<Integer, PartitionData>>();
final URI server1 = new URI("http://foo1.com");
Map<Integer, PartitionData> server1Data = new HashMap<Integer, PartitionData>();
server1Data.put(1, new PartitionData(1.0));
server1Data.put(2, new PartitionData(1.0));
server1Data.put(3, new PartitionData(1.0));
partitionDescriptions.put(server1, server1Data);
final URI server2 = new URI("http://foo2.com");
Map<Integer, PartitionData> server2Data = new HashMap<Integer, PartitionData>();
server2Data.put(1, new PartitionData(1.0));
server2Data.put(2, new PartitionData(1.0));
partitionDescriptions.put(server2, server2Data);
final URI server3 = new URI("http://foo3.com");
Map<Integer, PartitionData> server3Data = new HashMap<Integer, PartitionData>();
server3Data.put(1, new PartitionData(1.0));
server3Data.put(2, new PartitionData(1.0));
partitionDescriptions.put(server3, server3Data);
// set up the strategy, which involves tweaking the hash ring to get partitionId -> URI host
List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = new ArrayList<LoadBalancerState.SchemeStrategyPair>();
LoadBalancerStrategy strategy = new TestLoadBalancerStrategy(partitionDescriptions);
orderedStrategies.add(new LoadBalancerState.SchemeStrategyPair("http", strategy));
// set up the partition accessor, which can only map keys 1 - 3
PartitionAccessor accessor = new TestPartitionAccessor();
URI serviceURI = new URI("d2://" + serviceName);
SimpleLoadBalancer balancer = new SimpleLoadBalancer(new PartitionedLoadBalancerTestState(clusterName, serviceName, path, strategyName, partitionDescriptions, orderedStrategies, accessor));
List<Integer> keys = new ArrayList<Integer>();
keys.add(1);
keys.add(2);
keys.add(3);
keys.add(123);
HostToKeyMapper<Integer> result = balancer.getPartitionInformation(serviceURI, keys, 3, 123);
Assert.assertEquals(result.getLimitHostPerPartition(), 3);
Assert.assertEquals(1, result.getUnmappedKeys().size());
Assert.assertEquals(123, (int) result.getUnmappedKeys().iterator().next().getKey());
//partition 0 should be null
Assert.assertNull(result.getPartitionInfoMap().get(0));
// results for partition 1 should contain server1, server2 and server3
KeysAndHosts<Integer> keysAndHosts1 = result.getPartitionInfoMap().get(1);
Assert.assertTrue(keysAndHosts1.getKeys().size() == 1);
Assert.assertTrue(keysAndHosts1.getKeys().iterator().next() == 1);
List<URI> ordering1 = keysAndHosts1.getHosts();
// results for partition 2 should be the same as partition1.
KeysAndHosts<Integer> keysAndHosts2 = result.getPartitionInfoMap().get(2);
Assert.assertTrue(keysAndHosts2.getKeys().size() == 1);
Assert.assertTrue(keysAndHosts2.getKeys().iterator().next() == 2);
List<URI> ordering2 = keysAndHosts2.getHosts();
//for partition 3
KeysAndHosts<Integer> keysAndHosts3 = result.getPartitionInfoMap().get(3);
Assert.assertTrue(keysAndHosts3.getKeys().size() == 1);
Assert.assertTrue(keysAndHosts3.getKeys().iterator().next() == 3);
List<URI> ordering3 = keysAndHosts3.getHosts();
// Just compare the size and contents of the list, not the ordering.
Assert.assertTrue(ordering1.size() == 3);
List<URI> allServers = new ArrayList<>();
allServers.add(server1);
allServers.add(server2);
allServers.add(server3);
Assert.assertTrue(ordering1.containsAll(allServers));
Assert.assertTrue(ordering2.containsAll(allServers));
Assert.assertEquals(ordering1, ordering2);
Assert.assertEquals(ordering3.get(0), server1);
Assert.assertTrue(result.getPartitionsWithoutEnoughHosts().containsKey(3));
Assert.assertEquals((int) result.getPartitionsWithoutEnoughHosts().get(3), 2);
}
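TestPartitionAccessor is likewise not shown. A sketch consistent with the comment above ("can only map keys 1 - 3") and with key 123 ending up unmapped might look like this; it is an assumption, not the actual test class.

// Plausible sketch of TestPartitionAccessor (the real class is not shown above):
// keys "1"-"3" map to partitions 1-3; anything else fails, which is why key 123
// shows up in getUnmappedKeys() in the test.
private static class TestPartitionAccessor implements PartitionAccessor {
    @Override
    public int getPartitionId(URI uri) throws PartitionAccessException {
        throw new PartitionAccessException("no partition id for URIs in this test");
    }

    @Override
    public int getPartitionId(String key) throws PartitionAccessException {
        final int id;
        try {
            id = Integer.parseInt(key);
        } catch (NumberFormatException e) {
            throw new PartitionAccessException("non-numeric key: " + key);
        }
        if (id < 1 || id > 3) {
            // Keys outside 1 - 3 (e.g. 123 in the test) become unmapped keys.
            throw new PartitionAccessException("key out of range: " + key);
        }
        return id;
    }

    @Override
    public int getMaxPartitionId() {
        return 3;
    }
}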