use of com.linkedin.d2.balancer.properties.ServiceProperties in project rest.li by linkedin.
the class SimpleLoadBalancer method chooseTrackerClient.
private TrackerClient chooseTrackerClient(Request request, RequestContext requestContext, String serviceName, String clusterName, ClusterProperties cluster, LoadBalancerStateItem<UriProperties> uriItem, UriProperties uris, List<LoadBalancerState.SchemeStrategyPair> orderedStrategies, ServiceProperties serviceProperties) throws ServiceUnavailableException {
// now try and find a tracker client for the uri
TrackerClient trackerClient = null;
URI targetHost = KeyMapper.TargetHostHints.getRequestContextTargetHost(requestContext);
int partitionId = -1;
URI requestUri = request.getURI();
if (targetHost == null) {
PartitionAccessor accessor = getPartitionAccessor(serviceName, clusterName);
try {
partitionId = accessor.getPartitionId(requestUri);
} catch (PartitionAccessException e) {
die(serviceName, "Error in finding the partition for URI: " + requestUri + ", " + e.getMessage());
}
} else {
// This is the case of scatter/gather or search, where the target host may be chosen to be responsible for
// more than one partition (the target host was picked from a consistent hash ring, so load balancing is already in effect).
// We randomly pick one partition to check for call dropping.
// This is done for two reasons:
// 1. Currently there is no way to know for which subset of partitions the target host was chosen
// if it is serving more than one partition. This could be added, but it requires changing public interfaces (KeyMapper)
// so that more hints can be added to the request context for the partitions concerned.
// 2. More importantly, there is no good way to check for call dropping even if the above problem were solved.
// For example, if a target host is chosen for partitions 1, 5, 7, with call drop rates of 0, 0.2, 0.4 respectively,
// a reasonable way to proceed would be to use the highest drop rate and do the check once for the target host,
// but currently the check can only be done per partition and only with a boolean result (no access to the drop rate).
// The partition to check is picked at random to be conservative.
// E.g. in the above example, we don't want to always use the drop rate of partition 1.
Map<Integer, PartitionData> partitionDataMap = uris.getPartitionDataMap(targetHost);
if (partitionDataMap == null || partitionDataMap.isEmpty()) {
die(serviceName, "There is no partition data for server host: " + targetHost + ". URI: " + requestUri);
}
Set<Integer> partitions = partitionDataMap.keySet();
Iterator<Integer> iterator = partitions.iterator();
int index = _random.nextInt(partitions.size());
for (int i = 0; i <= index; i++) {
partitionId = iterator.next();
}
}
List<TrackerClient> clientsToLoadBalance = null;
for (LoadBalancerState.SchemeStrategyPair pair : orderedStrategies) {
LoadBalancerStrategy strategy = pair.getStrategy();
String scheme = pair.getScheme();
clientsToLoadBalance = getPotentialClients(serviceName, serviceProperties, uris, scheme, partitionId);
trackerClient = strategy.getTrackerClient(request, requestContext, uriItem.getVersion(), partitionId, clientsToLoadBalance);
debug(_log, "load balancer strategy for ", serviceName, " returned: ", trackerClient);
// break as soon as we find an available cluster client
if (trackerClient != null) {
break;
}
}
if (trackerClient == null) {
if (clientsToLoadBalance == null || clientsToLoadBalance.isEmpty()) {
die(serviceName, "Service: " + serviceName + " unable to find a host to route the request" + " in partition: " + partitionId + " cluster: " + clusterName + ". Check what cluster your servers are announcing to.");
} else {
die(serviceName, "Service: " + serviceName + " is in a bad state (high latency/high error). " + "Dropping request. Cluster: " + clusterName + ", partitionId:" + partitionId + " (" + clientsToLoadBalance.size() + " hosts)");
}
}
return trackerClient;
}
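The targetHost branch above is driven by a hint placed on the RequestContext before the request reaches the load balancer. Below is a minimal caller-side sketch of setting that hint; it assumes KeyMapper.TargetHostHints exposes a setter counterpart (setRequestContextTargetHost) to the getter used above, and the usual com.linkedin.d2/r2 package locations.

import java.net.URI;

import com.linkedin.d2.balancer.KeyMapper;
import com.linkedin.r2.message.RequestContext;

public class TargetHostHintExample {
    // Returns a RequestContext pinned to a specific host. chooseTrackerClient will then
    // skip deriving the partition from the request URI and instead pick, at random, one
    // of the partitions this host serves for the call-drop check (see the comment above).
    public static RequestContext contextPinnedTo(String hostUrl) {
        RequestContext requestContext = new RequestContext();
        // Assumed setter, mirroring KeyMapper.TargetHostHints.getRequestContextTargetHost.
        KeyMapper.TargetHostHints.setRequestContextTargetHost(requestContext, URI.create(hostUrl));
        return requestContext;
    }
}

A scatter/gather layer would typically create one such context per chosen host, one request per host.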
use of com.linkedin.d2.balancer.properties.ServiceProperties in project rest.li by linkedin.
the class SimpleLoadBalancer method getClient.
/**
* Given a Request, returns a TransportClient that can handle requests for the Request.
 *
* @param request
* A request whose URI is a URL of the format "d2://<servicename>/optional/path".
* @param requestContext context for this request
* @return A client that can be called to retrieve data for the URN.
* @throws ServiceUnavailableException
* If the load balancer can't figure out how to reach a service for the given
* URN, a ServiceUnavailableException will be thrown.
*/
@Override
public TransportClient getClient(Request request, RequestContext requestContext) throws ServiceUnavailableException {
TransportClient client;
URI uri = request.getURI();
debug(_log, "get client for uri: ", uri);
ServiceProperties service = listenToServiceAndCluster(uri);
String serviceName = service.getServiceName();
String clusterName = service.getClusterName();
ClusterProperties cluster = getClusterProperties(serviceName, clusterName);
// Check if we want to override the service URL and bypass choosing among the existing
// tracker clients. This is useful when the service we want is not announcing itself to
// the cluster, i.e. a private service for a set of clients.
URI targetService = LoadBalancerUtil.TargetHints.getRequestContextTargetService(requestContext);
if (targetService == null) {
LoadBalancerStateItem<UriProperties> uriItem = getUriItem(serviceName, clusterName, cluster);
UriProperties uris = uriItem.getProperty();
List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = _state.getStrategiesForService(serviceName, service.getPrioritizedSchemes());
TrackerClient trackerClient = chooseTrackerClient(request, requestContext, serviceName, clusterName, cluster, uriItem, uris, orderedStrategies, service);
String clusterAndServiceUriString = trackerClient.getUri() + service.getPath();
client = new RewriteClient(serviceName, URI.create(clusterAndServiceUriString), trackerClient);
_serviceAvailableStats.inc();
} else {
_log.debug("service hint found, using generic client for target: {}", targetService);
TransportClient transportClient = _state.getClient(serviceName, targetService.getScheme());
client = new RewriteClient(serviceName, targetService, transportClient);
}
return client;
}
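The targetService branch shows how tracker-client selection can be bypassed entirely. Below is a minimal sketch of setting that hint; it assumes LoadBalancerUtil.TargetHints has a setRequestContextTargetService counterpart to the getter used above.

import java.net.URI;

import com.linkedin.d2.balancer.util.LoadBalancerUtil;
import com.linkedin.r2.message.RequestContext;

public class TargetServiceHintExample {
    // With this hint set, getClient() does not consult UriProperties or any strategy;
    // it wraps a plain TransportClient for the hinted scheme in a RewriteClient.
    public static RequestContext contextForPrivateEndpoint(String endpointUrl) {
        RequestContext requestContext = new RequestContext();
        // Assumed setter, mirroring LoadBalancerUtil.TargetHints.getRequestContextTargetService.
        LoadBalancerUtil.TargetHints.setRequestContextTargetService(requestContext, URI.create(endpointUrl));
        return requestContext;
    }
}

This is useful when the desired service does not announce itself to the cluster, e.g. a private endpoint shared by a few clients.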
use of com.linkedin.d2.balancer.properties.ServiceProperties in project rest.li by linkedin.
the class SimpleLoadBalancer method getPartitionInformation.
/**
* If given a collection of keys, the method will map keys to partitions and
* return the servers that belong to each partition, up to limitHostPerPartition.
*
* If no keys are specified, the method will return hosts in all partitions.
*
* @param serviceUri for example d2://articles
* @param keys all the keys we want to find the partition for
* @param limitHostPerPartition the number of hosts that we should return for this partition. Must be larger than 0.
* @param hash this will be used to create Iterator for the hosts in the hash ring
* @return A mapping from the requested keys to ranked hosts in their partitions. See {@link com.linkedin.d2.balancer.util.HostToKeyMapper} for more details.
* @throws ServiceUnavailableException
*/
@Override
public <K> HostToKeyMapper<K> getPartitionInformation(URI serviceUri, Collection<K> keys, int limitHostPerPartition, int hash) throws ServiceUnavailableException {
if (limitHostPerPartition <= 0) {
throw new IllegalArgumentException("limitHostPerPartition cannot be 0 or less");
}
ServiceProperties service = listenToServiceAndCluster(serviceUri);
String serviceName = service.getServiceName();
String clusterName = service.getClusterName();
ClusterProperties cluster = getClusterProperties(serviceName, clusterName);
LoadBalancerStateItem<UriProperties> uriItem = getUriItem(serviceName, clusterName, cluster);
UriProperties uris = uriItem.getProperty();
List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = _state.getStrategiesForService(serviceName, service.getPrioritizedSchemes());
Map<Integer, Integer> partitionWithoutEnoughHost = new HashMap<Integer, Integer>();
if (!orderedStrategies.isEmpty()) {
// get the partitionId -> keys mapping
final PartitionAccessor accessor = getPartitionAccessor(serviceName, clusterName);
int maxPartitionId = accessor.getMaxPartitionId();
List<K> unmappedKeys = new ArrayList<K>();
Map<Integer, Set<K>> partitionSet = getPartitionSet(keys, accessor, unmappedKeys);
final LoadBalancerState.SchemeStrategyPair pair = orderedStrategies.get(0);
//get the partitionId -> host URIs list
Map<Integer, KeysAndHosts<K>> partitionDataMap = new HashMap<Integer, KeysAndHosts<K>>();
for (Integer partitionId : partitionSet.keySet()) {
Set<URI> possibleUris = uris.getUriBySchemeAndPartition(pair.getScheme(), partitionId);
List<TrackerClient> trackerClients = getPotentialClients(serviceName, service, possibleUris);
int size = trackerClients.size() <= limitHostPerPartition ? trackerClients.size() : limitHostPerPartition;
List<URI> rankedUri = new ArrayList<URI>(size);
Ring<URI> ring = pair.getStrategy().getRing(uriItem.getVersion(), partitionId, trackerClients);
Iterator<URI> iterator = ring.getIterator(hash);
while (iterator.hasNext() && rankedUri.size() < size) {
URI uri = iterator.next();
if (!rankedUri.contains(uri)) {
rankedUri.add(uri);
}
}
if (rankedUri.size() < limitHostPerPartition) {
partitionWithoutEnoughHost.put(partitionId, limitHostPerPartition - rankedUri.size());
}
KeysAndHosts<K> keysAndHosts = new KeysAndHosts<K>(partitionSet.get(partitionId), rankedUri);
partitionDataMap.put(partitionId, keysAndHosts);
}
return new HostToKeyMapper<K>(unmappedKeys, partitionDataMap, limitHostPerPartition, maxPartitionId + 1, partitionWithoutEnoughHost);
} else {
throw new ServiceUnavailableException(serviceName, "Unable to find a load balancer strategy");
}
}
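A caller-side sketch of this method is shown below. It assumes SimpleLoadBalancer lives in com.linkedin.d2.balancer.simple and that an already-started instance is available; the service URI and keys are purely illustrative.

import java.net.URI;
import java.util.Arrays;
import java.util.List;

import com.linkedin.d2.balancer.ServiceUnavailableException;
import com.linkedin.d2.balancer.simple.SimpleLoadBalancer;
import com.linkedin.d2.balancer.util.HostToKeyMapper;

public class PartitionInfoExample {
    public static HostToKeyMapper<Long> twoHostsPerKeyPartition(SimpleLoadBalancer balancer)
            throws ServiceUnavailableException {
        List<Long> keys = Arrays.asList(1L, 2L, 3L);
        // Ask for at most 2 ranked hosts per partition. The hash seed (0 here) fixes the
        // starting point of each ring iterator, so repeated calls with the same seed see
        // the hosts in the same order.
        return balancer.getPartitionInformation(URI.create("d2://articles"), keys, 2, 0);
    }
}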
use of com.linkedin.d2.balancer.properties.ServiceProperties in project rest.li by linkedin.
the class SimpleLoadBalancer method getRings.
@Override
public <K> MapKeyResult<Ring<URI>, K> getRings(URI serviceUri, Iterable<K> keys) throws ServiceUnavailableException {
ServiceProperties service = listenToServiceAndCluster(serviceUri);
String serviceName = service.getServiceName();
String clusterName = service.getClusterName();
ClusterProperties cluster = getClusterProperties(serviceName, clusterName);
LoadBalancerStateItem<UriProperties> uriItem = getUriItem(serviceName, clusterName, cluster);
UriProperties uris = uriItem.getProperty();
List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = _state.getStrategiesForService(serviceName, service.getPrioritizedSchemes());
if (!orderedStrategies.isEmpty()) {
LoadBalancerState.SchemeStrategyPair pair = orderedStrategies.get(0);
PartitionAccessor accessor = getPartitionAccessor(serviceName, clusterName);
// first distribute keys to partitions
Map<Integer, Set<K>> partitionSet = new HashMap<Integer, Set<K>>();
List<MapKeyResult.UnmappedKey<K>> unmappedKeys = new ArrayList<MapKeyResult.UnmappedKey<K>>();
for (final K key : keys) {
int partitionId;
try {
partitionId = accessor.getPartitionId(key.toString());
} catch (PartitionAccessException e) {
unmappedKeys.add(new MapKeyResult.UnmappedKey<K>(key, MapKeyResult.ErrorType.FAIL_TO_FIND_PARTITION));
continue;
}
Set<K> set = partitionSet.get(partitionId);
if (set == null) {
set = new HashSet<K>();
partitionSet.put(partitionId, set);
}
set.add(key);
}
// then we find the ring for each partition and create a map of Ring<URI> to Set<K>
final Map<Ring<URI>, Collection<K>> ringMap = new IdentityHashMap<Ring<URI>, Collection<K>>(partitionSet.size() * 2);
for (Map.Entry<Integer, Set<K>> entry : partitionSet.entrySet()) {
int partitionId = entry.getKey();
List<TrackerClient> clients = getPotentialClients(serviceName, service, uris, pair.getScheme(), partitionId);
Ring<URI> ring = pair.getStrategy().getRing(uriItem.getVersion(), partitionId, clients);
// make sure the same ring is not used in other partition
Object oldValue = ringMap.put(ring, entry.getValue());
assert (oldValue == null);
}
return new MapKeyResult<Ring<URI>, K>(ringMap, unmappedKeys);
} else {
throw new ServiceUnavailableException(serviceName, "Unable to find a load balancer strategy");
}
}
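The MapKeyResult returned above pairs each partition's ring with the keys that landed in that partition. Below is a minimal consumer sketch; it assumes MapKeyResult exposes getMapResult(), that Ring exposes get(int) for resolving a hash to a host, and the usual package locations — all treated as assumptions here.

import java.net.URI;
import java.util.Collection;
import java.util.Map;

import com.linkedin.d2.balancer.util.MapKeyResult;
import com.linkedin.d2.balancer.util.hashing.Ring;

public class RingLookupExample {
    public static <K> void printHostPerKey(MapKeyResult<Ring<URI>, K> result) {
        // Each entry pairs a per-partition hash ring with the keys mapped to that partition.
        for (Map.Entry<Ring<URI>, Collection<K>> entry : result.getMapResult().entrySet()) {
            Ring<URI> ring = entry.getKey();
            for (K key : entry.getValue()) {
                // Resolve the key's hash to a concrete host URI on this partition's ring.
                URI host = ring.get(key.hashCode());
                System.out.println(key + " -> " + host);
            }
        }
    }
}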
use of com.linkedin.d2.balancer.properties.ServiceProperties in project rest.li by linkedin.
the class SimpleLoadBalancerState method refreshTransportClientsPerService.
void refreshTransportClientsPerService(ServiceProperties serviceProperties) {
String serviceName = serviceProperties.getServiceName();
//create new TransportClients
Map<String, TransportClient> newTransportClients = createAndInsertTransportClientTo(serviceProperties);
// clients-by-scheme map is never edited, only replaced.
newTransportClients = Collections.unmodifiableMap(newTransportClients);
final Map<String, TransportClient> oldTransportClients = _serviceClients.put(serviceName, newTransportClients);
// Get the information for configuring how DegraderImpl should behave for each
// tracker client that we instantiate here. If there's no such information, we'll
// instantiate each tracker client with the default configuration.
DegraderImpl.Config config = null;
if (serviceProperties.getDegraderProperties() != null && !serviceProperties.getDegraderProperties().isEmpty()) {
config = DegraderConfigFactory.toDegraderConfig(serviceProperties.getDegraderProperties());
} else {
debug(_log, "trying to see if there's a special degraderImpl properties but serviceInfo.getDegraderImpl() is null" + " for service name = " + serviceName + " so we'll set config to default");
}
Clock clk = SystemClock.instance();
if (serviceProperties.getLoadBalancerStrategyProperties() != null) {
Map<String, Object> loadBalancerStrategyProperties = serviceProperties.getLoadBalancerStrategyProperties();
clk = MapUtil.getWithDefault(loadBalancerStrategyProperties, PropertyKeys.CLOCK, SystemClock.instance(), Clock.class);
}
Map<URI, TrackerClient> newTrackerClients;
// update all tracker clients to use new configs
LoadBalancerStateItem<UriProperties> uriItem = _uriProperties.get(serviceProperties.getClusterName());
UriProperties uriProperties = uriItem == null ? null : uriItem.getProperty();
if (uriProperties != null) {
Set<URI> uris = uriProperties.Uris();
// clients-by-uri map may be edited later by UriPropertiesListener.handlePut
newTrackerClients = new ConcurrentHashMap<URI, TrackerClient>(CollectionUtils.getMapInitialCapacity(uris.size(), 0.75f), 0.75f, 1);
long trackerClientInterval = getTrackerClientInterval(serviceProperties);
String errorStatusPattern = getErrorStatusPattern(serviceProperties);
for (URI uri : uris) {
TrackerClient trackerClient = getTrackerClient(serviceName, uri, uriProperties.getPartitionDataMap(uri), config, clk, trackerClientInterval, errorStatusPattern);
if (trackerClient != null) {
newTrackerClients.put(uri, trackerClient);
}
}
} else {
// clients-by-uri map may be edited later by UriPropertiesListener.handlePut
newTrackerClients = new ConcurrentHashMap<URI, TrackerClient>(16, 0.75f, 1);
}
//override the oldTrackerClients with newTrackerClients
_trackerClients.put(serviceName, newTrackerClients);
// No need to shut down oldTrackerClients, because they all point directly to the TransportClient for the service
// We do need to shut down the old transport clients
shutdownTransportClients(oldTransportClients, serviceName);
}
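The clients-by-scheme map is never mutated in place: a fresh unmodifiable map is built and swapped in, and the previous snapshot is returned so its clients can be shut down. Below is a generic sketch of that snapshot-and-swap pattern, with String standing in for TransportClient.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class SnapshotSwapExample {
    // serviceName -> (scheme -> client); mirrors the shape of _serviceClients above.
    private final Map<String, Map<String, String>> _clientsByService =
        new ConcurrentHashMap<String, Map<String, String>>();

    // Publishes an unmodifiable snapshot atomically, so readers never observe a
    // half-updated map. The previous snapshot is returned so the caller can shut its
    // members down, just as refreshTransportClientsPerService does via shutdownTransportClients.
    public Map<String, String> refresh(String serviceName, Map<String, String> freshClients) {
        Map<String, String> snapshot = Collections.unmodifiableMap(new HashMap<String, String>(freshClients));
        return _clientsByService.put(serviceName, snapshot);
    }
}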