Use of com.linkedin.d2.balancer.strategies.LoadBalancerStrategy in project rest.li by linkedin.
The class D2ClientBuilder, method createDefaultLoadBalancerStrategyFactories.
/**
* Adds the default load balancer strategy factories only if they are not present in the provided factories
* during the transition period.
*
* @return Default mapping of the load balancer strategy names and the strategies
*/
private Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> createDefaultLoadBalancerStrategyFactories() {
  final Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories =
      new HashMap<>(_config.loadBalancerStrategyFactories);

  final RandomLoadBalancerStrategyFactory randomStrategyFactory = new RandomLoadBalancerStrategyFactory();
  loadBalancerStrategyFactories.putIfAbsent("random", randomStrategyFactory);

  // A single V3 factory is registered under every legacy degrader name for backward compatibility.
  final DegraderLoadBalancerStrategyFactoryV3 degraderStrategyFactoryV3 =
      new DegraderLoadBalancerStrategyFactoryV3(_config.healthCheckOperations, _config._executorService,
          _config.eventEmitter, Collections.emptyList());
  loadBalancerStrategyFactories.putIfAbsent("degrader", degraderStrategyFactoryV3);
  loadBalancerStrategyFactories.putIfAbsent("degraderV2", degraderStrategyFactoryV3);
  loadBalancerStrategyFactories.putIfAbsent("degraderV3", degraderStrategyFactoryV3);
  loadBalancerStrategyFactories.putIfAbsent("degraderV2_1", degraderStrategyFactoryV3);

  if (_config.enableRelativeLoadBalancer) {
    final RelativeLoadBalancerStrategyFactory relativeLoadBalancerStrategyFactory =
        new RelativeLoadBalancerStrategyFactory(_config._executorService, _config.healthCheckOperations,
            Collections.emptyList(), _config.eventEmitter, SystemClock.instance());
    loadBalancerStrategyFactories.putIfAbsent(RelativeLoadBalancerStrategy.RELATIVE_LOAD_BALANCER_STRATEGY_NAME,
        relativeLoadBalancerStrategyFactory);
  }
  return loadBalancerStrategyFactories;
}
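The putIfAbsent calls are what make this a non-destructive merge: any factory the caller already registered under the same name wins, and the defaults only fill the gaps. A minimal, self-contained sketch of that merge behavior, using plain strings in place of the real factory types (the values below are illustrative, not from the original):

import java.util.HashMap;
import java.util.Map;

public class DefaultMergeSketch {
  public static void main(String[] args) {
    // Caller-supplied factories, e.g. a custom "degrader" implementation.
    Map<String, String> configured = new HashMap<>();
    configured.put("degrader", "customDegraderFactory");

    // Mirrors createDefaultLoadBalancerStrategyFactories(): copy, then fill the gaps.
    Map<String, String> merged = new HashMap<>(configured);
    merged.putIfAbsent("random", "randomFactory");
    merged.putIfAbsent("degrader", "degraderFactoryV3"); // no-op: the caller's entry wins

    System.out.println(merged.get("degrader")); // prints customDegraderFactory
    System.out.println(merged.get("random"));   // prints randomFactory
  }
}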
Use of com.linkedin.d2.balancer.strategies.LoadBalancerStrategy in project rest.li by linkedin.
The class SimpleLoadBalancer, method chooseTrackerClient.
private TrackerClient chooseTrackerClient(Request request, RequestContext requestContext, String serviceName,
    String clusterName, ClusterProperties cluster, LoadBalancerStateItem<UriProperties> uriItem, UriProperties uris,
    List<LoadBalancerState.SchemeStrategyPair> orderedStrategies, ServiceProperties serviceProperties)
    throws ServiceUnavailableException {
  // now try to find a tracker client for the uri
  TrackerClient trackerClient = null;
  URI targetHost = KeyMapper.TargetHostHints.getRequestContextTargetHost(requestContext);
  int partitionId = -1;
  URI requestUri = request.getURI();
  if (targetHost == null) {
    PartitionAccessor accessor = getPartitionAccessor(serviceName, clusterName);
    try {
      partitionId = accessor.getPartitionId(requestUri);
    } catch (PartitionAccessException e) {
      die(serviceName, "PEGA_1013. Error in finding the partition for URI: " + requestUri + ", "
          + "in cluster: " + clusterName + ", " + e.getMessage());
    }
  } else {
    // This is the scatter/gather or search case, where the target host may be responsible for more than one
    // partition (the target host was picked from a consistent hash ring, so load balancing is already in effect).
    // We randomly pick one partition to check for call dropping, for two reasons:
    // 1. Currently there is no way to know for which subset of partitions the target host was chosen when it
    //    serves more than one partition. This could be added, but it would require changing public interfaces
    //    (KeyMapper) so that more hints about the partitions concerned can be added to the request context.
    // 2. More importantly, there is no good way to check for call dropping even if the above problem were solved.
    //    For example, if a target host is chosen for partitions 1, 5, and 7, with call drop rates of 0, 0.2,
    //    and 0.4 respectively, a reasonable way to proceed would be to use the highest drop rate and do the
    //    check once for the target host; currently, however, the check can only be done per partition and only
    //    with a boolean result (no access to the drop rate).
    // The partition to check is picked at random to be conservative: in the example above, we don't want to
    // always use the drop rate of partition 1.
    Map<Integer, PartitionData> partitionDataMap = uris.getPartitionDataMap(targetHost);
    if (partitionDataMap == null || partitionDataMap.isEmpty()) {
      die(serviceName, "PEGA_1014. There is no partition data for server host: " + targetHost + ". URI: " + requestUri);
    }
    // A Set has no index-based access, so advance the iterator a random number of steps.
    Set<Integer> partitions = partitionDataMap.keySet();
    Iterator<Integer> iterator = partitions.iterator();
    int index = _random.nextInt(partitions.size());
    for (int i = 0; i <= index; i++) {
      partitionId = iterator.next();
    }
  }
  Map<URI, TrackerClient> clientsToLoadBalance = null;
  for (LoadBalancerState.SchemeStrategyPair pair : orderedStrategies) {
    LoadBalancerStrategy strategy = pair.getStrategy();
    String scheme = pair.getScheme();
    TrackerClientSubsetItem subsetItem =
        getPotentialClients(serviceName, serviceProperties, cluster, uris, scheme, partitionId, uriItem.getVersion());
    clientsToLoadBalance = subsetItem.getWeightedSubset();
    trackerClient = strategy.getTrackerClient(request, requestContext, uriItem.getVersion(), partitionId,
        clientsToLoadBalance, subsetItem.shouldForceUpdate());
    debug(_log, "load balancer strategy for ", serviceName, " returned: ", trackerClient);
    // break as soon as we find an available cluster client
    if (trackerClient != null) {
      break;
    }
  }
  if (trackerClient == null) {
    if (clientsToLoadBalance == null || clientsToLoadBalance.isEmpty()) {
      String requestedSchemes = orderedStrategies.stream()
          .map(LoadBalancerState.SchemeStrategyPair::getScheme)
          .collect(Collectors.joining(","));
      die(serviceName, "PEGA_1015. Service: " + serviceName + " unable to find a host to route the request"
          + " in partition: " + partitionId + " cluster: " + clusterName + " scheme: [" + requestedSchemes + "],"
          + " total hosts in cluster: " + uris.Uris().size() + "."
          + " Check what cluster and scheme your servers are announcing to.");
    } else {
      die(serviceName, "PEGA_1016. Service: " + serviceName + " is in a bad state (high latency/high error). "
          + "Dropping request. Cluster: " + clusterName + ", partitionId:" + partitionId
          + " (choosable: " + clientsToLoadBalance.size() + " hosts, total in cluster: " + uris.Uris().size() + ")");
    }
  }
  return trackerClient;
}
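The random pick in the targetHost branch deserves a note: a Set exposes no index, so the code walks an iterator a random number of steps, which selects each element with equal probability 1/size. A standalone sketch of the same idiom, with illustrative partition IDs:

import java.util.Iterator;
import java.util.Random;
import java.util.Set;

public class RandomSetPick {
  public static void main(String[] args) {
    Set<Integer> partitions = Set.of(1, 5, 7); // e.g. partitions served by the target host
    Random random = new Random();

    // Advance index + 1 steps; each element is chosen with probability 1/size.
    int index = random.nextInt(partitions.size());
    Iterator<Integer> iterator = partitions.iterator();
    int picked = -1;
    for (int i = 0; i <= index; i++) {
      picked = iterator.next();
    }
    System.out.println("picked partition: " + picked);
  }
}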
Use of com.linkedin.d2.balancer.strategies.LoadBalancerStrategy in project rest.li by linkedin.
The class SimpleLoadBalancerState, method shutdown.
@Override
public void shutdown(final PropertyEventShutdownCallback shutdown) {
  trace(_log, "shutdown");
  // shutdown all three registries, all tracker clients, and the event thread
  _executor.execute(new PropertyEvent("shutdown load balancer state") {
    @Override
    public void innerRun() {
      // Need to shut down loadBalancerStrategies before the transportClients are shut down
      for (Map<String, LoadBalancerStrategy> strategiesByScheme : _serviceStrategies.values()) {
        strategiesByScheme.values().forEach(LoadBalancerStrategy::shutdown);
      }
      // put all tracker clients into a single set for convenience
      Set<TransportClient> transportClients = new HashSet<>();
      for (Map<String, TransportClient> clientsByScheme : _serviceClients.values()) {
        transportClients.addAll(clientsByScheme.values());
      }
      // Fire the outer shutdown callback only after every transport client has finished shutting down.
      Callback<None> trackerCallback = Callbacks.countDown(Callbacks.<None>adaptSimple(new SimpleCallback() {
        @Override
        public void onDone() {
          shutdown.done();
        }
      }), transportClients.size());
      info(_log, "shutting down cluster clients");
      for (TransportClient transportClient : transportClients) {
        transportClient.shutdown(trackerCallback);
      }
      // Shutting down effectively removes every strategy and tracker client,
      // so it is necessary to notify all the listeners.
      for (SimpleLoadBalancerStateListener listener : _listeners) {
        // Notify the strategy removal
        for (Map.Entry<String, Map<String, LoadBalancerStrategy>> serviceStrategy : _serviceStrategies.entrySet()) {
          for (Map.Entry<String, LoadBalancerStrategy> strategyEntry : serviceStrategy.getValue().entrySet()) {
            listener.onStrategyRemoved(serviceStrategy.getKey(), strategyEntry.getKey(), strategyEntry.getValue());
          }
          // Also notify the client removal
          Map<URI, TrackerClient> trackerClients = _trackerClients.get(serviceStrategy.getKey());
          if (trackerClients != null) {
            for (TrackerClient client : trackerClients.values()) {
              listener.onClientRemoved(serviceStrategy.getKey(), client);
            }
          }
        }
      }
      // When SimpleLoadBalancerState is shut down, all the cluster listeners also need to be notified.
      for (LoadBalancerClusterListener clusterListener : _clusterListeners) {
        for (String clusterName : _clusterInfo.keySet()) {
          clusterListener.onClusterRemoved(clusterName);
        }
      }
    }
  });
}
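The fan-in around trackerCallback is the key pattern here: Callbacks.countDown wraps a single callback so that it fires only once, after all N transport clients report completion. A self-contained sketch of that countdown pattern using only JDK types (Runnable stands in for the real Callback<None>, which is a simplification):

import java.util.concurrent.atomic.AtomicInteger;

public class CountDownCallbackSketch {
  // Returns a Runnable that invokes 'done' only after being run 'count' times.
  static Runnable countDown(Runnable done, int count) {
    AtomicInteger remaining = new AtomicInteger(count);
    return () -> {
      if (remaining.decrementAndGet() == 0) {
        done.run();
      }
    };
  }

  public static void main(String[] args) {
    int clients = 3; // e.g. number of transport clients to shut down
    Runnable onAllDone = () -> System.out.println("all clients shut down");
    Runnable perClientCallback = countDown(onAllDone, clients);

    // Each client's shutdown completion ticks the counter once.
    for (int i = 0; i < clients; i++) {
      perClientCallback.run();
    }
  }
}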
Use of com.linkedin.d2.balancer.strategies.LoadBalancerStrategy in project rest.li by linkedin.
The class SimpleLoadBalancerState, method getStrategiesForService.
@Override
public List<SchemeStrategyPair> getStrategiesForService(String serviceName, List<String> prioritizedSchemes) {
  List<SchemeStrategyPair> cached = _serviceStrategiesCache.get(serviceName);
  if ((cached != null) && !cached.isEmpty()) {
    return cached;
  } else {
    List<SchemeStrategyPair> orderedStrategies = new ArrayList<>(prioritizedSchemes.size());
    for (String scheme : prioritizedSchemes) {
      // if this scheme is not supported (i.e., https is not enabled), don't add it to the list
      if ("https".equals(scheme) && !_isSSLEnabled) {
        continue;
      }
      // get the strategy for this service and scheme
      LoadBalancerStrategy strategy = getStrategy(serviceName, scheme);
      if (strategy != null) {
        orderedStrategies.add(new SchemeStrategyPair(scheme, strategy));
      } else {
        warn(_log, "unable to find a load balancer strategy for ", serviceName, " with scheme: ", scheme);
      }
    }
    _serviceStrategiesCache.put(serviceName, orderedStrategies);
    return orderedStrategies;
  }
}
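getStrategiesForService is a cache-aside read: return the cached ordered list if it is present and non-empty, otherwise compute it from the prioritized schemes and store it. A condensed sketch of the same pattern (class and method names here are hypothetical, and the https filter is a stand-in for the real SSL-enabled check):

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

public class SchemeStrategyCacheSketch {
  private final Map<String, List<String>> cache = new ConcurrentHashMap<>();

  // Cache-aside lookup: recompute only when the entry is missing or empty.
  List<String> strategiesFor(String serviceName, List<String> prioritizedSchemes) {
    List<String> cached = cache.get(serviceName);
    if (cached != null && !cached.isEmpty()) {
      return cached;
    }
    List<String> ordered = prioritizedSchemes.stream()
        .filter(scheme -> !"https".equals(scheme)) // stand-in for "skip schemes that are not supported"
        .collect(Collectors.toList());
    cache.put(serviceName, ordered);
    return ordered;
  }

  public static void main(String[] args) {
    SchemeStrategyCacheSketch sketch = new SchemeStrategyCacheSketch();
    System.out.println(sketch.strategiesFor("svc", List.of("https", "http"))); // computed: [http]
    System.out.println(sketch.strategiesFor("svc", List.of("http")));          // served from cache: [http]
  }
}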
Use of com.linkedin.d2.balancer.strategies.LoadBalancerStrategy in project rest.li by linkedin.
The class LoadBalancerClientCli, method getZKFSLoadBalancer.
public ZKFSLoadBalancer getZKFSLoadBalancer(String zkConnectString, String d2path, String d2ServicePath) throws Exception {
  _tmpDir = createTempDirectory(_tmpdirName);
  ZKFSComponentFactory componentFactory = new ZKFSComponentFactory();
  if (d2ServicePath == null || d2ServicePath.isEmpty()) {
    d2ServicePath = "services";
  }
  Map<String, TransportClientFactory> clientFactories = new HashMap<>();
  clientFactories.put("http", new HttpClientFactory.Builder().build());

  // Unlike D2ClientBuilder above, the CLI registers its strategy factories unconditionally with put().
  Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories = new HashMap<>();
  loadBalancerStrategyFactories.put("random", new RandomLoadBalancerStrategyFactory());
  loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
  loadBalancerStrategyFactories.put("degraderV2", new DegraderLoadBalancerStrategyFactoryV3());
  loadBalancerStrategyFactories.put("degraderV3", new DegraderLoadBalancerStrategyFactoryV3());
  loadBalancerStrategyFactories.put("degraderV2_1", new DegraderLoadBalancerStrategyFactoryV3());

  ZKFSTogglingLoadBalancerFactoryImpl factory =
      new ZKFSTogglingLoadBalancerFactoryImpl(componentFactory, TIMEOUT, TimeUnit.MILLISECONDS, d2path,
          _tmpDir.getAbsolutePath(), clientFactories, loadBalancerStrategyFactories, d2ServicePath, null, null, false);
  return new ZKFSLoadBalancer(zkConnectString, SESSION_TIMEOUT, (int) TIMEOUT, factory, null, d2path);
}