Use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by LinkedIn.
The class SimpleLoadBalancer, method chooseTrackerClient.
private TrackerClient chooseTrackerClient(Request request, RequestContext requestContext, String serviceName,
                                          String clusterName, ClusterProperties cluster,
                                          LoadBalancerStateItem<UriProperties> uriItem, UriProperties uris,
                                          List<LoadBalancerState.SchemeStrategyPair> orderedStrategies,
                                          ServiceProperties serviceProperties) throws ServiceUnavailableException {
  // now try to find a tracker client for the uri
  TrackerClient trackerClient = null;
  URI targetHost = KeyMapper.TargetHostHints.getRequestContextTargetHost(requestContext);
  int partitionId = -1;
  URI requestUri = request.getURI();
  if (targetHost == null) {
    PartitionAccessor accessor = getPartitionAccessor(serviceName, clusterName);
    try {
      partitionId = accessor.getPartitionId(requestUri);
    } catch (PartitionAccessException e) {
      die(serviceName, "Error in finding the partition for URI: " + requestUri + ", " + e.getMessage());
    }
  } else {
    // This is the scatter/gather or search case, where the target host may be responsible for more than
    // one partition (the target host was picked from a consistent hash ring, so load balancing is already
    // in effect). We randomly pick one partition to check for call dropping. This is done for two reasons:
    // 1. Currently there is no way to know which subset of partitions the target host was chosen for
    //    when it serves more than one partition. This could be added, but it requires changing public
    //    interfaces (KeyMapper) so that more hints about the partitions concerned can be added to the
    //    request context.
    // 2. More importantly, there is no good way to check for call dropping even if the above problem
    //    were solved. For example, if a target host is chosen for partitions 1, 5, and 7, with call drop
    //    rates of 0, 0.2, and 0.4 respectively, a reasonable way to proceed would be to use the highest
    //    drop rate and do the check once for the target host; but currently the check can only be done
    //    per partition, and only with a boolean result (no access to the drop rate).
    // The partition to check is picked at random to be conservative; e.g. in the above example we don't
    // want to always use the drop rate of partition 1.
    Map<Integer, PartitionData> partitionDataMap = uris.getPartitionDataMap(targetHost);
    if (partitionDataMap == null || partitionDataMap.isEmpty()) {
      die(serviceName, "There is no partition data for server host: " + targetHost + ". URI: " + requestUri);
    }
    Set<Integer> partitions = partitionDataMap.keySet();
    Iterator<Integer> iterator = partitions.iterator();
    int index = _random.nextInt(partitions.size());
    for (int i = 0; i <= index; i++) {
      partitionId = iterator.next();
    }
  }
  List<TrackerClient> clientsToLoadBalance = null;
  for (LoadBalancerState.SchemeStrategyPair pair : orderedStrategies) {
    LoadBalancerStrategy strategy = pair.getStrategy();
    String scheme = pair.getScheme();
    clientsToLoadBalance = getPotentialClients(serviceName, serviceProperties, uris, scheme, partitionId);
    trackerClient = strategy.getTrackerClient(request, requestContext, uriItem.getVersion(), partitionId, clientsToLoadBalance);
    debug(_log, "load balancer strategy for ", serviceName, " returned: ", trackerClient);
    // break as soon as we find an available cluster client
    if (trackerClient != null) {
      break;
    }
  }
  if (trackerClient == null) {
    if (clientsToLoadBalance == null || clientsToLoadBalance.isEmpty()) {
      die(serviceName, "Service: " + serviceName + " unable to find a host to route the request"
          + " in partition: " + partitionId + " cluster: " + clusterName
          + ". Check what cluster your servers are announcing to.");
    } else {
      die(serviceName, "Service: " + serviceName + " is in a bad state (high latency/high error). "
          + "Dropping request. Cluster: " + clusterName + ", partitionId:" + partitionId
          + " (" + clientsToLoadBalance.size() + " hosts)");
    }
  }
  return trackerClient;
}
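In the targetHost branch above, the hint is read from the request context. A minimal sketch of how a caller would set that hint before sending the request; this assumes the companion setter KeyMapper.TargetHostHints.setRequestContextTargetHost, mirroring the getter used above, and the host URI is illustrative:

// Sketch: pin a request to one host. chooseTrackerClient will then skip
// partition resolution and instead pick a random partition served by this
// host for the call-dropping check.
RequestContext requestContext = new RequestContext();
URI targetHost = URI.create("http://host1.example.com:1234");
KeyMapper.TargetHostHints.setRequestContextTargetHost(requestContext, targetHost);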
Use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by LinkedIn.
The class PartitionDataFactory, method createPartitionDataMap.
public static Map<Integer, PartitionData> createPartitionDataMap(Map<String, Object> sourceMap) {
  Map<Integer, PartitionData> map = new HashMap<Integer, PartitionData>();
  if (sourceMap != null) {
    for (Map.Entry<String, Object> entry : sourceMap.entrySet()) {
      @SuppressWarnings("unchecked")
      Map<String, Object> partitionDataMap = (Map<String, Object>) entry.getValue();
      String weightStr = PropertyUtil.checkAndGetValue(partitionDataMap, "weight", String.class, "URI weight");
      PartitionData data = new PartitionData(PropertyUtil.parseDouble("weight", weightStr));
      map.put(PropertyUtil.parseInt("partitionId", entry.getKey()), data);
    }
  }
  return map;
}
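From the factory's parsing calls, the expected input is a map from partition-id strings to maps carrying a "weight" string. A minimal sketch of feeding it, with illustrative values:

// Sketch: source map as it might arrive from deserialized cluster properties.
// Keys are partition ids as strings; each value holds a "weight" string.
Map<String, Object> partition0 = new HashMap<String, Object>();
partition0.put("weight", "1.0");
Map<String, Object> sourceMap = new HashMap<String, Object>();
sourceMap.put("0", partition0);
Map<Integer, PartitionData> parsed = PartitionDataFactory.createPartitionDataMap(sourceMap);
// parsed.get(0).getWeight() is 1.0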
Use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by LinkedIn.
The class ZooKeeperServer, method markUp.
@Override
public void markUp(final String clusterName, final URI uri, final Map<Integer, PartitionData> partitionDataMap,
                   final Map<String, Object> uriSpecificProperties, final Callback<None> callback) {
  final Callback<None> doPutCallback = new Callback<None>() {
    @Override
    public void onSuccess(None none) {
      Map<URI, Map<Integer, PartitionData>> partitionDesc = new HashMap<URI, Map<Integer, PartitionData>>();
      partitionDesc.put(uri, partitionDataMap);
      Map<URI, Map<String, Object>> myUriSpecificProperties;
      if (uriSpecificProperties != null && !uriSpecificProperties.isEmpty()) {
        myUriSpecificProperties = new HashMap<URI, Map<String, Object>>();
        myUriSpecificProperties.put(uri, uriSpecificProperties);
      } else {
        myUriSpecificProperties = Collections.emptyMap();
      }
      if (_log.isInfoEnabled()) {
        StringBuilder sb = new StringBuilder();
        sb.append(_store);
        sb.append(" marked up for cluster: ");
        sb.append(clusterName);
        sb.append(", uri: ");
        sb.append(uri);
        sb.append(", announcing [partitionId: weight]s: {");
        for (final int partitionId : partitionDataMap.keySet()) {
          sb.append("[");
          sb.append(partitionId);
          sb.append(" : ");
          sb.append(partitionDataMap.get(partitionId));
          sb.append("]");
        }
        sb.append("}");
        info(_log, sb);
      }
      _store.put(clusterName, new UriProperties(clusterName, partitionDesc, myUriSpecificProperties), callback);
    }

    @Override
    public void onError(Throwable e) {
      callback.onError(e);
    }
  };
  Callback<UriProperties> getCallback = new Callback<UriProperties>() {
    @Override
    public void onSuccess(UriProperties uris) {
      if (uris != null && uris.Uris().contains(uri)) {
        warn(_log, "markUp called on a uri that already exists in cluster ", clusterName, ": ", uri);
        // mark down before marking up with the new weight
        markDown(clusterName, uri, doPutCallback);
      } else {
        doPutCallback.onSuccess(None.none());
      }
    }

    @Override
    public void onError(Throwable e) {
      callback.onError(e);
    }
  };
  _store.get(clusterName, getCallback);
}
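A minimal sketch of announcing a server through this method; the cluster name, URI, and weight are illustrative, zooKeeperServer stands for an already-constructed ZooKeeperServer, and FutureCallback is used as a blocking callback:

// Sketch: announce one host for partition 0 with weight 1.0 and no
// uri-specific properties (markUp accepts null for that argument).
Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>();
partitionDataMap.put(0, new PartitionData(1.0d));
FutureCallback<None> callback = new FutureCallback<None>();
zooKeeperServer.markUp("exampleCluster", URI.create("http://host1.example.com:1234"), partitionDataMap, null, callback);
callback.get(); // blocks until the store write (or markDown/markUp cycle) completes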
Use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by LinkedIn.
The class TrackerClientTest, method testClientRestRequest.
@Test(groups = { "small", "back-end" })
public void testClientRestRequest() throws URISyntaxException {
  URI uri = URI.create("http://test.qa.com:1234/foo");
  double weight = 3d;
  TestClient wrappedClient = new TestClient();
  Clock clock = new SettableClock();
  Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>(2);
  partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(weight));
  TrackerClient client = new TrackerClient(uri, partitionDataMap, wrappedClient, clock, null);
  assertEquals(client.getUri(), uri);
  Double clientWeight = client.getPartitionWeight(DefaultPartitionAccessor.DEFAULT_PARTITION_ID);
  assertEquals(clientWeight, weight);
  assertEquals(client.getWrappedClient(), wrappedClient);
  RestRequest restRequest = new RestRequestBuilder(uri).build();
  Map<String, String> restWireAttrs = new HashMap<String, String>();
  TestTransportCallback<RestResponse> restCallback = new TestTransportCallback<RestResponse>();
  client.restRequest(restRequest, new RequestContext(), restWireAttrs, restCallback);
  assertFalse(restCallback.response.hasError());
  assertEquals(wrappedClient.restRequest, restRequest);
  assertEquals(wrappedClient.restWireAttrs, restWireAttrs);
}
Use of com.linkedin.d2.balancer.properties.PartitionData in project rest.li by LinkedIn.
The class TrackerClientTest, method createTrackerClient.
private TrackerClient createTrackerClient(TransportClient tc, Clock clock, URI uri) {
  double weight = 3d;
  Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>(2);
  partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(weight));
  DegraderImpl.Config config = new DegraderImpl.Config();
  config.setHighErrorRate(0.1);
  config.setLowErrorRate(0.0);
  config.setMinCallCount(1);
  return new TrackerClient(uri, partitionDataMap, tc, clock, config);
}
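With minCallCount set to 1 and highErrorRate at 0.1, a single failed call should be enough to trip the degrader, which keeps tests short and deterministic. A sketch of using the helper; SettableClock and TestClient are the test doubles used elsewhere in this test class, and the URI is illustrative:

// Sketch: build a degrader-backed client whose clock the test controls.
SettableClock clock = new SettableClock();
TrackerClient client = createTrackerClient(new TestClient(), clock, URI.create("http://test.qa.com:1234/foo"));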