Use of com.linkedin.common.callback.Callback in project rest.li by LinkedIn.
The class ConfigWriter, method writeConfig.
public void writeConfig() throws ExecutionException, TimeoutException, InterruptedException {
  long startTime = System.currentTimeMillis();
  // Start the store and block until it is ready (or the timeout expires).
  FutureCallback<None> callback = new FutureCallback<None>();
  _store.start(callback);
  callback.get(_timeout, _timeoutUnit);
  // Cap the number of in-flight puts: each put takes a permit, and the
  // put's callback returns it on completion.
  final Semaphore outstandingPutSemaphore = new Semaphore(_maxOutstandingWrites);
  for (final String key : _source.keySet()) {
    Map<String, Object> map = merge(_source.get(key), _defaultMap);
    T properties = _builder.fromMap(map);
    Callback<None> putCallback = new Callback<None>() {
      @Override
      public void onSuccess(None none) {
        outstandingPutSemaphore.release();
      }

      @Override
      public void onError(Throwable e) {
        _log.error("Put failed for {}", key, e);
        outstandingPutSemaphore.release();
      }
    };
    if (!outstandingPutSemaphore.tryAcquire(_timeout, _timeoutUnit)) {
      _log.error("Put timed out for {}", key);
      throw new TimeoutException();
    }
    _store.put(key, properties, putCallback);
  }
  // Wait until all puts are finished: re-acquiring every permit is only
  // possible once all outstanding callbacks have released theirs.
  if (!outstandingPutSemaphore.tryAcquire(_maxOutstandingWrites, _timeout, _timeoutUnit)) {
    _log.error("Put timed out with {} outstanding writes", _maxOutstandingWrites - outstandingPutSemaphore.availablePermits());
    throw new TimeoutException();
  }
  // Shut the store down and block until the shutdown completes.
  FutureCallback<None> shutdownCallback = new FutureCallback<None>();
  _store.shutdown(shutdownCallback);
  shutdownCallback.get(_timeout, _timeoutUnit);
  long elapsedTime = System.currentTimeMillis() - startTime;
  // Zero-pad the millisecond remainder so e.g. 5007 ms prints as 5.007s rather than 5.7s.
  _log.info("A total of {}.{}s elapsed to write configs to store.", elapsedTime / 1000, String.format("%03d", elapsedTime % 1000));
}
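For reference, the semaphore here implements simple back-pressure: a permit is taken before each asynchronous put, the callback returns it, and the final acquire of all the permits only succeeds once every write has completed. A minimal, self-contained sketch of the same pattern; the AsyncStore interface and its put signature below are hypothetical stand-ins, not part of rest.li.

import com.linkedin.common.callback.Callback;
import com.linkedin.common.util.None;

import java.util.List;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class ThrottledWriter {
  // Hypothetical async store; only the callback-based put matters here.
  interface AsyncStore {
    void put(String key, String value, Callback<None> callback);
  }

  public static void writeAll(AsyncStore store, List<String> keys, int maxOutstanding,
      long timeout, TimeUnit unit) throws InterruptedException, TimeoutException {
    final Semaphore permits = new Semaphore(maxOutstanding);
    for (final String key : keys) {
      // Block until fewer than maxOutstanding writes are in flight.
      if (!permits.tryAcquire(timeout, unit)) {
        throw new TimeoutException("put timed out for " + key);
      }
      store.put(key, "value-for-" + key, new Callback<None>() {
        @Override
        public void onSuccess(None none) {
          permits.release(); // permit returned on completion, success or failure
        }

        @Override
        public void onError(Throwable e) {
          permits.release();
        }
      });
    }
    // Re-acquiring every permit means all outstanding puts have completed.
    if (!permits.tryAcquire(maxOutstanding, timeout, unit)) {
      throw new TimeoutException("writes still outstanding after timeout");
    }
  }
}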
Use of com.linkedin.common.callback.Callback in project rest.li by LinkedIn.
The class ZooKeeperServer, method markUp.
@Override
public void markUp(final String clusterName, final URI uri, final Map<Integer, PartitionData> partitionDataMap, final Map<String, Object> uriSpecificProperties, final Callback<None> callback) {
  // Second step: write the UriProperties announcing this uri to the cluster.
  final Callback<None> doPutCallback = new Callback<None>() {
    @Override
    public void onSuccess(None none) {
      Map<URI, Map<Integer, PartitionData>> partitionDesc = new HashMap<URI, Map<Integer, PartitionData>>();
      partitionDesc.put(uri, partitionDataMap);
      Map<URI, Map<String, Object>> myUriSpecificProperties;
      if (uriSpecificProperties != null && !uriSpecificProperties.isEmpty()) {
        myUriSpecificProperties = new HashMap<URI, Map<String, Object>>();
        myUriSpecificProperties.put(uri, uriSpecificProperties);
      } else {
        myUriSpecificProperties = Collections.emptyMap();
      }
      if (_log.isInfoEnabled()) {
        StringBuilder sb = new StringBuilder();
        sb.append(_store);
        sb.append(" marked up for cluster: ");
        sb.append(clusterName);
        sb.append(", uri: ");
        sb.append(uri);
        sb.append(", announcing [partitionId: weight]s: {");
        for (final int partitionId : partitionDataMap.keySet()) {
          sb.append("[");
          sb.append(partitionId);
          sb.append(" : ");
          sb.append(partitionDataMap.get(partitionId));
          sb.append("]");
        }
        sb.append("}");
        info(_log, sb);
      }
      _store.put(clusterName, new UriProperties(clusterName, partitionDesc, myUriSpecificProperties), callback);
    }

    @Override
    public void onError(Throwable e) {
      callback.onError(e);
    }
  };
  // First step: check whether the uri is already announced to the cluster.
  Callback<UriProperties> getCallback = new Callback<UriProperties>() {
    @Override
    public void onSuccess(UriProperties uris) {
      if (uris != null && uris.Uris().contains(uri)) {
        warn(_log, "markUp called on a uri that already exists in cluster ", clusterName, ": ", uri);
        // mark down before marking up with the new weight
        markDown(clusterName, uri, doPutCallback);
      } else {
        doPutCallback.onSuccess(None.none());
      }
    }

    @Override
    public void onError(Throwable e) {
      callback.onError(e);
    }
  };
  _store.get(clusterName, getCallback);
}
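A hedged usage sketch: announcing a single-partition server with weight 1.0 and blocking until the announcement lands. The cluster name, URI, and timeout are illustrative; the sketch assumes PartitionData's single-argument weight constructor and that FutureCallback implements both Callback and Future, as in the writeConfig example above.

import com.linkedin.common.callback.FutureCallback;
import com.linkedin.common.util.None;
import com.linkedin.d2.balancer.properties.PartitionData;
import com.linkedin.d2.balancer.servers.ZooKeeperServer;

import java.net.URI;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;

public class MarkUpExample {
  // 'server' must already be started; cluster name and URI are illustrative.
  public static void announce(ZooKeeperServer server) throws Exception {
    Map<Integer, PartitionData> partitions =
        Collections.singletonMap(0, new PartitionData(1.0d)); // partition 0, weight 1.0
    FutureCallback<None> done = new FutureCallback<None>();
    server.markUp("ExampleCluster", URI.create("http://host.example.com:8080"),
        partitions, Collections.<String, Object>emptyMap(), done);
    done.get(10, TimeUnit.SECONDS); // block until the put (or markDown + put) completes
  }
}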
Use of com.linkedin.common.callback.Callback in project rest.li by LinkedIn.
The class SimpleLoadBalancerState, method shutdown.
@Override
public void shutdown(final PropertyEventShutdownCallback shutdown) {
  trace(_log, "shutdown");
  // shutdown all three registries, all tracker clients, and the event thread
  _executor.execute(new PropertyEvent("shutdown load balancer state") {
    @Override
    public void innerRun() {
      // Need to shutdown loadBalancerStrategies before the transportClients are shutdown
      for (Map<String, LoadBalancerStrategy> strategies : _serviceStrategies.values()) {
        strategies.values().forEach(LoadBalancerStrategy::shutdown);
      }
      // put all tracker clients into a single set for convenience
      Set<TransportClient> transportClients = new HashSet<TransportClient>();
      for (Map<String, TransportClient> clientsByScheme : _serviceClients.values()) {
        transportClients.addAll(clientsByScheme.values());
      }
      // Fire the shutdown callback only after every transport client reports back.
      Callback<None> trackerCallback = Callbacks.countDown(Callbacks.<None>adaptSimple(new SimpleCallback() {
        @Override
        public void onDone() {
          shutdown.done();
        }
      }), transportClients.size());
      info(_log, "shutting down cluster clients");
      for (TransportClient transportClient : transportClients) {
        transportClient.shutdown(trackerCallback);
      }
      // Shutting down effectively removes all strategies and clients,
      // so it is needed to notify all the listeners.
      for (SimpleLoadBalancerStateListener listener : _listeners) {
        // Notify the strategy removal
        for (Map.Entry<String, Map<String, LoadBalancerStrategy>> serviceStrategy : _serviceStrategies.entrySet()) {
          for (Map.Entry<String, LoadBalancerStrategy> strategyEntry : serviceStrategy.getValue().entrySet()) {
            listener.onStrategyRemoved(serviceStrategy.getKey(), strategyEntry.getKey(), strategyEntry.getValue());
          }
          // Also notify the client removal
          Map<URI, TrackerClient> trackerClients = _trackerClients.get(serviceStrategy.getKey());
          if (trackerClients != null) {
            for (TrackerClient client : trackerClients.values()) {
              listener.onClientRemoved(serviceStrategy.getKey(), client);
            }
          }
        }
      }
    }
  });
}
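The fan-in here comes from Callbacks.countDown, which wraps a single callback so that it fires only after being invoked N times; combined with Callbacks.adaptSimple, a plain SimpleCallback runs once all N transport clients have shut down. A minimal sketch of that behavior in isolation, using the same calls as the snippet above:

import com.linkedin.common.callback.Callback;
import com.linkedin.common.callback.Callbacks;
import com.linkedin.common.callback.SimpleCallback;
import com.linkedin.common.util.None;

public class CountDownDemo {
  public static void main(String[] args) {
    final int parts = 3;
    // The wrapped SimpleCallback runs once, after all 'parts' invocations arrive.
    Callback<None> multi = Callbacks.countDown(
        Callbacks.<None>adaptSimple(new SimpleCallback() {
          @Override
          public void onDone() {
            System.out.println("all " + parts + " shutdowns finished");
          }
        }), parts);
    for (int i = 0; i < parts; i++) {
      multi.onSuccess(None.none()); // e.g. each TransportClient.shutdown reporting back
    }
  }
}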
Use of com.linkedin.common.callback.Callback in project rest.li by LinkedIn.
The class RouteLookupClient, method restRequest.
@Override
public void restRequest(final RestRequest request, final RequestContext requestContext, final Callback<RestResponse> callback, String routeKey) {
  // The authority of a d2 URI carries the service name.
  String originalServiceName = request.getURI().getAuthority();
  Callback<String> routeLookupCallback = new Callback<String>() {
    @Override
    public void onError(Throwable e) {
      callback.onError(e);
    }

    @Override
    public void onSuccess(String resultServiceName) {
      // Re-target the request at the resolved service, then send it.
      RestRequest resultRequest = createNewRequestWithNewServiceName(request, resultServiceName);
      _client.restRequest(resultRequest, requestContext, callback);
    }
  };
  _routeLookup.run(originalServiceName, _routingGroup, routeKey, routeLookupCallback);
}
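createNewRequestWithNewServiceName is not shown in this snippet. One plausible shape, sketched here purely as an assumption about what such a helper might do, rebuilds the request URI with the resolved service name as its authority and copies the rest of the request via RestRequestBuilder:

import com.linkedin.r2.message.rest.RestRequest;
import com.linkedin.r2.message.rest.RestRequestBuilder;

import java.net.URI;
import java.net.URISyntaxException;

public class RequestRewrite {
  // Hypothetical stand-in for createNewRequestWithNewServiceName: swap the
  // authority (service name) of the request URI while keeping everything else.
  static RestRequest withServiceName(RestRequest request, String newServiceName)
      throws URISyntaxException {
    URI old = request.getURI();
    URI rewritten = new URI(old.getScheme(), newServiceName, old.getPath(),
        old.getQuery(), old.getFragment());
    return new RestRequestBuilder(request).setURI(rewritten).build();
  }
}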
Use of com.linkedin.common.callback.Callback in project rest.li by LinkedIn.
The class DegraderLoadBalancerStrategyV3, method checkQuarantineState.
/**
 * checkQuarantineState decides whether D2 quarantine can be enabled, by health
 * checking all the trackerClients once. It enables quarantine only if at least
 * one of the clients returns success for the check.
 *
 * The reasons for this check include:
 *
 * . The default method "OPTIONS" is not always enabled by the service.
 * . The user can configure any path/method for the check. We do a sanity check
 *   to make sure the configuration is correct and the service/host responds in
 *   time. Otherwise a host could be kept in quarantine forever if we blindly
 *   enabled it.
 *
 * This check can also warm up the R2 connection pool by making a connection to
 * each trackerClient. However, since the check happens before any real requests
 * are sent, it generally takes much longer to get the results, due to different
 * warm-up requirements. Therefore the check is retried in the next update if
 * the current check fails.
 *
 * This function is supposed to be protected by the update lock.
 *
 * @param clients the tracker clients to health check
 * @param config the load balancer strategy configuration
 */
private void checkQuarantineState(List<TrackerClientUpdater> clients, DegraderLoadBalancerStrategyConfig config) {
  Callback<None> healthCheckCallback = new Callback<None>() {
    @Override
    public void onError(Throwable e) {
      // Do nothing as the quarantine is disabled by default
      _rateLimitedLogger.error("Failed to enable quarantine. Health checking failed for service {}: ", _state._serviceName, e);
    }

    @Override
    public void onSuccess(None result) {
      // compareAndSet ensures the transition and its log line happen only once.
      if (_state._enableQuarantine.compareAndSet(false, true)) {
        _log.info("Quarantine is enabled for service {}", _state._serviceName);
      }
    }
  };
  // Ideally we would like to health check all the service hosts (i.e. all
  // TrackerClients), because this helps warm up the R2 connections to the
  // hosts and speeds up initial access when the d2client starts to use them.
  // However, it can also expose or expedite the problem of the d2client host
  // needing too many connections or file handles when downstream services
  // have a large number of hosts. Until that problem is addressed, we limit
  // the number of hosts to pre-health-check to a small number.
  clients.stream().limit(MAX_HOSTS_TO_CHECK_QUARANTINE).forEach(client -> {
    try {
      HealthCheck healthCheckClient = _state.getHealthCheckClient(client);
      if (healthCheckClient == null) {
        healthCheckClient = new HealthCheckClientBuilder()
            .setHealthCheckOperations(config.getHealthCheckOperations())
            .setHealthCheckPath(config.getHealthCheckPath())
            .setServicePath(config.getServicePath())
            .setClock(config.getClock())
            .setLatency(config.getQuarantineLatency())
            .setMethod(config.getHealthCheckMethod())
            .setClient(client.getTrackerClient())
            .build();
        _state.putHealthCheckClient(client, healthCheckClient);
      }
      healthCheckClient.checkHealth(healthCheckCallback);
    } catch (URISyntaxException e) {
      _log.error("Failed to build healthCheckClient ", e);
    }
  });
  // Also remove entries whose trackerClientUpdaters no longer exist;
  // removeIf is safe even while the key set is being traversed.
  _state._healthCheckMap.keySet().removeIf(client -> !clients.contains(client));
}
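The onSuccess handler above relies on AtomicBoolean.compareAndSet so that, even when many health-check callbacks complete concurrently, the "quarantine enabled" transition and its log line happen exactly once. The same one-shot pattern, sketched in isolation:

import java.util.concurrent.atomic.AtomicBoolean;

public class OneShotEnable {
  private final AtomicBoolean _enabled = new AtomicBoolean(false);

  // Safe to call from many callback threads; only the first caller wins.
  public void enable() {
    if (_enabled.compareAndSet(false, true)) {
      System.out.println("quarantine enabled (runs exactly once)");
    }
  }
}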