Use of com.netflix.loadbalancer.reactive.ExecutionInfo in the java-chassis project by ServiceComb.
The class LoadbalanceHandler, method sendWithRetry:
private void sendWithRetry(Invocation invocation, AsyncResponse asyncResp, final LoadBalancer choosenLB) throws Exception {
  long time = System.currentTimeMillis();
  // retry in loadbalance, 2.0 feature
  final int currentHandler = invocation.getHandlerIndex();
  final SyncResponseExecutor orginExecutor;
  final Executor newExecutor;
  if (invocation.getResponseExecutor() instanceof SyncResponseExecutor) {
    orginExecutor = (SyncResponseExecutor) invocation.getResponseExecutor();
    newExecutor = new Executor() {
      @Override
      public void execute(Runnable command) {
        // In the retry scenario a synchronous invocation must be retried on the network thread:
        // the calling thread is already blocked waiting for the response, so the retry cannot run on it.
        command.run();
      }
    };
    invocation.setResponseExecutor(newExecutor);
  } else {
    orginExecutor = null;
    newExecutor = null;
  }
  ExecutionListener<Invocation, Response> listener = new ExecutionListener<Invocation, Response>() {
    @Override
    public void onExecutionStart(ExecutionContext<Invocation> context) throws AbortExecutionException {
    }

    @Override
    public void onStartWithServer(ExecutionContext<Invocation> context, ExecutionInfo info)
        throws AbortExecutionException {
    }

    @Override
    public void onExceptionWithServer(ExecutionContext<Invocation> context, Throwable exception, ExecutionInfo info) {
      LOGGER.error("onExceptionWithServer msg {}; server {}",
          exception.getMessage(),
          context.getRequest().getEndpoint());
    }

    @Override
    public void onExecutionSuccess(ExecutionContext<Invocation> context, Response response, ExecutionInfo info) {
      if (orginExecutor != null) {
        orginExecutor.execute(() -> {
          asyncResp.complete(response);
        });
      } else {
        asyncResp.complete(response);
      }
    }

    @Override
    public void onExecutionFailed(ExecutionContext<Invocation> context, Throwable finalException, ExecutionInfo info) {
      if (orginExecutor != null) {
        orginExecutor.execute(() -> {
          asyncResp.consumerFail(finalException);
        });
      } else {
        asyncResp.consumerFail(finalException);
      }
    }
  };
  List<ExecutionListener<Invocation, Response>> listeners = new ArrayList<>(0);
  listeners.add(listener);
  ExecutionContext<Invocation> context = new ExecutionContext<>(invocation, null, null, null);
  LoadBalancerCommand<Response> command = LoadBalancerCommand.<Response>builder()
      .withLoadBalancer(choosenLB)
      .withServerLocator(invocation)
      .withRetryHandler(new DefaultLoadBalancerRetryHandler(
          Configuration.INSTANCE.getRetryOnSame(invocation.getMicroserviceName()),
          Configuration.INSTANCE.getRetryOnNext(invocation.getMicroserviceName()),
          true))
      .withListeners(listeners)
      .withExecutionContext(context)
      .build();
  Observable<Response> observable = command.submit(new ServerOperation<Response>() {
    public Observable<Response> call(Server s) {
      return Observable.create(f -> {
        try {
          ((CseServer) s).setLastVisitTime(time);
          choosenLB.getLoadBalancerStats().incrementNumRequests(s);
          // reset the handler index so a retried invocation re-enters the chain from this handler
          invocation.setHandlerIndex(currentHandler);
          invocation.setEndpoint(((CseServer) s).getEndpoint());
          invocation.next(resp -> {
            if (resp.isFailed()) {
              LOGGER.error("service call error, msg is {}, server is {} ",
                  ((Throwable) resp.getResult()).getMessage(),
                  s);
              choosenLB.getLoadBalancerStats().incrementSuccessiveConnectionFailureCount(s);
              f.onError(resp.getResult());
            } else {
              choosenLB.getLoadBalancerStats().incrementActiveRequestsCount(s);
              choosenLB.getLoadBalancerStats().noteResponseTime(s, (System.currentTimeMillis() - time));
              f.onNext(resp);
              f.onCompleted();
            }
          });
        } catch (Exception e) {
          LOGGER.error("execution error, msg is " + e.getMessage());
          f.onError(e);
        }
      });
    }
  });
  // responses are delivered through the ExecutionListener above, so the subscriber is intentionally empty
  observable.subscribe(response -> {
  }, error -> {
  }, () -> {
  });
}
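
The handler above implements all five ExecutionListener callbacks but never reads the ExecutionInfo argument that Ribbon passes to the per-server ones. As a rough illustration of what that object carries, here is a minimal listener sketch, not code from either project: the class name RetryLoggingListener and the SLF4J logger are assumptions, and it relies on ExecutionInfo exposing getServer(), getNumberOfPastAttemptsOnServer() and getNumberOfPastServersAttempted() as in Ribbon's reactive package.

import com.netflix.loadbalancer.reactive.ExecutionContext;
import com.netflix.loadbalancer.reactive.ExecutionInfo;
import com.netflix.loadbalancer.reactive.ExecutionListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Sketch only: logs what ExecutionInfo reports for each per-server attempt.
public class RetryLoggingListener<I, O> implements ExecutionListener<I, O> {
  private static final Logger LOGGER = LoggerFactory.getLogger(RetryLoggingListener.class);

  @Override
  public void onExecutionStart(ExecutionContext<I> context) throws AbortExecutionException {
  }

  @Override
  public void onStartWithServer(ExecutionContext<I> context, ExecutionInfo info) throws AbortExecutionException {
    // ExecutionInfo carries the server chosen for this attempt plus the retry counters.
    LOGGER.info("attempt {} on server {}, other servers already tried: {}",
        info.getNumberOfPastAttemptsOnServer() + 1,
        info.getServer(),
        info.getNumberOfPastServersAttempted());
  }

  @Override
  public void onExceptionWithServer(ExecutionContext<I> context, Throwable exception, ExecutionInfo info) {
    LOGGER.warn("attempt on {} failed: {}", info.getServer(), exception.getMessage());
  }

  @Override
  public void onExecutionSuccess(ExecutionContext<I> context, O response, ExecutionInfo info) {
  }

  @Override
  public void onExecutionFailed(ExecutionContext<I> context, Throwable finalException, ExecutionInfo info) {
  }
}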
Use of com.netflix.loadbalancer.reactive.ExecutionInfo in the ribbon project by Netflix.
The class ListenerTest, method testAbortedExecutionOnServer:
@Test
public void testAbortedExecutionOnServer() {
  IClientConfig config = DefaultClientConfigImpl.getClientConfigWithDefaultValues()
      .withProperty(CommonClientConfigKey.ConnectTimeout, "100")
      .withProperty(CommonClientConfigKey.MaxAutoRetries, 1)
      .withProperty(CommonClientConfigKey.MaxAutoRetriesNextServer, 1);
  HttpClientRequest<ByteBuf> request = HttpClientRequest.createGet("/testAsync/person");
  Server badServer = new Server("localhost:12345");
  Server badServer2 = new Server("localhost:34567");
  List<Server> servers = Lists.newArrayList(badServer, badServer2);
  BaseLoadBalancer lb = LoadBalancerBuilder.<Server>newBuilder()
      .withRule(new AvailabilityFilteringRule())
      .withPing(new DummyPing())
      .buildFixedServerListLoadBalancer(servers);
  IClientConfig overrideConfig = DefaultClientConfigImpl.getEmptyConfig();
  TestExecutionListener listener = new TestExecutionListener(request, overrideConfig) {
    @Override
    public void onStartWithServer(ExecutionContext context, ExecutionInfo info) {
      throw new AbortExecutionException("exit now");
    }
  };
  List<ExecutionListener<HttpClientRequest<ByteBuf>, HttpClientResponse<ByteBuf>>> listeners =
      Lists.<ExecutionListener<HttpClientRequest<ByteBuf>, HttpClientResponse<ByteBuf>>>newArrayList(listener);
  LoadBalancingHttpClient<ByteBuf, ByteBuf> client =
      RibbonTransport.newHttpClient(lb, config, new NettyHttpLoadBalancerErrorHandler(config), listeners);
  final CountDownLatch latch = new CountDownLatch(1);
  final AtomicReference<Throwable> ref = new AtomicReference<Throwable>();
  client.submit(request, null, overrideConfig).subscribe(new Action1<HttpClientResponse<ByteBuf>>() {
    @Override
    public void call(HttpClientResponse<ByteBuf> byteBufHttpClientResponse) {
    }
  }, new Action1<Throwable>() {
    @Override
    public void call(Throwable throwable) {
      ref.set(throwable);
      latch.countDown();
    }
  });
  try {
    latch.await(500, TimeUnit.MILLISECONDS);
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
  assertTrue(ref.get() instanceof AbortExecutionException);
}
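
The test shows that an AbortExecutionException thrown from onStartWithServer surfaces to the subscriber as the final error. A listener can combine that abort mechanism with the ExecutionInfo counters, for example to stop retrying once more than one server has already been attempted. The fragment below is a sketch under that assumption; it reuses the request and overrideConfig variables and the TestExecutionListener helper from the test above, and the ExecutionInfo accessor is the same assumed one noted earlier.

// Sketch: abort further retries once more than one server has been attempted.
TestExecutionListener capping = new TestExecutionListener(request, overrideConfig) {
  @Override
  public void onStartWithServer(ExecutionContext context, ExecutionInfo info) {
    if (info.getNumberOfPastServersAttempted() > 1) {
      throw new AbortExecutionException("too many servers attempted");
    }
  }
};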