use of java.util.concurrent.Executor in project java-chassis by ServiceComb.
the class ExecutorManager method findExecutor.
// Only executed at initialization time; the small amount of repeated lookup is not worth caching.
public static Executor findExecutor(OperationMeta operationMeta) {
    Executor executor = findByKey("cse.executors.Provider." + operationMeta.getSchemaQualifiedName());
    if (executor != null) {
        return executor;
    }
    // try the schema level
    executor = findByKey("cse.executors.Provider." + operationMeta.getSchemaMeta().getName());
    if (executor != null) {
        return executor;
    }
    executor = findByKey("cse.executors.default");
    if (executor != null) {
        return executor;
    }
    return BeanUtils.getBean("cse.executor.default");
}
use of java.util.concurrent.Executor in project java-chassis by ServiceComb.
the class MockUtil method mockAbstactRestServer.
public void mockAbstactRestServer() {
    new MockUp<AbstractRestServer<HttpServletResponse>>() {

        @Mock
        protected RestOperationMeta findRestOperation(RestServerRequestInternal restRequest) {
            RestOperationMeta restOperationMeta = Mockito.mock(RestOperationMeta.class);
            OperationMeta operationMeta = Mockito.mock(OperationMeta.class);
            Executor executor = Mockito.mock(Executor.class);
            operationMeta.setExecutor(executor);
            return restOperationMeta;
        }
    };
}
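Note that calling a setter on a Mockito mock only records the invocation; it does not make the corresponding getter return the value. If a test that uses this MockUp later needs to reach the executor through the returned RestOperationMeta, the wiring has to be stubbed explicitly. A hypothetical sketch, assuming RestOperationMeta exposes getOperationMeta() and OperationMeta exposes getExecutor():

// Inside findRestOperation, after the mocks above are created (illustrative only):
Mockito.when(restOperationMeta.getOperationMeta()).thenReturn(operationMeta);
Mockito.when(operationMeta.getExecutor()).thenReturn(executor);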
use of java.util.concurrent.Executor in project java-chassis by ServiceComb.
the class TestExecutors method testFixedThreadExecutor.
@Test
public void testFixedThreadExecutor() {
    FixedThreadExecutor oFixedThreadExecutor = new FixedThreadExecutor();
    oFixedThreadExecutor.execute(new Runnable() {

        @Override
        public void run() {
        }
    });
    Map<Long, Executor> threadExectorMap = Deencapsulation.getField(oFixedThreadExecutor, "threadExectorMap");
    Assert.assertEquals(true, (threadExectorMap.size() > 0));
    List<Executor> executorList = Deencapsulation.getField(oFixedThreadExecutor, "executorList");
    Assert.assertEquals(true, (executorList.size() > 1));
    ReactiveExecutor oReactiveExecutor = new ReactiveExecutor();
    oReactiveExecutor.execute(new Runnable() {

        @Override
        public void run() {
            strThreadTest = "thread Ran";
        }
    });
    Assert.assertEquals("thread Ran", strThreadTest);
}
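The assertion immediately after oReactiveExecutor.execute(...) can only pass if the Runnable runs synchronously on the calling thread, so ReactiveExecutor behaves as a direct executor. A minimal sketch of that behavior, offered as an illustration of what the test implies rather than the actual java-chassis class:

// Illustrative direct executor: runs the submitted task inline on the caller's thread.
public class ReactiveExecutor implements Executor {

    @Override
    public void execute(Runnable command) {
        command.run();
    }
}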
use of java.util.concurrent.Executor in project java-chassis by ServiceComb.
the class LoadbalanceHandler method sendWithRetry.
private void sendWithRetry(Invocation invocation, AsyncResponse asyncResp, final LoadBalancer choosenLB) throws Exception {
    long time = System.currentTimeMillis();
    // retry in loadbalance, 2.0 feature
    final int currentHandler = invocation.getHandlerIndex();
    final SyncResponseExecutor orginExecutor;
    final Executor newExecutor;
    if (invocation.getResponseExecutor() instanceof SyncResponseExecutor) {
        orginExecutor = (SyncResponseExecutor) invocation.getResponseExecutor();
        newExecutor = new Executor() {

            @Override
            public void execute(Runnable command) {
                // In the retry scenario a synchronous call has to be retried on the network thread:
                // the main thread of the synchronous call is already suspended, so the retry cannot
                // run on the main thread.
                command.run();
            }
        };
        invocation.setResponseExecutor(newExecutor);
    } else {
        orginExecutor = null;
        newExecutor = null;
    }
    ExecutionListener<Invocation, Response> listener = new ExecutionListener<Invocation, Response>() {

        @Override
        public void onExecutionStart(ExecutionContext<Invocation> context) throws AbortExecutionException {
        }

        @Override
        public void onStartWithServer(ExecutionContext<Invocation> context, ExecutionInfo info) throws AbortExecutionException {
        }

        @Override
        public void onExceptionWithServer(ExecutionContext<Invocation> context, Throwable exception, ExecutionInfo info) {
            LOGGER.error("onExceptionWithServer msg {}; server {}", exception.getMessage(), context.getRequest().getEndpoint());
        }

        @Override
        public void onExecutionSuccess(ExecutionContext<Invocation> context, Response response, ExecutionInfo info) {
            if (orginExecutor != null) {
                orginExecutor.execute(() -> {
                    asyncResp.complete(response);
                });
            } else {
                asyncResp.complete(response);
            }
        }

        @Override
        public void onExecutionFailed(ExecutionContext<Invocation> context, Throwable finalException, ExecutionInfo info) {
            if (orginExecutor != null) {
                orginExecutor.execute(() -> {
                    asyncResp.consumerFail(finalException);
                });
            } else {
                asyncResp.consumerFail(finalException);
            }
        }
    };
    List<ExecutionListener<Invocation, Response>> listeners = new ArrayList<>(0);
    listeners.add(listener);
    ExecutionContext<Invocation> context = new ExecutionContext<>(invocation, null, null, null);
    LoadBalancerCommand<Response> command = LoadBalancerCommand.<Response>builder()
        .withLoadBalancer(choosenLB)
        .withServerLocator(invocation)
        .withRetryHandler(new DefaultLoadBalancerRetryHandler(
            Configuration.INSTANCE.getRetryOnSame(invocation.getMicroserviceName()),
            Configuration.INSTANCE.getRetryOnNext(invocation.getMicroserviceName()),
            true))
        .withListeners(listeners)
        .withExecutionContext(context)
        .build();
    Observable<Response> observable = command.submit(new ServerOperation<Response>() {

        public Observable<Response> call(Server s) {
            return Observable.create(f -> {
                try {
                    ((CseServer) s).setLastVisitTime(time);
                    choosenLB.getLoadBalancerStats().incrementNumRequests(s);
                    invocation.setHandlerIndex(currentHandler);
                    invocation.setEndpoint(((CseServer) s).getEndpoint());
                    invocation.next(resp -> {
                        if (resp.isFailed()) {
                            LOGGER.error("service call error, msg is {}, server is {} ", ((Throwable) resp.getResult()).getMessage(), s);
                            choosenLB.getLoadBalancerStats().incrementSuccessiveConnectionFailureCount(s);
                            f.onError(resp.getResult());
                        } else {
                            choosenLB.getLoadBalancerStats().incrementActiveRequestsCount(s);
                            choosenLB.getLoadBalancerStats().noteResponseTime(s, (System.currentTimeMillis() - time));
                            f.onNext(resp);
                            f.onCompleted();
                        }
                    });
                } catch (Exception e) {
                    LOGGER.error("execution error, msg is " + e.getMessage());
                    f.onError(e);
                }
            });
        }
    });
    observable.subscribe(response -> {
    }, error -> {
    }, () -> {
    });
}
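The Executor-related part of this handler is the swap at the top: for a synchronous invocation, the SyncResponseExecutor would hand each response back to a caller thread that is blocked waiting, so during the retry the handler installs an executor that simply runs the command inline on the network thread, and only the final onExecutionSuccess/onExecutionFailed callbacks are pushed back through the original executor. The same inline behavior can be written more compactly; this is a hedged sketch of the pattern, not the project's code:

// Inline executor as a method reference: execute(command) just invokes command.run()
// on whatever thread submits it (the network thread during a retry).
Executor inlineExecutor = Runnable::run;
invocation.setResponseExecutor(inlineExecutor);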
use of java.util.concurrent.Executor in project fast-cast by RuedigerMoeller.
the class FCPing method runPingClientASync.
// no coordinated omission, async
public void runPingClientASync() throws InterruptedException {
    final FastCast fc = initFC("pclie", "pingponglat.kson");
    final FCPublisher pingserver = fc.onTransport("ping").publish(fc.getPublisherConf("pingtopic"));
    final Executor ex = Executors.newSingleThreadExecutor();
    final Histogram histo = new Histogram(TimeUnit.SECONDS.toNanos(10), 3);
    fc.onTransport(PING_BACK_ON_SAME_TOPIC ? "ping" : "pong").subscribe(fc.getSubscriberConf("pongtopic"), new FCSubscriber() {

        int msgCount;

        @Override
        public void messageReceived(String sender, long sequence, Bytez b, long off, int len) {
            // decode bounced back ping request
            PingRequest received = FSTStructFactory.getInstance().getStructPointer(b, off).cast();
            // filter out pressure of printing histogram
            if (msgCount > 10_000)
                histo.recordValue(System.nanoTime() - received.getNanoSendTime());
            msgCount++;
            if (msgCount > NUM_MSG + 10_000) {
                histo.outputPercentileDistribution(System.out, 1000.0);
                histo.reset();
                msgCount = 0;
            }
        }

        @Override
        public boolean dropped() {
            return true;
        }

        @Override
        public void senderTerminated(String senderNodeId) {
            System.out.println(senderNodeId + " terminated");
        }

        @Override
        public void senderBootstrapped(String receivesFrom, long seqNo) {
            System.out.println("bootstrap " + receivesFrom);
        }
    });
    // just create a byte[] for each struct (*)
    FSTStructAllocator alloc = new FSTStructAllocator(0);
    PingRequest pr = alloc.newStruct(new PingRequest());
    Sleeper sl = new Sleeper();
    while (true) {
        // need rate limiting cause of async
        sl.spinMicros(200);
        pr.setNanoSendTime(System.nanoTime());
        // can be sure off is 0, see (*)
        pingserver.offer(null, pr.getBase(), true);
    }
}
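PingRequest itself is not shown in this snippet: it is the off-heap struct whose send timestamp the subscriber reads back to compute the round-trip latency recorded in the histogram. A minimal sketch of such a struct follows, offered as an assumption about its shape (field name and package taken from the fast-serialization struct API), not the project's actual definition:

import org.nustaq.offheap.structs.FSTStruct;

// Hypothetical ping payload: a single nanosecond timestamp that the client sets
// before publishing and reads again when the message is bounced back.
public class PingRequest extends FSTStruct {

    protected long nanoSendTime;

    public long getNanoSendTime() {
        return nanoSendTime;
    }

    public void setNanoSendTime(long nanoSendTime) {
        this.nanoSendTime = nanoSendTime;
    }
}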