Usage of ru.fix.completable.reactor.runtime.execution.ReactorGraphExecution in the completable-reactor project by ru-fix:
the internalSubmit method of the CompletableReactor class.
/**
 * Submits a request without checking whether the reactor is closed.
 * <p>
 * If the {@code maxPendingRequestCount} limit is reached, an error is logged but the
 * request is still accepted — no back pressure is applied here. Clients that need
 * back pressure should use the {@code trySubmit} method instead.
 *
 * @param payload       request payload; its runtime class selects which registered
 *                      graph (inline function, payload graph or gl graph) is executed
 * @param timeoutMs     timeout in milliseconds after which both execution futures are
 *                      completed exceptionally with a {@link TimeoutException}
 * @param <PayloadType> type of the request payload
 * @return execution handle exposing the result future, the chain-completion future
 *         and (for graph executions) the debug vertex-graph state
 * @throws IllegalArgumentException if no graph is registered for the payload class
 */
@SuppressWarnings("unchecked")
private <PayloadType> Execution<PayloadType> internalSubmit(PayloadType payload, long timeoutMs) {
    if (pendingRequestCount.get() > maxPendingRequestCount.get()) {
        log.error("Max pending request count is reached. Request will be accepted but there is a possibility of " +
                "OOM or something wrong with back pressure logic in client code.\n" +
                "Use trySubmit method that supports back pressure or correctly handle the load on " +
                "CompletableReactor on client side.");
    }

    // Profile both the end-to-end payload latency and the chain execution time.
    ProfiledCall payloadCall = profiler
            .profiledCall(ProfilerNames.PAYLOAD + "." + payload.getClass().getSimpleName())
            .start();
    ProfiledCall executionCall = profiler
            .profiledCall(ProfilerNames.EXECUTION + "." + payload.getClass().getSimpleName())
            .start();

    /*
     * Inline graph execution scenario: the payload is handled by a plain function
     * registered in inlinePayloadGraphs instead of a reactor graph.
     */
    Function inlineGraphFunction = inlinePayloadGraphs.get(payload.getClass());
    if (inlineGraphFunction != null) {
        CompletableFuture<PayloadType> inlineGraphResult =
                (CompletableFuture<PayloadType>) inlineGraphFunction.apply(payload);
        inlineGraphResult.thenAcceptAsync(any -> {
            payloadCall.stop();
            executionCall.stop();
        });
        return new Execution<>(
                inlineGraphResult,
                inlineGraphResult.thenAccept(any -> {
                    /* chain-execution future simply mirrors completion of the result future */
                }),
                null);
    }

    /*
     * Standard graph execution scenario: look up a registered graph for the payload
     * class and build an execution for it.
     */
    ReactorGraphExecution<PayloadType> execution;
    ReactorGraph<PayloadType> graph = payloadGraphs.get(payload.getClass());
    if (graph != null) {
        execution = executionBuilder.build(graph);
    } else {
        GlGraph glGraph = glPayloadGraphs.get(payload.getClass());
        if (glGraph != null) {
            execution = glExecutionBuilder.build(glGraph);
        } else {
            // BUGFIX: message previously read "Rector graph not found".
            throw new IllegalArgumentException(String.format(
                    "Reactor graph not found for payload %s", payload.getClass()));
        }
    }

    /*
     * Pending request accounting.
     */
    pendingRequestCount.incrementAndGet();
    PayloadStatCounters statistics =
            payloadStatCounters.computeIfAbsent(payload.getClass(), key -> new PayloadStatCounters());
    statistics.runningTotal.increment();
    // BUGFIX: runningWithoutResult was decremented when the result future completed
    // but was never incremented here, so the counter drifted negative over time.
    statistics.runningWithoutResult.increment();
    execution.getChainExecutionFuture().handleAsync((result, thr) -> {
        statistics.runningTotal.decrement();
        return null;
    });
    execution.getResultFuture().handleAsync((result, thr) -> {
        statistics.runningWithoutResult.decrement();
        return null;
    });

    /*
     * Launch chain execution by completing the submit future with the payload.
     */
    execution.getSubmitFuture().complete(payload);

    // TODO: add to the exception details about vertex statuses — which vertex
    // did not complete on time.
    /*
     * Timeout protection: complete both futures exceptionally if the execution does
     * not finish within timeoutMs.
     */
    ScheduledFuture<?> schedule = timeoutExecutorService.schedule(() -> {
        /*
         * Temporary solution.
         * Should be fixed by completing all futures in processor chain.
         */
        if (!execution.getResultFuture().isDone()) {
            execution.getResultFuture().completeExceptionally(new TimeoutException(
                    String.format("Response for payload %s took more than %d ms.", payload, timeoutMs)));
        }
        if (!execution.getChainExecutionFuture().isDone()) {
            execution.getChainExecutionFuture().completeExceptionally(new TimeoutException(
                    String.format("Execution of payload %s took more than %d ms.", payload, timeoutMs)));
        }
    }, timeoutMs, TimeUnit.MILLISECONDS);

    execution.getChainExecutionFuture().handleAsync((result, throwable) -> {
        long count = pendingRequestCount.decrementAndGet();
        if (count == 0) {
            // NOTE(review): presumably a shutdown path waits on pendingRequestCount
            // until all in-flight requests drain — confirm against the close() logic.
            synchronized (pendingRequestCount) {
                pendingRequestCount.notifyAll();
            }
        }
        // Execution finished (normally or exceptionally) — the timeout task is moot.
        schedule.cancel(false);
        return null;
    });

    execution.getResultFuture().thenRunAsync(payloadCall::stop);
    execution.getChainExecutionFuture().thenRunAsync(executionCall::stop);

    return new Execution<>(
            execution.getResultFuture(),
            execution.getChainExecutionFuture(),
            execution.getDebugProcessingVertexGraphState());
}
Aggregations