Use of com.nike.riposte.server.http.HttpProcessingState in project riposte by Nike-Inc: class BaseInboundHandlerWithTracingAndMdcSupportTest, method beforeMethod.
@Before
public void beforeMethod() {
handler = new BaseInboundHandlerWithTracingAndMdcSupport() {
};
channelMock = mock(Channel.class);
ctxMock = mock(ChannelHandlerContext.class);
//noinspection unchecked
stateAttributeMock = mock(Attribute.class);
state = new HttpProcessingState();
doReturn(channelMock).when(ctxMock).channel();
doReturn(stateAttributeMock).when(channelMock).attr(ChannelAttributes.HTTP_PROCESSING_STATE_ATTRIBUTE_KEY);
doReturn(state).when(stateAttributeMock).get();
resetTracingAndMdc();
}
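The mocks above wire ctxMock -> channelMock -> stateAttributeMock -> state, which mirrors the lookup path the handlers take through ChannelAttributes. A minimal sketch of a test that would verify that wiring follows; the test method name is hypothetical, AssertJ is assumed as the assertion library, and imports are omitted to match the snippet style above.

@Test
public void mockedContextExposesTheHttpProcessingState() {
    // Resolve the state the same way the production handlers do.
    HttpProcessingState resolved = ChannelAttributes.getHttpProcessingStateForChannel(ctxMock).get();
    assertThat(resolved).isSameAs(state);
}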
Use of com.nike.riposte.server.http.HttpProcessingState in project riposte by Nike-Inc: class ProxyRouterEndpointExecutionHandler, method doChannelRead.
@Override
public PipelineContinuationBehavior doChannelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
Endpoint<?> endpoint = state.getEndpointForExecution();
if (shouldHandleDoChannelReadMessage(msg, endpoint)) {
ProxyRouterProcessingState proxyRouterState = getOrCreateProxyRouterProcessingState(ctx);
ProxyRouterEndpoint endpointProxyRouter = ((ProxyRouterEndpoint) endpoint);
RequestInfo<?> requestInfo = state.getRequestInfo();
if (msg instanceof HttpRequest) {
if (requestInfo instanceof RiposteInternalRequestInfo) {
// Tell this RequestInfo that we'll be managing the release of content chunks, so that when
// RequestInfo.releaseAllResources() is called we don't have extra reference count removals.
((RiposteInternalRequestInfo) requestInfo).contentChunksWillBeReleasedExternally();
}
// We're supposed to start streaming. There may be pre-endpoint-execution validation logic or other work
// that needs to happen before the endpoint is executed, so set up the CompletableFuture for the
// endpoint call to only execute if the pre-endpoint-execution validation/work chain is successful.
CompletableFuture<DownstreamRequestFirstChunkInfo> firstChunkFuture =
    state.getPreEndpointExecutionWorkChain()
         .thenCompose(functionWithTracingAndMdc(
             aVoid -> endpointProxyRouter.getDownstreamRequestFirstChunkInfo(requestInfo, longRunningTaskExecutor, ctx),
             ctx
         ));
Long endpointTimeoutOverride = endpointProxyRouter.completableFutureTimeoutOverrideMillis();
long callTimeoutValueToUse = (endpointTimeoutOverride == null) ? defaultCompletableFutureTimeoutMillis : endpointTimeoutOverride;
// When the first chunk is ready, stream it downstream and set up what happens afterward.
firstChunkFuture.whenComplete((downstreamRequestFirstChunkInfo, throwable) -> {
Optional<ManualModeTask<HttpResponse>> circuitBreakerManualTask = getCircuitBreaker(downstreamRequestFirstChunkInfo, ctx).map(CircuitBreaker::newManualModeTask);
StreamingCallback callback = new StreamingCallbackForCtx(ctx, circuitBreakerManualTask, endpointProxyRouter, requestInfo, proxyRouterState);
if (throwable != null) {
// Something blew up trying to determine the first chunk info.
callback.unrecoverableErrorOccurred(throwable);
} else if (!ctx.channel().isOpen()) {
// The channel was closed for some reason before we were able to start streaming.
String errorMsg = "The channel from the original caller was closed before we could begin the " + "downstream call.";
Exception channelClosedException = new RuntimeException(errorMsg);
runnableWithTracingAndMdc(() -> logger.warn(errorMsg), ctx).run();
callback.unrecoverableErrorOccurred(channelClosedException);
} else {
try {
// Ok we have the first chunk info. Start by setting the downstream call info in the request
// info (i.e. for access logs if desired)
requestInfo.addRequestAttribute(DOWNSTREAM_CALL_PATH_REQUEST_ATTR_KEY, HttpUtils.extractPath(downstreamRequestFirstChunkInfo.firstChunk.getUri()));
// Try our circuit breaker (if we have one).
Throwable circuitBreakerException = null;
try {
circuitBreakerManualTask.ifPresent(ManualModeTask::throwExceptionIfCircuitBreakerIsOpen);
} catch (Throwable t) {
circuitBreakerException = t;
}
if (circuitBreakerException == null) {
// No circuit breaker, or the breaker is closed. We can now stream the first chunk info.
String downstreamHost = downstreamRequestFirstChunkInfo.host;
int downstreamPort = downstreamRequestFirstChunkInfo.port;
HttpRequest downstreamRequestFirstChunk = downstreamRequestFirstChunkInfo.firstChunk;
boolean isSecureHttpsCall = downstreamRequestFirstChunkInfo.isHttps;
boolean relaxedHttpsValidation = downstreamRequestFirstChunkInfo.relaxedHttpsValidation;
// Tell the proxyRouterState about the streaming callback so that
// callback.unrecoverableErrorOccurred(...) can be called in the case of an error
// on subsequent chunks.
proxyRouterState.setStreamingCallback(callback);
// Setup the streaming channel future with everything it needs to kick off the
// downstream request.
proxyRouterState.setStreamingStartTimeNanos(System.nanoTime());
CompletableFuture<StreamingChannel> streamingChannel = streamingAsyncHttpClient.streamDownstreamCall(
    downstreamHost, downstreamPort, downstreamRequestFirstChunk, isSecureHttpsCall,
    relaxedHttpsValidation, callback, callTimeoutValueToUse, ctx
);
// Tell the streaming channel future what to do when it completes.
streamingChannel = streamingChannel.whenComplete((sc, cause) -> {
if (cause == null) {
// Successfully connected and sent the first chunk. We can now safely let
// the remaining content chunks through for streaming.
proxyRouterState.triggerChunkProcessing(sc);
} else {
// Something blew up while connecting to the downstream server.
callback.unrecoverableErrorOccurred(cause);
}
});
// Set the streaming channel future on the state so it can be connected to.
proxyRouterState.setStreamingChannelCompletableFuture(streamingChannel);
} else {
// Circuit breaker is tripped (or otherwise threw an unexpected exception). Immediately
// short circuit the error back to the client.
callback.unrecoverableErrorOccurred(circuitBreakerException);
}
} catch (Throwable t) {
callback.unrecoverableErrorOccurred(t);
}
}
});
} else if (msg instanceof HttpContent) {
HttpContent msgContent = (HttpContent) msg;
// Register the chunk-streaming behavior and subsequent cleanup for the given HttpContent,
// unless the stream has already failed (in which case the chunk is simply released).
if (!releaseContentChunkIfStreamAlreadyFailed(msgContent, proxyRouterState)) {
registerChunkStreamingAction(proxyRouterState, msgContent, ctx);
}
}
return PipelineContinuationBehavior.DO_NOT_FIRE_CONTINUE_EVENT;
}
return PipelineContinuationBehavior.CONTINUE;
}
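For context, the DownstreamRequestFirstChunkInfo consumed above is produced by a ProxyRouterEndpoint implementation. A minimal sketch is shown below; the class name, downstream host/port, and matcher path are hypothetical, the DownstreamRequestFirstChunkInfo constructor argument order (host, port, isHttps, first-chunk request) is an assumption to verify against the actual Riposte API, and imports are omitted to match the snippet style above.

public class PassThroughProxyEndpoint extends ProxyRouterEndpoint {

    @Override
    public CompletableFuture<DownstreamRequestFirstChunkInfo> getDownstreamRequestFirstChunkInfo(
        RequestInfo<?> request, Executor longRunningTaskExecutor, ChannelHandlerContext ctx
    ) {
        // Forward the incoming request's path to a fixed (hypothetical) downstream host.
        HttpRequest firstChunk =
            new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, request.getPath());
        return CompletableFuture.completedFuture(
            new DownstreamRequestFirstChunkInfo("downstream.example.com", 8080, false, firstChunk)
        );
    }

    @Override
    public Matcher requestMatcher() {
        // Placeholder path matcher; adjust the pattern to the routes being proxied.
        return Matcher.match("/proxy");
    }
}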
Use of com.nike.riposte.server.http.HttpProcessingState in project riposte by Nike-Inc: class NonblockingEndpointExecutionHandler, method doChannelRead.
@Override
public PipelineContinuationBehavior doChannelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
Endpoint<?> endpoint = state.getEndpointForExecution();
if (shouldHandleDoChannelReadMessage(msg, endpoint)) {
// We only do something when the last chunk of content has arrived.
if (msg instanceof LastHttpContent) {
NonblockingEndpoint nonblockingEndpoint = ((NonblockingEndpoint) endpoint);
// We're supposed to execute the endpoint. There may be pre-endpoint-execution validation logic or
// other work that needs to happen before the endpoint is executed, so set up the
// CompletableFuture for the endpoint call to only execute if the pre-endpoint-execution
// validation/work chain is successful.
RequestInfo<?> requestInfo = state.getRequestInfo();
@SuppressWarnings("unchecked") CompletableFuture<ResponseInfo<?>> responseFuture = state.getPreEndpointExecutionWorkChain().thenCompose(functionWithTracingAndMdc(aVoid -> (CompletableFuture<ResponseInfo<?>>) nonblockingEndpoint.execute(requestInfo, longRunningTaskExecutor, ctx), ctx));
// Register an on-completion callback so we can be notified when the CompletableFuture finishes.
responseFuture.whenComplete((responseInfo, throwable) -> {
if (throwable != null)
asyncErrorCallback(ctx, throwable);
else
asyncCallback(ctx, responseInfo);
});
// Also schedule a timeout check with our Netty event loop to make sure we kill the
// CompletableFuture if it goes on too long.
long timeoutValueToUse = (nonblockingEndpoint.completableFutureTimeoutOverrideMillis() == null)
                         ? defaultCompletableFutureTimeoutMillis
                         : nonblockingEndpoint.completableFutureTimeoutOverrideMillis();
ScheduledFuture<?> responseTimeoutScheduledFuture = ctx.channel().eventLoop().schedule(() -> {
if (!responseFuture.isDone()) {
runnableWithTracingAndMdc(
    () -> logger.error("A non-blocking endpoint's CompletableFuture did not finish within "
                       + "the allotted timeout ({} milliseconds). Forcibly cancelling it.",
                       timeoutValueToUse),
    ctx
).run();
@SuppressWarnings("unchecked") Throwable errorToUse = nonblockingEndpoint.getCustomTimeoutExceptionCause(requestInfo, ctx);
if (errorToUse == null)
errorToUse = new NonblockingEndpointCompletableFutureTimedOut(timeoutValueToUse);
responseFuture.completeExceptionally(errorToUse);
}
}, timeoutValueToUse, TimeUnit.MILLISECONDS);
/*
The problem with the scheduled timeout check is that it holds on to the RequestInfo,
ChannelHandlerContext, and a bunch of other stuff that *should* become garbage the instant the
request finishes, but because of the timeout check it has to wait until the check executes
before the garbage is collectable. In high volume servers the default 60 second timeout is way
too long and acts like a memory leak and results in garbage collection thrashing if the
available memory can be filled within the 60 second timeout. To combat this we cancel the
timeout future when the endpoint future finishes. Netty will remove the cancelled timeout future
from its scheduled list within a short time, thus letting the garbage be collected.
*/
responseFuture.whenComplete((responseInfo, throwable) -> {
if (!responseTimeoutScheduledFuture.isDone())
responseTimeoutScheduledFuture.cancel(false);
});
}
// Don't continue the pipeline yet - it will be continued when the endpoint's CompletableFuture
// completes (see asyncCallback() and asyncErrorCallback()).
return PipelineContinuationBehavior.DO_NOT_FIRE_CONTINUE_EVENT;
}
// This message/endpoint isn't handled here, so continue the pipeline. If no other handler deals
// with it, the normal error handling will cause an error to be returned to the client.
return PipelineContinuationBehavior.CONTINUE;
}
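A minimal sketch of the endpoint side that this handler drives is shown next: execute(...) runs only after the pre-endpoint-execution work chain succeeds, and completableFutureTimeoutOverrideMillis() feeds the null-check used when scheduling the timeout. The class name, path, and response content are hypothetical, the StandardEndpoint / ResponseInfo.newBuilder(...) usage is assumed from typical Riposte endpoints, and imports are omitted to match the snippet style above.

public class HelloEndpoint extends StandardEndpoint<Void, String> {

    @Override
    public CompletableFuture<ResponseInfo<String>> execute(
        RequestInfo<Void> request, Executor longRunningTaskExecutor, ChannelHandlerContext ctx
    ) {
        // Do the (potentially slow) work off the Netty I/O thread and complete the future when done.
        return CompletableFuture.supplyAsync(
            () -> ResponseInfo.newBuilder("hello").build(),
            longRunningTaskExecutor
        );
    }

    @Override
    public Matcher requestMatcher() {
        return Matcher.match("/hello");
    }

    @Override
    public Long completableFutureTimeoutOverrideMillis() {
        // Hypothetical per-endpoint override; doChannelRead() above falls back to the server
        // default when this returns null.
        return 5_000L;
    }
}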
Use of com.nike.riposte.server.http.HttpProcessingState in project riposte by Nike-Inc: class ProcessFinalResponseOutputHandler, method write.
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
// Deal with the final outbound HttpResponse
if (msg instanceof HttpResponse) {
HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
if (state != null)
state.setActualResponseObject((HttpResponse) msg);
}
// Deal with the final outbound body content
if (msg instanceof HttpContent) {
HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
if (state != null && state.getResponseInfo() != null) {
ResponseInfo<?> responseInfo = state.getResponseInfo();
long contentBytes = ((HttpContent) msg).content().readableBytes();
if (responseInfo.getFinalContentLength() == null)
responseInfo.setFinalContentLength(contentBytes);
else
responseInfo.setFinalContentLength(responseInfo.getFinalContentLength() + contentBytes);
}
}
super.write(ctx, msg, promise);
}
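The handler above only records data on the HttpProcessingState; nothing in this snippet reads it back. A small hypothetical helper showing how later code (an access logger, for instance) could consume the accumulated final content length is sketched below; the method name and the logger field are assumptions.

private void logFinalResponseSize(ChannelHandlerContext ctx) {
    HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
    if (state == null || state.getResponseInfo() == null)
        return;
    // getFinalContentLength() is the running total accumulated by ProcessFinalResponseOutputHandler.write().
    Long finalContentLength = state.getResponseInfo().getFinalContentLength();
    logger.debug("Final response content length: {} bytes", finalContentLength);
}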
Use of com.nike.riposte.server.http.HttpProcessingState in project riposte by Nike-Inc: class ResponseFilterHandler, method executeResponseFilters.
protected void executeResponseFilters(ChannelHandlerContext ctx) {
try {
HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
// If response sending has already started there is nothing left to filter, so short circuit
// (this can happen if an error response was already sent, for example).
if (state.isResponseSendingStarted())
return;
// RequestHasBeenHandledVerificationHandler should have made sure that state.getResponseInfo() is not null.
ResponseInfo<?> currentResponseInfo = state.getResponseInfo();
for (RequestAndResponseFilter filter : filtersInResponseProcessingOrder) {
try {
currentResponseInfo = responseInfoUpdateNoNulls(
    filter, currentResponseInfo,
    filter.filterResponse(currentResponseInfo, state.getRequestInfo(), ctx)
);
} catch (Throwable ex) {
logger.error("An error occurred while processing a request filter. This error will be ignored and the " + "filtering/processing will continue normally, however this error should be fixed (filters " + "should never throw errors). filter_class={}", filter.getClass().getName(), ex);
}
}
state.setResponseInfo(currentResponseInfo);
} catch (Throwable ex) {
logger.error("An error occurred while setting up to process response filters. This error will be ignored and the " + "pipeline will continue normally without any filtering having occurred, however this error should be " + "fixed (it should be impossible to reach here).", ex);
}
}
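For reference, a minimal sketch of a filter this method would iterate over is shown below, covering only the filterResponse(...) hook that executeResponseFilters(...) calls. The class name and header name/value are hypothetical, the generic signature is an assumption to verify against the actual RequestAndResponseFilter interface, and the request-side filter hooks are omitted for brevity.

public class ResponseHeaderTaggingFilter implements RequestAndResponseFilter {

    @Override
    public <T> ResponseInfo<T> filterResponse(ResponseInfo<T> currentResponseInfo,
                                              RequestInfo<?> requestInfo,
                                              ChannelHandlerContext ctx) {
        // Tag every outgoing response with a marker header (hypothetical header name/value).
        currentResponseInfo.getHeaders().set("X-Handled-By", "riposte-example");
        return currentResponseInfo;
    }

    // The request-side hooks (filterRequestFirstChunkNoPayload / filterRequestLastChunkWithFullPayload)
    // would simply return their input unchanged in this sketch; omitted for brevity.
}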