Use of io.netty.handler.codec.http.HttpResponse in project riposte by Nike-Inc.
The class StreamingAsyncHttpClient, method prepChannelForDownstreamCall.
protected void prepChannelForDownstreamCall(
    ChannelPool pool, Channel ch, StreamingCallback callback, Deque<Span> distributedSpanStackToUse,
    Map<String, String> mdcContextToUse, boolean isSecureHttpsCall, boolean relaxedHttpsValidation,
    boolean performSubSpanAroundDownstreamCalls, long downstreamCallTimeoutMillis,
    ObjectHolder<Boolean> callActiveHolder, ObjectHolder<Boolean> lastChunkSentDownstreamHolder
) throws SSLException, NoSuchAlgorithmException, KeyStoreException {
    ChannelHandler chunkSenderHandler = new SimpleChannelInboundHandler<HttpObject>() {
        @Override
        protected void channelRead0(ChannelHandlerContext downstreamCallCtx, HttpObject msg) throws Exception {
            try {
                // Only process the message if the call is still active. Messages that arrive after
                // the call is fully processed should not trigger the behavior a second time.
                if (callActiveHolder.heldObject) {
                    if (msg instanceof LastHttpContent) {
                        lastChunkSentDownstreamHolder.heldObject = true;

                        if (performSubSpanAroundDownstreamCalls) {
                            // Complete the subspan.
                            runnableWithTracingAndMdc(
                                () -> {
                                    if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
                                        Tracer.getInstance().completeRequestSpan();
                                    else
                                        Tracer.getInstance().completeSubSpan();
                                },
                                distributedSpanStackToUse, mdcContextToUse
                            ).run();
                        }
                    }

                    HttpObject msgToPass = msg;
                    if (msg instanceof HttpResponse) {
                        // We can't pass the original HttpResponse back to the callback due to intricacies of how
                        // Netty handles determining the last chunk. If we do, and the callback ends up writing
                        // the message out to the client (which happens during proxy routing for example), then
                        // msg's headers might get modified - potentially causing this channel pipeline to
                        // never send a LastHttpContent, which will in turn cause an indefinite hang.
                        HttpResponse origHttpResponse = (HttpResponse) msg;
                        HttpResponse httpResponse =
                            (msg instanceof FullHttpResponse)
                            ? new DefaultFullHttpResponse(origHttpResponse.getProtocolVersion(),
                                                          origHttpResponse.getStatus(),
                                                          ((FullHttpResponse) msg).content())
                            : new DefaultHttpResponse(origHttpResponse.getProtocolVersion(),
                                                      origHttpResponse.getStatus());
                        httpResponse.headers().add(origHttpResponse.headers());
                        msgToPass = httpResponse;
                    }

                    callback.messageReceived(msgToPass);
                }
                else {
                    if (shouldLogBadMessagesAfterRequestFinishes) {
                        runnableWithTracingAndMdc(
                            () -> logger.warn("Received HttpObject msg when call was not active: {}",
                                              String.valueOf(msg)),
                            distributedSpanStackToUse, mdcContextToUse
                        ).run();
                    }
                }
            }
            finally {
                if (msg instanceof LastHttpContent) {
                    releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder,
                                                           "last content chunk sent",
                                                           distributedSpanStackToUse, mdcContextToUse);
                }
            }
        }
    };
    Consumer<Throwable> doErrorHandlingConsumer = (cause) -> {
        Pair<Deque<Span>, Map<String, String>> originalThreadInfo = null;
        try {
            // Setup tracing and MDC so our log messages have the correct distributed trace info, etc.
            originalThreadInfo = linkTracingAndMdcToCurrentThread(distributedSpanStackToUse, mdcContextToUse);

            // Only do the error handling if the call is still active. Errors that arrive after the
            // call is fully processed should not trigger the behavior a second time.
            if (callActiveHolder.heldObject) {
                if (performSubSpanAroundDownstreamCalls) {
                    if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
                        Tracer.getInstance().completeRequestSpan();
                    else
                        Tracer.getInstance().completeSubSpan();
                }
                Tracer.getInstance().unregisterFromThread();

                if (cause instanceof Errors.NativeIoException) {
                    // NativeIoExceptions are often setup to not have stack traces which is bad for debugging.
                    // Wrap it in a NativeIoExceptionWrapper that maps to a 503 since this is likely a busted
                    // connection and a second attempt should work.
                    cause = new NativeIoExceptionWrapper(
                        "Caught a NativeIoException in the downstream streaming call pipeline. Wrapped it in a "
                        + "NativeIoExceptionWrapper so that it maps to a 503 and provides a usable stack trace "
                        + "in the logs.",
                        (Errors.NativeIoException) cause
                    );
                }

                callback.unrecoverableErrorOccurred(cause);
            }
            else {
                if (cause instanceof DownstreamIdleChannelTimeoutException) {
                    logger.debug("A channel used for downstream calls will be closed because it was idle too long. "
                                 + "This is normal behavior and does not indicate a downstream call failure: {}",
                                 cause.toString());
                }
                else {
                    logger.warn("Received exception in downstream call pipeline after the call was finished. "
                                + "Not necessarily anything to worry about but in case it helps debugging the "
                                + "exception was: {}",
                                cause.toString());
                }
            }
        }
        finally {
            // Mark the channel as broken so it will be closed and removed from the pool when it is returned.
            markChannelAsBroken(ch);

            // Release it back to the pool if possible/necessary so the pool can do its usual cleanup.
            releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder,
                                                   "error received in downstream pipeline: " + cause.toString(),
                                                   distributedSpanStackToUse, mdcContextToUse);

            // No matter what the cause is we want to make sure the channel is closed. Doing this raw ch.close()
            // here will catch the cases where this channel does not have an active call but still needs to be
            // closed (e.g. an idle channel timeout that happens in-between calls).
            ch.close();

            // Unhook the tracing and MDC stuff from this thread now that we're done.
            unlinkTracingAndMdcFromCurrentThread(originalThreadInfo);
        }
    };
    ChannelHandler errorHandler = new ChannelInboundHandlerAdapter() {
        @Override
        public void exceptionCaught(ChannelHandlerContext downstreamCallCtx, Throwable cause) throws Exception {
            doErrorHandlingConsumer.accept(cause);
        }

        @Override
        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
            if (logger.isDebugEnabled()) {
                runnableWithTracingAndMdc(
                    () -> logger.debug("Downstream channel closing. call_active={}, "
                                       + "last_chunk_sent_downstream={}, channel_id={}",
                                       callActiveHolder.heldObject, lastChunkSentDownstreamHolder.heldObject,
                                       ctx.channel().toString()),
                    distributedSpanStackToUse, mdcContextToUse
                ).run();
            }

            // We only care if the channel was closed while the call was active.
            if (callActiveHolder.heldObject)
                doErrorHandlingConsumer.accept(new DownstreamChannelClosedUnexpectedlyException(ch));

            super.channelInactive(ctx);
        }
    };
    // Set up the HTTP client pipeline.
    ChannelPipeline p = ch.pipeline();
    List<String> registeredHandlerNames = p.names();

    // Remove any dangling idle channel timeout handler left over from when this channel sat in the pool.
    // It would normally have been removed when the channel was leased for this call, but may still be here if it
    // couldn't be removed at that time because it wasn't in the channel's eventLoop.
    if (registeredHandlerNames.contains(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME)) {
        ChannelHandler idleHandler = p.get(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME);
        if (idleHandler != null)
            p.remove(idleHandler);
    }

    if (debugChannelLifecycleLoggingEnabled && !registeredHandlerNames.contains(DEBUG_LOGGER_HANDLER_NAME)) {
        // Add the channel debug logger if desired.
        p.addFirst(DEBUG_LOGGER_HANDLER_NAME,
                   new LoggingHandler(DOWNSTREAM_CLIENT_CHANNEL_DEBUG_LOGGER_NAME, LogLevel.DEBUG));
    }

    // Add/replace a downstream call timeout detector.
    addOrReplacePipelineHandler(
        new DownstreamIdleChannelTimeoutHandler(downstreamCallTimeoutMillis, () -> callActiveHolder.heldObject,
                                                true, "StreamingAsyncHttpClientChannel-call-timeout",
                                                distributedSpanStackToUse, mdcContextToUse),
        DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, p, registeredHandlerNames
    );
    if (isSecureHttpsCall) {
        // SSL call. Make sure we add the SSL handler if necessary.
        if (!registeredHandlerNames.contains(SSL_HANDLER_NAME)) {
            if (clientSslCtx == null) {
                if (relaxedHttpsValidation) {
                    clientSslCtx = SslContextBuilder.forClient()
                                                    .trustManager(InsecureTrustManagerFactory.INSTANCE)
                                                    .build();
                }
                else {
                    TrustManagerFactory tmf =
                        TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
                    tmf.init((KeyStore) null);
                    clientSslCtx = SslContextBuilder.forClient().trustManager(tmf).build();
                }
            }

            p.addAfter(DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, SSL_HANDLER_NAME, clientSslCtx.newHandler(ch.alloc()));
        }
    }
    else {
        // Not an SSL call. Remove the SSL handler if it's there.
        if (registeredHandlerNames.contains(SSL_HANDLER_NAME))
            p.remove(SSL_HANDLER_NAME);
    }
    // The HttpClientCodec handler deals with HTTP codec stuff so you don't have to. Set it up if it hasn't
    // already been set up, and inspect it to make sure it's in a "ready to handle a new request" state. Some
    // rare and currently unknown edge cases can cause us to hit this point with the HttpClientCodec in an
    // unclean state, and if we barrel forward without cleaning this up the call will fail.
    boolean pipelineContainsHttpClientCodec = registeredHandlerNames.contains(HTTP_CLIENT_CODEC_HANDLER_NAME);
    boolean existingHttpClientCodecIsInBadState = false;
    if (pipelineContainsHttpClientCodec) {
        HttpClientCodec currentCodec = (HttpClientCodec) p.get(HTTP_CLIENT_CODEC_HANDLER_NAME);
        int currentHttpClientCodecInboundState = determineHttpClientCodecInboundState(currentCodec);
        if (currentHttpClientCodecInboundState != 0) {
            runnableWithTracingAndMdc(
                () -> logger.warn("HttpClientCodec inbound state was not 0. It will be replaced with a fresh "
                                  + "HttpClientCodec. bad_httpclientcodec_inbound_state={}",
                                  currentHttpClientCodecInboundState),
                distributedSpanStackToUse, mdcContextToUse
            ).run();
            existingHttpClientCodecIsInBadState = true;
        }
        else {
            int currentHttpClientCodecOutboundState = determineHttpClientCodecOutboundState(currentCodec);
            if (currentHttpClientCodecOutboundState != 0) {
                runnableWithTracingAndMdc(
                    () -> logger.warn("HttpClientCodec outbound state was not 0. It will be replaced with a fresh "
                                      + "HttpClientCodec. bad_httpclientcodec_outbound_state={}",
                                      currentHttpClientCodecOutboundState),
                    distributedSpanStackToUse, mdcContextToUse
                ).run();
                existingHttpClientCodecIsInBadState = true;
            }
        }
    }

    // Add the HttpClientCodec to the pipeline if it wasn't already there, or replace it if it was in a bad state.
    if (!pipelineContainsHttpClientCodec || existingHttpClientCodecIsInBadState) {
        addOrReplacePipelineHandler(new HttpClientCodec(4096, 8192, 8192, true),
                                    HTTP_CLIENT_CODEC_HANDLER_NAME, p, registeredHandlerNames);
    }

    // Update the chunk sender handler and error handler to the newly created versions that know about the correct
    // callback, dtrace info, etc to use for this request.
    addOrReplacePipelineHandler(chunkSenderHandler, CHUNK_SENDER_HANDLER_NAME, p, registeredHandlerNames);
    addOrReplacePipelineHandler(errorHandler, ERROR_HANDLER_NAME, p, registeredHandlerNames);
}
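Note how the chunk sender handler, the error-handling consumer, and the channel-inactive hook all coordinate through the same two ObjectHolder<Boolean> instances. A minimal sketch of such a holder follows, assuming riposte's ObjectHolder is essentially this kind of simple mutable wrapper (check the actual source for details):

// Sketch of a mutable cross-handler state holder. Assumption: riposte's real
// ObjectHolder is roughly this shape; it is shown here only to make the
// callActiveHolder/lastChunkSentDownstreamHolder usage above concrete.
public static class ObjectHolder<T> {
    // Handlers read and write this field directly; all access happens on the
    // channel's single event loop thread, so no extra synchronization is needed.
    public T heldObject;
}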
Use of io.netty.handler.codec.http.HttpResponse in project riposte by Nike-Inc.
The class ProxyRouterEndpointExecutionHandler, method doChannelRead.
@Override
public PipelineContinuationBehavior doChannelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
    Endpoint<?> endpoint = state.getEndpointForExecution();

    if (shouldHandleDoChannelReadMessage(msg, endpoint)) {
        ProxyRouterProcessingState proxyRouterState = getOrCreateProxyRouterProcessingState(ctx);
        ProxyRouterEndpoint endpointProxyRouter = ((ProxyRouterEndpoint) endpoint);
        RequestInfo<?> requestInfo = state.getRequestInfo();

        if (msg instanceof HttpRequest) {
            if (requestInfo instanceof RiposteInternalRequestInfo) {
                // Tell this RequestInfo that we'll be managing the release of content chunks, so that when
                // RequestInfo.releaseAllResources() is called we don't have extra reference count removals.
                ((RiposteInternalRequestInfo) requestInfo).contentChunksWillBeReleasedExternally();
            }

            // We're supposed to start streaming. There may be pre-endpoint-execution validation logic or
            // other work that needs to happen before the endpoint is executed, so set up the
            // CompletableFuture for the endpoint call to only execute if the pre-endpoint-execution
            // validation/work chain is successful.
            CompletableFuture<DownstreamRequestFirstChunkInfo> firstChunkFuture =
                state.getPreEndpointExecutionWorkChain()
                     .thenCompose(functionWithTracingAndMdc(
                         aVoid -> endpointProxyRouter.getDownstreamRequestFirstChunkInfo(
                             requestInfo, longRunningTaskExecutor, ctx),
                         ctx
                     ));

            Long endpointTimeoutOverride = endpointProxyRouter.completableFutureTimeoutOverrideMillis();
            long callTimeoutValueToUse = (endpointTimeoutOverride == null)
                                         ? defaultCompletableFutureTimeoutMillis
                                         : endpointTimeoutOverride;
            // When the first chunk is ready, stream it downstream and set up what happens afterward.
            firstChunkFuture.whenComplete((downstreamRequestFirstChunkInfo, throwable) -> {
                Optional<ManualModeTask<HttpResponse>> circuitBreakerManualTask =
                    getCircuitBreaker(downstreamRequestFirstChunkInfo, ctx).map(CircuitBreaker::newManualModeTask);

                StreamingCallback callback = new StreamingCallbackForCtx(
                    ctx, circuitBreakerManualTask, endpointProxyRouter, requestInfo, proxyRouterState);

                if (throwable != null) {
                    // Something blew up trying to determine the first chunk info.
                    callback.unrecoverableErrorOccurred(throwable);
                }
                else if (!ctx.channel().isOpen()) {
                    // The channel was closed for some reason before we were able to start streaming.
                    String errorMsg = "The channel from the original caller was closed before we could begin the "
                                      + "downstream call.";
                    Exception channelClosedException = new RuntimeException(errorMsg);
                    runnableWithTracingAndMdc(() -> logger.warn(errorMsg), ctx).run();
                    callback.unrecoverableErrorOccurred(channelClosedException);
                }
                else {
                    try {
                        // Ok we have the first chunk info. Start by setting the downstream call info in the
                        // request info (i.e. for access logs if desired).
                        requestInfo.addRequestAttribute(
                            DOWNSTREAM_CALL_PATH_REQUEST_ATTR_KEY,
                            HttpUtils.extractPath(downstreamRequestFirstChunkInfo.firstChunk.getUri()));

                        // Try our circuit breaker (if we have one).
                        Throwable circuitBreakerException = null;
                        try {
                            circuitBreakerManualTask.ifPresent(ManualModeTask::throwExceptionIfCircuitBreakerIsOpen);
                        }
                        catch (Throwable t) {
                            circuitBreakerException = t;
                        }

                        if (circuitBreakerException == null) {
                            // No circuit breaker, or the breaker is closed. We can now stream the first chunk info.
                            String downstreamHost = downstreamRequestFirstChunkInfo.host;
                            int downstreamPort = downstreamRequestFirstChunkInfo.port;
                            HttpRequest downstreamRequestFirstChunk = downstreamRequestFirstChunkInfo.firstChunk;
                            boolean isSecureHttpsCall = downstreamRequestFirstChunkInfo.isHttps;
                            boolean relaxedHttpsValidation = downstreamRequestFirstChunkInfo.relaxedHttpsValidation;

                            // Tell the proxyRouterState about the streaming callback so that
                            // callback.unrecoverableErrorOccurred(...) can be called in the case of an error
                            // on subsequent chunks.
                            proxyRouterState.setStreamingCallback(callback);

                            // Setup the streaming channel future with everything it needs to kick off the
                            // downstream request.
                            proxyRouterState.setStreamingStartTimeNanos(System.nanoTime());
                            CompletableFuture<StreamingChannel> streamingChannel =
                                streamingAsyncHttpClient.streamDownstreamCall(
                                    downstreamHost, downstreamPort, downstreamRequestFirstChunk, isSecureHttpsCall,
                                    relaxedHttpsValidation, callback, callTimeoutValueToUse, ctx);

                            // Tell the streaming channel future what to do when it completes.
                            streamingChannel = streamingChannel.whenComplete((sc, cause) -> {
                                if (cause == null) {
                                    // Successfully connected and sent the first chunk. We can now safely let
                                    // the remaining content chunks through for streaming.
                                    proxyRouterState.triggerChunkProcessing(sc);
                                }
                                else {
                                    // Something blew up while connecting to the downstream server.
                                    callback.unrecoverableErrorOccurred(cause);
                                }
                            });

                            // Set the streaming channel future on the state so it can be connected to.
                            proxyRouterState.setStreamingChannelCompletableFuture(streamingChannel);
                        }
                        else {
                            // Circuit breaker is tripped (or otherwise threw an unexpected exception). Immediately
                            // short circuit the error back to the client.
                            callback.unrecoverableErrorOccurred(circuitBreakerException);
                        }
                    }
                    catch (Throwable t) {
                        callback.unrecoverableErrorOccurred(t);
                    }
                }
            });
        }
        else if (msg instanceof HttpContent) {
            HttpContent msgContent = (HttpContent) msg;
            // This is a content chunk. If the downstream stream has already failed then just release the
            // chunk; otherwise register the chunk-streaming behavior and subsequent cleanup for the given
            // HttpContent.
            if (!releaseContentChunkIfStreamAlreadyFailed(msgContent, proxyRouterState)) {
                registerChunkStreamingAction(proxyRouterState, msgContent, ctx);
            }
        }

        return PipelineContinuationBehavior.DO_NOT_FIRE_CONTINUE_EVENT;
    }

    return PipelineContinuationBehavior.CONTINUE;
}
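For orientation, here is a hedged sketch of the kind of ProxyRouterEndpoint that feeds this handler. The class name, downstream host/port, matcher path, and the (host, port, isHttps, firstChunk) constructor shape for DownstreamRequestFirstChunkInfo are illustrative assumptions, not verified riposte API; check the riposte source for the real signatures.

// Hypothetical ProxyRouterEndpoint that forwards every matching request to a fixed
// downstream host. The first chunk is built with plain Netty 4.0-era objects.
public class PassthroughEndpoint extends ProxyRouterEndpoint {

    @Override
    public CompletableFuture<DownstreamRequestFirstChunkInfo> getDownstreamRequestFirstChunkInfo(
        RequestInfo<?> request, Executor longRunningTaskExecutor, ChannelHandlerContext ctx
    ) {
        // Build the first chunk (headers only) of the downstream request.
        HttpRequest firstChunk = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, request.getPath());
        firstChunk.headers().set(HttpHeaders.Names.HOST, "internal-service.example.com");

        // Assumed constructor shape: (host, port, isHttps, firstChunk).
        return CompletableFuture.completedFuture(
            new DownstreamRequestFirstChunkInfo("internal-service.example.com", 8080, false, firstChunk));
    }

    @Override
    public Matcher requestMatcher() {
        // Path pattern syntax is an assumption; see riposte's Matcher for details.
        return Matcher.match("/passthrough");
    }
}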
Use of io.netty.handler.codec.http.HttpResponse in project riposte by Nike-Inc.
The class ProcessFinalResponseOutputHandler, method write.
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
    // Deal with the final outbound HttpResponse.
    if (msg instanceof HttpResponse) {
        HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
        if (state != null)
            state.setActualResponseObject((HttpResponse) msg);
    }

    // Deal with the final outbound body content.
    if (msg instanceof HttpContent) {
        HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
        if (state != null && state.getResponseInfo() != null) {
            ResponseInfo<?> responseInfo = state.getResponseInfo();
            long contentBytes = ((HttpContent) msg).content().readableBytes();
            if (responseInfo.getFinalContentLength() == null)
                responseInfo.setFinalContentLength(contentBytes);
            else
                responseInfo.setFinalContentLength(responseInfo.getFinalContentLength() + contentBytes);
        }
    }

    super.write(ctx, msg, promise);
}
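The null-check-then-accumulate pattern above means the reported final content length is the sum of the sizes of every content chunk written for the response. A standalone illustration of just that accumulation (plain Java, not riposte API):

// The first chunk initializes the running total; each later chunk (including an
// empty LastHttpContent, which contributes 0 bytes) adds to it.
Long finalContentLength = null;
for (long chunkBytes : new long[] { 100, 250, 0 }) {
    finalContentLength = (finalContentLength == null)
                         ? chunkBytes
                         : finalContentLength + chunkBytes;
}
// finalContentLength is now 350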
Use of io.netty.handler.codec.http.HttpResponse in project riposte by Nike-Inc.
The class SmartHttpContentCompressor, method write.
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
    HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
    allowCompressionForThisRequest = false;

    if (state != null) {
        // We only want to allow compression if the endpoint being hit is *not* a ProxyRouterEndpoint,
        // the response is full, and the response size is greater than the threshold.
        boolean isFull = msg instanceof HttpResponse && msg instanceof LastHttpContent;
        boolean endpointAllowed = endpointAllowsCompression(state.getEndpointForExecution());
        boolean responseInfoAllowed =
            state.getResponseInfo() == null || !state.getResponseInfo().isPreventCompressedOutput();

        if (isFull && endpointAllowed && responseInfoAllowed
            && ((LastHttpContent) msg).content().readableBytes() > responseSizeThresholdBytes) {
            allowCompressionForThisRequest = true;
        }
    }

    super.write(ctx, msg, promise);
}
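The decision reduces to a single predicate. Here is a sketch of the same logic factored out for readability; SmartHttpContentCompressor itself keeps it inline, and the two boolean parameters stand in for the endpoint and response-info checks computed in the surrounding code:

// Sketch only: the compression decision from write(...) above as a standalone
// predicate. Parameter names are illustrative.
private static boolean shouldAllowCompression(Object msg, boolean endpointAllowed,
                                              boolean responseInfoAllowed, long responseSizeThresholdBytes) {
    // A "full" response carries headers and the last content chunk in one message.
    boolean isFull = msg instanceof HttpResponse && msg instanceof LastHttpContent;
    return isFull
           && endpointAllowed
           && responseInfoAllowed
           && ((LastHttpContent) msg).content().readableBytes() > responseSizeThresholdBytes;
}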
Use of io.netty.handler.codec.http.HttpResponse in project riposte by Nike-Inc.
The class ProcessFinalResponseOutputHandlerTest, method write_calls_setActualResponseObject_on_state_if_msg_is_HttpResponse.
@Test
public void write_calls_setActualResponseObject_on_state_if_msg_is_HttpResponse() throws Exception {
    // given
    HttpResponse msgMock = mock(HttpResponse.class);

    // when
    handler.write(ctxMock, msgMock, promiseMock);

    // then
    verify(stateMock).setActualResponseObject(msgMock);
}
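A companion test for the HttpContent branch of write(...) could look like the sketch below. It assumes the same handler/ctxMock/promiseMock/stateMock fixtures as the test above plus a responseInfoMock; the test name and wiring are illustrative, not copied from the riposte test suite.

@Test
public void write_sets_final_content_length_on_responseInfo_if_msg_is_HttpContent() throws Exception {
    // given: a 42-byte content chunk and a ResponseInfo with no final content length yet
    HttpContent contentMock = mock(HttpContent.class);
    doReturn(Unpooled.wrappedBuffer(new byte[42])).when(contentMock).content();
    doReturn(responseInfoMock).when(stateMock).getResponseInfo();
    doReturn(null).when(responseInfoMock).getFinalContentLength();

    // when
    handler.write(ctxMock, contentMock, promiseMock);

    // then: the chunk's readable bytes become the initial final content length
    verify(responseInfoMock).setFinalContentLength(42L);
}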