use of com.nike.riposte.server.http.ProxyRouterProcessingState in project riposte by Nike-Inc.
the class ResponseSenderHandler method sendResponse.
protected void sendResponse(ChannelHandlerContext ctx, Object msg, boolean sendLastDitchResponseInline) throws JsonProcessingException {
    try {
        // Try to send the response.
        doSendResponse(ctx, msg);
    } catch (Exception origSendEx) {
        boolean shouldRethrowOriginalSendEx = true;
        // Something went wrong while trying to send the response. We want to create a generic service error and
        // send that back to the caller if possible.
        // The HttpProcessingState will never be null thanks to ExceptionHandlingHandler
        HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
        boolean alreadyTriedSendingLastDitchResponse = alreadyTriedSendingLastDitchResponse(state);
        // Cancel any proxy router streaming that may be happening.
        ProxyRouterProcessingState proxyRouterProcessingState =
            ChannelAttributes.getProxyRouterProcessingStateForChannel(ctx).get();
        if (proxyRouterProcessingState != null) {
            proxyRouterProcessingState.cancelRequestStreaming(origSendEx, ctx);
            proxyRouterProcessingState.cancelDownstreamRequest(origSendEx);
        }
        // If response sending has already started there's nothing left to do but close the connection. Otherwise
        // we can create a generic service error and try sending that.
        if (state.isResponseSendingStarted()) {
            runnableWithTracingAndMdc(() -> {
                logger.error(
                    "An unexpected error occurred while sending the response. At least part of the "
                    + "response was sent, so there's nothing we can do at this point but close the connection.",
                    origSendEx
                );
                // Add this error to the current span if possible, but only if no error tag already exists.
                Span currentSpan = Tracer.getInstance().getCurrentSpan();
                if (currentSpan != null && currentSpan.getTags().get(KnownZipkinTags.ERROR) == null) {
                    String errorTagValue = (origSendEx.getMessage() == null)
                                           ? origSendEx.getClass().getSimpleName()
                                           : origSendEx.getMessage();
                    currentSpan.putTag(KnownZipkinTags.ERROR, errorTagValue);
                }
            }, ctx).run();
            ctx.channel().close();
        } else if (!alreadyTriedSendingLastDitchResponse) {
            // Mark that we've tried doing the last-ditch response so that we only ever attempt it once.
            markTriedSendingLastDitchResponse(state);
            // We haven't already started response sending, so we can try sending a last ditch error response
            // instead that represents the response-sending exception.
            String errorId = UUID.randomUUID().toString();
            ResponseInfo<?> lastDitchErrorResponseInfo =
                ResponseInfo.newBuilder(
                    new ErrorResponseBodyImpl(errorId, Collections.singleton(SampleCoreApiError.GENERIC_SERVICE_ERROR))
                )
                .withHeaders(new DefaultHttpHeaders().set("error_uid", errorId))
                .withHttpStatusCode(500)
                .build();
            state.setResponseInfo(lastDitchErrorResponseInfo, origSendEx);
            runnableWithTracingAndMdc(
                () -> logger.error(
                    "An unexpected error occurred while attempting to send a response. We'll attempt to send a "
                    + "last-ditch error message. error_uid={}",
                    errorId, origSendEx
                ),
                ctx
            ).run();
            if (sendLastDitchResponseInline) {
                doSendResponse(ctx, msg);
                // The last ditch response was successfully sent - we don't need to rethrow the original exception.
                shouldRethrowOriginalSendEx = false;
            }
        }
        if (shouldRethrowOriginalSendEx) {
            // Rethrow the original send exception so it will be allowed to propagate out of this class.
            throw origSendEx;
        }
    }
}
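The alreadyTriedSendingLastDitchResponse(...) and markTriedSendingLastDitchResponse(...) helpers referenced above are not part of this excerpt. The following is a minimal sketch of how that once-only bookkeeping could be tracked, assuming the RequestInfo request-attribute accessors (addRequestAttribute(...) as used elsewhere in this page, and a getRequestAttributes() map accessor, which is an assumption). The attribute key name is hypothetical and this is not necessarily how riposte itself implements these helpers.

// Hypothetical attribute key - not necessarily the one riposte uses.
protected static final String LAST_DITCH_RESPONSE_ATTEMPTED_ATTR_KEY = "lastDitchResponseAttempted";

protected void markTriedSendingLastDitchResponse(HttpProcessingState state) {
    // Record the attempt on the RequestInfo so any later attempt sees it and gives up.
    state.getRequestInfo().addRequestAttribute(LAST_DITCH_RESPONSE_ATTEMPTED_ATTR_KEY, Boolean.TRUE);
}

protected boolean alreadyTriedSendingLastDitchResponse(HttpProcessingState state) {
    // Assumes RequestInfo exposes its request attributes as a Map via getRequestAttributes().
    return Boolean.TRUE.equals(
        state.getRequestInfo().getRequestAttributes().get(LAST_DITCH_RESPONSE_ATTEMPTED_ATTR_KEY)
    );
}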
use of com.nike.riposte.server.http.ProxyRouterProcessingState in project riposte by Nike-Inc.
the class AsyncNettyHelper method executeOnlyIfChannelIsActive.
/**
* Executes the given runnable only if {@code ctx.channel().isActive()} returns true. If the channel is not active
* then a warning is logged, resources are released, the distributed trace is completed (if appropriate), and the
* runnable is *not* executed. Call this when you're about to do something and it's possible that the channel has
* been closed. Usually this is only necessary when you're manually firing an event on the given {@code ctx} after
* some asynchronous delay (e.g. a future completes, or a timeout was scheduled on the event loop, etc).
*
* @param ctx
* The {@link ChannelHandlerContext} that contains the state for this request.
* @param markerForLogs
* This will be put into the log warning if the channel is not active to help you identify where the problem
* occurred. This is usually some arbitrary "ID" representing the code that is calling this method.
* @param thingToMaybeExecute
* This will be executed if the channel is active, and ignored if the channel is not active.
*
* @return true if the channel was active and the thingToMaybeExecute was executed, false if the channel was not
* active and things were cleaned up as per this method description.
*/
public static boolean executeOnlyIfChannelIsActive(ChannelHandlerContext ctx, String markerForLogs, Runnable thingToMaybeExecute) {
    if (ctx.channel().isActive()) {
        // The channel is active, so execute the runnable.
        thingToMaybeExecute.run();
        return true;
    } else {
        Pair<Deque<Span>, Map<String, String>> origTracingAndMdcPair = linkTracingAndMdcToCurrentThread(ctx);
        try {
            // The channel is *not* active. Log a warning, release resources,
            // and complete any existing distributed trace.
            logger.warn(
                "Unable to continue - channel is no longer active. The client may have closed the connection. "
                + "Releasing resources and stopping request processing. channel_inactive_cannot_continue_marker={}",
                markerForLogs
            );
            // Gather the stuff we want to try to release resources for.
            HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
            RequestInfo<?> requestInfo = (state == null) ? null : state.getRequestInfo();
            ProxyRouterProcessingState proxyRouterState =
                ChannelAttributes.getProxyRouterProcessingStateForChannel(ctx).get();
            // Tell the RequestInfo it can release all its resources.
            if (requestInfo != null)
                requestInfo.releaseAllResources();
            // Cancel any proxy router streaming and the downstream request (the channel
            // is closing) and doing this will cause any resources it's holding onto to be released.
            if (proxyRouterState != null) {
                Throwable reason = new RuntimeException("Cannot execute - Server worker channel closed");
                proxyRouterState.cancelRequestStreaming(reason, ctx);
                proxyRouterState.cancelDownstreamRequest(reason);
            }
            // Complete the distributed trace, but only if it hasn't already been completed or scheduled for
            // completion - we don't want it sent
            // out a second time.
            if (state == null || !state.isTraceCompletedOrScheduled()) {
                // Mark the trace completed/scheduled on the state and handle the final response
                // span tagging and span name if it hasn't already been done.
                if (state != null) {
                    state.setTraceCompletedOrScheduled(true);
                    state.handleTracingResponseTaggingAndFinalSpanNameIfNotAlreadyDone();
                }
                Tracer.getInstance().completeRequestSpan();
            }
            return false;
        } finally {
            unlinkTracingAndMdcFromCurrentThread(origTracingAndMdcPair);
        }
    }
}
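A hedged usage sketch of the helper above: fire a follow-up event only if the client connection is still open after a delay scheduled on the event loop, which is exactly the "manually firing an event after some asynchronous delay" scenario described in the javadoc. The event object, class name, and marker string are hypothetical; only the executeOnlyIfChannelIsActive signature shown above and standard Netty scheduling APIs are assumed, and the AsyncNettyHelper import path is an assumption.

import com.nike.riposte.util.AsyncNettyHelper; // assumed package - adjust to wherever AsyncNettyHelper lives in your riposte version
import io.netty.channel.ChannelHandlerContext;
import java.util.concurrent.TimeUnit;

public class DelayedEventExample {
    // Hypothetical event object fired after the delay.
    private static final Object CUSTOM_TIMEOUT_EVENT = new Object();

    public void scheduleDelayedEvent(ChannelHandlerContext ctx) {
        ctx.channel().eventLoop().schedule(
            () -> AsyncNettyHelper.executeOnlyIfChannelIsActive(
                ctx,
                "delayed-event-example", // marker that shows up in the warning log if the channel is inactive
                () -> ctx.fireUserEventTriggered(CUSTOM_TIMEOUT_EVENT)
            ),
            5, TimeUnit.SECONDS
        );
    }
}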
use of com.nike.riposte.server.http.ProxyRouterProcessingState in project riposte by Nike-Inc.
the class StreamingAsyncHttpClientTest method mockChannelHandlerContext.
private ChannelHandlerContext mockChannelHandlerContext() {
    ChannelHandlerContext mockContext = mock(ChannelHandlerContext.class);
    when(mockContext.channel()).thenReturn(mock(Channel.class));
    @SuppressWarnings("unchecked") Attribute<HttpProcessingState> mockHttpProcessingStateAttribute = mock(Attribute.class);
    Attribute<ProxyRouterProcessingState> mockProxyStateAttribute = mock(Attribute.class);
    when(mockContext.channel().attr(ChannelAttributes.HTTP_PROCESSING_STATE_ATTRIBUTE_KEY))
        .thenReturn(mockHttpProcessingStateAttribute);
    when(mockContext.channel().attr(ChannelAttributes.HTTP_PROCESSING_STATE_ATTRIBUTE_KEY).get())
        .thenReturn(mock(HttpProcessingState.class));
    when(mockContext.channel().attr(ChannelAttributes.PROXY_ROUTER_PROCESSING_STATE_ATTRIBUTE_KEY))
        .thenReturn(mockProxyStateAttribute);
    when(mockContext.channel().attr(ChannelAttributes.PROXY_ROUTER_PROCESSING_STATE_ATTRIBUTE_KEY).get())
        .thenReturn(mock(ProxyRouterProcessingState.class));
    return mockContext;
}
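For illustration, a small hypothetical test that exercises the helper above and confirms the wired-up attributes resolve through ChannelAttributes the same way the production code on this page reads them. It assumes JUnit's @Test and AssertJ's assertThat are available in the test class.

@Test
public void mockChannelHandlerContext_wires_up_processing_state_attributes() {
    // Hypothetical test built on the helper above.
    ChannelHandlerContext ctx = mockChannelHandlerContext();

    // Read the states back the same way the handlers do: ChannelAttributes -> attr -> get().
    HttpProcessingState httpState = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
    ProxyRouterProcessingState proxyState = ChannelAttributes.getProxyRouterProcessingStateForChannel(ctx).get();

    assertThat(httpState).isNotNull();
    assertThat(proxyState).isNotNull();
}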
use of com.nike.riposte.server.http.ProxyRouterProcessingState in project riposte by Nike-Inc.
the class StreamingAsyncHttpClient method streamDownstreamCall.
/**
* TODO: Fully document me.
* <br/>
* NOTE: The returned CompletableFuture will only be completed successfully if the connection to the downstream
* server was successful and the initialRequestChunk was successfully written out. This has implications for
* initialRequestChunk regarding releasing its reference count (i.e. calling {@link
* io.netty.util.ReferenceCountUtil#release(Object)} and passing in initialRequestChunk). If the returned
* CompletableFuture is successful it means initialRequestChunk's reference count will already be reduced by one
* relative to when this method was called because it will have been passed to a successful {@link
* ChannelHandlerContext#writeAndFlush(Object)} method call.
* <p/>
* Long story short - assume initialRequestChunk is an object with a reference count of x:
* <ul>
* <li>
* If the returned CompletableFuture is successful, then when it completes successfully
* initialRequestChunk's reference count will be x - 1
* </li>
* <li>
* If the returned CompletableFuture is *NOT* successful, then when it completes initialRequestChunk's
* reference count will still be x
* </li>
* </ul>
*/
public CompletableFuture<StreamingChannel> streamDownstreamCall(
    String downstreamHost, int downstreamPort, HttpRequest initialRequestChunk, boolean isSecureHttpsCall,
    boolean relaxedHttpsValidation, StreamingCallback callback, long downstreamCallTimeoutMillis,
    boolean performSubSpanAroundDownstreamCalls, boolean addTracingHeadersToDownstreamCall,
    @NotNull ProxyRouterProcessingState proxyRouterProcessingState, @NotNull RequestInfo<?> requestInfo,
    ChannelHandlerContext ctx
) {
    CompletableFuture<StreamingChannel> streamingChannel = new CompletableFuture<>();
    // Set the host header. Include the port in the value when it is a non-default port.
    boolean isDefaultPort = (downstreamPort == 80 && !isSecureHttpsCall) || (downstreamPort == 443 && isSecureHttpsCall);
    String hostHeaderValue = (isDefaultPort) ? downstreamHost : downstreamHost + ":" + downstreamPort;
    initialRequestChunk.headers().set(HttpHeaderNames.HOST, hostHeaderValue);
    long beforeConnectionStartTimeNanos = System.nanoTime();
    // Create a connection to the downstream server.
    ChannelPool pool = getPooledChannelFuture(downstreamHost, downstreamPort);
    Future<Channel> channelFuture = pool.acquire();
    // Add a listener that kicks off the downstream call once the connection is completed.
    channelFuture.addListener(future -> {
        Pair<Deque<Span>, Map<String, String>> originalThreadInfo = null;
        try {
            long connectionSetupTimeNanos = System.nanoTime() - beforeConnectionStartTimeNanos;
            requestInfo.addRequestAttribute(DOWNSTREAM_CALL_CONNECTION_SETUP_TIME_NANOS_REQUEST_ATTR_KEY, connectionSetupTimeNanos);
            // Setup tracing and MDC so our log messages have the correct distributed trace info, etc.
            originalThreadInfo = linkTracingAndMdcToCurrentThread(ctx);
            if (logger.isDebugEnabled()) {
                logger.debug("CONNECTION SETUP TIME NANOS: {}", connectionSetupTimeNanos);
            }
            if (!future.isSuccess()) {
                try {
                    // We did not connect to the downstream host successfully. Notify the callback.
                    streamingChannel.completeExceptionally(
                        new WrapperException("Unable to connect to downstream host: " + downstreamHost, future.cause())
                    );
                } finally {
                    Channel ch = channelFuture.getNow();
                    if (ch != null) {
                        // We likely will never reach here since the channel future was not successful, however if
                        // we *do* manage to get here somehow, then mark the channel broken and release it back
                        // to the pool.
                        markChannelAsBroken(ch);
                        pool.release(ch);
                    }
                }
                return;
            }
            // Do a subspan around the downstream call if desired.
            if (performSubSpanAroundDownstreamCalls) {
                // TODO: The subspan start stuff should probably be moved to the beginning of
                //       streamDownstreamCall(), so that we pick up connection setup time (and can annotate conn
                //       start/finish). For now, we'll fake it by annotating conn start/finish time on the subspan
                //       at a negative time offset. So they'll be "in the past" from the perspective of the subspan.
                // Add the subspan.
                String spanName = getSubspanSpanName(initialRequestChunk, requestInfo, proxySpanTaggingStrategy);
                // Start a new child/subspan for this call if possible, falling back to a new request span (rather
                // than child/subspan) if there's no current span on the thread. The
                // startSpanInCurrentContext() method will do the right thing here in either case.
                Span subspan = Tracer.getInstance().startSpanInCurrentContext(spanName, Span.SpanPurpose.CLIENT);
                // Do the auto-tagging based on the request.
                proxySpanTaggingStrategy.handleRequestTagging(subspan, initialRequestChunk);
                // Tag the downstream host and port on the subspan -
                // this is something we definitely want for proxy/router requests.
                try {
                    subspan.putTag(KnownZipkinTags.HTTP_HOST, downstreamHost + ":" + downstreamPort);
                } catch (Throwable t) {
                    logger.error(
                        "An unexpected error occurred while adding downstream host and port tags. The error will "
                        + "be swallowed to avoid doing any damage, but your span may be missing some expected "
                        + "tags. This error should be fixed.",
                        t
                    );
                }
                // Add the initial HttpRequest to our ProxyRouterProcessingState so it's available for final
                // response tagging and span naming at the end.
                proxyRouterProcessingState.setProxyHttpRequest(initialRequestChunk);
                // Add the connection start/finish annotations if desired, offset from the subspan
                // start time.
                if (proxySpanTaggingStrategy.shouldAddConnStartAnnotation()) {
                    subspan.addTimestampedAnnotation(
                        TimestampedAnnotation.forEpochMicrosWithNanoOffset(
                            subspan.getSpanStartTimeEpochMicros(),
                            -connectionSetupTimeNanos,
                            proxySpanTaggingStrategy.connStartAnnotationName()
                        )
                    );
                }
                if (proxySpanTaggingStrategy.shouldAddConnFinishAnnotation()) {
                    subspan.addTimestampedAnnotation(
                        TimestampedAnnotation.forEpochMicros(
                            subspan.getSpanStartTimeEpochMicros(),
                            proxySpanTaggingStrategy.connFinishAnnotationName()
                        )
                    );
                }
            }
            Deque<Span> distributedSpanStackToUse = Tracer.getInstance().getCurrentSpanStackCopy();
            Map<String, String> mdcContextToUse = MDC.getCopyOfContextMap();
            @Nullable
            final Span spanForDownstreamCall = (distributedSpanStackToUse == null) ? null : distributedSpanStackToUse.peek();
            // Add distributed trace headers to the downstream call if desired and we have a current span.
            if (addTracingHeadersToDownstreamCall && spanForDownstreamCall != null) {
                HttpRequestTracingUtils.propagateTracingHeaders((headerKey, headerValue) -> {
                    if (headerValue != null) {
                        initialRequestChunk.headers().set(headerKey, headerValue);
                    }
                }, spanForDownstreamCall);
            }
            Channel ch = channelFuture.getNow();
            if (logger.isDebugEnabled())
                logger.debug("Channel ID of the Channel pulled from the pool: {}", ch.toString());
            // We may not be in the right thread to modify the channel pipeline and write data. If we're in the
            // wrong thread we can get deadlock type situations. By running the relevant bits in the channel's
            // event loop we're guaranteed it will be run in the correct thread.
            ch.eventLoop().execute(runnableWithTracingAndMdc(() -> {
                BiConsumer<String, Throwable> prepChannelErrorHandler = (errorMessage, cause) -> {
                    try {
                        streamingChannel.completeExceptionally(new WrapperException(errorMessage, cause));
                    } finally {
                        // This channel may be permanently busted depending on the error, so mark it broken and let
                        // the pool close it and clean it up.
                        markChannelAsBroken(ch);
                        pool.release(ch);
                    }
                };
                try {
                    ObjectHolder<Boolean> callActiveHolder = new ObjectHolder<>();
                    callActiveHolder.heldObject = true;
                    ObjectHolder<Boolean> lastChunkSentDownstreamHolder = new ObjectHolder<>();
                    lastChunkSentDownstreamHolder.heldObject = false;
                    prepChannelForDownstreamCall(
                        downstreamHost, downstreamPort, pool, ch, callback, distributedSpanStackToUse, mdcContextToUse,
                        isSecureHttpsCall, relaxedHttpsValidation, performSubSpanAroundDownstreamCalls,
                        downstreamCallTimeoutMillis, callActiveHolder, lastChunkSentDownstreamHolder,
                        proxyRouterProcessingState, spanForDownstreamCall
                    );
                    logInitialRequestChunk(initialRequestChunk, downstreamHost, downstreamPort);
                    // Send the HTTP request, and do a wire-send start annotation on the subspan if desired.
                    if (spanForDownstreamCall != null && proxySpanTaggingStrategy.shouldAddWireSendStartAnnotation()) {
                        spanForDownstreamCall.addTimestampedAnnotationForCurrentTime(
                            proxySpanTaggingStrategy.wireSendStartAnnotationName()
                        );
                    }
                    ChannelFuture writeFuture = ch.writeAndFlush(initialRequestChunk);
                    // After the initial chunk has been sent we'll open the floodgates
                    // for any further chunk streaming
                    writeFuture.addListener(completedWriteFuture -> {
                        if (completedWriteFuture.isSuccess())
                            streamingChannel.complete(
                                new StreamingChannel(
                                    ch, pool, callActiveHolder, lastChunkSentDownstreamHolder, distributedSpanStackToUse,
                                    mdcContextToUse, spanForDownstreamCall, proxySpanTaggingStrategy
                                )
                            );
                        else {
                            prepChannelErrorHandler.accept(
                                "Writing the first HttpRequest chunk to the downstream service failed.",
                                completedWriteFuture.cause()
                            );
                            // noinspection UnnecessaryReturnStatement
                            return;
                        }
                    });
                } catch (SSLException | NoSuchAlgorithmException | KeyStoreException ex) {
                    prepChannelErrorHandler.accept("Error setting up SSL context for downstream call", ex);
                    // noinspection UnnecessaryReturnStatement
                    return;
                } catch (Throwable t) {
                    // If we don't catch and handle this here it gets swallowed since we're in a Runnable
                    prepChannelErrorHandler.accept(
                        "An unexpected error occurred while prepping the channel pipeline for the downstream call", t
                    );
                    // noinspection UnnecessaryReturnStatement
                    return;
                }
            }, ctx));
        } catch (Throwable ex) {
            try {
                String errorMsg = "Error occurred attempting to send first chunk (headers/etc) downstream";
                Exception errorToFire = new WrapperException(errorMsg, ex);
                logger.warn(errorMsg, errorToFire);
                streamingChannel.completeExceptionally(errorToFire);
            } finally {
                Channel ch = channelFuture.getNow();
                if (ch != null) {
                    // Depending on where the error was thrown the channel may or may not exist. If it does exist,
                    // then assume it's unusable, mark it as broken, and let the pool close it and remove it.
                    markChannelAsBroken(ch);
                    pool.release(ch);
                }
            }
        } finally {
            // Unhook the tracing and MDC stuff from this thread now that we're done.
            unlinkTracingAndMdcFromCurrentThread(originalThreadInfo);
        }
    });
    return streamingChannel;
}
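Given the reference-counting contract described in the javadoc above, here is a hedged sketch of how a caller might consume the returned future: release initialRequestChunk only when the future fails, since a successful completion means the writeAndFlush() already consumed one reference. The variable names and argument values are placeholders, not values from the riposte codebase; ReferenceCountUtil is io.netty.util.ReferenceCountUtil.

CompletableFuture<StreamingChannel> streamingChannelFuture = streamingAsyncHttpClient.streamDownstreamCall(
    "downstream.example.com", 8080, initialRequestChunk,
    false,   // isSecureHttpsCall
    false,   // relaxedHttpsValidation
    callback, 30_000,
    true,    // performSubSpanAroundDownstreamCalls
    true,    // addTracingHeadersToDownstreamCall
    proxyRouterProcessingState, requestInfo, ctx
);

streamingChannelFuture.whenComplete((streamingChannel, error) -> {
    if (error != null) {
        // The initial chunk was never successfully written, so its reference count is unchanged -
        // release the reference we hold to avoid leaking the buffer.
        ReferenceCountUtil.release(initialRequestChunk);
    }
    // On success the reference count was already decremented by the writeAndFlush(), so there's nothing to do.
});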
use of com.nike.riposte.server.http.ProxyRouterProcessingState in project riposte by Nike-Inc.
the class StreamingAsyncHttpClient method prepChannelForDownstreamCall.
protected void prepChannelForDownstreamCall(
    String downstreamHost, int downstreamPort, ChannelPool pool, Channel ch, StreamingCallback callback,
    Deque<Span> distributedSpanStackToUse, Map<String, String> mdcContextToUse, boolean isSecureHttpsCall,
    boolean relaxedHttpsValidation, boolean performSubSpanAroundDownstreamCalls, long downstreamCallTimeoutMillis,
    ObjectHolder<Boolean> callActiveHolder, ObjectHolder<Boolean> lastChunkSentDownstreamHolder,
    ProxyRouterProcessingState proxyRouterProcessingState, @Nullable Span spanForDownstreamCall
) throws SSLException, NoSuchAlgorithmException, KeyStoreException {
    ChannelHandler chunkSenderHandler = new SimpleChannelInboundHandler<HttpObject>() {

        @Override
        protected void channelRead0(ChannelHandlerContext downstreamCallCtx, HttpObject msg) {
            try {
                // Only do this work while the call is still active. Messages that arrive after
                // the call is fully processed should not trigger the behavior a second time.
                if (callActiveHolder.heldObject) {
                    if (msg instanceof LastHttpContent) {
                        lastChunkSentDownstreamHolder.heldObject = true;
                        if (performSubSpanAroundDownstreamCalls) {
                            // Complete the subspan.
                            runnableWithTracingAndMdc(() -> {
                                Span currentSpan = Tracer.getInstance().getCurrentSpan();
                                if (proxyRouterProcessingState != null) {
                                    proxyRouterProcessingState.handleTracingResponseTaggingAndFinalSpanNameIfNotAlreadyDone(currentSpan);
                                }
                                if (proxySpanTaggingStrategy.shouldAddWireReceiveFinishAnnotation()) {
                                    currentSpan.addTimestampedAnnotationForCurrentTime(
                                        proxySpanTaggingStrategy.wireReceiveFinishAnnotationName()
                                    );
                                }
                                if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
                                    Tracer.getInstance().completeRequestSpan();
                                else
                                    Tracer.getInstance().completeSubSpan();
                            }, distributedSpanStackToUse, mdcContextToUse).run();
                        }
                    }
                    HttpObject msgToPass = msg;
                    if (msg instanceof HttpResponse) {
                        // We can't pass the original HttpResponse back to the callback due to intricacies of how
                        // Netty handles determining the last chunk. If we do, and the callback ends up writing
                        // the message out to the client (which happens during proxy routing for example), then
                        // msg's headers might get modified - potentially causing this channel pipeline to
                        // never send a LastHttpContent, which will in turn cause an indefinite hang.
                        HttpResponse origHttpResponse = (HttpResponse) msg;
                        HttpResponse httpResponse =
                            (msg instanceof FullHttpResponse)
                            ? new DefaultFullHttpResponse(
                                  origHttpResponse.protocolVersion(), origHttpResponse.status(),
                                  ((FullHttpResponse) msg).content())
                            : new DefaultHttpResponse(origHttpResponse.protocolVersion(), origHttpResponse.status());
                        httpResponse.headers().add(origHttpResponse.headers());
                        msgToPass = httpResponse;
                        if (proxyRouterProcessingState != null) {
                            proxyRouterProcessingState.setProxyHttpResponse(httpResponse);
                        }
                        if (spanForDownstreamCall != null && proxySpanTaggingStrategy.shouldAddWireReceiveStartAnnotation()) {
                            spanForDownstreamCall.addTimestampedAnnotationForCurrentTime(
                                proxySpanTaggingStrategy.wireReceiveStartAnnotationName()
                            );
                        }
                    }
                    callback.messageReceived(msgToPass);
                } else {
                    if (shouldLogBadMessagesAfterRequestFinishes) {
                        runnableWithTracingAndMdc(
                            () -> logger.warn("Received HttpObject msg when call was not active: {}", msg),
                            distributedSpanStackToUse, mdcContextToUse
                        ).run();
                    }
                }
            } finally {
                if (msg instanceof LastHttpContent) {
                    releaseChannelBackToPoolIfCallIsActive(
                        ch, pool, callActiveHolder, "last content chunk sent", distributedSpanStackToUse, mdcContextToUse
                    );
                }
            }
        }
    };
    Consumer<Throwable> doErrorHandlingConsumer = (cause) -> {
        Pair<Deque<Span>, Map<String, String>> originalThreadInfo = null;
        try {
            // Setup tracing and MDC so our log messages have the correct distributed trace info, etc.
            originalThreadInfo = linkTracingAndMdcToCurrentThread(distributedSpanStackToUse, mdcContextToUse);
            // Only do this work while the call is still active. Errors that arrive after the
            // call is fully processed should not trigger the behavior a second time.
            if (callActiveHolder.heldObject) {
                if (proxyRouterProcessingState != null) {
                    proxyRouterProcessingState.setProxyError(cause);
                }
                if (performSubSpanAroundDownstreamCalls) {
                    Span currentSpan = Tracer.getInstance().getCurrentSpan();
                    HttpResponse proxyHttpResponseObj =
                        (proxyRouterProcessingState == null) ? null : proxyRouterProcessingState.getProxyHttpResponse();
                    if (currentSpan != null
                        && proxySpanTaggingStrategy.shouldAddErrorAnnotationForCaughtException(proxyHttpResponseObj, cause)) {
                        currentSpan.addTimestampedAnnotationForCurrentTime(
                            proxySpanTaggingStrategy.errorAnnotationName(proxyHttpResponseObj, cause)
                        );
                    }
                    if (proxyRouterProcessingState != null) {
                        proxyRouterProcessingState.handleTracingResponseTaggingAndFinalSpanNameIfNotAlreadyDone(currentSpan);
                    }
                    if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
                        Tracer.getInstance().completeRequestSpan();
                    else
                        Tracer.getInstance().completeSubSpan();
                }
                Tracer.getInstance().unregisterFromThread();
                if (cause instanceof Errors.NativeIoException) {
                    // NativeIoExceptions are often setup to not have stack traces which is bad for debugging.
                    // Wrap it in a NativeIoExceptionWrapper that maps to a 503 since this is likely a busted
                    // connection and a second attempt should work.
                    cause = new NativeIoExceptionWrapper(
                        "Caught a NativeIoException in the downstream streaming call pipeline. Wrapped it in a "
                        + "NativeIoExceptionWrapper so that it maps to a 503 and provides a usable stack trace "
                        + "in the logs.",
                        (Errors.NativeIoException) cause
                    );
                }
                callback.unrecoverableErrorOccurred(cause, true);
            } else {
                if (cause instanceof DownstreamIdleChannelTimeoutException) {
                    logger.debug(
                        "A channel used for downstream calls will be closed because it was idle too long. "
                        + "This is normal behavior and does not indicate a downstream call failure: {}",
                        cause.toString()
                    );
                } else {
                    logger.warn(
                        "Received exception in downstream call pipeline after the call was finished. "
                        + "Not necessarily anything to worry about but in case it helps debugging the "
                        + "exception was: {}",
                        cause.toString()
                    );
                }
            }
        } finally {
            // Mark the channel as broken so it will be closed and removed from the pool when it is returned.
            markChannelAsBroken(ch);
            // Release it back to the pool if possible/necessary so the pool can do its usual cleanup.
            releaseChannelBackToPoolIfCallIsActive(
                ch, pool, callActiveHolder,
                "error received in downstream pipeline: " + cause.toString(),
                distributedSpanStackToUse, mdcContextToUse
            );
            // No matter what the cause is we want to make sure the channel is closed. Doing this raw ch.close()
            // here will catch the cases where this channel does not have an active call but still needs to be
            // closed (e.g. an idle channel timeout that happens in-between calls).
            ch.close();
            // Unhook the tracing and MDC stuff from this thread now that we're done.
            unlinkTracingAndMdcFromCurrentThread(originalThreadInfo);
        }
    };
    ChannelHandler errorHandler = new ChannelInboundHandlerAdapter() {

        @Override
        public void exceptionCaught(ChannelHandlerContext downstreamCallCtx, Throwable cause) {
            doErrorHandlingConsumer.accept(cause);
        }

        @Override
        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
            if (logger.isDebugEnabled()) {
                runnableWithTracingAndMdc(
                    () -> logger.debug(
                        "Downstream channel closing. call_active={}, last_chunk_sent_downstream={}, channel_id={}",
                        callActiveHolder.heldObject, lastChunkSentDownstreamHolder.heldObject, ctx.channel().toString()
                    ),
                    distributedSpanStackToUse, mdcContextToUse
                ).run();
            }
            // We only care if the channel was closed while the call was active.
            if (callActiveHolder.heldObject)
                doErrorHandlingConsumer.accept(new DownstreamChannelClosedUnexpectedlyException(ch));
            super.channelInactive(ctx);
        }
    };
    // Set up the HTTP client pipeline.
    ChannelPipeline p = ch.pipeline();
    List<String> registeredHandlerNames = p.names();
    // Remove any idle channel timeout handler left over from a previous call - it
    // couldn't be removed at that time because it wasn't in the channel's eventLoop.
    if (registeredHandlerNames.contains(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME)) {
        ChannelHandler idleHandler = p.get(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME);
        if (idleHandler != null)
            p.remove(idleHandler);
    }
    if (debugChannelLifecycleLoggingEnabled && !registeredHandlerNames.contains(DEBUG_LOGGER_HANDLER_NAME)) {
        // Add the channel debug logger if desired.
        p.addFirst(DEBUG_LOGGER_HANDLER_NAME, new LoggingHandler(DOWNSTREAM_CLIENT_CHANNEL_DEBUG_LOGGER_NAME, LogLevel.DEBUG));
    }
    // Add/replace a downstream call timeout detector.
    addOrReplacePipelineHandler(
        new DownstreamIdleChannelTimeoutHandler(
            downstreamCallTimeoutMillis, () -> callActiveHolder.heldObject, true,
            "StreamingAsyncHttpClientChannel-call-timeout", distributedSpanStackToUse, mdcContextToUse
        ),
        DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, p, registeredHandlerNames
    );
    if (isSecureHttpsCall) {
        // Check and see if there's already an existing SslHandler in the pipeline. If it's pointed at the same
        // host/port we need for this call, then we can leave it alone and don't need to create a new one.
        boolean requiresNewSslHandler = true;
        if (registeredHandlerNames.contains(SSL_HANDLER_NAME)) {
            SslHandler existingSslHandler = (SslHandler) p.get(SSL_HANDLER_NAME);
            SSLEngine existingSslEngine = existingSslHandler.engine();
            if (Objects.equals(downstreamHost, existingSslEngine.getPeerHost())
                && Objects.equals(downstreamPort, existingSslEngine.getPeerPort())) {
                // The existing SslHandler's SslEngine is pointed at the correct host/port. We don't need to
                // add a new one.
                requiresNewSslHandler = false;
            } else {
                // The existing SslHandler's SslEngine is *not* pointed at the correct host/port. We need a new one,
                // so remove the old one.
                p.remove(SSL_HANDLER_NAME);
            }
        }
        if (requiresNewSslHandler) {
            // This is an SSL call and we need to add a SslHandler. Create the general-purpose reusable SslContexts if needed.
            if (clientSslCtx == null) {
                TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
                tmf.init((KeyStore) null);
                clientSslCtx = SslContextBuilder.forClient().trustManager(tmf).build();
            }
            if (insecureSslCtx == null) {
                insecureSslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
            }
            // Figure out which SslContext to use for this call.
            SslContext sslCtxToUse = (relaxedHttpsValidation) ? insecureSslCtx : clientSslCtx;
            // Create the SslHandler and configure the SslEngine
            // as per the javadocs for SslContext.newHandler(ByteBufAllocator, String, int).
            SslHandler sslHandler = sslCtxToUse.newHandler(ch.alloc(), downstreamHost, downstreamPort);
            SSLEngine sslEngine = sslHandler.engine();
            SSLParameters sslParameters = sslEngine.getSSLParameters();
            sslParameters.setEndpointIdentificationAlgorithm(HTTPS);
            sslEngine.setSSLParameters(sslParameters);
            // Add the SslHandler to the pipeline in the correct location.
            p.addAfter(DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, SSL_HANDLER_NAME, sslHandler);
        }
    } else {
        // Not an SSL call. Remove the SSL handler if it's there.
        if (registeredHandlerNames.contains(SSL_HANDLER_NAME)) {
            p.remove(SSL_HANDLER_NAME);
        }
    }
    // The HttpClientCodec handler deals with HTTP codec stuff so you don't have to. Set it up if it hasn't already
    // been setup, and inspect it to make sure it's in a "ready to handle a new request" state. Some rare
    // and currently unknown edgecases can cause us to hit this point with the HttpClientCodec in an unclean
    // state, and if we barrel forward without cleaning this up the call will fail.
    boolean pipelineContainsHttpClientCodec = registeredHandlerNames.contains(HTTP_CLIENT_CODEC_HANDLER_NAME);
    boolean existingHttpClientCodecIsInBadState = false;
    if (pipelineContainsHttpClientCodec) {
        HttpClientCodec currentCodec = (HttpClientCodec) p.get(HTTP_CLIENT_CODEC_HANDLER_NAME);
        int currentHttpClientCodecInboundState = determineHttpClientCodecInboundState(currentCodec);
        if (currentHttpClientCodecInboundState != 0) {
            runnableWithTracingAndMdc(
                () -> logger.warn(
                    "HttpClientCodec inbound state was not 0. It will be replaced with a fresh HttpClientCodec. "
                    + "bad_httpclientcodec_inbound_state={}",
                    currentHttpClientCodecInboundState
                ),
                distributedSpanStackToUse, mdcContextToUse
            ).run();
            existingHttpClientCodecIsInBadState = true;
        } else {
            int currentHttpClientCodecOutboundState = determineHttpClientCodecOutboundState(currentCodec);
            if (currentHttpClientCodecOutboundState != 0) {
                runnableWithTracingAndMdc(
                    () -> logger.warn(
                        "HttpClientCodec outbound state was not 0. It will be replaced with a fresh HttpClientCodec. "
                        + "bad_httpclientcodec_outbound_state={}",
                        currentHttpClientCodecOutboundState
                    ),
                    distributedSpanStackToUse, mdcContextToUse
                ).run();
                existingHttpClientCodecIsInBadState = true;
            }
        }
    }
    // Add the HttpClientCodec if it wasn't already in the pipeline,
    // or replace it if it was in a bad state.
    if (!pipelineContainsHttpClientCodec || existingHttpClientCodecIsInBadState) {
        addOrReplacePipelineHandler(
            new HttpClientCodec(4096, 8192, 8192, true), HTTP_CLIENT_CODEC_HANDLER_NAME, p, registeredHandlerNames
        );
    }
    // Update the chunk sender handler and error handler to the newly created versions that know about the correct
    // callback, dtrace info, etc to use for this request.
    addOrReplacePipelineHandler(chunkSenderHandler, CHUNK_SENDER_HANDLER_NAME, p, registeredHandlerNames);
    addOrReplacePipelineHandler(errorHandler, ERROR_HANDLER_NAME, p, registeredHandlerNames);
}