Use of com.nike.backstopper.exception.WrapperException in project riposte by Nike-Inc.
The class StreamingAsyncHttpClient, method streamDownstreamCall.
/**
* TODO: Fully document me.
* <br/>
* NOTE: The returned CompletableFuture will only be completed successfully if the connection to the downstream
* server was successful and the initialRequestChunk was successfully written out. This has implications for
* initialRequestChunk regarding releasing its reference count (i.e. calling {@link
* io.netty.util.ReferenceCountUtil#release(Object)} and passing in initialRequestChunk). If the returned
* CompletableFuture is successful it means initialRequestChunk's reference count will already be reduced by one
* relative to when this method was called because it will have been passed to a successful {@link
* ChannelHandlerContext#writeAndFlush(Object)} method call.
* <p/>
* Long story short - assume initialRequestChunk is an object with a reference count of x:
* <ul>
* <li>
* If the returned CompletableFuture is successful, then when it completes successfully
* initialRequestChunk's reference count will be x - 1
* </li>
* <li>
* If the returned CompletableFuture is *NOT* successful, then when it completes initialRequestChunk's
* reference count will still be x
* </li>
* </ul>
*/
public CompletableFuture<StreamingChannel> streamDownstreamCall(String downstreamHost, int downstreamPort, HttpRequest initialRequestChunk, boolean isSecureHttpsCall, boolean relaxedHttpsValidation, StreamingCallback callback, long downstreamCallTimeoutMillis, boolean performSubSpanAroundDownstreamCalls, boolean addTracingHeadersToDownstreamCall, ChannelHandlerContext ctx) {
CompletableFuture<StreamingChannel> streamingChannel = new CompletableFuture<>();
// set host header. include port in value when it is a non-default port
boolean isDefaultPort = (downstreamPort == 80 && !isSecureHttpsCall) || (downstreamPort == 443 && isSecureHttpsCall);
String hostHeaderValue = (isDefaultPort) ? downstreamHost : downstreamHost + ":" + downstreamPort;
initialRequestChunk.headers().set(HttpHeaders.Names.HOST, hostHeaderValue);
long beforeConnectionStartTimeNanos = System.nanoTime();
// Create a connection to the downstream server.
ChannelPool pool = getPooledChannelFuture(downstreamHost, downstreamPort);
Future<Channel> channelFuture = pool.acquire();
// Add a listener that kicks off the downstream call once the connection is completed.
channelFuture.addListener(future -> {
Pair<Deque<Span>, Map<String, String>> originalThreadInfo = null;
try {
long connectionSetupTimeNanos = System.nanoTime() - beforeConnectionStartTimeNanos;
HttpProcessingState httpProcessingState = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
if (httpProcessingState != null) {
RequestInfo<?> requestInfo = httpProcessingState.getRequestInfo();
if (requestInfo != null) {
requestInfo.addRequestAttribute(DOWNSTREAM_CALL_CONNECTION_SETUP_TIME_NANOS_REQUEST_ATTR_KEY, connectionSetupTimeNanos);
}
}
// Setup tracing and MDC so our log messages have the correct distributed trace info, etc.
originalThreadInfo = linkTracingAndMdcToCurrentThread(ctx);
if (logger.isDebugEnabled()) {
logger.debug("CONNECTION SETUP TIME NANOS: {}", connectionSetupTimeNanos);
}
if (!future.isSuccess()) {
try {
// We did not connect to the downstream host successfully. Notify the callback.
streamingChannel.completeExceptionally(new WrapperException("Unable to connect to downstream host: " + downstreamHost, future.cause()));
} finally {
Channel ch = channelFuture.getNow();
if (ch != null) {
// We likely will never reach here since the channel future was not successful, however if
// we *do* manage to get here somehow, then mark the channel broken and release it back
// to the pool.
markChannelAsBroken(ch);
pool.release(ch);
}
}
return;
}
// noinspection ConstantConditions
if (performSubSpanAroundDownstreamCalls) {
// Add the subspan.
String spanName = getSubspanSpanName(initialRequestChunk.getMethod().name(), downstreamHost + ":" + downstreamPort + initialRequestChunk.getUri());
if (Tracer.getInstance().getCurrentSpan() == null) {
// There is no parent span to start a subspan from, so we have to start a new span for this call
// rather than a subspan.
// TODO: Set this to CLIENT once we have that ability in the wingtips API for request root spans
Tracer.getInstance().startRequestWithRootSpan(spanName);
} else {
// There was at least one span on the stack, so we can start a subspan for this call.
Tracer.getInstance().startSubSpan(spanName, Span.SpanPurpose.CLIENT);
}
}
Deque<Span> distributedSpanStackToUse = Tracer.getInstance().getCurrentSpanStackCopy();
Map<String, String> mdcContextToUse = MDC.getCopyOfContextMap();
Span spanForDownstreamCall = (distributedSpanStackToUse == null) ? null : distributedSpanStackToUse.peek();
// Add distributed trace headers to the downstream call if desired and we have a current span.
if (addTracingHeadersToDownstreamCall && spanForDownstreamCall != null) {
HttpRequestTracingUtils.propagateTracingHeaders((headerKey, headerValue) -> {
if (headerValue != null) {
initialRequestChunk.headers().set(headerKey, headerValue);
}
}, spanForDownstreamCall);
}
Channel ch = channelFuture.getNow();
if (logger.isDebugEnabled())
logger.debug("Channel ID of the Channel pulled from the pool: {}", ch.toString());
// We may not be in the right thread to modify the channel pipeline and write data. If we're in the
// wrong thread we can get deadlock type situations. By running the relevant bits in the channel's
// event loop we're guaranteed it will be run in the correct thread.
ch.eventLoop().execute(runnableWithTracingAndMdc(() -> {
BiConsumer<String, Throwable> prepChannelErrorHandler = (errorMessage, cause) -> {
try {
streamingChannel.completeExceptionally(new WrapperException(errorMessage, cause));
} finally {
// This channel may be permanently busted depending on the error, so mark it broken and let
// the pool close it and clean it up.
markChannelAsBroken(ch);
pool.release(ch);
}
};
try {
ObjectHolder<Boolean> callActiveHolder = new ObjectHolder<>();
callActiveHolder.heldObject = true;
ObjectHolder<Boolean> lastChunkSentDownstreamHolder = new ObjectHolder<>();
lastChunkSentDownstreamHolder.heldObject = false;
// noinspection ConstantConditions
prepChannelForDownstreamCall(pool, ch, callback, distributedSpanStackToUse, mdcContextToUse, isSecureHttpsCall, relaxedHttpsValidation, performSubSpanAroundDownstreamCalls, downstreamCallTimeoutMillis, callActiveHolder, lastChunkSentDownstreamHolder);
logInitialRequestChunk(initialRequestChunk, downstreamHost, downstreamPort);
// Send the HTTP request.
ChannelFuture writeFuture = ch.writeAndFlush(initialRequestChunk);
// After the initial chunk has been sent we'll open the floodgates
// for any further chunk streaming
writeFuture.addListener(completedWriteFuture -> {
if (completedWriteFuture.isSuccess())
streamingChannel.complete(new StreamingChannel(ch, pool, callActiveHolder, lastChunkSentDownstreamHolder, distributedSpanStackToUse, mdcContextToUse));
else {
prepChannelErrorHandler.accept("Writing the first HttpRequest chunk to the downstream service failed.", completedWriteFuture.cause());
// noinspection UnnecessaryReturnStatement
return;
}
});
} catch (SSLException | NoSuchAlgorithmException | KeyStoreException ex) {
prepChannelErrorHandler.accept("Error setting up SSL context for downstream call", ex);
// noinspection UnnecessaryReturnStatement
return;
} catch (Throwable t) {
// If we don't catch and handle this here it gets swallowed since we're in a Runnable
prepChannelErrorHandler.accept("An unexpected error occurred while prepping the channel pipeline for the downstream call", t);
// noinspection UnnecessaryReturnStatement
return;
}
}, ctx));
} catch (Throwable ex) {
try {
String errorMsg = "Error occurred attempting to send first chunk (headers/etc) downstream";
Exception errorToFire = new WrapperException(errorMsg, ex);
logger.warn(errorMsg, errorToFire);
streamingChannel.completeExceptionally(errorToFire);
} finally {
Channel ch = channelFuture.getNow();
if (ch != null) {
// Depending on where the error was thrown the channel may or may not exist. If it does exist,
// then assume it's unusable, mark it as broken, and let the pool close it and remove it.
markChannelAsBroken(ch);
pool.release(ch);
}
}
} finally {
// Unhook the tracing and MDC stuff from this thread now that we're done.
unlinkTracingAndMdcFromCurrentThread(originalThreadInfo);
}
});
return streamingChannel;
}
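For illustration, here is a minimal caller-side sketch of the reference-count contract spelled out in the Javadoc above. It is not riposte source: the client instance, the buildInitialRequestChunk() helper, the callback, and the literal host/port/timeout values are hypothetical, while ReferenceCountUtil.release() and CompletableFuture.whenComplete() are the real APIs involved.
// Hypothetical caller (requires io.netty.util.ReferenceCountUtil and java.util.concurrent.CompletableFuture).
HttpRequest initialChunk = buildInitialRequestChunk(); // assumed helper; refCnt() == 1 at this point
CompletableFuture<StreamingChannel> downstreamCallFuture = streamingAsyncHttpClient.streamDownstreamCall(
    "downstream.example.com", 8080, initialChunk, false, false, myStreamingCallback, 5000, true, true, ctx);
downstreamCallFuture.whenComplete((streamingChannel, error) -> {
    if (error != null) {
        // Per the Javadoc: on failure the chunk was never written, so its reference count is
        // unchanged and must be released here to avoid a buffer leak.
        ReferenceCountUtil.release(initialChunk);
    }
    // On success, writeAndFlush(initialChunk) already consumed one reference,
    // so no manual release is needed on the happy path.
});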
Use of com.nike.backstopper.exception.WrapperException in project riposte by Nike-Inc.
The class ProxyRouterEndpointExecutionHandler, method registerChunkStreamingAction.
protected void registerChunkStreamingAction(ProxyRouterProcessingState proxyRouterState, HttpContent msgContent, ChannelHandlerContext ctx) {
// We have a content chunk to stream downstream. Attach the chunk processing to the proxyRouterState and
// tell it to stream itself when that future says everything is ready.
proxyRouterState.registerStreamingChannelChunkProcessingAction((sc, cause) -> {
if (releaseContentChunkIfStreamAlreadyFailed(msgContent, proxyRouterState)) {
// content has been released. Nothing left for us to do.
return;
}
if (cause == null) {
// Nothing has blown up yet, so stream this next chunk downstream. Calling streamChunk() will decrement
// the chunk's reference count (at some point in the future), allowing it to be destroyed since
// this should be the last handle on the chunk's memory.
ChannelFuture writeFuture = sc.streamChunk(msgContent);
writeFuture.addListener(future -> {
// The write has completed - if it was not successful then we have a problem.
if (!future.isSuccess()) {
try {
String errorMsg = "Chunk streaming ChannelFuture came back as being unsuccessful. " + "downstream_channel_id=" + sc.getChannel().toString();
Throwable errorToFire = new WrapperException(errorMsg, future.cause());
StreamingCallback callback = proxyRouterState.getStreamingCallback();
if (callback != null) {
// This doesn't necessarily guarantee a broken downstream response in the case where
// the downstream system returned a response before receiving all request chunks
// (e.g. short circuit error response), so we'll call unrecoverableErrorOccurred()
// with false for the guaranteesBrokenDownstreamResponse argument. This will give
// the downstream system a chance to fully send its response if it had started
// but not yet completed by the time we hit this code on the request chunk.
callback.unrecoverableErrorOccurred(errorToFire, false);
} else {
// We have to call proxyRouterState.cancelRequestStreaming() here since we couldn't
// call callback.unrecoverableErrorOccurred(...);
proxyRouterState.cancelRequestStreaming(errorToFire, ctx);
runnableWithTracingAndMdc(() -> logger.error("Unrecoverable error occurred and somehow the StreamingCallback was " + "not available. This should not be possible. Firing the following " + "error down the pipeline manually: " + errorMsg, errorToFire), ctx).run();
executeOnlyIfChannelIsActive(ctx, "ProxyRouterEndpointExecutionHandler-streamchunk-writefuture-unsuccessful", () -> ctx.fireExceptionCaught(errorToFire));
}
} finally {
// Close down the StreamingChannel so its Channel can be released back to the pool.
sc.closeChannelDueToUnrecoverableError(future.cause());
}
} else if (msgContent instanceof LastHttpContent) {
// This msgContent was the last chunk and it was streamed successfully, so mark the proxy router
// state as having completed successfully.
proxyRouterState.setRequestStreamingCompletedSuccessfully();
}
});
} else {
StreamingChannel scToNotify = sc;
try {
// Something blew up while attempting to send a chunk to the downstream server.
if (scToNotify == null) {
// No StreamingChannel from the registration future. Try to extract it from the
// proxyRouterState directly if possible.
CompletableFuture<StreamingChannel> scFuture = proxyRouterState.getStreamingChannelCompletableFuture();
if (scFuture.isDone() && !scFuture.isCompletedExceptionally()) {
try {
scToNotify = scFuture.join();
} catch (Throwable t) {
runnableWithTracingAndMdc(() -> logger.error("What? This should never happen. Swallowing.", t), ctx).run();
}
}
}
String downstreamChannelId = (scToNotify == null) ? "UNKNOWN" : scToNotify.getChannel().toString();
String errorMsg = "Chunk streaming future came back as being unsuccessful. " + "downstream_channel_id=" + downstreamChannelId;
Throwable errorToFire = new WrapperException(errorMsg, cause);
StreamingCallback callback = proxyRouterState.getStreamingCallback();
if (callback != null) {
// This doesn't necessarily guarantee a broken downstream response in the case where
// the downstream system returned a response before receiving all request chunks
// (e.g. short circuit error response), so we'll call unrecoverableErrorOccurred()
// with false for the guaranteesBrokenDownstreamResponse argument. This will give
// the downstream system a chance to fully send its response if it had started
// but not yet completed by the time we hit this code on the request chunk.
callback.unrecoverableErrorOccurred(errorToFire, false);
} else {
runnableWithTracingAndMdc(() -> logger.error("Unrecoverable error occurred and somehow the StreamingCallback was not " + "available. This should not be possible. Firing the following error down the " + "pipeline manually: " + errorMsg, errorToFire), ctx).run();
executeOnlyIfChannelIsActive(ctx, "ProxyRouterEndpointExecutionHandler-streamchunk-unsuccessful", () -> ctx.fireExceptionCaught(errorToFire));
}
} finally {
// We were never able to call StreamingChannel.streamChunk() on this chunk, so it still has a
// dangling reference count handle that needs cleaning up. Since there's nothing left to
// do with this chunk, we can release it now.
msgContent.release();
// Close down the StreamingChannel so its Channel can be released back to the pool.
if (scToNotify != null) {
scToNotify.closeChannelDueToUnrecoverableError(cause);
} else {
@SuppressWarnings("ThrowableResultOfMethodCallIgnored") Throwable actualCause = unwrapAsyncExceptions(cause);
if (!(actualCause instanceof WrapperException)) {
runnableWithTracingAndMdc(() -> logger.error("Unable to extract StreamingChannel during error handling and the error that " + "caused it was not a WrapperException, meaning " + "StreamingAsyncHttpClient.streamDownstreamCall(...) did not properly handle it. " + "This should likely never happen and might leave things in a bad state - it " + "should be investigated and fixed! The error that caused this is: ", cause), ctx).run();
}
}
}
}
});
}
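A compact sketch of the release-exactly-once invariant the handler above relies on (see the comments around msgContent.release()). The handleChunk() helper is hypothetical; StreamingChannel.streamChunk() and ReferenceCountUtil.release() come from the snippet and Netty respectively. The point is that streamChunk() takes over the chunk's reference count, so a manual release is only correct on the path where streamChunk() was never called.
// Hypothetical helper illustrating the ownership rule - not riposte API.
void handleChunk(StreamingChannel sc, HttpContent chunk, Throwable registrationError) {
    if (registrationError == null) {
        // Happy path: streamChunk() will (eventually) decrement the chunk's reference count downstream.
        sc.streamChunk(chunk);
    } else {
        // streamChunk() was never invoked, so the chunk still holds its dangling reference.
        // Release it exactly once: over-releasing throws IllegalReferenceCountException,
        // and never releasing leaks the underlying buffer.
        ReferenceCountUtil.release(chunk);
    }
}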
Use of com.nike.backstopper.exception.WrapperException in project riposte by Nike-Inc.
The class StreamingAsyncHttpClient, method streamDownstreamCall.
/**
* TODO: Fully document me.
* <br/>
* NOTE: The returned CompletableFuture will only be completed successfully if the connection to the downstream
* server was successful and the initialRequestChunk was successfully written out. This has implications for
* initialRequestChunk regarding releasing its reference count (i.e. calling {@link
* io.netty.util.ReferenceCountUtil#release(Object)} and passing in initialRequestChunk). If the returned
* CompletableFuture is successful it means initialRequestChunk's reference count will already be reduced by one
* relative to when this method was called because it will have been passed to a successful {@link
* ChannelHandlerContext#writeAndFlush(Object)} method call.
* <p/>
* Long story short - assume initialRequestChunk is an object with a reference count of x:
* <ul>
* <li>
* If the returned CompletableFuture is successful, then when it completes successfully
* initialRequestChunk's reference count will be x - 1
* </li>
* <li>
* If the returned CompletableFuture is *NOT* successful, then when it completes initialRequestChunk's
* reference count will still be x
* </li>
* </ul>
*/
public CompletableFuture<StreamingChannel> streamDownstreamCall(String downstreamHost, int downstreamPort, HttpRequest initialRequestChunk, boolean isSecureHttpsCall, boolean relaxedHttpsValidation, StreamingCallback callback, long downstreamCallTimeoutMillis, boolean performSubSpanAroundDownstreamCalls, boolean addTracingHeadersToDownstreamCall, @NotNull ProxyRouterProcessingState proxyRouterProcessingState, @NotNull RequestInfo<?> requestInfo, ChannelHandlerContext ctx) {
CompletableFuture<StreamingChannel> streamingChannel = new CompletableFuture<>();
// set host header. include port in value when it is a non-default port
boolean isDefaultPort = (downstreamPort == 80 && !isSecureHttpsCall) || (downstreamPort == 443 && isSecureHttpsCall);
String hostHeaderValue = (isDefaultPort) ? downstreamHost : downstreamHost + ":" + downstreamPort;
initialRequestChunk.headers().set(HttpHeaderNames.HOST, hostHeaderValue);
long beforeConnectionStartTimeNanos = System.nanoTime();
// Create a connection to the downstream server.
ChannelPool pool = getPooledChannelFuture(downstreamHost, downstreamPort);
Future<Channel> channelFuture = pool.acquire();
// Add a listener that kicks off the downstream call once the connection is completed.
channelFuture.addListener(future -> {
Pair<Deque<Span>, Map<String, String>> originalThreadInfo = null;
try {
long connectionSetupTimeNanos = System.nanoTime() - beforeConnectionStartTimeNanos;
requestInfo.addRequestAttribute(DOWNSTREAM_CALL_CONNECTION_SETUP_TIME_NANOS_REQUEST_ATTR_KEY, connectionSetupTimeNanos);
// Setup tracing and MDC so our log messages have the correct distributed trace info, etc.
originalThreadInfo = linkTracingAndMdcToCurrentThread(ctx);
if (logger.isDebugEnabled()) {
logger.debug("CONNECTION SETUP TIME NANOS: {}", connectionSetupTimeNanos);
}
if (!future.isSuccess()) {
try {
// We did not connect to the downstream host successfully. Notify the callback.
streamingChannel.completeExceptionally(new WrapperException("Unable to connect to downstream host: " + downstreamHost, future.cause()));
} finally {
Channel ch = channelFuture.getNow();
if (ch != null) {
// We likely will never reach here since the channel future was not successful, however if
// we *do* manage to get here somehow, then mark the channel broken and release it back
// to the pool.
markChannelAsBroken(ch);
pool.release(ch);
}
}
return;
}
// Do a subspan around the downstream call if desired.
if (performSubSpanAroundDownstreamCalls) {
// TODO: The subspan start stuff should probably be moved to the beginning of
// streamDownstreamCall(), so that we pick up connection setup time (and can annotate conn
// start/finish). For now, we'll fake it by annotating conn start/finish time on the subspan
// at a negative time offset. So they'll be "in the past" from the perspective of the subspan.
// Add the subspan.
String spanName = getSubspanSpanName(initialRequestChunk, requestInfo, proxySpanTaggingStrategy);
// Start a new child/subspan for this call if possible, falling back to a new request span (rather
// than child/subspan) if there's no current span on the thread. The
// startSpanInCurrentContext() method will do the right thing here in either case.
Span subspan = Tracer.getInstance().startSpanInCurrentContext(spanName, Span.SpanPurpose.CLIENT);
// Do the auto-tagging based on the request.
proxySpanTaggingStrategy.handleRequestTagging(subspan, initialRequestChunk);
// Manually add the http.host tag - this is something we definitely want for proxy/router requests.
try {
subspan.putTag(KnownZipkinTags.HTTP_HOST, downstreamHost + ":" + downstreamPort);
} catch (Throwable t) {
logger.error("An unexpected error occurred while adding downstream host and port tags. The error will " + "be swallowed to avoid doing any damage, but your span may be missing some expected " + "tags. This error should be fixed.", t);
}
// Add the initial HttpRequest to our ProxyRouterProcessingState so it's available for final
// response tagging and span naming at the end.
proxyRouterProcessingState.setProxyHttpRequest(initialRequestChunk);
// Annotate connection start/finish on the subspan if desired. Connection start is offset backwards from the subspan start time by the connection setup time (per the TODO above); connection finish lands at the subspan start time.
if (proxySpanTaggingStrategy.shouldAddConnStartAnnotation()) {
subspan.addTimestampedAnnotation(TimestampedAnnotation.forEpochMicrosWithNanoOffset(subspan.getSpanStartTimeEpochMicros(), -connectionSetupTimeNanos, proxySpanTaggingStrategy.connStartAnnotationName()));
}
if (proxySpanTaggingStrategy.shouldAddConnFinishAnnotation()) {
subspan.addTimestampedAnnotation(TimestampedAnnotation.forEpochMicros(subspan.getSpanStartTimeEpochMicros(), proxySpanTaggingStrategy.connFinishAnnotationName()));
}
}
Deque<Span> distributedSpanStackToUse = Tracer.getInstance().getCurrentSpanStackCopy();
Map<String, String> mdcContextToUse = MDC.getCopyOfContextMap();
@Nullable final Span spanForDownstreamCall = (distributedSpanStackToUse == null) ? null : distributedSpanStackToUse.peek();
// Add distributed trace headers to the downstream call if desired and we have a current span.
if (addTracingHeadersToDownstreamCall && spanForDownstreamCall != null) {
HttpRequestTracingUtils.propagateTracingHeaders((headerKey, headerValue) -> {
if (headerValue != null) {
initialRequestChunk.headers().set(headerKey, headerValue);
}
}, spanForDownstreamCall);
}
Channel ch = channelFuture.getNow();
if (logger.isDebugEnabled())
logger.debug("Channel ID of the Channel pulled from the pool: {}", ch.toString());
// We may not be in the right thread to modify the channel pipeline and write data. If we're in the
// wrong thread we can get deadlock type situations. By running the relevant bits in the channel's
// event loop we're guaranteed it will be run in the correct thread.
ch.eventLoop().execute(runnableWithTracingAndMdc(() -> {
BiConsumer<String, Throwable> prepChannelErrorHandler = (errorMessage, cause) -> {
try {
streamingChannel.completeExceptionally(new WrapperException(errorMessage, cause));
} finally {
// This channel may be permanently busted depending on the error, so mark it broken and let
// the pool close it and clean it up.
markChannelAsBroken(ch);
pool.release(ch);
}
};
try {
ObjectHolder<Boolean> callActiveHolder = new ObjectHolder<>();
callActiveHolder.heldObject = true;
ObjectHolder<Boolean> lastChunkSentDownstreamHolder = new ObjectHolder<>();
lastChunkSentDownstreamHolder.heldObject = false;
prepChannelForDownstreamCall(downstreamHost, downstreamPort, pool, ch, callback, distributedSpanStackToUse, mdcContextToUse, isSecureHttpsCall, relaxedHttpsValidation, performSubSpanAroundDownstreamCalls, downstreamCallTimeoutMillis, callActiveHolder, lastChunkSentDownstreamHolder, proxyRouterProcessingState, spanForDownstreamCall);
logInitialRequestChunk(initialRequestChunk, downstreamHost, downstreamPort);
// Send the HTTP request, and do a wire-send start annotation on the subspan if desired.
if (spanForDownstreamCall != null && proxySpanTaggingStrategy.shouldAddWireSendStartAnnotation()) {
spanForDownstreamCall.addTimestampedAnnotationForCurrentTime(proxySpanTaggingStrategy.wireSendStartAnnotationName());
}
ChannelFuture writeFuture = ch.writeAndFlush(initialRequestChunk);
// After the initial chunk has been sent we'll open the floodgates
// for any further chunk streaming
writeFuture.addListener(completedWriteFuture -> {
if (completedWriteFuture.isSuccess())
streamingChannel.complete(new StreamingChannel(ch, pool, callActiveHolder, lastChunkSentDownstreamHolder, distributedSpanStackToUse, mdcContextToUse, spanForDownstreamCall, proxySpanTaggingStrategy));
else {
prepChannelErrorHandler.accept("Writing the first HttpRequest chunk to the downstream service failed.", completedWriteFuture.cause());
// noinspection UnnecessaryReturnStatement
return;
}
});
} catch (SSLException | NoSuchAlgorithmException | KeyStoreException ex) {
prepChannelErrorHandler.accept("Error setting up SSL context for downstream call", ex);
// noinspection UnnecessaryReturnStatement
return;
} catch (Throwable t) {
// If we don't catch and handle this here it gets swallowed since we're in a Runnable
prepChannelErrorHandler.accept("An unexpected error occurred while prepping the channel pipeline for the downstream call", t);
// noinspection UnnecessaryReturnStatement
return;
}
}, ctx));
} catch (Throwable ex) {
try {
String errorMsg = "Error occurred attempting to send first chunk (headers/etc) downstream";
Exception errorToFire = new WrapperException(errorMsg, ex);
logger.warn(errorMsg, errorToFire);
streamingChannel.completeExceptionally(errorToFire);
} finally {
Channel ch = channelFuture.getNow();
if (ch != null) {
// Depending on where the error was thrown the channel may or may not exist. If it does exist,
// then assume it's unusable, mark it as broken, and let the pool close it and remove it.
markChannelAsBroken(ch);
pool.release(ch);
}
}
} finally {
// Unhook the tracing and MDC stuff from this thread now that we're done.
unlinkTracingAndMdcFromCurrentThread(originalThreadInfo);
}
});
return streamingChannel;
}
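Finally, a hedged sketch of the error-propagation pattern shared by the snippets above: failures are wrapped in a WrapperException before the CompletableFuture is completed exceptionally, so a consumer typically peels off the wrapper (and possibly a CompletionException added by the CompletableFuture machinery) to reach the underlying cause. The downstreamCallFuture and logger names are assumed consumer-side objects, not riposte API.
// Hypothetical consumer of the CompletableFuture<StreamingChannel> returned above.
downstreamCallFuture.whenComplete((streamingChannel, error) -> {
    if (error == null) {
        return; // connected and first chunk written successfully
    }
    // CompletableFuture may wrap the failure in a CompletionException depending on how it surfaced.
    Throwable unwrapped = (error instanceof CompletionException && error.getCause() != null) ? error.getCause() : error;
    // The code above completes the future with a WrapperException whose cause is the real
    // connection/SSL/write failure - that cause is typically what should be inspected or logged.
    Throwable realCause = (unwrapped instanceof WrapperException && unwrapped.getCause() != null) ? unwrapped.getCause() : unwrapped;
    logger.warn("Downstream call failed", realCause);
});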