use of io.netty.handler.codec.http.HttpContent in project riposte by Nike-Inc.
the class HttpUtilsTest method convertContentChunksToRawBytes_returns_null_if_total_bytes_is_zero.
@Test
public void convertContentChunksToRawBytes_returns_null_if_total_bytes_is_zero() {
    // given
    Collection<HttpContent> chunkCollection = Arrays.asList(
        new DefaultHttpContent(new EmptyByteBuf(ByteBufAllocator.DEFAULT)),
        new DefaultHttpContent(new EmptyByteBuf(ByteBufAllocator.DEFAULT))
    );

    // when
    byte[] resultBytes = HttpUtils.convertContentChunksToRawBytes(chunkCollection);

    // then
    assertThat(resultBytes, nullValue());
}
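The test above covers only the empty case. For contrast, here is a minimal sketch of the non-empty case, under the assumption (not verified against the riposte test suite) that convertContentChunksToRawBytes concatenates the readable bytes of the chunks in iteration order; the chunk contents are built with standard Netty helpers:

// Hypothetical usage sketch; "foo"/"bar" and the expected result are assumptions for illustration.
Collection<HttpContent> chunks = Arrays.asList(
    new DefaultHttpContent(Unpooled.copiedBuffer("foo", CharsetUtil.UTF_8)),
    new DefaultHttpContent(Unpooled.copiedBuffer("bar", CharsetUtil.UTF_8))
);
byte[] rawBytes = HttpUtils.convertContentChunksToRawBytes(chunks);
// Assumption: chunks are concatenated in iteration order, i.e. the result is "foobar".
assertThat(new String(rawBytes, CharsetUtil.UTF_8), is("foobar"));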
use of io.netty.handler.codec.http.HttpContent in project riposte by Nike-Inc.
the class RequestInfoImplTest method releaseContentChunks_clear_on_chunk_list_but_does_not_release_chunks_if_contentChunksWillBeReleasedExternally_is_true.
@Test
public void releaseContentChunks_clear_on_chunk_list_but_does_not_release_chunks_if_contentChunksWillBeReleasedExternally_is_true() {
    // given
    RequestInfoImpl<?> requestInfo = RequestInfoImpl.dummyInstanceForUnknownRequests();
    requestInfo.contentChunksWillBeReleasedExternally();
    List<HttpContent> contentChunkList = Arrays.asList(mock(HttpContent.class), mock(HttpContent.class));
    requestInfo.contentChunks.addAll(contentChunkList);
    assertThat(requestInfo.contentChunks.size(), is(contentChunkList.size()));

    // when
    requestInfo.releaseContentChunks();

    // then
    for (HttpContent chunkMock : contentChunkList) {
        verify(chunkMock, never()).release();
    }
    assertThat(requestInfo.contentChunks.isEmpty(), is(true));
}
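A complementary sketch of the default path, assuming (hypothetically; this is not copied from the riposte test suite) that releaseContentChunks() releases each chunk itself when contentChunksWillBeReleasedExternally() has not been called:

RequestInfoImpl<?> requestInfo = RequestInfoImpl.dummyInstanceForUnknownRequests();
List<HttpContent> contentChunkList = Arrays.asList(mock(HttpContent.class), mock(HttpContent.class));
requestInfo.contentChunks.addAll(contentChunkList);

requestInfo.releaseContentChunks();

// Assumption: in the default case each chunk's reference count is decremented exactly once.
for (HttpContent chunkMock : contentChunkList) {
    verify(chunkMock).release();
}
assertThat(requestInfo.contentChunks.isEmpty(), is(true));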
use of io.netty.handler.codec.http.HttpContent in project riposte by Nike-Inc.
the class ProcessFinalResponseOutputHandler method write.
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
    // Deal with the final outbound HttpResponse
    if (msg instanceof HttpResponse) {
        HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
        if (state != null)
            state.setActualResponseObject((HttpResponse) msg);
    }

    // Deal with the final outbound body content
    if (msg instanceof HttpContent) {
        HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
        if (state != null && state.getResponseInfo() != null) {
            ResponseInfo<?> responseInfo = state.getResponseInfo();
            long contentBytes = ((HttpContent) msg).content().readableBytes();
            if (responseInfo.getFinalContentLength() == null)
                responseInfo.setFinalContentLength(contentBytes);
            else
                responseInfo.setFinalContentLength(responseInfo.getFinalContentLength() + contentBytes);
        }
    }

    super.write(ctx, msg, promise);
}
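The handler accumulates the response's final content length one HttpContent chunk at a time, treating a null getFinalContentLength() as "no content seen yet". Below is a self-contained sketch of that accumulation pattern with assumed class and variable names (only the Netty types and calls are real):

import io.netty.buffer.Unpooled;
import io.netty.handler.codec.http.DefaultHttpContent;
import io.netty.handler.codec.http.HttpContent;
import io.netty.util.CharsetUtil;
import java.util.Arrays;
import java.util.List;

public class ContentLengthAccumulationSketch {
    public static void main(String[] args) {
        List<HttpContent> outboundChunks = Arrays.asList(
            new DefaultHttpContent(Unpooled.copiedBuffer("hello", CharsetUtil.UTF_8)),   // 5 bytes
            new DefaultHttpContent(Unpooled.copiedBuffer("world!!", CharsetUtil.UTF_8))  // 7 bytes
        );

        // Mirrors the null-check-then-add logic above: null means no content has been counted yet.
        Long finalContentLength = null;
        for (HttpContent chunk : outboundChunks) {
            long contentBytes = chunk.content().readableBytes();
            finalContentLength = (finalContentLength == null) ? contentBytes : finalContentLength + contentBytes;
        }

        System.out.println(finalContentLength); // prints 12

        // Release the reference-counted chunks since nothing else will.
        outboundChunks.forEach(HttpContent::release);
    }
}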
use of io.netty.handler.codec.http.HttpContent in project riposte by Nike-Inc.
the class ProxyRouterEndpointExecutionHandler method doChannelRead.
@Override
public PipelineContinuationBehavior doChannelRead(ChannelHandlerContext ctx, Object msg) {
    HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
    Endpoint<?> endpoint = state.getEndpointForExecution();

    if (shouldHandleDoChannelReadMessage(msg, endpoint)) {
        ProxyRouterProcessingState proxyRouterState = getOrCreateProxyRouterProcessingState(ctx);
        ProxyRouterEndpoint endpointProxyRouter = ((ProxyRouterEndpoint) endpoint);
        RequestInfo<?> requestInfo = state.getRequestInfo();

        if (msg instanceof HttpRequest) {
            if (requestInfo instanceof RiposteInternalRequestInfo) {
                // Tell this RequestInfo that we'll be managing the release of content chunks, so that when
                // RequestInfo.releaseAllResources() is called we don't have extra reference count removals.
                ((RiposteInternalRequestInfo) requestInfo).contentChunksWillBeReleasedExternally();
            }

            // We're supposed to start streaming. There may be pre-endpoint-execution validation logic or other work
            // that needs to happen before the endpoint is executed, so set up the CompletableFuture for the
            // endpoint call to only execute if the pre-endpoint-execution validation/work chain is successful.
            CompletableFuture<DownstreamRequestFirstChunkInfo> firstChunkFuture =
                state.getPreEndpointExecutionWorkChain()
                     .thenCompose(functionWithTracingAndMdc(
                         aVoid -> endpointProxyRouter.getDownstreamRequestFirstChunkInfo(requestInfo, longRunningTaskExecutor, ctx),
                         ctx
                     ));

            Long endpointTimeoutOverride = endpointProxyRouter.completableFutureTimeoutOverrideMillis();
            long callTimeoutValueToUse = (endpointTimeoutOverride == null)
                                         ? defaultCompletableFutureTimeoutMillis
                                         : endpointTimeoutOverride;

            // When the first chunk is ready, stream it downstream and set up what happens afterward.
            firstChunkFuture.whenComplete((downstreamRequestFirstChunkInfo, throwable) -> {
                Optional<ManualModeTask<HttpResponse>> circuitBreakerManualTask =
                    getCircuitBreaker(downstreamRequestFirstChunkInfo, ctx).map(CircuitBreaker::newManualModeTask);

                StreamingCallback callback = new StreamingCallbackForCtx(
                    ctx, circuitBreakerManualTask, endpointProxyRouter, requestInfo, proxyRouterState
                );

                if (throwable != null) {
                    // Something blew up trying to determine the first chunk info.
                    callback.unrecoverableErrorOccurred(throwable, true);
                } else if (!ctx.channel().isOpen()) {
                    // The channel was closed for some reason before we were able to start streaming.
                    String errorMsg = "The channel from the original caller was closed before we could begin the downstream call.";
                    Exception channelClosedException = new RuntimeException(errorMsg);
                    runnableWithTracingAndMdc(() -> logger.warn(errorMsg), ctx).run();
                    callback.unrecoverableErrorOccurred(channelClosedException, true);
                } else {
                    try {
                        // Ok we have the first chunk info. Start by setting the downstream call info in the request
                        // info (i.e. for access logs if desired)
                        requestInfo.addRequestAttribute(
                            DOWNSTREAM_CALL_PATH_REQUEST_ATTR_KEY,
                            HttpUtils.extractPath(downstreamRequestFirstChunkInfo.firstChunk.uri())
                        );

                        // Try our circuit breaker (if we have one).
                        Throwable circuitBreakerException = null;
                        try {
                            circuitBreakerManualTask.ifPresent(ManualModeTask::throwExceptionIfCircuitBreakerIsOpen);
                        } catch (Throwable t) {
                            circuitBreakerException = t;
                        }

                        if (circuitBreakerException == null) {
                            // No circuit breaker, or the breaker is closed. We can now stream the first chunk info.
                            String downstreamHost = downstreamRequestFirstChunkInfo.host;
                            int downstreamPort = downstreamRequestFirstChunkInfo.port;
                            HttpRequest downstreamRequestFirstChunk = downstreamRequestFirstChunkInfo.firstChunk;
                            boolean isSecureHttpsCall = downstreamRequestFirstChunkInfo.isHttps;
                            boolean relaxedHttpsValidation = downstreamRequestFirstChunkInfo.relaxedHttpsValidation;
                            boolean performSubSpanAroundDownstreamCall = downstreamRequestFirstChunkInfo.performSubSpanAroundDownstreamCall;
                            boolean addTracingHeadersToDownstreamCall = downstreamRequestFirstChunkInfo.addTracingHeadersToDownstreamCall;

                            // Tell the proxyRouterState about the streaming callback so that
                            // callback.unrecoverableErrorOccurred(...) can be called in the case of an error
                            // on subsequent chunks.
                            proxyRouterState.setStreamingCallback(callback);

                            // Setup the streaming channel future with everything it needs to kick off the
                            // downstream request.
                            proxyRouterState.setStreamingStartTimeNanos(System.nanoTime());
                            CompletableFuture<StreamingChannel> streamingChannel = streamingAsyncHttpClient.streamDownstreamCall(
                                downstreamHost, downstreamPort, downstreamRequestFirstChunk, isSecureHttpsCall,
                                relaxedHttpsValidation, callback, callTimeoutValueToUse, performSubSpanAroundDownstreamCall,
                                addTracingHeadersToDownstreamCall, proxyRouterState, requestInfo, ctx
                            );

                            // Tell the streaming channel future what to do when it completes.
                            streamingChannel = streamingChannel.whenComplete((sc, cause) -> {
                                if (cause == null) {
                                    // Successfully connected and sent the first chunk. We can now safely let
                                    // the remaining content chunks through for streaming.
                                    proxyRouterState.triggerChunkProcessing(sc);
                                } else {
                                    // Something blew up while connecting to the downstream server.
                                    callback.unrecoverableErrorOccurred(cause, true);
                                }
                            });

                            // Set the streaming channel future on the state so it can be connected to.
                            proxyRouterState.setStreamingChannelCompletableFuture(streamingChannel);
                        } else {
                            // Circuit breaker is tripped (or otherwise threw an unexpected exception). Immediately
                            // short circuit the error back to the client.
                            callback.unrecoverableErrorOccurred(circuitBreakerException, true);
                        }
                    } catch (Throwable t) {
                        callback.unrecoverableErrorOccurred(t, true);
                    }
                }
            });
        } else if (msg instanceof HttpContent) {
            HttpContent msgContent = (HttpContent) msg;

            // If the downstream stream has already failed, just release the chunk. Otherwise, register the
            // chunk-streaming behavior and subsequent cleanup for the given HttpContent.
            if (!releaseContentChunkIfStreamAlreadyFailed(msgContent, proxyRouterState)) {
                registerChunkStreamingAction(proxyRouterState, msgContent, ctx);
            }
        }
        return PipelineContinuationBehavior.DO_NOT_FIRE_CONTINUE_EVENT;
    }

    return PipelineContinuationBehavior.CONTINUE;
}
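The method above chains the downstream call onto the pre-endpoint-execution work with thenCompose and then splits success and failure handling inside whenComplete. Below is a stripped-down sketch of that chaining pattern using only java.util.concurrent; the class and variable names are hypothetical stand-ins, not riposte types:

import java.util.concurrent.CompletableFuture;

public class FirstChunkChainingSketch {
    public static void main(String[] args) {
        // Stand-in for state.getPreEndpointExecutionWorkChain(): pre-endpoint validation/work that must succeed first.
        CompletableFuture<Void> preEndpointWorkChain = CompletableFuture.completedFuture(null);

        // Stand-in for endpointProxyRouter.getDownstreamRequestFirstChunkInfo(...): only runs if the chain succeeded.
        CompletableFuture<String> firstChunkFuture =
            preEndpointWorkChain.thenCompose(aVoid -> CompletableFuture.supplyAsync(() -> "first-chunk-info"));

        // Same shape as the whenComplete block above: exactly one of the two branches runs.
        firstChunkFuture.whenComplete((firstChunkInfo, throwable) -> {
            if (throwable != null) {
                System.err.println("unrecoverable error occurred: " + throwable);
            } else {
                System.out.println("stream the first chunk downstream: " + firstChunkInfo);
            }
        }).join(); // block only so the example's async branch finishes before the JVM exits
    }
}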
use of io.netty.handler.codec.http.HttpContent in project async-http-client by AsyncHttpClient.
the class AsyncHttpClientHandler method channelRead.
@Override
public void channelRead(final ChannelHandlerContext ctx, Object msg) throws Exception {
    Channel channel = ctx.channel();
    Object attribute = Channels.getAttribute(channel);

    try {
        if (attribute instanceof OnLastHttpContentCallback) {
            if (msg instanceof LastHttpContent) {
                ((OnLastHttpContentCallback) attribute).call();
            }
        } else if (attribute instanceof NettyResponseFuture) {
            NettyResponseFuture<?> future = (NettyResponseFuture<?>) attribute;
            future.touch();
            handleRead(channel, future, msg);
        } else if (attribute instanceof StreamedResponsePublisher) {
            StreamedResponsePublisher publisher = (StreamedResponsePublisher) attribute;
            publisher.future().touch();

            if (msg instanceof HttpContent) {
                ByteBuf content = ((HttpContent) msg).content();

                // Republish as a HttpResponseBodyPart
                if (content.isReadable()) {
                    HttpResponseBodyPart part = config.getResponseBodyPartFactory().newResponseBodyPart(content, false);
                    ctx.fireChannelRead(part);
                }

                if (msg instanceof LastHttpContent) {
                    // Remove the handler from the pipeline, this will trigger it to finish
                    ctx.pipeline().remove(publisher);
                    // Trigger a read, just in case the last read complete triggered no new read
                    ctx.read();
                    // Send the last content on to the protocol, so that it can conclude the cleanup
                    handleRead(channel, publisher.future(), msg);
                }
            } else {
                logger.info("Received unexpected message while expecting a chunk: " + msg);
                ctx.pipeline().remove(publisher);
                Channels.setDiscard(channel);
            }
        } else if (attribute != DiscardEvent.DISCARD) {
            // unhandled message
            logger.debug("Orphan channel {} with attribute {} received message {}, closing", channel, attribute, msg);
            Channels.silentlyCloseChannel(channel);
        }
    } finally {
        ReferenceCountUtil.release(msg);
    }
}
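One detail worth noting is the finally block: every HttpContent that reaches this handler is reference-counted and is released exactly once on every code path via ReferenceCountUtil.release(msg). A minimal sketch of the same release-in-finally pattern in a standalone inbound handler (the handler itself is hypothetical; the Netty API calls are standard):

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.codec.http.HttpContent;
import io.netty.util.ReferenceCountUtil;

// Hypothetical example handler: consumes HttpContent chunks and guarantees their release.
public class ReleaseOnReadHandler extends ChannelInboundHandlerAdapter {

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        try {
            if (msg instanceof HttpContent) {
                // Consume the chunk (here: just report how many bytes it carries).
                int readableBytes = ((HttpContent) msg).content().readableBytes();
                System.out.println("received chunk with " + readableBytes + " readable bytes");
            }
        } finally {
            // Release unconditionally, since this handler does not pass msg further down the pipeline.
            ReferenceCountUtil.release(msg);
        }
    }
}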