use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelHandler in project camel by apache.
the class NettyProducer method processWithConnectedChannel.
public void processWithConnectedChannel(final Exchange exchange, final BodyReleaseCallback callback, final ChannelFuture channelFuture, final Object body) {
// remember channel so we can reuse it
final Channel channel = channelFuture.channel();
if (getConfiguration().isReuseChannel() && exchange.getProperty(NettyConstants.NETTY_CHANNEL) == null) {
exchange.setProperty(NettyConstants.NETTY_CHANNEL, channel);
// and defer closing the channel until we are done routing the exchange
exchange.addOnCompletion(new SynchronizationAdapter() {
@Override
public void onComplete(Exchange exchange) {
// should channel be closed after complete?
Boolean close;
if (ExchangeHelper.isOutCapable(exchange)) {
close = exchange.getOut().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
} else {
close = exchange.getIn().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
}
// should we disconnect? the header can override the configuration
boolean disconnect = getConfiguration().isDisconnect();
if (close != null) {
disconnect = close;
}
if (disconnect) {
LOG.trace("Closing channel {} as routing the Exchange is done", channel);
NettyHelper.close(channel);
}
releaseChannel(channelFuture);
}
});
}
if (exchange.getIn().getHeader(NettyConstants.NETTY_REQUEST_TIMEOUT) != null) {
long timeoutInMs = exchange.getIn().getHeader(NettyConstants.NETTY_REQUEST_TIMEOUT, Long.class);
ChannelHandler oldHandler = channel.pipeline().get("timeout");
ReadTimeoutHandler newHandler = new ReadTimeoutHandler(timeoutInMs, TimeUnit.MILLISECONDS);
if (oldHandler == null) {
channel.pipeline().addBefore("handler", "timeout", newHandler);
} else {
channel.pipeline().replace(oldHandler, "timeout", newHandler);
}
}
// This will refer to the original callback since Netty will release the body by itself
final AsyncCallback producerCallback;
if (configuration.isReuseChannel()) {
// use the callback as-is because we should not put the channel back in the pool as NettyProducerCallback would do;
// when channel reuse is enabled the channel is returned to the pool by the onCompletion above once the exchange is done
producerCallback = callback.getOriginalCallback();
} else {
producerCallback = new NettyProducerCallback(channelFuture, callback.getOriginalCallback());
}
// set up the state as an attachment on the channel so we can access it later when needed
putState(channel, new NettyCamelState(producerCallback, exchange));
// set up the remote address information (only used when the transport is not TCP)
InetSocketAddress remoteAddress = null;
if (!isTcp()) {
remoteAddress = new InetSocketAddress(configuration.getHost(), configuration.getPort());
}
// write body
NettyHelper.writeBodyAsync(LOG, channel, remoteAddress, body, exchange, new ChannelFutureListener() {
public void operationComplete(ChannelFuture channelFuture) throws Exception {
LOG.trace("Operation complete {}", channelFuture);
if (!channelFuture.isSuccess()) {
// the write was not successful, so exit (any exception has already been handled by ClientChannelHandler#exceptionCaught)
return;
}
// if we do not expect any reply then signal callback to continue routing
if (!configuration.isSync()) {
try {
// should channel be closed after complete?
Boolean close;
if (ExchangeHelper.isOutCapable(exchange)) {
close = exchange.getOut().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
} else {
close = exchange.getIn().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
}
// should we disconnect? the header can override the configuration
boolean disconnect = getConfiguration().isDisconnect();
if (close != null) {
disconnect = close;
}
// we should not close if we are reusing the channel
if (!configuration.isReuseChannel() && disconnect) {
if (LOG.isTraceEnabled()) {
LOG.trace("Closing channel when complete at address: {}", getEndpoint().getConfiguration().getAddress());
}
NettyHelper.close(channel);
}
} finally {
// signal callback to continue routing
producerCallback.done(false);
}
}
}
});
}
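Both the on-completion hook and the write listener above honor the NETTY_CLOSE_CHANNEL_WHEN_COMPLETE header, and the per-request timeout handler is driven by NETTY_REQUEST_TIMEOUT. As a minimal, hypothetical sketch of how a route could exercise these per-message overrides (the endpoint URI, host, port and the camel-netty4 package are illustrative assumptions, not taken from the example above):

import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.netty4.NettyConstants;

public class NettyOverridesRoute extends RouteBuilder {
    @Override
    public void configure() {
        from("direct:start")
            // per-message request timeout; the producer installs or replaces the "timeout" ReadTimeoutHandler
            .setHeader(NettyConstants.NETTY_REQUEST_TIMEOUT, constant(5000L))
            // ask the producer to close the channel once this exchange is done, overriding the endpoint's disconnect option
            .setHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, constant(true))
            .to("netty4:tcp://localhost:5150?sync=true");
    }
}

Note that with reuseChannel enabled the write listener above skips closing, and it is the on-completion hook that decides whether the channel is closed or returned to the pool.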
use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelHandler in project camel by apache.
the class ClientChannelHandler method channelRead0.
@Override
protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
messageReceived = true;
if (LOG.isTraceEnabled()) {
LOG.trace("Message received: {}", msg);
}
ChannelHandler handler = ctx.pipeline().get("timeout");
if (handler != null) {
LOG.trace("Removing timeout channel as we received message");
ctx.pipeline().remove(handler);
}
Exchange exchange = getExchange(ctx);
if (exchange == null) {
// we just ignore the received message as the channel is closed
return;
}
AsyncCallback callback = getAsyncCallback(ctx);
Message message;
try {
message = getResponseMessage(exchange, ctx, msg);
} catch (Exception e) {
exchange.setException(e);
callback.done(false);
return;
}
// set the result on either IN or OUT on the original exchange depending on its pattern
if (ExchangeHelper.isOutCapable(exchange)) {
exchange.setOut(message);
} else {
exchange.setIn(message);
}
try {
// should channel be closed after complete?
Boolean close;
if (ExchangeHelper.isOutCapable(exchange)) {
close = exchange.getOut().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
} else {
close = exchange.getIn().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
}
// check the setting on the exchange property
if (close == null) {
close = exchange.getProperty(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
}
// should we disconnect? the header can override the configuration
boolean disconnect = producer.getConfiguration().isDisconnect();
if (close != null) {
disconnect = close;
}
// we should not close if we are reusing the channel
if (!producer.getConfiguration().isReuseChannel() && disconnect) {
if (LOG.isTraceEnabled()) {
LOG.trace("Closing channel when complete at address: {}", producer.getConfiguration().getAddress());
}
NettyHelper.close(ctx.channel());
}
} finally {
// signal callback
callback.done(false);
}
}
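The handler above reads the Exchange and AsyncCallback back out of the NettyCamelState that the producer attached to the channel via putState. The real NettyCamelState and lookup helpers are not part of this excerpt; the following is a minimal sketch of that attachment pattern using a Netty AttributeKey (the key name and the simplified state class are assumptions for illustration, and plain io.netty imports are used):

import org.apache.camel.AsyncCallback;
import org.apache.camel.Exchange;
import io.netty.channel.Channel;
import io.netty.util.AttributeKey;

final class NettyCamelStateSupport {

    // channel attribute under which the per-request state is stored (key name is hypothetical)
    private static final AttributeKey<NettyCamelState> STATE = AttributeKey.valueOf("CamelNettyCamelState");

    // simplified stand-in for the real NettyCamelState: just the callback and exchange pair
    static final class NettyCamelState {
        final AsyncCallback callback;
        final Exchange exchange;

        NettyCamelState(AsyncCallback callback, Exchange exchange) {
            this.callback = callback;
            this.exchange = exchange;
        }
    }

    // called by the producer before writing the request
    static void putState(Channel channel, NettyCamelState state) {
        channel.attr(STATE).set(state);
    }

    // called by the handler when the response arrives; may return null if the channel was already closed
    static NettyCamelState getState(Channel channel) {
        return channel.attr(STATE).get();
    }
}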
use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelHandler in project flink by apache.
the class NettyServerLowAndHighWatermarkTest method testLowAndHighWatermarks.
/**
* Verifies that the high and low watermarks are set in relation to the page size.
*
* <p> The high and low watermarks control the data flow to the wire. If the Netty write buffer
* reaches a size greater than or equal to the high watermark, the channel becomes not-writable.
* Only when the size falls back below the low watermark does the channel become writable again.
*
* <p> The channel's writability state needs to be checked by the handler when writing to the
* channel; it is not enforced, i.e. nothing prevents you from writing to a channel that is in
* the not-writable state.
*/
@Test
public void testLowAndHighWatermarks() throws Throwable {
final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
final NettyProtocol protocol = new NettyProtocol() {
@Override
public ChannelHandler[] getServerChannelHandlers() {
// The channel handler implements the test
return new ChannelHandler[] { new TestLowAndHighWatermarkHandler(error) };
}
@Override
public ChannelHandler[] getClientChannelHandlers() {
return new ChannelHandler[0];
}
};
final NettyConfig conf = createConfig(PageSize);
final NettyServerAndClient serverAndClient = initServerAndClient(protocol, conf);
try {
// We can't just check the config of this channel as it is the client's channel. We need
// to check the server channel, because it is doing the data transfers.
final Channel ch = connect(serverAndClient);
// Wait for the channel to be closed
awaitClose(ch);
final Throwable t = error.get();
if (t != null) {
throw t;
}
} finally {
shutdown(serverAndClient);
}
}
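As the javadoc above notes, writability is advisory: Netty flips Channel.isWritable() when the outbound buffer crosses the watermarks, but a handler has to check it itself; writes issued while not-writable are still buffered. A minimal sketch of both sides of that contract, using plain io.netty imports and a hypothetical handler (this is not the test's TestLowAndHighWatermarkHandler):

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

// Watermarks are configured on the bootstrap, e.g. relative to a page size:
//   bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 2 * pageSize);
//   bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, pageSize);
public class WritabilityAwareHandler extends ChannelInboundHandlerAdapter {

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        // nothing stops a write while the channel is not-writable, so check explicitly
        if (ctx.channel().isWritable()) {
            ctx.writeAndFlush(msg);
        } else {
            // apply backpressure here (queue, drop, or pause reading); this sketch just releases the buffer
            ((ByteBuf) msg).release();
        }
    }

    @Override
    public void channelWritabilityChanged(ChannelHandlerContext ctx) {
        // fires when the outbound buffer size crosses the high or low watermark
        ctx.fireChannelWritabilityChanged();
    }
}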
use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelHandler in project riposte by Nike-Inc.
the class HttpChannelInitializerTest method initChannel_adds_HttpRequestDecoder_with_config_values_coming_from_httpRequestDecoderConfig.
@Test
public void initChannel_adds_HttpRequestDecoder_with_config_values_coming_from_httpRequestDecoderConfig() {
// given
HttpChannelInitializer hci = basicHttpChannelInitializerNoUtilityHandlers();
HttpRequestDecoderConfig configWithCustomValues = new HttpRequestDecoderConfig() {
@Override
public int maxInitialLineLength() {
return 123;
}
@Override
public int maxHeaderSize() {
return 456;
}
@Override
public int maxChunkSize() {
return 789;
}
};
Whitebox.setInternalState(hci, "httpRequestDecoderConfig", configWithCustomValues);
// when
hci.initChannel(socketChannelMock);
// then
ArgumentCaptor<ChannelHandler> channelHandlerArgumentCaptor = ArgumentCaptor.forClass(ChannelHandler.class);
verify(channelPipelineMock, atLeastOnce()).addLast(anyString(), channelHandlerArgumentCaptor.capture());
List<ChannelHandler> handlers = channelHandlerArgumentCaptor.getAllValues();
Pair<Integer, HttpRequestDecoder> requestDecoderHandlerPair = findChannelHandler(handlers, HttpRequestDecoder.class);
Assertions.assertThat(requestDecoderHandlerPair).isNotNull();
HttpRequestDecoder decoderHandler = requestDecoderHandlerPair.getValue();
int actualMaxInitialLineLength = extractField(extractField(decoderHandler, "lineParser"), "maxLength");
int actualMaxHeaderSize = extractField(extractField(decoderHandler, "headerParser"), "maxLength");
int actualMaxChunkSize = extractField(decoderHandler, "maxChunkSize");
Assertions.assertThat(actualMaxInitialLineLength).isEqualTo(configWithCustomValues.maxInitialLineLength());
Assertions.assertThat(actualMaxHeaderSize).isEqualTo(configWithCustomValues.maxHeaderSize());
Assertions.assertThat(actualMaxChunkSize).isEqualTo(configWithCustomValues.maxChunkSize());
}
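The reflection assertions above ("lineParser", "headerParser", "maxChunkSize") imply that the initializer feeds the three config values into Netty's three-argument HttpRequestDecoder constructor. A minimal, hypothetical sketch of that wiring (the helper class, method and handler name are assumptions, not riposte's actual HttpChannelInitializer code):

import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.HttpRequestDecoder;

class RequestDecoderWiring {

    // hypothetical wiring mirroring what the test expects initChannel to do with the three config values
    void addRequestDecoder(SocketChannel ch, int maxInitialLineLength, int maxHeaderSize, int maxChunkSize) {
        ChannelPipeline p = ch.pipeline();
        // maxInitialLineLength ends up in the decoder's internal "lineParser", maxHeaderSize in its
        // "headerParser", and maxChunkSize in the "maxChunkSize" field, which is exactly what the
        // extractField(...) assertions above inspect
        p.addLast("httpRequestDecoder", new HttpRequestDecoder(maxInitialLineLength, maxHeaderSize, maxChunkSize));
    }
}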
use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelHandler in project riposte by Nike-Inc.
the class StreamingAsyncHttpClient method prepChannelForDownstreamCall.
protected void prepChannelForDownstreamCall(ChannelPool pool, Channel ch, StreamingCallback callback, Deque<Span> distributedSpanStackToUse, Map<String, String> mdcContextToUse, boolean isSecureHttpsCall, boolean relaxedHttpsValidation, boolean performSubSpanAroundDownstreamCalls, long downstreamCallTimeoutMillis, ObjectHolder<Boolean> callActiveHolder, ObjectHolder<Boolean> lastChunkSentDownstreamHolder) throws SSLException, NoSuchAlgorithmException, KeyStoreException {
ChannelHandler chunkSenderHandler = new SimpleChannelInboundHandler<HttpObject>() {
@Override
protected void channelRead0(ChannelHandlerContext downstreamCallCtx, HttpObject msg) throws Exception {
try {
// only do this work while the call is still active; messages that arrive after the call is fully processed should not trigger the behavior a second time.
if (callActiveHolder.heldObject) {
if (msg instanceof LastHttpContent) {
lastChunkSentDownstreamHolder.heldObject = true;
if (performSubSpanAroundDownstreamCalls) {
// Complete the subspan.
runnableWithTracingAndMdc(() -> {
if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
Tracer.getInstance().completeRequestSpan();
else
Tracer.getInstance().completeSubSpan();
}, distributedSpanStackToUse, mdcContextToUse).run();
}
}
HttpObject msgToPass = msg;
if (msg instanceof HttpResponse) {
// We can't pass the original HttpResponse back to the callback due to intricacies of how
// Netty handles determining the last chunk. If we do, and the callback ends up writing
// the message out to the client (which happens during proxy routing for example), then
// msg's headers might get modified - potentially causing this channel pipeline to
// never send a LastHttpContent, which will in turn cause an indefinite hang.
HttpResponse origHttpResponse = (HttpResponse) msg;
HttpResponse httpResponse = (msg instanceof FullHttpResponse) ? new DefaultFullHttpResponse(origHttpResponse.getProtocolVersion(), origHttpResponse.getStatus(), ((FullHttpResponse) msg).content()) : new DefaultHttpResponse(origHttpResponse.getProtocolVersion(), origHttpResponse.getStatus());
httpResponse.headers().add(origHttpResponse.headers());
msgToPass = httpResponse;
}
callback.messageReceived(msgToPass);
} else {
if (shouldLogBadMessagesAfterRequestFinishes) {
runnableWithTracingAndMdc(() -> logger.warn("Received HttpObject msg when call was not active: {}", String.valueOf(msg)), distributedSpanStackToUse, mdcContextToUse).run();
}
}
} finally {
if (msg instanceof LastHttpContent) {
releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "last content chunk sent", distributedSpanStackToUse, mdcContextToUse);
}
}
}
};
Consumer<Throwable> doErrorHandlingConsumer = (cause) -> {
Pair<Deque<Span>, Map<String, String>> originalThreadInfo = null;
try {
// Set up tracing and MDC so our log messages have the correct distributed trace info, etc.
originalThreadInfo = linkTracingAndMdcToCurrentThread(distributedSpanStackToUse, mdcContextToUse);
// only do this work while the call is still active; errors that arrive after the call is fully processed should not trigger the behavior a second time.
if (callActiveHolder.heldObject) {
if (performSubSpanAroundDownstreamCalls) {
if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
Tracer.getInstance().completeRequestSpan();
else
Tracer.getInstance().completeSubSpan();
}
Tracer.getInstance().unregisterFromThread();
if (cause instanceof Errors.NativeIoException) {
// NativeIoExceptions are often set up to not have stack traces, which is bad for debugging.
// Wrap it in a NativeIoExceptionWrapper that maps to a 503 since this is likely a busted
// connection and a second attempt should work.
cause = new NativeIoExceptionWrapper("Caught a NativeIoException in the downstream streaming call pipeline. Wrapped it in a " + "NativeIoExceptionWrapper so that it maps to a 503 and provides a usable stack trace " + "in the logs.", (Errors.NativeIoException) cause);
}
callback.unrecoverableErrorOccurred(cause, true);
} else {
if (cause instanceof DownstreamIdleChannelTimeoutException) {
logger.debug("A channel used for downstream calls will be closed because it was idle too long. " + "This is normal behavior and does not indicate a downstream call failure: {}", cause.toString());
} else {
logger.warn("Received exception in downstream call pipeline after the call was finished. " + "Not necessarily anything to worry about but in case it helps debugging the " + "exception was: {}", cause.toString());
}
}
} finally {
// Mark the channel as broken so it will be closed and removed from the pool when it is returned.
markChannelAsBroken(ch);
// Release it back to the pool if possible/necessary so the pool can do its usual cleanup.
releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "error received in downstream pipeline: " + cause.toString(), distributedSpanStackToUse, mdcContextToUse);
// No matter what the cause is we want to make sure the channel is closed. Doing this raw ch.close()
// here will catch the cases where this channel does not have an active call but still needs to be
// closed (e.g. an idle channel timeout that happens in-between calls).
ch.close();
// Unhook the tracing and MDC stuff from this thread now that we're done.
unlinkTracingAndMdcFromCurrentThread(originalThreadInfo);
}
};
ChannelHandler errorHandler = new ChannelInboundHandlerAdapter() {
@Override
public void exceptionCaught(ChannelHandlerContext downstreamCallCtx, Throwable cause) throws Exception {
doErrorHandlingConsumer.accept(cause);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
if (logger.isDebugEnabled()) {
runnableWithTracingAndMdc(() -> logger.debug("Downstream channel closing. call_active={}, last_chunk_sent_downstream={}, channel_id={}", callActiveHolder.heldObject, lastChunkSentDownstreamHolder.heldObject, ctx.channel().toString()), distributedSpanStackToUse, mdcContextToUse).run();
}
// We only care if the channel was closed while the call was active.
if (callActiveHolder.heldObject)
doErrorHandlingConsumer.accept(new DownstreamChannelClosedUnexpectedlyException(ch));
super.channelInactive(ctx);
}
};
// Set up the HTTP client pipeline.
ChannelPipeline p = ch.pipeline();
List<String> registeredHandlerNames = p.names();
// remove any dangling idle channel timeout handler left over from a previous call; it may not have been
// removed back then because the removal wasn't running in the channel's eventLoop at that time.
if (registeredHandlerNames.contains(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME)) {
ChannelHandler idleHandler = p.get(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME);
if (idleHandler != null)
p.remove(idleHandler);
}
if (debugChannelLifecycleLoggingEnabled && !registeredHandlerNames.contains(DEBUG_LOGGER_HANDLER_NAME)) {
// Add the channel debug logger if desired.
p.addFirst(DEBUG_LOGGER_HANDLER_NAME, new LoggingHandler(DOWNSTREAM_CLIENT_CHANNEL_DEBUG_LOGGER_NAME, LogLevel.DEBUG));
}
// Add/replace a downstream call timeout detector.
addOrReplacePipelineHandler(new DownstreamIdleChannelTimeoutHandler(downstreamCallTimeoutMillis, () -> callActiveHolder.heldObject, true, "StreamingAsyncHttpClientChannel-call-timeout", distributedSpanStackToUse, mdcContextToUse), DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, p, registeredHandlerNames);
if (isSecureHttpsCall) {
// SSL call. Make sure we add the SSL handler if necessary.
if (!registeredHandlerNames.contains(SSL_HANDLER_NAME)) {
if (clientSslCtx == null) {
if (relaxedHttpsValidation) {
clientSslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
} else {
TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
tmf.init((KeyStore) null);
clientSslCtx = SslContextBuilder.forClient().trustManager(tmf).build();
}
}
p.addAfter(DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, SSL_HANDLER_NAME, clientSslCtx.newHandler(ch.alloc()));
}
} else {
// Not an SSL call. Remove the SSL handler if it's there.
if (registeredHandlerNames.contains(SSL_HANDLER_NAME))
p.remove(SSL_HANDLER_NAME);
}
// The HttpClientCodec handler deals with HTTP codec stuff so you don't have to. Set it up if it hasn't already
// been set up, and inspect it to make sure it's in a "ready to handle a new request" state. Some rare
// and currently unknown edge cases can cause us to hit this point with the HttpClientCodec in an unclean
// state, and if we barrel forward without cleaning this up the call will fail.
boolean pipelineContainsHttpClientCodec = registeredHandlerNames.contains(HTTP_CLIENT_CODEC_HANDLER_NAME);
boolean existingHttpClientCodecIsInBadState = false;
if (pipelineContainsHttpClientCodec) {
HttpClientCodec currentCodec = (HttpClientCodec) p.get(HTTP_CLIENT_CODEC_HANDLER_NAME);
int currentHttpClientCodecInboundState = determineHttpClientCodecInboundState(currentCodec);
if (currentHttpClientCodecInboundState != 0) {
runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec inbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_inbound_state={}", currentHttpClientCodecInboundState), distributedSpanStackToUse, mdcContextToUse).run();
existingHttpClientCodecIsInBadState = true;
} else {
int currentHttpClientCodecOutboundState = determineHttpClientCodecOutboundState(currentCodec);
if (currentHttpClientCodecOutboundState != 0) {
runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec outbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_outbound_state={}", currentHttpClientCodecOutboundState), distributedSpanStackToUse, mdcContextToUse).run();
existingHttpClientCodecIsInBadState = true;
}
}
}
// add the HttpClientCodec if the pipeline doesn't already contain one, or replace it if it was in a bad state.
if (!pipelineContainsHttpClientCodec || existingHttpClientCodecIsInBadState) {
addOrReplacePipelineHandler(new HttpClientCodec(4096, 8192, 8192, true), HTTP_CLIENT_CODEC_HANDLER_NAME, p, registeredHandlerNames);
}
// Update the chunk sender handler and error handler to the newly created versions that know about the correct
// callback, dtrace info, etc to use for this request.
addOrReplacePipelineHandler(chunkSenderHandler, CHUNK_SENDER_HANDLER_NAME, p, registeredHandlerNames);
addOrReplacePipelineHandler(errorHandler, ERROR_HANDLER_NAME, p, registeredHandlerNames);
}
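prepChannelForDownstreamCall leans on addOrReplacePipelineHandler(...) several times, but that helper is not part of this excerpt. A minimal sketch of what such a helper would plausibly do, matching the call sites above (the body and class name are assumptions, not riposte's actual implementation):

import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelPipeline;
import java.util.List;

final class PipelineUtils {

    // add the handler under the given name, or swap out the existing one if that name is already registered
    static void addOrReplacePipelineHandler(ChannelHandler handler, String handlerName,
                                            ChannelPipeline p, List<String> registeredHandlerNames) {
        if (registeredHandlerNames.contains(handlerName)) {
            p.replace(handlerName, handlerName, handler);
        } else {
            p.addLast(handlerName, handler);
        }
    }
}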