
Example 11 with ChannelHandler

use of io.netty.channel.ChannelHandler in project camel by apache.

the class NettyProducer method processWithConnectedChannel.

public void processWithConnectedChannel(final Exchange exchange, final BodyReleaseCallback callback, final ChannelFuture channelFuture, final Object body) {
    // remember channel so we can reuse it
    final Channel channel = channelFuture.channel();
    if (getConfiguration().isReuseChannel() && exchange.getProperty(NettyConstants.NETTY_CHANNEL) == null) {
        exchange.setProperty(NettyConstants.NETTY_CHANNEL, channel);
        // and defer closing the channel until we are done routing the exchange
        exchange.addOnCompletion(new SynchronizationAdapter() {

            @Override
            public void onComplete(Exchange exchange) {
                // should channel be closed after complete?
                Boolean close;
                if (ExchangeHelper.isOutCapable(exchange)) {
                    close = exchange.getOut().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
                } else {
                    close = exchange.getIn().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
                }
                // should we disconnect? the header can override the configuration
                boolean disconnect = getConfiguration().isDisconnect();
                if (close != null) {
                    disconnect = close;
                }
                if (disconnect) {
                    LOG.trace("Closing channel {} as routing the Exchange is done", channel);
                    NettyHelper.close(channel);
                }
                releaseChannel(channelFuture);
            }
        });
    }
    if (exchange.getIn().getHeader(NettyConstants.NETTY_REQUEST_TIMEOUT) != null) {
        long timeoutInMs = exchange.getIn().getHeader(NettyConstants.NETTY_REQUEST_TIMEOUT, Long.class);
        ChannelHandler oldHandler = channel.pipeline().get("timeout");
        ReadTimeoutHandler newHandler = new ReadTimeoutHandler(timeoutInMs, TimeUnit.MILLISECONDS);
        if (oldHandler == null) {
            channel.pipeline().addBefore("handler", "timeout", newHandler);
        } else {
            channel.pipeline().replace(oldHandler, "timeout", newHandler);
        }
    }
    // this will refer to the original callback since Netty will release the body by itself
    final AsyncCallback producerCallback;
    if (configuration.isReuseChannel()) {
        // use the callback as-is because we should not put the channel back in the pool as NettyProducerCallback would do;
        // when reuse channel is enabled, the onCompletion above returns the channel to the pool when the exchange is done
        producerCallback = callback.getOriginalCallback();
    } else {
        producerCallback = new NettyProducerCallback(channelFuture, callback.getOriginalCallback());
    }
    // set up the state as an attachment on the channel, so we can access it later when needed
    putState(channel, new NettyCamelState(producerCallback, exchange));
    // we need to set up the remote address information here
    InetSocketAddress remoteAddress = null;
    if (!isTcp()) {
        remoteAddress = new InetSocketAddress(configuration.getHost(), configuration.getPort());
    }
    // write body
    NettyHelper.writeBodyAsync(LOG, channel, remoteAddress, body, exchange, new ChannelFutureListener() {

        public void operationComplete(ChannelFuture channelFuture) throws Exception {
            LOG.trace("Operation complete {}", channelFuture);
            if (!channelFuture.isSuccess()) {
                // if not successful then exit (any exception has been handled by ClientChannelHandler#exceptionCaught)
                return;
            }
            // if we do not expect any reply then signal callback to continue routing
            if (!configuration.isSync()) {
                try {
                    // should channel be closed after complete?
                    Boolean close;
                    if (ExchangeHelper.isOutCapable(exchange)) {
                        close = exchange.getOut().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
                    } else {
                        close = exchange.getIn().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
                    }
                    // should we disconnect? the header can override the configuration
                    boolean disconnect = getConfiguration().isDisconnect();
                    if (close != null) {
                        disconnect = close;
                    }
                    // we should not close if we are reusing the channel
                    if (!configuration.isReuseChannel() && disconnect) {
                        if (LOG.isTraceEnabled()) {
                            LOG.trace("Closing channel when complete at address: {}", getEndpoint().getConfiguration().getAddress());
                        }
                        NettyHelper.close(channel);
                    }
                } finally {
                    // signal callback to continue routing
                    producerCallback.done(false);
                }
            }
        }
    });
}
Also used : ChannelFuture(io.netty.channel.ChannelFuture) InetSocketAddress(java.net.InetSocketAddress) NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel) EpollDatagramChannel(io.netty.channel.epoll.EpollDatagramChannel) EpollSocketChannel(io.netty.channel.epoll.EpollSocketChannel) Channel(io.netty.channel.Channel) NioDatagramChannel(io.netty.channel.socket.nio.NioDatagramChannel) AsyncCallback(org.apache.camel.AsyncCallback) ChannelHandler(io.netty.channel.ChannelHandler) ChannelFutureListener(io.netty.channel.ChannelFutureListener) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) ConnectException(java.net.ConnectException) CamelExchangeException(org.apache.camel.CamelExchangeException) Exchange(org.apache.camel.Exchange) ReadTimeoutHandler(io.netty.handler.timeout.ReadTimeoutHandler) SynchronizationAdapter(org.apache.camel.support.SynchronizationAdapter)
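The timeout branch above only runs when the exchange carries the NETTY_REQUEST_TIMEOUT header. A minimal sketch of setting that header from a Camel processor, assuming the camel-netty4 component's NettyConstants (the exact package differs between the netty and netty4 components):

import org.apache.camel.Exchange;
import org.apache.camel.Processor;
import org.apache.camel.component.netty4.NettyConstants;

// Hypothetical processor: requests a 5 second per-exchange timeout, which
// NettyProducer turns into the ReadTimeoutHandler named "timeout" above.
public class RequestTimeoutProcessor implements Processor {

    @Override
    public void process(Exchange exchange) throws Exception {
        // must be a Long, since the producer reads the header with Long.class
        exchange.getIn().setHeader(NettyConstants.NETTY_REQUEST_TIMEOUT, 5000L);
    }
}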

Example 12 with ChannelHandler

use of io.netty.channel.ChannelHandler in project camel by apache.

the class ClientChannelHandler method channelRead0.

@Override
protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
    messageReceived = true;
    if (LOG.isTraceEnabled()) {
        LOG.trace("Message received: {}", msg);
    }
    ChannelHandler handler = ctx.pipeline().get("timeout");
    if (handler != null) {
        LOG.trace("Removing timeout channel as we received message");
        ctx.pipeline().remove(handler);
    }
    Exchange exchange = getExchange(ctx);
    if (exchange == null) {
        // we just ignore the received message as the channel is closed
        return;
    }
    AsyncCallback callback = getAsyncCallback(ctx);
    Message message;
    try {
        message = getResponseMessage(exchange, ctx, msg);
    } catch (Exception e) {
        exchange.setException(e);
        callback.done(false);
        return;
    }
    // set the result on either IN or OUT on the original exchange depending on its pattern
    if (ExchangeHelper.isOutCapable(exchange)) {
        exchange.setOut(message);
    } else {
        exchange.setIn(message);
    }
    try {
        // should channel be closed after complete?
        Boolean close;
        if (ExchangeHelper.isOutCapable(exchange)) {
            close = exchange.getOut().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
        } else {
            close = exchange.getIn().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
        }
        // check the setting on the exchange property
        if (close == null) {
            close = exchange.getProperty(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
        }
        // should we disconnect, the header can override the configuration
        boolean disconnect = producer.getConfiguration().isDisconnect();
        if (close != null) {
            disconnect = close;
        }
        // we should not close if we are reusing the channel
        if (!producer.getConfiguration().isReuseChannel() && disconnect) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Closing channel when complete at address: {}", producer.getConfiguration().getAddress());
            }
            NettyHelper.close(ctx.channel());
        }
    } finally {
        // signal callback
        callback.done(false);
    }
}
Also used : Exchange(org.apache.camel.Exchange) Message(org.apache.camel.Message) AsyncCallback(org.apache.camel.AsyncCallback) ChannelHandler(io.netty.channel.ChannelHandler) CamelExchangeException(org.apache.camel.CamelExchangeException)
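The remove-on-first-response pattern above is easy to exercise in isolation with an EmbeddedChannel. A minimal sketch, with the handler name and timeout chosen to mirror the producer code (both are illustrative):

import java.util.concurrent.TimeUnit;

import io.netty.channel.ChannelHandler;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.timeout.ReadTimeoutHandler;

public class RemoveTimeoutHandlerSketch {

    public static void main(String[] args) {
        EmbeddedChannel ch = new EmbeddedChannel();
        // register the read-timeout handler under the name the producer uses
        ch.pipeline().addLast("timeout", new ReadTimeoutHandler(5, TimeUnit.SECONDS));

        // once the first response message arrives, look the handler up and
        // remove it so reply processing cannot be killed by the timeout
        ChannelHandler handler = ch.pipeline().get("timeout");
        if (handler != null) {
            ch.pipeline().remove(handler);
        }
        ch.finish();
    }
}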

Example 13 with ChannelHandler

use of io.netty.channel.ChannelHandler in project flink by apache.

the class NettyServerLowAndHighWatermarkTest method testLowAndHighWatermarks.

/**
	 * Verifies that the high and low watermark are set in relation to the page size.
	 *
	 * <p> The high and low water marks control the data flow to the wire. If the Netty write buffer
	 * has size greater or equal to the high water mark, the channel state becomes not-writable.
	 * Only when the size falls below the low water mark again, the state changes to writable again.
	 *
	 * <p> The Channel writability state needs to be checked by the handler when writing to the
	 * channel and is not enforced in the sense that you cannot write a channel, which is in
	 * not-writable state.
	 */
@Test
public void testLowAndHighWatermarks() throws Throwable {
    final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
    final NettyProtocol protocol = new NettyProtocol() {

        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            // The channel handler implements the test
            return new ChannelHandler[] { new TestLowAndHighWatermarkHandler(error) };
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new ChannelHandler[0];
        }
    };
    final NettyConfig conf = createConfig(PageSize);
    final NettyServerAndClient serverAndClient = initServerAndClient(protocol, conf);
    try {
        // We can't just check the config of this channel as it is the client's channel. We need
        // to check the server channel, because it is doing the data transfers.
        final Channel ch = connect(serverAndClient);
        // Wait for the channel to be closed
        awaitClose(ch);
        final Throwable t = error.get();
        if (t != null) {
            throw t;
        }
    } finally {
        shutdown(serverAndClient);
    }
}
Also used : Channel(io.netty.channel.Channel) AtomicReference(java.util.concurrent.atomic.AtomicReference) ChannelHandler(io.netty.channel.ChannelHandler) NettyServerAndClient(org.apache.flink.runtime.io.network.netty.NettyTestUtil.NettyServerAndClient) Test(org.junit.Test)
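The watermark behavior described in the Javadoc is configured per channel via ChannelOption. A minimal sketch, assuming Netty 4.1's WriteBufferWaterMark (Netty 4.0 uses the separate WRITE_BUFFER_HIGH_WATER_MARK and WRITE_BUFFER_LOW_WATER_MARK options instead):

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOption;
import io.netty.channel.WriteBufferWaterMark;

public class WatermarkSketch {

    static ServerBootstrap configure(ServerBootstrap bootstrap) {
        // low watermark 8 KiB, high watermark 32 KiB: once 32 KiB or more is
        // queued in the write buffer, isWritable() flips to false; it only
        // flips back once the buffer drains below 8 KiB
        return bootstrap.childOption(ChannelOption.WRITE_BUFFER_WATER_MARK,
                new WriteBufferWaterMark(8 * 1024, 32 * 1024));
    }

    // writability is advisory, so a handler has to check it itself
    static class BackpressureAwareHandler extends ChannelDuplexHandler {

        @Override
        public void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception {
            if (!ctx.channel().isWritable()) {
                // stop producing data until the buffer drains below the low mark
            }
            super.channelWritabilityChanged(ctx);
        }
    }
}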

Example 14 with ChannelHandler

use of io.netty.channel.ChannelHandler in project riposte by Nike-Inc.

the class HttpChannelInitializerTest method initChannel_adds_HttpRequestDecoder_with_config_values_coming_from_httpRequestDecoderConfig.

@Test
public void initChannel_adds_HttpRequestDecoder_with_config_values_coming_from_httpRequestDecoderConfig() {
    // given
    HttpChannelInitializer hci = basicHttpChannelInitializerNoUtilityHandlers();
    HttpRequestDecoderConfig configWithCustomValues = new HttpRequestDecoderConfig() {

        @Override
        public int maxInitialLineLength() {
            return 123;
        }

        @Override
        public int maxHeaderSize() {
            return 456;
        }

        @Override
        public int maxChunkSize() {
            return 789;
        }
    };
    Whitebox.setInternalState(hci, "httpRequestDecoderConfig", configWithCustomValues);
    // when
    hci.initChannel(socketChannelMock);
    // then
    ArgumentCaptor<ChannelHandler> channelHandlerArgumentCaptor = ArgumentCaptor.forClass(ChannelHandler.class);
    verify(channelPipelineMock, atLeastOnce()).addLast(anyString(), channelHandlerArgumentCaptor.capture());
    List<ChannelHandler> handlers = channelHandlerArgumentCaptor.getAllValues();
    Pair<Integer, HttpRequestDecoder> requestDecoderHandlerPair = findChannelHandler(handlers, HttpRequestDecoder.class);
    Assertions.assertThat(requestDecoderHandlerPair).isNotNull();
    HttpRequestDecoder decoderHandler = requestDecoderHandlerPair.getValue();
    int actualMaxInitialLineLength = extractField(extractField(decoderHandler, "lineParser"), "maxLength");
    int actualMaxHeaderSize = extractField(extractField(decoderHandler, "headerParser"), "maxLength");
    int actualMaxChunkSize = extractField(decoderHandler, "maxChunkSize");
    Assertions.assertThat(actualMaxInitialLineLength).isEqualTo(configWithCustomValues.maxInitialLineLength());
    Assertions.assertThat(actualMaxHeaderSize).isEqualTo(configWithCustomValues.maxHeaderSize());
    Assertions.assertThat(actualMaxChunkSize).isEqualTo(configWithCustomValues.maxChunkSize());
}
Also used : HttpRequestDecoderConfig(com.nike.riposte.server.config.ServerConfig.HttpRequestDecoderConfig) HttpRequestDecoder(io.netty.handler.codec.http.HttpRequestDecoder) ChannelHandler(io.netty.channel.ChannelHandler) Endpoint(com.nike.riposte.server.http.Endpoint) Test(org.junit.Test)
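For reference, the three config values the test asserts on line up with the arguments of Netty's HttpRequestDecoder(int, int, int) constructor. A minimal sketch of wiring them directly (the handler name is illustrative):

import io.netty.channel.ChannelPipeline;
import io.netty.handler.codec.http.HttpRequestDecoder;

public class DecoderConfigSketch {

    // maxInitialLineLength, maxHeaderSize, maxChunkSize: requests exceeding
    // these limits are rejected by the decoder
    static void addDecoder(ChannelPipeline pipeline) {
        pipeline.addLast("decoder", new HttpRequestDecoder(123, 456, 789));
    }
}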

Example 15 with ChannelHandler

use of io.netty.channel.ChannelHandler in project riposte by Nike-Inc.

the class StreamingAsyncHttpClient method prepChannelForDownstreamCall.

protected void prepChannelForDownstreamCall(ChannelPool pool, Channel ch, StreamingCallback callback, Deque<Span> distributedSpanStackToUse, Map<String, String> mdcContextToUse, boolean isSecureHttpsCall, boolean relaxedHttpsValidation, boolean performSubSpanAroundDownstreamCalls, long downstreamCallTimeoutMillis, ObjectHolder<Boolean> callActiveHolder, ObjectHolder<Boolean> lastChunkSentDownstreamHolder) throws SSLException, NoSuchAlgorithmException, KeyStoreException {
    ChannelHandler chunkSenderHandler = new SimpleChannelInboundHandler<HttpObject>() {

        @Override
        protected void channelRead0(ChannelHandlerContext downstreamCallCtx, HttpObject msg) throws Exception {
            try {
                // a message received after the call is fully processed should not trigger the behavior a second time.
                if (callActiveHolder.heldObject) {
                    if (msg instanceof LastHttpContent) {
                        lastChunkSentDownstreamHolder.heldObject = true;
                        if (performSubSpanAroundDownstreamCalls) {
                            // Complete the subspan.
                            runnableWithTracingAndMdc(() -> {
                                if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
                                    Tracer.getInstance().completeRequestSpan();
                                else
                                    Tracer.getInstance().completeSubSpan();
                            }, distributedSpanStackToUse, mdcContextToUse).run();
                        }
                    }
                    HttpObject msgToPass = msg;
                    if (msg instanceof HttpResponse) {
                        // We can't pass the original HttpResponse back to the callback due to intricacies of how
                        // Netty handles determining the last chunk. If we do, and the callback ends up writing
                        // the message out to the client (which happens during proxy routing for example), then
                        // msg's headers might get modified - potentially causing this channel pipeline to
                        // never send a LastHttpContent, which will in turn cause an indefinite hang.
                        HttpResponse origHttpResponse = (HttpResponse) msg;
                        HttpResponse httpResponse = (msg instanceof FullHttpResponse) ? new DefaultFullHttpResponse(origHttpResponse.getProtocolVersion(), origHttpResponse.getStatus(), ((FullHttpResponse) msg).content()) : new DefaultHttpResponse(origHttpResponse.getProtocolVersion(), origHttpResponse.getStatus());
                        httpResponse.headers().add(origHttpResponse.headers());
                        msgToPass = httpResponse;
                    }
                    callback.messageReceived(msgToPass);
                } else {
                    if (shouldLogBadMessagesAfterRequestFinishes) {
                        runnableWithTracingAndMdc(() -> logger.warn("Received HttpObject msg when call was not active: {}", String.valueOf(msg)), distributedSpanStackToUse, mdcContextToUse).run();
                    }
                }
            } finally {
                if (msg instanceof LastHttpContent) {
                    releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "last content chunk sent", distributedSpanStackToUse, mdcContextToUse);
                }
            }
        }
    };
    Consumer<Throwable> doErrorHandlingConsumer = (cause) -> {
        Pair<Deque<Span>, Map<String, String>> originalThreadInfo = null;
        try {
            // Setup tracing and MDC so our log messages have the correct distributed trace info, etc.
            originalThreadInfo = linkTracingAndMdcToCurrentThread(distributedSpanStackToUse, mdcContextToUse);
            // an error received after the call is fully processed should not trigger the behavior a second time.
            if (callActiveHolder.heldObject) {
                if (performSubSpanAroundDownstreamCalls) {
                    if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
                        Tracer.getInstance().completeRequestSpan();
                    else
                        Tracer.getInstance().completeSubSpan();
                }
                Tracer.getInstance().unregisterFromThread();
                if (cause instanceof Errors.NativeIoException) {
                    // NativeIoExceptions are often set up to not have stack traces, which is bad for debugging.
                    // Wrap it in a NativeIoExceptionWrapper that maps to a 503 since this is likely a busted
                    // connection and a second attempt should work.
                    cause = new NativeIoExceptionWrapper("Caught a NativeIoException in the downstream streaming call pipeline. Wrapped it in a " + "NativeIoExceptionWrapper so that it maps to a 503 and provides a usable stack trace " + "in the logs.", (Errors.NativeIoException) cause);
                }
                callback.unrecoverableErrorOccurred(cause, true);
            } else {
                if (cause instanceof DownstreamIdleChannelTimeoutException) {
                    logger.debug("A channel used for downstream calls will be closed because it was idle too long. " + "This is normal behavior and does not indicate a downstream call failure: {}", cause.toString());
                } else {
                    logger.warn("Received exception in downstream call pipeline after the call was finished. " + "Not necessarily anything to worry about but in case it helps debugging the " + "exception was: {}", cause.toString());
                }
            }
        } finally {
            // Mark the channel as broken so it will be closed and removed from the pool when it is returned.
            markChannelAsBroken(ch);
            // Release it back to the pool if possible/necessary so the pool can do its usual cleanup.
            releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "error received in downstream pipeline: " + cause.toString(), distributedSpanStackToUse, mdcContextToUse);
            // No matter what the cause is we want to make sure the channel is closed. Doing this raw ch.close()
            // here will catch the cases where this channel does not have an active call but still needs to be
            // closed (e.g. an idle channel timeout that happens in-between calls).
            ch.close();
            // Unhook the tracing and MDC stuff from this thread now that we're done.
            unlinkTracingAndMdcFromCurrentThread(originalThreadInfo);
        }
    };
    ChannelHandler errorHandler = new ChannelInboundHandlerAdapter() {

        @Override
        public void exceptionCaught(ChannelHandlerContext downstreamCallCtx, Throwable cause) throws Exception {
            doErrorHandlingConsumer.accept(cause);
        }

        @Override
        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
            if (logger.isDebugEnabled()) {
                runnableWithTracingAndMdc(() -> logger.debug("Downstream channel closing. call_active={}, last_chunk_sent_downstream={}, channel_id={}", callActiveHolder.heldObject, lastChunkSentDownstreamHolder.heldObject, ctx.channel().toString()), distributedSpanStackToUse, mdcContextToUse).run();
            }
            // We only care if the channel was closed while the call was active.
            if (callActiveHolder.heldObject)
                doErrorHandlingConsumer.accept(new DownstreamChannelClosedUnexpectedlyException(ch));
            super.channelInactive(ctx);
        }
    };
    // Set up the HTTP client pipeline.
    ChannelPipeline p = ch.pipeline();
    List<String> registeredHandlerNames = p.names();
    // remove any idle channel timeout handler left over from when the channel sat in the pool; it
    // couldn't be removed at that time because it wasn't in the channel's eventLoop.
    if (registeredHandlerNames.contains(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME)) {
        ChannelHandler idleHandler = p.get(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME);
        if (idleHandler != null)
            p.remove(idleHandler);
    }
    if (debugChannelLifecycleLoggingEnabled && !registeredHandlerNames.contains(DEBUG_LOGGER_HANDLER_NAME)) {
        // Add the channel debug logger if desired.
        p.addFirst(DEBUG_LOGGER_HANDLER_NAME, new LoggingHandler(DOWNSTREAM_CLIENT_CHANNEL_DEBUG_LOGGER_NAME, LogLevel.DEBUG));
    }
    // Add/replace a downstream call timeout detector.
    addOrReplacePipelineHandler(new DownstreamIdleChannelTimeoutHandler(downstreamCallTimeoutMillis, () -> callActiveHolder.heldObject, true, "StreamingAsyncHttpClientChannel-call-timeout", distributedSpanStackToUse, mdcContextToUse), DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, p, registeredHandlerNames);
    if (isSecureHttpsCall) {
        // SSL call. Make sure we add the SSL handler if necessary.
        if (!registeredHandlerNames.contains(SSL_HANDLER_NAME)) {
            if (clientSslCtx == null) {
                if (relaxedHttpsValidation) {
                    clientSslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
                } else {
                    TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
                    tmf.init((KeyStore) null);
                    clientSslCtx = SslContextBuilder.forClient().trustManager(tmf).build();
                }
            }
            p.addAfter(DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, SSL_HANDLER_NAME, clientSslCtx.newHandler(ch.alloc()));
        }
    } else {
        // Not an SSL call. Remove the SSL handler if it's there.
        if (registeredHandlerNames.contains(SSL_HANDLER_NAME))
            p.remove(SSL_HANDLER_NAME);
    }
    // The HttpClientCodec handler deals with HTTP codec stuff so you don't have to. Set it up if it hasn't already
    // been set up, and inspect it to make sure it's in a "ready to handle a new request" state. Some rare
    // and currently unknown edge cases can cause us to hit this point with the HttpClientCodec in an unclean
    // state, and if we barrel forward without cleaning this up the call will fail.
    boolean pipelineContainsHttpClientCodec = registeredHandlerNames.contains(HTTP_CLIENT_CODEC_HANDLER_NAME);
    boolean existingHttpClientCodecIsInBadState = false;
    if (pipelineContainsHttpClientCodec) {
        HttpClientCodec currentCodec = (HttpClientCodec) p.get(HTTP_CLIENT_CODEC_HANDLER_NAME);
        int currentHttpClientCodecInboundState = determineHttpClientCodecInboundState(currentCodec);
        if (currentHttpClientCodecInboundState != 0) {
            runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec inbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_inbound_state={}", currentHttpClientCodecInboundState), distributedSpanStackToUse, mdcContextToUse).run();
            existingHttpClientCodecIsInBadState = true;
        } else {
            int currentHttpClientCodecOutboundState = determineHttpClientCodecOutboundState(currentCodec);
            if (currentHttpClientCodecOutboundState != 0) {
                runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec outbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_outbound_state={}", currentHttpClientCodecOutboundState), distributedSpanStackToUse, mdcContextToUse).run();
                existingHttpClientCodecIsInBadState = true;
            }
        }
    }
    // add the HttpClientCodec if the pipeline doesn't already contain one, or replace it if it was in a bad state.
    if (!pipelineContainsHttpClientCodec || existingHttpClientCodecIsInBadState) {
        addOrReplacePipelineHandler(new HttpClientCodec(4096, 8192, 8192, true), HTTP_CLIENT_CODEC_HANDLER_NAME, p, registeredHandlerNames);
    }
    // Update the chunk sender handler and error handler to the newly created versions that know about the correct
    // callback, dtrace info, etc. to use for this request.
    addOrReplacePipelineHandler(chunkSenderHandler, CHUNK_SENDER_HANDLER_NAME, p, registeredHandlerNames);
    addOrReplacePipelineHandler(errorHandler, ERROR_HANDLER_NAME, p, registeredHandlerNames);
}
Also used : AttributeKey(io.netty.util.AttributeKey) Span(com.nike.wingtips.Span) HttpHeaders(io.netty.handler.codec.http.HttpHeaders) DefaultThreadFactory(io.netty.util.concurrent.DefaultThreadFactory) ChannelInboundHandlerAdapter(io.netty.channel.ChannelInboundHandlerAdapter) LoggerFactory(org.slf4j.LoggerFactory) Random(java.util.Random) KeyStoreException(java.security.KeyStoreException) AsyncNettyHelper.unlinkTracingAndMdcFromCurrentThread(com.nike.riposte.util.AsyncNettyHelper.unlinkTracingAndMdcFromCurrentThread) HttpObject(io.netty.handler.codec.http.HttpObject) HttpClientCodec(io.netty.handler.codec.http.HttpClientCodec) InetAddress(java.net.InetAddress) ChannelPromise(io.netty.channel.ChannelPromise) Map(java.util.Map) ThreadFactory(java.util.concurrent.ThreadFactory) SocketChannel(io.netty.channel.socket.SocketChannel) HttpObjectDecoder(io.netty.handler.codec.http.HttpObjectDecoder) HttpRequest(io.netty.handler.codec.http.HttpRequest) TrustManagerFactory(javax.net.ssl.TrustManagerFactory) DOWNSTREAM_CALL_CONNECTION_SETUP_TIME_NANOS_REQUEST_ATTR_KEY(com.nike.riposte.server.handler.ProxyRouterEndpointExecutionHandler.DOWNSTREAM_CALL_CONNECTION_SETUP_TIME_NANOS_REQUEST_ATTR_KEY) DownstreamIdleChannelTimeoutException(com.nike.riposte.server.error.exception.DownstreamIdleChannelTimeoutException) ChannelHealthChecker(io.netty.channel.pool.ChannelHealthChecker) DownstreamChannelClosedUnexpectedlyException(com.nike.riposte.server.error.exception.DownstreamChannelClosedUnexpectedlyException) KeyStore(java.security.KeyStore) ChannelPipeline(io.netty.channel.ChannelPipeline) InetSocketAddress(java.net.InetSocketAddress) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) List(java.util.List) SSLException(javax.net.ssl.SSLException) AbstractChannelPoolHandler(io.netty.channel.pool.AbstractChannelPoolHandler) LogLevel(io.netty.handler.logging.LogLevel) ChannelAttributes(com.nike.riposte.server.channelpipeline.ChannelAttributes) DefaultHttpResponse(io.netty.handler.codec.http.DefaultHttpResponse) NoSuchAlgorithmException(java.security.NoSuchAlgorithmException) HttpObjectEncoder(io.netty.handler.codec.http.HttpObjectEncoder) DefaultFullHttpResponse(io.netty.handler.codec.http.DefaultFullHttpResponse) HttpResponse(io.netty.handler.codec.http.HttpResponse) ChannelPoolMap(io.netty.channel.pool.ChannelPoolMap) HttpProcessingState(com.nike.riposte.server.http.HttpProcessingState) NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel) HttpRequestEncoder(io.netty.handler.codec.http.HttpRequestEncoder) DownstreamIdleChannelTimeoutHandler(com.nike.riposte.client.asynchttp.netty.downstreampipeline.DownstreamIdleChannelTimeoutHandler) RequestInfo(com.nike.riposte.server.http.RequestInfo) ChannelOption(io.netty.channel.ChannelOption) LoggingHandler(io.netty.handler.logging.LoggingHandler) Tracer(com.nike.wingtips.Tracer) CompletableFuture(java.util.concurrent.CompletableFuture) Errors(io.netty.channel.unix.Errors) Deque(java.util.Deque) LastHttpContent(io.netty.handler.codec.http.LastHttpContent) EpollSocketChannel(io.netty.channel.epoll.EpollSocketChannel) AsyncNettyHelper.linkTracingAndMdcToCurrentThread(com.nike.riposte.util.AsyncNettyHelper.linkTracingAndMdcToCurrentThread) ChannelHandlerContext(io.netty.channel.ChannelHandlerContext) InsecureTrustManagerFactory(io.netty.handler.ssl.util.InsecureTrustManagerFactory) HttpRequestTracingUtils(com.nike.wingtips.http.HttpRequestTracingUtils) BiConsumer(java.util.function.BiConsumer) EpollEventLoopGroup(io.netty.channel.epoll.EpollEventLoopGroup) HttpContent(io.netty.handler.codec.http.HttpContent) Attribute(io.netty.util.Attribute) Logger(org.slf4j.Logger) EventLoopGroup(io.netty.channel.EventLoopGroup) CombinedChannelDuplexHandler(io.netty.channel.CombinedChannelDuplexHandler) SslContext(io.netty.handler.ssl.SslContext) Promise(io.netty.util.concurrent.Promise) HostnameResolutionException(com.nike.riposte.server.error.exception.HostnameResolutionException) Field(java.lang.reflect.Field) UnknownHostException(java.net.UnknownHostException) ChannelFuture(io.netty.channel.ChannelFuture) Epoll(io.netty.channel.epoll.Epoll) Consumer(java.util.function.Consumer) Channel(io.netty.channel.Channel) SimpleChannelPool(io.netty.channel.pool.SimpleChannelPool) Bootstrap(io.netty.bootstrap.Bootstrap) FullHttpResponse(io.netty.handler.codec.http.FullHttpResponse) WrapperException(com.nike.backstopper.exception.WrapperException) MDC(org.slf4j.MDC) SimpleChannelInboundHandler(io.netty.channel.SimpleChannelInboundHandler) NativeIoExceptionWrapper(com.nike.riposte.server.error.exception.NativeIoExceptionWrapper) AsyncNettyHelper.runnableWithTracingAndMdc(com.nike.riposte.util.AsyncNettyHelper.runnableWithTracingAndMdc) ChannelPool(io.netty.channel.pool.ChannelPool) SslContextBuilder(io.netty.handler.ssl.SslContextBuilder) ChannelHandler(io.netty.channel.ChannelHandler) Pair(com.nike.internal.util.Pair) AbstractChannelPoolMap(io.netty.channel.pool.AbstractChannelPoolMap) Future(io.netty.util.concurrent.Future)
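The addOrReplacePipelineHandler(...) helper used throughout this method is project-specific and not shown here. A plausible minimal sketch of its contract, assuming it only needs the pipeline and the list of already-registered names (a hypothetical reconstruction, not the riposte implementation):

import java.util.List;

import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelPipeline;

public class PipelineHelperSketch {

    // replace the handler if one is already registered under this name,
    // otherwise append the new handler to the end of the pipeline
    static void addOrReplacePipelineHandler(ChannelHandler handler, String name,
                                            ChannelPipeline pipeline, List<String> registeredNames) {
        if (registeredNames.contains(name)) {
            pipeline.replace(name, name, handler);
        } else {
            pipeline.addLast(name, handler);
        }
    }
}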

Aggregations

ChannelHandler (io.netty.channel.ChannelHandler) 216
Test (org.junit.Test) 88
ChannelHandlerContext (io.netty.channel.ChannelHandlerContext) 44
Channel (io.netty.channel.Channel) 27
ChannelPipeline (io.netty.channel.ChannelPipeline) 26
SslHandler (io.netty.handler.ssl.SslHandler) 25
Test (org.junit.jupiter.api.Test) 24
EmbeddedChannel (io.netty.channel.embedded.EmbeddedChannel) 23
ChannelInboundHandlerAdapter (io.netty.channel.ChannelInboundHandlerAdapter) 21
FilterChainMatchingHandler (io.grpc.xds.FilterChainMatchingProtocolNegotiators.FilterChainMatchingHandler) 20
ChannelFuture (io.netty.channel.ChannelFuture) 20
FilterChainSelector (io.grpc.xds.FilterChainMatchingProtocolNegotiators.FilterChainMatchingHandler.FilterChainSelector) 19
ChannelHandlerAdapter (io.netty.channel.ChannelHandlerAdapter) 18
InetSocketAddress (java.net.InetSocketAddress) 18
DownstreamTlsContext (io.grpc.xds.EnvoyServerProtoData.DownstreamTlsContext) 17
FilterChain (io.grpc.xds.EnvoyServerProtoData.FilterChain) 17
LineBasedFrameDecoder (io.netty.handler.codec.LineBasedFrameDecoder) 16
AtomicReference (java.util.concurrent.atomic.AtomicReference) 16
TcpIngester (com.wavefront.ingester.TcpIngester) 15
ByteBuf (io.netty.buffer.ByteBuf) 13