Example 1 with DownstreamIdleChannelTimeoutHandler

Use of com.nike.riposte.client.asynchttp.netty.downstreampipeline.DownstreamIdleChannelTimeoutHandler in project riposte by Nike-Inc.

In the class StreamingAsyncHttpClient, the method prepChannelForDownstreamCall:

protected void prepChannelForDownstreamCall(ChannelPool pool, Channel ch, StreamingCallback callback, Deque<Span> distributedSpanStackToUse, Map<String, String> mdcContextToUse, boolean isSecureHttpsCall, boolean relaxedHttpsValidation, boolean performSubSpanAroundDownstreamCalls, long downstreamCallTimeoutMillis, ObjectHolder<Boolean> callActiveHolder, ObjectHolder<Boolean> lastChunkSentDownstreamHolder) throws SSLException, NoSuchAlgorithmException, KeyStoreException {
    ChannelHandler chunkSenderHandler = new SimpleChannelInboundHandler<HttpObject>() {

        @Override
        protected void channelRead0(ChannelHandlerContext downstreamCallCtx, HttpObject msg) throws Exception {
            try {
                // Only do the work if the call is still active. Messages received after
                //      the call is fully processed should not trigger the behavior a second time.
                if (callActiveHolder.heldObject) {
                    if (msg instanceof LastHttpContent) {
                        lastChunkSentDownstreamHolder.heldObject = true;
                        if (performSubSpanAroundDownstreamCalls) {
                            // Complete the subspan.
                            runnableWithTracingAndMdc(() -> {
                                if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
                                    Tracer.getInstance().completeRequestSpan();
                                else
                                    Tracer.getInstance().completeSubSpan();
                            }, distributedSpanStackToUse, mdcContextToUse).run();
                        }
                    }
                    HttpObject msgToPass = msg;
                    if (msg instanceof HttpResponse) {
                        // We can't pass the original HttpResponse back to the callback due to intricacies of how
                        //      Netty handles determining the last chunk. If we do, and the callback ends up writing
                        //      the message out to the client (which happens during proxy routing for example), then
                        //      msg's headers might get modified - potentially causing this channel pipeline to
                        //      never send a LastHttpContent, which will in turn cause an indefinite hang.
                        HttpResponse origHttpResponse = (HttpResponse) msg;
                        HttpResponse httpResponse = (msg instanceof FullHttpResponse)
                            ? new DefaultFullHttpResponse(origHttpResponse.getProtocolVersion(), origHttpResponse.getStatus(), ((FullHttpResponse) msg).content())
                            : new DefaultHttpResponse(origHttpResponse.getProtocolVersion(), origHttpResponse.getStatus());
                        httpResponse.headers().add(origHttpResponse.headers());
                        msgToPass = httpResponse;
                    }
                    callback.messageReceived(msgToPass);
                } else {
                    if (shouldLogBadMessagesAfterRequestFinishes) {
                        runnableWithTracingAndMdc(() -> logger.warn("Received HttpObject msg when call was not active: {}", String.valueOf(msg)), distributedSpanStackToUse, mdcContextToUse).run();
                    }
                }
            } finally {
                if (msg instanceof LastHttpContent) {
                    releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "last content chunk sent", distributedSpanStackToUse, mdcContextToUse);
                }
            }
        }
    };
    Consumer<Throwable> doErrorHandlingConsumer = (cause) -> {
        Pair<Deque<Span>, Map<String, String>> originalThreadInfo = null;
        try {
            // Setup tracing and MDC so our log messages have the correct distributed trace info, etc.
            originalThreadInfo = linkTracingAndMdcToCurrentThread(distributedSpanStackToUse, mdcContextToUse);
            // Only do the work if the call is still active. Errors that arrive after the
            //      call is fully processed should not trigger the behavior a second time.
            if (callActiveHolder.heldObject) {
                if (performSubSpanAroundDownstreamCalls) {
                    if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
                        Tracer.getInstance().completeRequestSpan();
                    else
                        Tracer.getInstance().completeSubSpan();
                }
                Tracer.getInstance().unregisterFromThread();
                if (cause instanceof Errors.NativeIoException) {
                    // NativeIoExceptions are often set up to not have stack traces, which is bad for debugging.
                    //      Wrap it in a NativeIoExceptionWrapper that maps to a 503 since this is likely a busted
                    //      connection and a second attempt should work.
                    cause = new NativeIoExceptionWrapper("Caught a NativeIoException in the downstream streaming call pipeline. Wrapped it in a " + "NativeIoExceptionWrapper so that it maps to a 503 and provides a usable stack trace " + "in the logs.", (Errors.NativeIoException) cause);
                }
                callback.unrecoverableErrorOccurred(cause);
            } else {
                if (cause instanceof DownstreamIdleChannelTimeoutException) {
                    logger.debug("A channel used for downstream calls will be closed because it was idle too long. " + "This is normal behavior and does not indicate a downstream call failure: {}", cause.toString());
                } else {
                    logger.warn("Received exception in downstream call pipeline after the call was finished. " + "Not necessarily anything to worry about but in case it helps debugging the " + "exception was: {}", cause.toString());
                }
            }
        } finally {
            // Mark the channel as broken so it will be closed and removed from the pool when it is returned.
            markChannelAsBroken(ch);
            // Release it back to the pool if possible/necessary so the pool can do its usual cleanup.
            releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "error received in downstream pipeline: " + cause.toString(), distributedSpanStackToUse, mdcContextToUse);
            // No matter what the cause is we want to make sure the channel is closed. Doing this raw ch.close()
            //      here will catch the cases where this channel does not have an active call but still needs to be
            //      closed (e.g. an idle channel timeout that happens in-between calls).
            ch.close();
            // Unhook the tracing and MDC stuff from this thread now that we're done.
            unlinkTracingAndMdcFromCurrentThread(originalThreadInfo);
        }
    };
    ChannelHandler errorHandler = new ChannelInboundHandlerAdapter() {

        @Override
        public void exceptionCaught(ChannelHandlerContext downstreamCallCtx, Throwable cause) throws Exception {
            doErrorHandlingConsumer.accept(cause);
        }

        @Override
        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
            if (logger.isDebugEnabled()) {
                runnableWithTracingAndMdc(() -> logger.debug("Downstream channel closing. call_active={}, last_chunk_sent_downstream={}, channel_id={}", callActiveHolder.heldObject, lastChunkSentDownstreamHolder.heldObject, ctx.channel().toString()), distributedSpanStackToUse, mdcContextToUse).run();
            }
            // We only care if the channel was closed while the call was active.
            if (callActiveHolder.heldObject)
                doErrorHandlingConsumer.accept(new DownstreamChannelClosedUnexpectedlyException(ch));
            super.channelInactive(ctx);
        }
    };
    // Set up the HTTP client pipeline.
    ChannelPipeline p = ch.pipeline();
    List<String> registeredHandlerNames = p.names();
    // Remove the idle channel timeout handler if it's still in the pipeline. pollChannel() only disables it, since it
    //      couldn't be removed at that time because it wasn't in the channel's eventLoop.
    if (registeredHandlerNames.contains(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME)) {
        ChannelHandler idleHandler = p.get(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME);
        if (idleHandler != null)
            p.remove(idleHandler);
    }
    if (debugChannelLifecycleLoggingEnabled && !registeredHandlerNames.contains(DEBUG_LOGGER_HANDLER_NAME)) {
        // Add the channel debug logger if desired.
        p.addFirst(DEBUG_LOGGER_HANDLER_NAME, new LoggingHandler(DOWNSTREAM_CLIENT_CHANNEL_DEBUG_LOGGER_NAME, LogLevel.DEBUG));
    }
    // Add/replace a downstream call timeout detector.
    addOrReplacePipelineHandler(new DownstreamIdleChannelTimeoutHandler(downstreamCallTimeoutMillis, () -> callActiveHolder.heldObject, true, "StreamingAsyncHttpClientChannel-call-timeout", distributedSpanStackToUse, mdcContextToUse), DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, p, registeredHandlerNames);
    if (isSecureHttpsCall) {
        // SSL call. Make sure we add the SSL handler if necessary.
        if (!registeredHandlerNames.contains(SSL_HANDLER_NAME)) {
            if (clientSslCtx == null) {
                if (relaxedHttpsValidation) {
                    clientSslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
                } else {
                    TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
                    tmf.init((KeyStore) null);
                    clientSslCtx = SslContextBuilder.forClient().trustManager(tmf).build();
                }
            }
            p.addAfter(DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, SSL_HANDLER_NAME, clientSslCtx.newHandler(ch.alloc()));
        }
    } else {
        // Not an SSL call. Remove the SSL handler if it's there.
        if (registeredHandlerNames.contains(SSL_HANDLER_NAME))
            p.remove(SSL_HANDLER_NAME);
    }
    // The HttpClientCodec handler deals with HTTP codec stuff so you don't have to. Set it up if it hasn't already
    //      been set up, and inspect it to make sure it's in a "ready to handle a new request" state. Some rare
    //      and currently unknown edge cases can cause us to hit this point with the HttpClientCodec in an unclean
    //      state, and if we barrel forward without cleaning this up the call will fail.
    boolean pipelineContainsHttpClientCodec = registeredHandlerNames.contains(HTTP_CLIENT_CODEC_HANDLER_NAME);
    boolean existingHttpClientCodecIsInBadState = false;
    if (pipelineContainsHttpClientCodec) {
        HttpClientCodec currentCodec = (HttpClientCodec) p.get(HTTP_CLIENT_CODEC_HANDLER_NAME);
        int currentHttpClientCodecInboundState = determineHttpClientCodecInboundState(currentCodec);
        if (currentHttpClientCodecInboundState != 0) {
            runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec inbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_inbound_state={}", currentHttpClientCodecInboundState), distributedSpanStackToUse, mdcContextToUse).run();
            existingHttpClientCodecIsInBadState = true;
        } else {
            int currentHttpClientCodecOutboundState = determineHttpClientCodecOutboundState(currentCodec);
            if (currentHttpClientCodecOutboundState != 0) {
                runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec outbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_outbound_state={}", currentHttpClientCodecOutboundState), distributedSpanStackToUse, mdcContextToUse).run();
                existingHttpClientCodecIsInBadState = true;
            }
        }
    }
    // Add the HttpClientCodec if it wasn't already in the pipeline, or replace it if it was in a bad state.
    if (!pipelineContainsHttpClientCodec || existingHttpClientCodecIsInBadState) {
        addOrReplacePipelineHandler(new HttpClientCodec(4096, 8192, 8192, true), HTTP_CLIENT_CODEC_HANDLER_NAME, p, registeredHandlerNames);
    }
    // Update the chunk sender handler and error handler to the newly created versions that know about the correct
    //      callback, dtrace info, etc to use for this request.
    addOrReplacePipelineHandler(chunkSenderHandler, CHUNK_SENDER_HANDLER_NAME, p, registeredHandlerNames);
    addOrReplacePipelineHandler(errorHandler, ERROR_HANDLER_NAME, p, registeredHandlerNames);
}
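
The helper addOrReplacePipelineHandler(...) used throughout the method above is not shown in this snippet. Below is a minimal sketch of what such a helper might look like, inferred purely from its call sites (handler, handler name, pipeline, and registered handler names); it is not the actual riposte implementation, which may position handlers differently:

protected void addOrReplacePipelineHandler(ChannelHandler handler, String handlerName, ChannelPipeline p, List<String> registeredHandlerNames) {
    // Hypothetical sketch inferred from the call sites above; not the actual riposte implementation.
    if (registeredHandlerNames.contains(handlerName)) {
        // A handler is already registered under this name - swap it out in place.
        p.replace(handlerName, handlerName, handler);
    } else {
        // No handler registered under this name yet - append the new one to the end of the pipeline.
        p.addLast(handlerName, handler);
    }
}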
Also used : AttributeKey(io.netty.util.AttributeKey) Span(com.nike.wingtips.Span) HttpHeaders(io.netty.handler.codec.http.HttpHeaders) DefaultThreadFactory(io.netty.util.concurrent.DefaultThreadFactory) ChannelInboundHandlerAdapter(io.netty.channel.ChannelInboundHandlerAdapter) HttpMessage(io.netty.handler.codec.http.HttpMessage) LoggerFactory(org.slf4j.LoggerFactory) TraceHeaders(com.nike.wingtips.TraceHeaders) Random(java.util.Random) KeyStoreException(java.security.KeyStoreException) AsyncNettyHelper.unlinkTracingAndMdcFromCurrentThread(com.nike.riposte.util.AsyncNettyHelper.unlinkTracingAndMdcFromCurrentThread) HttpObject(io.netty.handler.codec.http.HttpObject) HttpClientCodec(io.netty.handler.codec.http.HttpClientCodec) InetAddress(java.net.InetAddress) ChannelPromise(io.netty.channel.ChannelPromise) Map(java.util.Map) ThreadFactory(java.util.concurrent.ThreadFactory) SocketChannel(io.netty.channel.socket.SocketChannel) HttpObjectDecoder(io.netty.handler.codec.http.HttpObjectDecoder) HttpRequest(io.netty.handler.codec.http.HttpRequest) TrustManagerFactory(javax.net.ssl.TrustManagerFactory) DownstreamIdleChannelTimeoutException(com.nike.riposte.server.error.exception.DownstreamIdleChannelTimeoutException) ChannelHealthChecker(io.netty.channel.pool.ChannelHealthChecker) DownstreamChannelClosedUnexpectedlyException(com.nike.riposte.server.error.exception.DownstreamChannelClosedUnexpectedlyException) KeyStore(java.security.KeyStore) ChannelPipeline(io.netty.channel.ChannelPipeline) InetSocketAddress(java.net.InetSocketAddress) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) List(java.util.List) SSLException(javax.net.ssl.SSLException) AbstractChannelPoolHandler(io.netty.channel.pool.AbstractChannelPoolHandler) LogLevel(io.netty.handler.logging.LogLevel) DefaultHttpResponse(io.netty.handler.codec.http.DefaultHttpResponse) NoSuchAlgorithmException(java.security.NoSuchAlgorithmException) HttpObjectEncoder(io.netty.handler.codec.http.HttpObjectEncoder) DefaultFullHttpResponse(io.netty.handler.codec.http.DefaultFullHttpResponse) HttpResponse(io.netty.handler.codec.http.HttpResponse) ChannelPoolMap(io.netty.channel.pool.ChannelPoolMap) NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel) HttpRequestEncoder(io.netty.handler.codec.http.HttpRequestEncoder) DownstreamIdleChannelTimeoutHandler(com.nike.riposte.client.asynchttp.netty.downstreampipeline.DownstreamIdleChannelTimeoutHandler) ChannelOption(io.netty.channel.ChannelOption) LoggingHandler(io.netty.handler.logging.LoggingHandler) Tracer(com.nike.wingtips.Tracer) CompletableFuture(java.util.concurrent.CompletableFuture) Errors(io.netty.channel.unix.Errors) Deque(java.util.Deque) LastHttpContent(io.netty.handler.codec.http.LastHttpContent) EpollSocketChannel(io.netty.channel.epoll.EpollSocketChannel) AsyncNettyHelper.linkTracingAndMdcToCurrentThread(com.nike.riposte.util.AsyncNettyHelper.linkTracingAndMdcToCurrentThread) ChannelHandlerContext(io.netty.channel.ChannelHandlerContext) InsecureTrustManagerFactory(io.netty.handler.ssl.util.InsecureTrustManagerFactory) BiConsumer(java.util.function.BiConsumer) EpollEventLoopGroup(io.netty.channel.epoll.EpollEventLoopGroup) HttpContent(io.netty.handler.codec.http.HttpContent) Attribute(io.netty.util.Attribute) Logger(org.slf4j.Logger) EventLoopGroup(io.netty.channel.EventLoopGroup) CombinedChannelDuplexHandler(io.netty.channel.CombinedChannelDuplexHandler) SslContext(io.netty.handler.ssl.SslContext) Promise(io.netty.util.concurrent.Promise) 
HostnameResolutionException(com.nike.riposte.server.error.exception.HostnameResolutionException) Field(java.lang.reflect.Field) UnknownHostException(java.net.UnknownHostException) ChannelFuture(io.netty.channel.ChannelFuture) Epoll(io.netty.channel.epoll.Epoll) Consumer(java.util.function.Consumer) Channel(io.netty.channel.Channel) SimpleChannelPool(io.netty.channel.pool.SimpleChannelPool) Bootstrap(io.netty.bootstrap.Bootstrap) FullHttpResponse(io.netty.handler.codec.http.FullHttpResponse) WrapperException(com.nike.backstopper.exception.WrapperException) MDC(org.slf4j.MDC) SimpleChannelInboundHandler(io.netty.channel.SimpleChannelInboundHandler) NativeIoExceptionWrapper(com.nike.riposte.server.error.exception.NativeIoExceptionWrapper) AsyncNettyHelper.runnableWithTracingAndMdc(com.nike.riposte.util.AsyncNettyHelper.runnableWithTracingAndMdc) ChannelPool(io.netty.channel.pool.ChannelPool) SslContextBuilder(io.netty.handler.ssl.SslContextBuilder) ChannelHandler(io.netty.channel.ChannelHandler) Pair(com.nike.internal.util.Pair) AbstractChannelPoolMap(io.netty.channel.pool.AbstractChannelPoolMap) Future(io.netty.util.concurrent.Future)

Example 2 with DownstreamIdleChannelTimeoutHandler

Use of com.nike.riposte.client.asynchttp.netty.downstreampipeline.DownstreamIdleChannelTimeoutHandler in project riposte by Nike-Inc.

In the class StreamingAsyncHttpClient, the method getPoolMap:

protected ChannelPoolMap<InetSocketAddress, SimpleChannelPool> getPoolMap() {
    ChannelPoolMap<InetSocketAddress, SimpleChannelPool> result = poolMap;
    if (poolMap == null) {
        /*
                This method gets called for every downstream call, so we don't want to synchronize the whole method. But
                it's easy for multiple threads to get here at the same time when the server starts up, so we need *some*
                kind of protection around the creation of poolMap, hence the elaborate (but correct) double-checked
                locking. Since poolMap is volatile this works, and the local variable "result" helps with speed during
                the normal case where poolMap has already been initialized.
                See https://en.wikipedia.org/wiki/Double-checked_locking
             */
        synchronized (this) {
            result = poolMap;
            if (result == null) {
                EventLoopGroup eventLoopGroup;
                Class<? extends SocketChannel> channelClass;
                if (Epoll.isAvailable()) {
                    logger.info("Creating channel pool. The epoll native transport is available. Using epoll instead of " + "NIO. proxy_router_using_native_epoll_transport=true");
                    eventLoopGroup = new EpollEventLoopGroup(0, createProxyRouterThreadFactory());
                    channelClass = EpollSocketChannel.class;
                } else {
                    logger.info("Creating channel pool. The epoll native transport is NOT available or you are not running " + "on a compatible OS/architecture. Using NIO. " + "proxy_router_using_native_epoll_transport=false");
                    eventLoopGroup = new NioEventLoopGroup(0, createProxyRouterThreadFactory());
                    channelClass = NioSocketChannel.class;
                }
                result = new AbstractChannelPoolMap<InetSocketAddress, SimpleChannelPool>() {

                    @Override
                    protected SimpleChannelPool newPool(InetSocketAddress key) {
                        return new SimpleChannelPool(generateClientBootstrap(eventLoopGroup, channelClass).remoteAddress(key), new ChannelPoolHandlerImpl(), CHANNEL_HEALTH_CHECK_INSTANCE) {

                            @Override
                            public Future<Void> release(Channel channel, Promise<Void> promise) {
                                markChannelBrokenAndLogInfoIfHttpClientCodecStateIsNotZero(channel, "Releasing channel back to pool");
                                return super.release(channel, promise);
                            }

                            @Override
                            protected Channel pollChannel() {
                                Channel channel = super.pollChannel();
                                if (channel != null) {
                                    markChannelBrokenAndLogInfoIfHttpClientCodecStateIsNotZero(channel, "Polling channel to be reused before healthcheck");
                                    if (idleChannelTimeoutMillis > 0) {
                                        /*
                                             We have a channel that is about to be re-used, so disable the idle channel
                                             timeout detector if it exists. By disabling it here we make sure that it is
                                             effectively "gone" before the healthcheck happens, preventing race
                                             conditions. Note that we can't call pipeline.remove() here because we may
                                             not be in the pipeline's event loop, so calling pipeline.remove() could
                                             lead to thread deadlock, but we can't call channel.eventLoop().execute()
                                             because we need it disabled *now* before the healthcheck happens. The
                                             pipeline preparation phase will remove it safely soon, and in the meantime
                                             it will be disabled.
                                             */
                                        ChannelPipeline pipeline = channel.pipeline();
                                        ChannelHandler idleHandler = pipeline.get(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME);
                                        if (idleHandler != null) {
                                            ((DownstreamIdleChannelTimeoutHandler) idleHandler).disableTimeoutHandling();
                                        }
                                    }
                                }
                                return channel;
                            }

                            @Override
                            protected boolean offerChannel(Channel channel) {
                                if (idleChannelTimeoutMillis > 0) {
                                    // Add an idle channel timeout detector. This will be removed before the
                                    //      channel's reacquisition healthcheck runs (in pollChannel()), so we won't
                                    //      have a race condition where this channel is handed over for use but gets
                                    //      squashed right before it's about to be used.
                                    // NOTE: Due to the semantics of pool.release() we're guaranteed to be in the
                                    //      channel's event loop, so there's no chance of a thread deadlock when
                                    //      messing with the pipeline.
                                    channel.pipeline().addFirst(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME, new DownstreamIdleChannelTimeoutHandler(idleChannelTimeoutMillis, () -> true, false, "StreamingAsyncHttpClientChannel-idle", null, null));
                                }
                                return super.offerChannel(channel);
                            }
                        };
                    }
                };
                poolMap = result;
            }
        }
    }
    return result;
}
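
A brief usage sketch (not part of the riposte source) showing how a pool map like the one returned above is typically consumed via Netty's channel-pool API. The host and port are placeholders, and FutureListener (io.netty.util.concurrent.FutureListener) would need to be imported in addition to the classes listed below:

ChannelPoolMap<InetSocketAddress, SimpleChannelPool> pools = getPoolMap();
SimpleChannelPool pool = pools.get(new InetSocketAddress("downstream.example.com", 8080));
pool.acquire().addListener((FutureListener<Channel>) future -> {
    if (future.isSuccess()) {
        Channel ch = future.getNow();
        // ... use ch for the downstream call ...
        // When the call completes, returning the channel triggers offerChannel(), which
        //      re-adds the idle channel timeout handler as shown above.
        pool.release(ch);
    } else {
        // Acquisition failed (e.g. the connect attempt failed) - handle/log the cause.
        logger.warn("Failed to acquire downstream channel", future.cause());
    }
});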
Also used : DownstreamIdleChannelTimeoutHandler(com.nike.riposte.client.asynchttp.netty.downstreampipeline.DownstreamIdleChannelTimeoutHandler) InetSocketAddress(java.net.InetSocketAddress) SocketChannel(io.netty.channel.socket.SocketChannel) NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel) EpollSocketChannel(io.netty.channel.epoll.EpollSocketChannel) Channel(io.netty.channel.Channel) ChannelHandler(io.netty.channel.ChannelHandler) ChannelPipeline(io.netty.channel.ChannelPipeline) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) EpollEventLoopGroup(io.netty.channel.epoll.EpollEventLoopGroup) EventLoopGroup(io.netty.channel.EventLoopGroup) CompletableFuture(java.util.concurrent.CompletableFuture) ChannelFuture(io.netty.channel.ChannelFuture) Future(io.netty.util.concurrent.Future) SimpleChannelPool(io.netty.channel.pool.SimpleChannelPool)

Aggregations

DownstreamIdleChannelTimeoutHandler (com.nike.riposte.client.asynchttp.netty.downstreampipeline.DownstreamIdleChannelTimeoutHandler)2 Channel (io.netty.channel.Channel)2 ChannelFuture (io.netty.channel.ChannelFuture)2 ChannelHandler (io.netty.channel.ChannelHandler)2 ChannelPipeline (io.netty.channel.ChannelPipeline)2 EventLoopGroup (io.netty.channel.EventLoopGroup)2 EpollEventLoopGroup (io.netty.channel.epoll.EpollEventLoopGroup)2 EpollSocketChannel (io.netty.channel.epoll.EpollSocketChannel)2 NioEventLoopGroup (io.netty.channel.nio.NioEventLoopGroup)2 SimpleChannelPool (io.netty.channel.pool.SimpleChannelPool)2 SocketChannel (io.netty.channel.socket.SocketChannel)2 NioSocketChannel (io.netty.channel.socket.nio.NioSocketChannel)2 WrapperException (com.nike.backstopper.exception.WrapperException)1 Pair (com.nike.internal.util.Pair)1 DownstreamChannelClosedUnexpectedlyException (com.nike.riposte.server.error.exception.DownstreamChannelClosedUnexpectedlyException)1 DownstreamIdleChannelTimeoutException (com.nike.riposte.server.error.exception.DownstreamIdleChannelTimeoutException)1 HostnameResolutionException (com.nike.riposte.server.error.exception.HostnameResolutionException)1 NativeIoExceptionWrapper (com.nike.riposte.server.error.exception.NativeIoExceptionWrapper)1 AsyncNettyHelper.linkTracingAndMdcToCurrentThread (com.nike.riposte.util.AsyncNettyHelper.linkTracingAndMdcToCurrentThread)1 AsyncNettyHelper.runnableWithTracingAndMdc (com.nike.riposte.util.AsyncNettyHelper.runnableWithTracingAndMdc)1
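
For additional context, here is a rough, hypothetical sketch of an idle-timeout handler in the spirit of DownstreamIdleChannelTimeoutHandler, inferred only from the constructor arguments and the disableTimeoutHandling() call seen in the examples above; the real riposte class takes additional tracing/MDC arguments and differs in its details:

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.util.concurrent.ScheduledFuture;

import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

// Hypothetical sketch only - not the actual riposte implementation.
public class IdleChannelTimeoutHandlerSketch extends ChannelInboundHandlerAdapter {

    private final long timeoutMillis;
    private final Supplier<Boolean> shouldFireTimeout;
    private volatile boolean disabled = false;
    private ScheduledFuture<?> timeoutTask;

    public IdleChannelTimeoutHandlerSketch(long timeoutMillis, Supplier<Boolean> shouldFireTimeout) {
        this.timeoutMillis = timeoutMillis;
        this.shouldFireTimeout = shouldFireTimeout;
    }

    // Lets callers (e.g. pollChannel() above) turn the handler off immediately, even when they
    //      aren't in the channel's eventLoop and therefore can't safely remove it from the pipeline.
    public void disableTimeoutHandling() {
        disabled = true;
        if (timeoutTask != null) {
            timeoutTask.cancel(false);
        }
    }

    @Override
    public void handlerAdded(ChannelHandlerContext ctx) {
        // Schedule the timeout on the channel's event loop when the handler joins the pipeline.
        timeoutTask = ctx.executor().schedule(() -> {
            if (!disabled && shouldFireTimeout.get()) {
                // Surface the timeout as a pipeline exception so an error handler can react, then close.
                ctx.fireExceptionCaught(new RuntimeException("Channel idle for " + timeoutMillis + " ms"));
                ctx.channel().close();
            }
        }, timeoutMillis, TimeUnit.MILLISECONDS);
    }

    @Override
    public void handlerRemoved(ChannelHandlerContext ctx) {
        // Cancel the pending timeout when the handler is removed from the pipeline.
        if (timeoutTask != null) {
            timeoutTask.cancel(false);
        }
    }
}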