Search in sources:

Example 86 with ChannelFutureListener

use of io.netty.channel.ChannelFutureListener in project netty by netty.

the class OioEventLoopTest method testTooManyServerChannels.

@Test
public void testTooManyServerChannels() throws Exception {
    EventLoopGroup g = new OioEventLoopGroup(1);
    ServerBootstrap b = new ServerBootstrap();
    b.channel(OioServerSocketChannel.class);
    b.group(g);
    b.childHandler(new ChannelInboundHandlerAdapter());
    ChannelFuture f1 = b.bind(0);
    f1.sync();
    ChannelFuture f2 = b.bind(0);
    f2.await();
    assertThat(f2.cause(), is(instanceOf(ChannelException.class)));
    assertThat(f2.cause().getMessage().toLowerCase(), containsString("too many channels"));
    final CountDownLatch notified = new CountDownLatch(1);
    f2.addListener(new ChannelFutureListener() {

        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            notified.countDown();
        }
    });
    notified.await();
    g.shutdownGracefully();
}
Also used : ChannelFuture(io.netty.channel.ChannelFuture) EventLoopGroup(io.netty.channel.EventLoopGroup) CountDownLatch(java.util.concurrent.CountDownLatch) ChannelFutureListener(io.netty.channel.ChannelFutureListener) ServerBootstrap(io.netty.bootstrap.ServerBootstrap) ChannelException(io.netty.channel.ChannelException) ChannelInboundHandlerAdapter(io.netty.channel.ChannelInboundHandlerAdapter) Test(org.junit.jupiter.api.Test)
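The pattern this test relies on (a listener added to an already-completed bind future is still notified) can be distilled into a small standalone sketch. The sketch below is illustrative only: it uses the NIO transport instead of the deprecated OIO classes from the test, and a lambda cast to ChannelFutureListener in place of the anonymous class.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public class BindListenerSketch {

    public static void main(String[] args) throws Exception {
        EventLoopGroup group = new NioEventLoopGroup(1);
        try {
            ServerBootstrap b = new ServerBootstrap()
                    .group(group)
                    .channel(NioServerSocketChannel.class)
                    .childHandler(new ChannelInboundHandlerAdapter());
            ChannelFuture bindFuture = b.bind(0);
            // A listener fires exactly once, whether it is registered before or
            // after the future completes -- the test above depends on the latter.
            bindFuture.addListener((ChannelFutureListener) future -> {
                if (future.isSuccess()) {
                    System.out.println("bound to " + future.channel().localAddress());
                } else {
                    System.err.println("bind failed: " + future.cause());
                }
            });
            bindFuture.sync().channel().close().sync();
        } finally {
            group.shutdownGracefully();
        }
    }
}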

Example 87 with ChannelFutureListener

use of io.netty.channel.ChannelFutureListener in project zuul by Netflix.

the class HAProxyMessageChannelHandler method channelRead.

@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    if (msg instanceof HAProxyMessage) {
        HAProxyMessage hapm = (HAProxyMessage) msg;
        Channel channel = ctx.channel();
        channel.attr(ATTR_HAPROXY_MESSAGE).set(hapm);
        ctx.channel().closeFuture().addListener((ChannelFutureListener) future -> hapm.release());
        channel.attr(ATTR_HAPROXY_VERSION).set(hapm.protocolVersion());
        // Get the real host and port that the client connected to ELB with.
        String destinationAddress = hapm.destinationAddress();
        if (destinationAddress != null) {
            channel.attr(SourceAddressChannelHandler.ATTR_LOCAL_ADDRESS).set(destinationAddress);
            SocketAddress addr;
            out: {
                switch(hapm.proxiedProtocol()) {
                    case UNKNOWN:
                        throw new IllegalArgumentException("unknown proxy protocl" + destinationAddress);
                    case TCP4:
                    case TCP6:
                        InetSocketAddress inetAddr = new InetSocketAddress(InetAddresses.forString(destinationAddress), hapm.destinationPort());
                        addr = inetAddr;
                        // setting PPv2 explicitly because SourceAddressChannelHandler.ATTR_LOCAL_ADDR could be PPv2 or not
                        channel.attr(SourceAddressChannelHandler.ATTR_PROXY_PROTOCOL_DESTINATION_ADDRESS).set(inetAddr);
                        Attrs attrs = ctx.channel().attr(Server.CONN_DIMENSIONS).get();
                        if (inetAddr.getAddress() instanceof Inet4Address) {
                            HAPM_DEST_IP_VERSION.put(attrs, "v4");
                        } else if (inetAddr.getAddress() instanceof Inet6Address) {
                            HAPM_DEST_IP_VERSION.put(attrs, "v6");
                        } else {
                            HAPM_DEST_IP_VERSION.put(attrs, "unknown");
                        }
                        HAPM_DEST_PORT.put(attrs, hapm.destinationPort());
                        break out;
                    // TODO: implement
                    case UNIX_STREAM:
                    case UDP4:
                    case UDP6:
                    case UNIX_DGRAM:
                        throw new IllegalArgumentException("unknown proxy protocol" + destinationAddress);
                }
                throw new AssertionError(hapm.proxiedProtocol());
            }
            channel.attr(SourceAddressChannelHandler.ATTR_LOCAL_ADDR).set(addr);
        }
        // Get the real client IP from the ProxyProtocol message sent by the ELB, and overwrite the SourceAddress
        // channel attribute.
        String sourceAddress = hapm.sourceAddress();
        if (sourceAddress != null) {
            channel.attr(SourceAddressChannelHandler.ATTR_SOURCE_ADDRESS).set(sourceAddress);
            SocketAddress addr;
            out: {
                switch(hapm.proxiedProtocol()) {
                    case UNKNOWN:
                        throw new IllegalArgumentException("unknown proxy protocl" + sourceAddress);
                    case TCP4:
                    case TCP6:
                        InetSocketAddress inetAddr;
                        addr = inetAddr = new InetSocketAddress(InetAddresses.forString(sourceAddress), hapm.sourcePort());
                        Attrs attrs = ctx.channel().attr(Server.CONN_DIMENSIONS).get();
                        if (inetAddr.getAddress() instanceof Inet4Address) {
                            HAPM_SRC_IP_VERSION.put(attrs, "v4");
                        } else if (inetAddr.getAddress() instanceof Inet6Address) {
                            HAPM_SRC_IP_VERSION.put(attrs, "v6");
                        } else {
                            HAPM_SRC_IP_VERSION.put(attrs, "unknown");
                        }
                        break out;
                    // TODO: implement
                    case UNIX_STREAM:
                    case UDP4:
                    case UDP6:
                    case UNIX_DGRAM:
                        throw new IllegalArgumentException("unknown proxy protocol" + sourceAddress);
                }
                throw new AssertionError(hapm.proxiedProtocol());
            }
            channel.attr(SourceAddressChannelHandler.ATTR_REMOTE_ADDR).set(addr);
        }
        // TODO - fire an additional event to notify interested parties that we now know the IP?
        // Remove ourselves (this handler) from the channel now, as no more work to do.
        ctx.pipeline().remove(this);
        // Do not continue propagating the message.
        return;
    }
}
Also used : SourceAddressChannelHandler(com.netflix.netty.common.SourceAddressChannelHandler) AttributeKey(io.netty.util.AttributeKey) Attrs(com.netflix.zuul.Attrs) SocketAddress(java.net.SocketAddress) ChannelInboundHandlerAdapter(io.netty.channel.ChannelInboundHandlerAdapter) Inet4Address(java.net.Inet4Address) InetSocketAddress(java.net.InetSocketAddress) Channel(io.netty.channel.Channel) ChannelHandlerContext(io.netty.channel.ChannelHandlerContext) HAProxyMessage(io.netty.handler.codec.haproxy.HAProxyMessage) Inet6Address(java.net.Inet6Address) HAProxyProtocolVersion(io.netty.handler.codec.haproxy.HAProxyProtocolVersion) ChannelFutureListener(io.netty.channel.ChannelFutureListener) VisibleForTesting(com.google.common.annotations.VisibleForTesting) InetAddresses(com.google.common.net.InetAddresses) Server(com.netflix.zuul.netty.server.Server)
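Stripped of the proxy-protocol specifics, the handler above demonstrates a reusable idiom: park a reference-counted message in a channel attribute and let a listener on closeFuture() release it when the connection ends. A minimal, hypothetical version of that idiom (the handler name and attribute key below are illustrative, not part of Zuul) might look like this:

import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.util.AttributeKey;
import io.netty.util.ReferenceCountUtil;

public class ReleaseOnCloseHandler extends ChannelInboundHandlerAdapter {

    private static final AttributeKey<Object> STASHED_MESSAGE =
            AttributeKey.valueOf("stashedMessage");

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        // Keep the message around for later handlers to read via the attribute.
        ctx.channel().attr(STASHED_MESSAGE).set(msg);
        // Release it exactly once, no matter how the connection terminates.
        ctx.channel().closeFuture()
                .addListener((ChannelFutureListener) future -> ReferenceCountUtil.release(msg));
        // Like the Zuul handler, this one consumes the message and removes itself.
        ctx.pipeline().remove(this);
    }
}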

Example 88 with ChannelFutureListener

use of io.netty.channel.ChannelFutureListener in project grpc-java by grpc.

the class NettyClientTransport method start.

@SuppressWarnings("unchecked")
@Override
public Runnable start(Listener transportListener) {
    lifecycleManager = new ClientTransportLifecycleManager(Preconditions.checkNotNull(transportListener, "listener"));
    EventLoop eventLoop = group.next();
    if (keepAliveTimeNanos != KEEPALIVE_TIME_NANOS_DISABLED) {
        keepAliveManager = new KeepAliveManager(new ClientKeepAlivePinger(this), eventLoop, keepAliveTimeNanos, keepAliveTimeoutNanos, keepAliveWithoutCalls);
    }
    handler = NettyClientHandler.newHandler(lifecycleManager, keepAliveManager, autoFlowControl, flowControlWindow, maxHeaderListSize, GrpcUtil.STOPWATCH_SUPPLIER, tooManyPingsRunnable, transportTracer, eagAttributes, authorityString, channelLogger);
    ChannelHandler negotiationHandler = negotiator.newHandler(handler);
    Bootstrap b = new Bootstrap();
    b.option(ALLOCATOR, Utils.getByteBufAllocator(false));
    b.group(eventLoop);
    b.channelFactory(channelFactory);
    // For non-socket based channel, the option will be ignored.
    b.option(SO_KEEPALIVE, true);
    // For non-epoll based channel, the option will be ignored.
    if (keepAliveTimeNanos != KEEPALIVE_TIME_NANOS_DISABLED) {
        ChannelOption<Integer> tcpUserTimeout = Utils.maybeGetTcpUserTimeoutOption();
        if (tcpUserTimeout != null) {
            b.option(tcpUserTimeout, (int) TimeUnit.NANOSECONDS.toMillis(keepAliveTimeoutNanos));
        }
    }
    for (Map.Entry<ChannelOption<?>, ?> entry : channelOptions.entrySet()) {
        // Every entry in the map is obtained from
        // NettyChannelBuilder#withOption(ChannelOption<T> option, T value)
        // so it is safe to pass the key-value pair to b.option().
        b.option((ChannelOption<Object>) entry.getKey(), entry.getValue());
    }
    ChannelHandler bufferingHandler = new WriteBufferingAndExceptionHandler(negotiationHandler);
    /**
     * We don't use a ChannelInitializer in the client bootstrap because its "initChannel" method
     * is executed in the event loop and we need this handler to be in the pipeline immediately so
     * that it may begin buffering writes.
     */
    b.handler(bufferingHandler);
    ChannelFuture regFuture = b.register();
    if (regFuture.isDone() && !regFuture.isSuccess()) {
        channel = null;
        // Initialization has failed badly. All new streams should be made to fail.
        Throwable t = regFuture.cause();
        if (t == null) {
            t = new IllegalStateException("Channel is null, but future doesn't have a cause");
        }
        statusExplainingWhyTheChannelIsNull = Utils.statusFromThrowable(t);
        // Use a Runnable since lifecycleManager calls transportListener
        return new Runnable() {

            @Override
            public void run() {
                // NOTICE: we are not calling lifecycleManager from the event loop. But there isn't really
                // an event loop in this case, so nothing should be accessing the lifecycleManager. We
                // could use GlobalEventExecutor (which is what regFuture would use for notifying
                // listeners in this case), but avoiding on-demand thread creation in an error case seems
                // a good idea and is probably clearer threading.
                lifecycleManager.notifyTerminated(statusExplainingWhyTheChannelIsNull);
            }
        };
    }
    channel = regFuture.channel();
    // Start the write queue as soon as the channel is constructed
    handler.startWriteQueue(channel);
    // This write will have no effect, yet it will only complete once the negotiationHandler
    // flushes any pending writes. We need it to be staged *before* the `connect` so that
    // the channel can't have been closed yet, removing all handlers. This write will sit in the
    // AbstractBufferingHandler's buffer, and will either be flushed on a successful connection,
    // or failed if the connection fails.
    channel.writeAndFlush(NettyClientHandler.NOOP_MESSAGE).addListener(new ChannelFutureListener() {

        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                // Need to notify of this failure, because NettyClientHandler may not have been added to
                // the pipeline before the error occurred.
                lifecycleManager.notifyTerminated(Utils.statusFromThrowable(future.cause()));
            }
        }
    });
    // Start the connection operation to the server.
    SocketAddress localAddress = localSocketPicker.createSocketAddress(remoteAddress, eagAttributes);
    if (localAddress != null) {
        channel.connect(remoteAddress, localAddress);
    } else {
        channel.connect(remoteAddress);
    }
    if (keepAliveManager != null) {
        keepAliveManager.onTransportStarted();
    }
    return null;
}
Also used : ChannelFuture(io.netty.channel.ChannelFuture) ChannelOption(io.netty.channel.ChannelOption) ChannelHandler(io.netty.channel.ChannelHandler) ChannelFutureListener(io.netty.channel.ChannelFutureListener) Http2ChannelClosedException(io.netty.handler.codec.http2.StreamBufferingEncoder.Http2ChannelClosedException) ClosedChannelException(java.nio.channels.ClosedChannelException) EventLoop(io.netty.channel.EventLoop) ClientKeepAlivePinger(io.grpc.internal.KeepAliveManager.ClientKeepAlivePinger) Bootstrap(io.netty.bootstrap.Bootstrap) KeepAliveManager(io.grpc.internal.KeepAliveManager) SocketAddress(java.net.SocketAddress) Map(java.util.Map)
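The NOOP write above illustrates another common idiom: attach a listener to writeAndFlush() and react only when the write fails. A generic sketch of that idiom, independent of the gRPC lifecycle machinery (the class and method names are illustrative), could be:

import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;

public final class WriteFailureNotifier {

    private WriteFailureNotifier() {
    }

    // Write a message and surface a failure to the pipeline; success needs no work.
    public static void writeOrNotify(Channel channel, Object message) {
        channel.writeAndFlush(message).addListener((ChannelFutureListener) future -> {
            if (!future.isSuccess()) {
                future.channel().pipeline().fireExceptionCaught(future.cause());
            }
        });
    }
}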

Example 89 with ChannelFutureListener

use of io.netty.channel.ChannelFutureListener in project grpc-java by grpc.

the class NettyClientTransport method ping.

@Override
public void ping(final PingCallback callback, final Executor executor) {
    if (channel == null) {
        executor.execute(new Runnable() {

            @Override
            public void run() {
                callback.onFailure(statusExplainingWhyTheChannelIsNull.asException());
            }
        });
        return;
    }
    // The promise and listener always succeed in NettyClientHandler. So this listener handles the
    // error case, when the channel is closed and the NettyClientHandler is no longer in the pipeline.
    ChannelFutureListener failureListener = new ChannelFutureListener() {

        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                Status s = statusFromFailedFuture(future);
                Http2Ping.notifyFailed(callback, executor, s.asException());
            }
        }
    };
    // Write the command requesting the ping
    handler.getWriteQueue().enqueue(new SendPingCommand(callback, executor), true).addListener(failureListener);
}
Also used : ChannelFuture(io.netty.channel.ChannelFuture) Status(io.grpc.Status) ChannelFutureListener(io.netty.channel.ChannelFutureListener)
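For failure-only listeners like the one above, Netty 4 also ships predefined constants on ChannelFutureListener itself. They cannot replace the gRPC-specific callback here, which has to translate the cause into a Status, but they cover the simple cases; the wrapper class below exists only to make the snippet compile.

import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;

public final class PredefinedListeners {

    private PredefinedListeners() {
    }

    public static void examples(Channel channel, Object msg) {
        // Close the channel when the write completes, success or failure.
        channel.writeAndFlush(msg).addListener(ChannelFutureListener.CLOSE);
        // Close the channel only if the write failed.
        channel.writeAndFlush(msg).addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
        // Re-fire the failure cause through the pipeline's exceptionCaught chain.
        channel.writeAndFlush(msg).addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);
    }
}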

Example 90 with ChannelFutureListener

use of io.netty.channel.ChannelFutureListener in project grpc-java by grpc.

the class NettyClientHandler method sendPingFrameTraced.

/**
 * Sends a PING frame. If a ping operation is already outstanding, the callback in the message is
 * registered to be called when the existing operation completes, and no new frame is sent.
 */
private void sendPingFrameTraced(ChannelHandlerContext ctx, SendPingCommand msg, ChannelPromise promise) {
    // Don't check lifecycleManager.getShutdownStatus() since we want to allow pings after shutdown
    // but before termination. After termination, messages will no longer arrive because the
    // pipeline clears all handlers on channel close.
    PingCallback callback = msg.callback();
    Executor executor = msg.executor();
    // any outstanding operation
    if (ping != null) {
        promise.setSuccess();
        ping.addCallback(callback, executor);
        return;
    }
    // Use a new promise to prevent calling the callback twice on write failure: here and in
    // NettyClientTransport.ping(). It may appear strange, but it will behave the same as if
    // ping != null above.
    promise.setSuccess();
    promise = ctx().newPromise();
    // set outstanding operation
    long data = USER_PING_PAYLOAD;
    Stopwatch stopwatch = stopwatchFactory.get();
    stopwatch.start();
    ping = new Http2Ping(data, stopwatch);
    ping.addCallback(callback, executor);
    // and then write the ping
    encoder().writePing(ctx, false, USER_PING_PAYLOAD, promise);
    ctx.flush();
    final Http2Ping finalPing = ping;
    promise.addListener(new ChannelFutureListener() {

        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                transportTracer.reportKeepAliveSent();
            } else {
                Throwable cause = future.cause();
                if (cause instanceof ClosedChannelException) {
                    cause = lifecycleManager.getShutdownThrowable();
                    if (cause == null) {
                        cause = Status.UNKNOWN.withDescription("Ping failed but for unknown reason.").withCause(future.cause()).asException();
                    }
                }
                finalPing.failed(cause);
                if (ping == finalPing) {
                    ping = null;
                }
            }
        }
    });
}
Also used : ChannelFuture(io.netty.channel.ChannelFuture) ClosedChannelException(java.nio.channels.ClosedChannelException) Executor(java.util.concurrent.Executor) Http2Ping(io.grpc.internal.Http2Ping) Stopwatch(com.google.common.base.Stopwatch) PingCallback(io.grpc.internal.ClientTransport.PingCallback) ChannelFutureListener(io.netty.channel.ChannelFutureListener) Http2Exception(io.netty.handler.codec.http2.Http2Exception) StatusException(io.grpc.StatusException) ClosedChannelException(java.nio.channels.ClosedChannelException)
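The promise swap in the method above (complete the caller's promise right away, then listen on a fresh one) is what keeps the callback from being invoked twice on a write failure. A generic sketch of that move follows; the class, method name, and the Consumer callback are purely illustrative, not part of gRPC.

import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import java.util.function.Consumer;

public final class TrackedWrites {

    private TrackedWrites() {
    }

    // Acknowledge the caller's promise immediately, then track the actual write
    // on a fresh promise so the failure callback can only ever fire once.
    public static void writeTracked(ChannelHandlerContext ctx, Object msg,
                                    ChannelPromise callerPromise, Consumer<Throwable> onFailure) {
        callerPromise.setSuccess();
        ChannelPromise writePromise = ctx.newPromise();
        writePromise.addListener((ChannelFutureListener) future -> {
            if (!future.isSuccess()) {
                onFailure.accept(future.cause());
            }
        });
        ctx.writeAndFlush(msg, writePromise);
    }
}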

Aggregations

ChannelFutureListener (io.netty.channel.ChannelFutureListener)223 ChannelFuture (io.netty.channel.ChannelFuture)208 Channel (io.netty.channel.Channel)70 ChannelHandlerContext (io.netty.channel.ChannelHandlerContext)57 ByteBuf (io.netty.buffer.ByteBuf)49 Bootstrap (io.netty.bootstrap.Bootstrap)43 Test (org.junit.jupiter.api.Test)41 CountDownLatch (java.util.concurrent.CountDownLatch)36 IOException (java.io.IOException)35 ServerBootstrap (io.netty.bootstrap.ServerBootstrap)33 ChannelInboundHandlerAdapter (io.netty.channel.ChannelInboundHandlerAdapter)31 NioSocketChannel (io.netty.channel.socket.nio.NioSocketChannel)31 InetSocketAddress (java.net.InetSocketAddress)27 ClosedChannelException (java.nio.channels.ClosedChannelException)25 ChannelPromise (io.netty.channel.ChannelPromise)21 Logger (org.slf4j.Logger)21 LoggerFactory (org.slf4j.LoggerFactory)21 NioEventLoopGroup (io.netty.channel.nio.NioEventLoopGroup)20 EventLoopGroup (io.netty.channel.EventLoopGroup)18 List (java.util.List)17