Use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelFutureListener in project netty by netty.
The class OioEventLoopTest, method testTooManyServerChannels.
@Test
public void testTooManyServerChannels() throws Exception {
    EventLoopGroup g = new OioEventLoopGroup(1);
    ServerBootstrap b = new ServerBootstrap();
    b.channel(OioServerSocketChannel.class);
    b.group(g);
    b.childHandler(new ChannelInboundHandlerAdapter());
    ChannelFuture f1 = b.bind(0);
    f1.sync();
    ChannelFuture f2 = b.bind(0);
    f2.await();
    assertThat(f2.cause(), is(instanceOf(ChannelException.class)));
    assertThat(f2.cause().getMessage().toLowerCase(), containsString("too many channels"));
    final CountDownLatch notified = new CountDownLatch(1);
    f2.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            notified.countDown();
        }
    });
    notified.await();
    g.shutdownGracefully();
}
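Distilled from the test above: since ChannelFutureListener has a single method, it can also be written as a lambda. The following is a minimal, self-contained sketch of the same notify-on-completion pattern; it assumes the unshaded io.netty packages and swaps in the NIO transport, since the OIO transport is deprecated in recent Netty releases. The class name BindListenerExample is illustrative.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import java.util.concurrent.CountDownLatch;

public final class BindListenerExample {
    public static void main(String[] args) throws Exception {
        EventLoopGroup group = new NioEventLoopGroup(1);
        try {
            ServerBootstrap b = new ServerBootstrap()
                    .group(group)
                    .channel(NioServerSocketChannel.class)
                    .childHandler(new ChannelInboundHandlerAdapter());
            CountDownLatch done = new CountDownLatch(1);
            ChannelFuture f = b.bind(0);
            // A lambda works because ChannelFutureListener has a single method.
            f.addListener((ChannelFutureListener) future -> {
                System.out.println("bind " + (future.isSuccess() ? "succeeded" : "failed: " + future.cause()));
                done.countDown();
            });
            done.await();
        } finally {
            group.shutdownGracefully();
        }
    }
}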
Use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelFutureListener in project zuul by Netflix.
The class HAProxyMessageChannelHandler, method channelRead.
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    if (msg instanceof HAProxyMessage) {
        HAProxyMessage hapm = (HAProxyMessage) msg;
        Channel channel = ctx.channel();
        channel.attr(ATTR_HAPROXY_MESSAGE).set(hapm);
        ctx.channel().closeFuture().addListener((ChannelFutureListener) future -> hapm.release());
        channel.attr(ATTR_HAPROXY_VERSION).set(hapm.protocolVersion());
        // Get the real host and port that the client connected to the ELB with.
        String destinationAddress = hapm.destinationAddress();
        if (destinationAddress != null) {
            channel.attr(SourceAddressChannelHandler.ATTR_LOCAL_ADDRESS).set(destinationAddress);
            SocketAddress addr;
            out: {
                switch (hapm.proxiedProtocol()) {
                    case UNKNOWN:
                        throw new IllegalArgumentException("unknown proxy protocol " + destinationAddress);
                    case TCP4:
                    case TCP6:
                        InetSocketAddress inetAddr = new InetSocketAddress(
                                InetAddresses.forString(destinationAddress), hapm.destinationPort());
                        addr = inetAddr;
                        // Setting PPv2 explicitly because SourceAddressChannelHandler.ATTR_LOCAL_ADDR
                        // could be PPv2 or not.
                        channel.attr(SourceAddressChannelHandler.ATTR_PROXY_PROTOCOL_DESTINATION_ADDRESS).set(inetAddr);
                        Attrs attrs = ctx.channel().attr(Server.CONN_DIMENSIONS).get();
                        if (inetAddr.getAddress() instanceof Inet4Address) {
                            HAPM_DEST_IP_VERSION.put(attrs, "v4");
                        } else if (inetAddr.getAddress() instanceof Inet6Address) {
                            HAPM_DEST_IP_VERSION.put(attrs, "v6");
                        } else {
                            HAPM_DEST_IP_VERSION.put(attrs, "unknown");
                        }
                        HAPM_DEST_PORT.put(attrs, hapm.destinationPort());
                        break out;
                    // TODO: implement
                    case UNIX_STREAM:
                    case UDP4:
                    case UDP6:
                    case UNIX_DGRAM:
                        throw new IllegalArgumentException("unsupported proxy protocol " + destinationAddress);
                }
                throw new AssertionError(hapm.proxiedProtocol());
            }
            channel.attr(SourceAddressChannelHandler.ATTR_LOCAL_ADDR).set(addr);
        }
        // Get the real client IP from the ProxyProtocol message sent by the ELB, and overwrite the
        // SourceAddress channel attribute.
        String sourceAddress = hapm.sourceAddress();
        if (sourceAddress != null) {
            channel.attr(SourceAddressChannelHandler.ATTR_SOURCE_ADDRESS).set(sourceAddress);
            SocketAddress addr;
            out: {
                switch (hapm.proxiedProtocol()) {
                    case UNKNOWN:
                        throw new IllegalArgumentException("unknown proxy protocol " + sourceAddress);
                    case TCP4:
                    case TCP6:
                        InetSocketAddress inetAddr;
                        addr = inetAddr = new InetSocketAddress(
                                InetAddresses.forString(sourceAddress), hapm.sourcePort());
                        Attrs attrs = ctx.channel().attr(Server.CONN_DIMENSIONS).get();
                        if (inetAddr.getAddress() instanceof Inet4Address) {
                            HAPM_SRC_IP_VERSION.put(attrs, "v4");
                        } else if (inetAddr.getAddress() instanceof Inet6Address) {
                            HAPM_SRC_IP_VERSION.put(attrs, "v6");
                        } else {
                            HAPM_SRC_IP_VERSION.put(attrs, "unknown");
                        }
                        break out;
                    // TODO: implement
                    case UNIX_STREAM:
                    case UDP4:
                    case UDP6:
                    case UNIX_DGRAM:
                        throw new IllegalArgumentException("unsupported proxy protocol " + sourceAddress);
                }
                throw new AssertionError(hapm.proxiedProtocol());
            }
            channel.attr(SourceAddressChannelHandler.ATTR_REMOTE_ADDR).set(addr);
        }
        // TODO - fire an additional event to notify interested parties that we now know the IP?
        // Remove ourselves (this handler) from the channel now, as there is no more work to do.
        ctx.pipeline().remove(this);
        // Do not continue propagating the message.
        return;
    }
    // Non-HAProxy messages are passed along the pipeline untouched.
    super.channelRead(ctx, msg);
}
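The closeFuture() listener above, which releases the HAProxyMessage once the connection goes away, is a generally useful idiom for tying a reference-counted object's lifetime to its channel. A minimal sketch with illustrative names (ReleaseOnClose, retainUntilClosed):

import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;
import io.netty.util.ReferenceCountUtil;

final class ReleaseOnClose {
    // Keeps a reference-counted message alive for the lifetime of the channel
    // and releases it exactly once, when the channel's close future completes.
    static void retainUntilClosed(Channel channel, ByteBuf msg) {
        channel.closeFuture().addListener((ChannelFutureListener) future ->
                ReferenceCountUtil.release(msg));
    }
}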
Use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelFutureListener in project grpc-java by grpc.
The class NettyClientTransport, method start.
@SuppressWarnings("unchecked")
@Override
public Runnable start(Listener transportListener) {
    lifecycleManager = new ClientTransportLifecycleManager(
            Preconditions.checkNotNull(transportListener, "listener"));
    EventLoop eventLoop = group.next();
    if (keepAliveTimeNanos != KEEPALIVE_TIME_NANOS_DISABLED) {
        keepAliveManager = new KeepAliveManager(new ClientKeepAlivePinger(this), eventLoop,
                keepAliveTimeNanos, keepAliveTimeoutNanos, keepAliveWithoutCalls);
    }
    handler = NettyClientHandler.newHandler(lifecycleManager, keepAliveManager, autoFlowControl,
            flowControlWindow, maxHeaderListSize, GrpcUtil.STOPWATCH_SUPPLIER, tooManyPingsRunnable,
            transportTracer, eagAttributes, authorityString, channelLogger);
    ChannelHandler negotiationHandler = negotiator.newHandler(handler);
    Bootstrap b = new Bootstrap();
    b.option(ALLOCATOR, Utils.getByteBufAllocator(false));
    b.group(eventLoop);
    b.channelFactory(channelFactory);
    // For a non-socket-based channel, the option will be ignored.
    b.option(SO_KEEPALIVE, true);
    // For a non-epoll-based channel, the option will be ignored.
    if (keepAliveTimeNanos != KEEPALIVE_TIME_NANOS_DISABLED) {
        ChannelOption<Integer> tcpUserTimeout = Utils.maybeGetTcpUserTimeoutOption();
        if (tcpUserTimeout != null) {
            b.option(tcpUserTimeout, (int) TimeUnit.NANOSECONDS.toMillis(keepAliveTimeoutNanos));
        }
    }
    for (Map.Entry<ChannelOption<?>, ?> entry : channelOptions.entrySet()) {
        // Every entry in the map is obtained from
        // NettyChannelBuilder#withOption(ChannelOption<T> option, T value)
        // so it is safe to pass the key-value pair to b.option().
        b.option((ChannelOption<Object>) entry.getKey(), entry.getValue());
    }
    ChannelHandler bufferingHandler = new WriteBufferingAndExceptionHandler(negotiationHandler);
    /**
     * We don't use a ChannelInitializer in the client bootstrap because its "initChannel" method
     * is executed in the event loop and we need this handler to be in the pipeline immediately so
     * that it may begin buffering writes.
     */
    b.handler(bufferingHandler);
    ChannelFuture regFuture = b.register();
    if (regFuture.isDone() && !regFuture.isSuccess()) {
        channel = null;
        // Initialization has failed badly. All new streams should be made to fail.
        Throwable t = regFuture.cause();
        if (t == null) {
            t = new IllegalStateException("Channel is null, but future doesn't have a cause");
        }
        statusExplainingWhyTheChannelIsNull = Utils.statusFromThrowable(t);
        // Use a Runnable since lifecycleManager calls transportListener.
        return new Runnable() {
            @Override
            public void run() {
                // NOTICE: we are not calling lifecycleManager from the event loop. But there isn't
                // really an event loop in this case, so nothing should be accessing the
                // lifecycleManager. We could use GlobalEventExecutor (which is what regFuture would
                // use for notifying listeners in this case), but avoiding on-demand thread creation
                // in an error case seems a good idea and is probably clearer threading.
                lifecycleManager.notifyTerminated(statusExplainingWhyTheChannelIsNull);
            }
        };
    }
    channel = regFuture.channel();
    // Start the write queue as soon as the channel is constructed.
    handler.startWriteQueue(channel);
    // This write will have no effect, yet it will only complete once the negotiationHandler
    // flushes any pending writes. We need it to be staged *before* the `connect` so that
    // the channel can't have been closed yet, removing all handlers. This write will sit in the
    // AbstractBufferingHandler's buffer, and will either be flushed on a successful connection,
    // or failed if the connection fails.
    channel.writeAndFlush(NettyClientHandler.NOOP_MESSAGE).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                // Need to notify of this failure, because NettyClientHandler may not have been
                // added to the pipeline before the error occurred.
                lifecycleManager.notifyTerminated(Utils.statusFromThrowable(future.cause()));
            }
        }
    });
    // Start the connection operation to the server.
    SocketAddress localAddress = localSocketPicker.createSocketAddress(remoteAddress, eagAttributes);
    if (localAddress != null) {
        channel.connect(remoteAddress, localAddress);
    } else {
        channel.connect(remoteAddress);
    }
    if (keepAliveManager != null) {
        keepAliveManager.onTransportStarted();
    }
    return null;
}
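The NOOP_MESSAGE write above illustrates a recurring shape: attach a listener that reacts only to failure, because success is observed elsewhere. A generic sketch of that shape under illustrative names (WriteFailureNotifier, writeOrFail, onFailure):

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import java.util.function.Consumer;

final class WriteFailureNotifier {
    // Writes a message and invokes the callback only on failure; a successful
    // write is a no-op here because its effects are observed elsewhere.
    static ChannelFuture writeOrFail(Channel channel, Object msg, Consumer<Throwable> onFailure) {
        return channel.writeAndFlush(msg).addListener((ChannelFutureListener) future -> {
            if (!future.isSuccess()) {
                onFailure.accept(future.cause());
            }
        });
    }
}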
Use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelFutureListener in project grpc-java by grpc.
The class NettyClientTransport, method ping.
@Override
public void ping(final PingCallback callback, final Executor executor) {
    if (channel == null) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                callback.onFailure(statusExplainingWhyTheChannelIsNull.asException());
            }
        });
        return;
    }
    // The promise and listener always succeed in NettyClientHandler. So this listener handles the
    // error case, when the channel is closed and the NettyClientHandler is no longer in the
    // pipeline.
    ChannelFutureListener failureListener = new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                Status s = statusFromFailedFuture(future);
                Http2Ping.notifyFailed(callback, executor, s.asException());
            }
        }
    };
    // Write the command requesting the ping.
    handler.getWriteQueue().enqueue(new SendPingCommand(callback, executor), true)
            .addListener(failureListener);
}
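The failure-only listener above is written by hand because it must translate the cause into a Status; for simpler reactions to a failed write, Netty also ships predefined instances on the ChannelFutureListener interface itself, so no anonymous class is needed. A brief sketch (class and method names are illustrative):

import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;

final class BuiltInListeners {
    static void closeAlways(Channel ch, ByteBuf msg) {
        // Close the channel once the write completes, success or failure.
        ch.writeAndFlush(msg).addListener(ChannelFutureListener.CLOSE);
    }

    static void closeOnFailure(Channel ch, ByteBuf msg) {
        // Close the channel only if the write failed.
        ch.writeAndFlush(msg).addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
    }

    static void fireOnFailure(Channel ch, ByteBuf msg) {
        // Propagate the failure cause to the pipeline's exceptionCaught.
        ch.writeAndFlush(msg).addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);
    }
}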
Use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelFutureListener in project grpc-java by grpc.
The class NettyClientHandler, method sendPingFrameTraced.
/**
 * Sends a PING frame. If a ping operation is already outstanding, the callback in the message is
 * registered to be called when the existing operation completes, and no new frame is sent.
 */
private void sendPingFrameTraced(ChannelHandlerContext ctx, SendPingCommand msg, ChannelPromise promise) {
    // Don't check lifecycleManager.getShutdownStatus() since we want to allow pings after shutdown
    // but before termination. After termination, messages will no longer arrive because the
    // pipeline clears all handlers on channel close.
    PingCallback callback = msg.callback();
    Executor executor = msg.executor();
    // Any outstanding operation?
    if (ping != null) {
        promise.setSuccess();
        ping.addCallback(callback, executor);
        return;
    }
    // Use a new promise to prevent calling the callback twice on write failure: here and in
    // NettyClientTransport.ping(). It may appear strange, but it will behave the same as if
    // ping != null above.
    promise.setSuccess();
    promise = ctx().newPromise();
    // Set the outstanding operation...
    long data = USER_PING_PAYLOAD;
    Stopwatch stopwatch = stopwatchFactory.get();
    stopwatch.start();
    ping = new Http2Ping(data, stopwatch);
    ping.addCallback(callback, executor);
    // ... and then write the ping.
    encoder().writePing(ctx, false, USER_PING_PAYLOAD, promise);
    ctx.flush();
    final Http2Ping finalPing = ping;
    promise.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                transportTracer.reportKeepAliveSent();
            } else {
                Throwable cause = future.cause();
                if (cause instanceof ClosedChannelException) {
                    cause = lifecycleManager.getShutdownThrowable();
                    if (cause == null) {
                        cause = Status.UNKNOWN.withDescription("Ping failed but for unknown reason.")
                                .withCause(future.cause()).asException();
                    }
                }
                finalPing.failed(cause);
                if (ping == finalPing) {
                    ping = null;
                }
            }
        }
    });
}
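The same promise-plus-listener shape can be lifted out of the handler: create a fresh promise for the outbound frame, attach the listener first, then hand the promise to the write. A minimal sketch, with an empty buffer standing in for the PING frame and illustrative names (PromiseListenerSketch, writeWithListener):

import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;

final class PromiseListenerSketch {
    static void writeWithListener(ChannelHandlerContext ctx) {
        ChannelPromise promise = ctx.newPromise();
        promise.addListener((ChannelFutureListener) future -> {
            if (future.isSuccess()) {
                // e.g. record that the frame went out (reportKeepAliveSent above)
            } else {
                // e.g. fail the pending operation with future.cause()
            }
        });
        // writeAndFlush accepts the caller-supplied promise, which completes the listener.
        ctx.writeAndFlush(Unpooled.EMPTY_BUFFER, promise);
    }
}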