Use of io.netty.channel.group.DefaultChannelGroup in project reactor-netty by reactor.
The class HttpServerTests, method testGracefulShutdown.
@Test
void testGracefulShutdown() throws Exception {
    CountDownLatch latch1 = new CountDownLatch(2);
    CountDownLatch latch2 = new CountDownLatch(2);
    CountDownLatch latch3 = new CountDownLatch(1);
    LoopResources loop = LoopResources.create("testGracefulShutdown");
    disposableServer =
            createServer().runOn(loop)
                          .doOnConnection(c -> {
                              c.onDispose().subscribe(null, null, latch2::countDown);
                              latch1.countDown();
                          })
                          // The channel group lets the server track active connections
                          // so that they can be drained on dispose
                          .channelGroup(new DefaultChannelGroup(new DefaultEventExecutor()))
                          .route(r -> r.get("/delay500", (req, res) ->
                                           res.sendString(Mono.just("delay500")
                                                              .delayElement(Duration.ofMillis(500))))
                                       .get("/delay1000", (req, res) ->
                                           res.sendString(Mono.just("delay1000")
                                                              .delayElement(Duration.ofSeconds(1)))))
                          .bindNow(Duration.ofSeconds(30));

    HttpClient client = createClient(disposableServer::address);

    AtomicReference<String> result = new AtomicReference<>();
    Flux.just("/delay500", "/delay1000")
        .flatMap(s -> client.get()
                            .uri(s)
                            .responseContent()
                            .aggregate()
                            .asString())
        .collect(Collectors.joining())
        .subscribe(s -> {
            result.set(s);
            latch3.countDown();
        });

    assertThat(latch1.await(30, TimeUnit.SECONDS)).isTrue();

    // Stop accepting incoming requests, wait at most 3s for the active requests to finish
    disposableServer.disposeNow();

    assertThat(latch2.await(30, TimeUnit.SECONDS)).isTrue();

    // Dispose the event loop
    loop.disposeLater().block(Duration.ofSeconds(30));

    assertThat(latch3.await(30, TimeUnit.SECONDS)).isTrue();
    assertThat(result.get()).isNotNull().isEqualTo("delay500delay1000");
}
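The piece that makes the graceful shutdown work is channelGroup(new DefaultChannelGroup(new DefaultEventExecutor())): Reactor Netty uses the group to track active connections so that disposeNow() can drain them before returning. The close-all behavior the test leans on is plain Netty and can be seen in isolation; below is a minimal, self-contained sketch using EmbeddedChannel as a stand-in for real connections (the class name is illustrative, not part of the project):

import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.channel.group.ChannelGroup;
import io.netty.channel.group.DefaultChannelGroup;
import io.netty.util.concurrent.DefaultEventExecutor;

public class ChannelGroupCloseSketch {
    public static void main(String[] args) {
        DefaultEventExecutor executor = new DefaultEventExecutor();
        ChannelGroup group = new DefaultChannelGroup(executor);

        // Stand-ins for the connections doOnConnection would see
        EmbeddedChannel c1 = new EmbeddedChannel();
        EmbeddedChannel c2 = new EmbeddedChannel();
        group.add(c1);
        group.add(c2);

        // One call closes every channel in the group; the returned
        // ChannelGroupFuture completes once all of them have closed
        group.close().awaitUninterruptibly();
        System.out.println(c1.isOpen() + " " + c2.isOpen()); // false false

        executor.shutdownGracefully();
    }
}

group.close() returns a ChannelGroupFuture that completes only when every member channel has closed, which is exactly the signal a server needs before releasing its event loops.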
Use of io.netty.channel.group.DefaultChannelGroup in project reactor-netty by reactor.
The class TcpServerTests, method testGracefulShutdown.
@Test
void testGracefulShutdown() throws Exception {
    CountDownLatch latch1 = new CountDownLatch(2);
    CountDownLatch latch2 = new CountDownLatch(2);
    CountDownLatch latch3 = new CountDownLatch(1);
    LoopResources loop = LoopResources.create("testGracefulShutdown");
    DisposableServer disposableServer =
            TcpServer.create()
                     .port(0)
                     .runOn(loop)
                     .doOnConnection(c -> {
                         c.onDispose().subscribe(null, null, latch2::countDown);
                         latch1.countDown();
                     })
                     .channelGroup(new DefaultChannelGroup(new DefaultEventExecutor()))
                     .handle((in, out) -> out.sendString(Mono.just("delay1000")
                                                             .delayElement(Duration.ofSeconds(1))))
                     .wiretap(true)
                     .bindNow(Duration.ofSeconds(30));

    TcpClient client = TcpClient.create()
                                .remoteAddress(disposableServer::address)
                                .wiretap(true);

    AtomicReference<String> result = new AtomicReference<>();
    Flux.merge(client.connect(), client.connect())
        .flatMap(conn -> conn.inbound().receive().asString())
        .collect(Collectors.joining())
        .subscribe(s -> {
            result.set(s);
            latch3.countDown();
        });

    assertThat(latch1.await(30, TimeUnit.SECONDS)).as("latch await").isTrue();

    // Stop accepting incoming requests, wait at most 3s for the active requests to finish
    disposableServer.disposeNow();

    // Dispose the event loop
    loop.disposeLater().block(Duration.ofSeconds(30));

    assertThat(latch2.await(30, TimeUnit.SECONDS)).as("latch await").isTrue();
    assertThat(latch3.await(30, TimeUnit.SECONDS)).as("latch await").isTrue();
    assertThat(result.get()).isNotNull().isEqualTo("delay1000delay1000");
}
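The TCP variant exercises the same drain-on-dispose behavior. A ChannelGroup registered this way can also broadcast a write to every tracked connection, which is the other common reason to wire one into a server; a minimal sketch follows (not taken from the test, class name illustrative):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.channel.group.ChannelGroup;
import io.netty.channel.group.DefaultChannelGroup;
import io.netty.util.CharsetUtil;
import io.netty.util.concurrent.GlobalEventExecutor;

public class ChannelGroupBroadcastSketch {
    public static void main(String[] args) {
        ChannelGroup group = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);
        EmbeddedChannel c1 = new EmbeddedChannel();
        EmbeddedChannel c2 = new EmbeddedChannel();
        group.add(c1);
        group.add(c2);

        // writeAndFlush on the group duplicates the ByteBuf per member,
        // so a single message fans out to every tracked channel
        group.writeAndFlush(Unpooled.copiedBuffer("shutting down", CharsetUtil.UTF_8))
             .awaitUninterruptibly();

        ByteBuf received = c1.readOutbound();
        System.out.println(received.toString(CharsetUtil.UTF_8)); // shutting down
        received.release();
    }
}

DefaultChannelGroup duplicates a ByteBuf message for each member and releases the original, so one buffer can safely fan out to all channels.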
Use of io.netty.channel.group.DefaultChannelGroup in project cdap by caskdata.
The class ServiceSocksProxy, method startUp.
@Override
protected void startUp() throws Exception {
    ServerBootstrap bootstrap = new ServerBootstrap();

    // We don't perform any blocking task in the proxy, only I/O relaying, hence it doesn't need a large number of threads.
    eventLoopGroup = new NioEventLoopGroup(10, Threads.createDaemonThreadFactory("service-socks-proxy-%d"));

    // Create the channel group before binding, so that connections accepted
    // during startup can be added to it safely from initChannel
    channelGroup = new DefaultChannelGroup(ImmediateEventExecutor.INSTANCE);

    bootstrap.group(eventLoopGroup)
             .channel(NioServerSocketChannel.class)
             .childHandler(new ChannelInitializer<SocketChannel>() {
                 @Override
                 protected void initChannel(SocketChannel ch) {
                     channelGroup.add(ch);
                     ch.pipeline()
                       .addLast(new SocksPortUnificationServerHandler())
                       .addLast(new ServiceSocksServerHandler(discoveryServiceClient, authenticator));
                 }
             });

    Channel serverChannel = bootstrap.bind(InetAddress.getLoopbackAddress(), 0).sync().channel();
    bindAddress = (InetSocketAddress) serverChannel.localAddress();
    channelGroup.add(serverChannel);

    LOG.info("Runtime service socks proxy started on {}", bindAddress);
}
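The matching shutDown is not part of this snippet. A plausible counterpart, sketched here under the assumption that it only needs the channelGroup and eventLoopGroup fields initialized above plus java.util.concurrent.TimeUnit (this is a sketch, not the project's actual code):

@Override
protected void shutDown() throws Exception {
    // Close the server channel and all accepted connections tracked by the group
    channelGroup.close().awaitUninterruptibly();
    // Then release the event loop threads
    eventLoopGroup.shutdownGracefully(0, 5, TimeUnit.SECONDS).awaitUninterruptibly();
}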
Use of io.netty.channel.group.DefaultChannelGroup in project cdap by caskdata.
The class ServiceSocksServerConnectHandler, method createForwardingChannelHandler.
@Override
protected Future<RelayChannelHandler> createForwardingChannelHandler(Channel inboundChannel,
                                                                     String destAddress, int destPort) {
    Promise<RelayChannelHandler> promise = new DefaultPromise<>(inboundChannel.eventLoop());

    // Creates a bootstrap for connecting to the target service
    ChannelGroup channels = new DefaultChannelGroup(inboundChannel.eventLoop());
    Bootstrap bootstrap = new Bootstrap()
        .group(inboundChannel.eventLoop())
        .channel(NioSocketChannel.class)
        .option(ChannelOption.SO_KEEPALIVE, true)
        .handler(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelActive(ChannelHandlerContext ctx) {
                channels.add(ctx.channel());
                // When the outbound connection is active, add the relay channel handler to the
                // current pipeline, which relays traffic coming back from the outbound connection.
                // Also complete the relay channel handler future, which relays traffic from
                // inbound to outbound.
                ctx.pipeline().addLast(new SimpleRelayChannelHandler(inboundChannel));
                promise.setSuccess(new SimpleRelayChannelHandler(ctx.channel()));
            }
        });

    // Discover the target address
    Promise<Discoverable> discoverablePromise = new DefaultPromise<>(inboundChannel.eventLoop());
    Cancellable cancellable = discoveryServiceClient.discover(destAddress).watchChanges(serviceDiscovered -> {
        // If it is discovered, make a connection and complete the channel handler future
        Discoverable discoverable = new RandomEndpointStrategy(() -> serviceDiscovered).pick();
        if (discoverable != null) {
            discoverablePromise.setSuccess(discoverable);
        }
    }, inboundChannel.eventLoop());

    // When discovery completes successfully, connect to the destination
    discoverablePromise.addListener((GenericFutureListener<Future<Discoverable>>) discoverableFuture -> {
        cancellable.cancel();
        if (discoverableFuture.isSuccess()) {
            Discoverable discoverable = discoverableFuture.get();
            bootstrap.connect(discoverable.getSocketAddress())
                     .addListener((ChannelFutureListener) channelFuture -> {
                         if (!channelFuture.isSuccess()) {
                             promise.setFailure(channelFuture.cause());
                         }
                     });
        } else {
            promise.setFailure(discoverableFuture.cause());
        }
    });

    // On inbound channel close, close all outbound channels.
    // Also cancel the watch, since it is no longer needed.
    // This handles the case where discovery never returns an endpoint before the client connection times out.
    inboundChannel.closeFuture().addListener((ChannelFutureListener) future -> {
        cancellable.cancel();
        channels.close();
    });

    return promise;
}
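The per-connection group exists so the closeFuture() listener at the end can tear down every outbound channel the moment the inbound channel goes away. That tie-the-lifetimes pattern works on its own; a minimal, self-contained sketch with EmbeddedChannel standing in for both sides (class name illustrative):

import io.netty.channel.ChannelFutureListener;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.channel.group.ChannelGroup;
import io.netty.channel.group.DefaultChannelGroup;
import io.netty.util.concurrent.GlobalEventExecutor;

public class LifetimeTieSketch {
    public static void main(String[] args) {
        EmbeddedChannel inbound = new EmbeddedChannel();
        EmbeddedChannel outbound = new EmbeddedChannel();

        ChannelGroup relayed = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);
        relayed.add(outbound);

        // Same idea as the closeFuture() listener above: when the inbound side
        // goes away, everything it spawned goes away with it
        inbound.closeFuture().addListener((ChannelFutureListener) f -> relayed.close());

        inbound.close().awaitUninterruptibly();
        System.out.println(outbound.isOpen()); // false
    }
}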
Use of io.netty.channel.group.DefaultChannelGroup in project vert.x by eclipse.
The class NetServerBase, method listen.
public synchronized void listen(Handler<? super C> handler, int port, String host,
                                Handler<AsyncResult<Void>> listenHandler) {
    if (handler == null) {
        throw new IllegalStateException("Set connect handler first");
    }
    if (listening) {
        throw new IllegalStateException("Listen already called");
    }
    listening = true;

    listenContext = vertx.getOrCreateContext();
    registeredHandler = handler;

    synchronized (vertx.sharedNetServers()) {
        // Will be updated on bind for a wildcard port
        this.actualPort = port;
        id = new ServerID(port, host);
        NetServerBase shared = vertx.sharedNetServers().get(id);
        if (shared == null || port == 0) {
            // A wildcard port implies a new actual server each time
            serverChannelGroup = new DefaultChannelGroup("vertx-acceptor-channels", GlobalEventExecutor.INSTANCE);
            ServerBootstrap bootstrap = new ServerBootstrap();
            bootstrap.group(availableWorkers);
            bootstrap.channel(NioServerSocketChannel.class);
            sslHelper.validate(vertx);
            bootstrap.childHandler(new ChannelInitializer<Channel>() {
                @Override
                protected void initChannel(Channel ch) throws Exception {
                    if (isPaused()) {
                        ch.close();
                        return;
                    }
                    ChannelPipeline pipeline = ch.pipeline();
                    NetServerBase.this.initChannel(pipeline);
                    pipeline.addLast("handler", new ServerHandler(ch));
                }
            });

            applyConnectionOptions(bootstrap);
            handlerManager.addHandler(handler, listenContext);

            try {
                bindFuture = AsyncResolveConnectHelper.doBind(vertx, port, host, bootstrap);
                bindFuture.addListener(res -> {
                    if (res.succeeded()) {
                        Channel ch = res.result();
                        log.trace("Net server listening on " + host + ":" + ch.localAddress());
                        NetServerBase.this.actualPort = ((InetSocketAddress) ch.localAddress()).getPort();
                        NetServerBase.this.id = new ServerID(NetServerBase.this.actualPort, id.host);
                        serverChannelGroup.add(ch);
                        vertx.sharedNetServers().put(id, NetServerBase.this);
                        metrics = vertx.metricsSPI().createMetrics(new SocketAddressImpl(id.port, id.host), options);
                    } else {
                        vertx.sharedNetServers().remove(id);
                    }
                });
            } catch (Throwable t) {
                // Make sure we send the exception back through the handler (if any)
                if (listenHandler != null) {
                    vertx.runOnContext(v -> listenHandler.handle(Future.failedFuture(t)));
                } else {
                    // No handler - log so the user can see the failure
                    log.error(t);
                }
                listening = false;
                return;
            }
            if (port != 0) {
                vertx.sharedNetServers().put(id, this);
            }
            actualServer = this;
        } else {
            // Server already exists with that host/port - we will use that
            actualServer = shared;
            this.actualPort = shared.actualPort();
            metrics = vertx.metricsSPI().createMetrics(new SocketAddressImpl(id.port, id.host), options);
            actualServer.handlerManager.addHandler(handler, listenContext);
        }

        // Just add it to the future so it gets notified once the bind is complete
        actualServer.bindFuture.addListener(res -> {
            if (listenHandler != null) {
                AsyncResult<Void> ares;
                if (res.succeeded()) {
                    ares = Future.succeededFuture();
                } else {
                    listening = false;
                    ares = Future.failedFuture(res.cause());
                }
                listenContext.runOnContext(v -> listenHandler.handle(ares));
            } else if (res.failed()) {
                log.error("Failed to listen", res.cause());
                listening = false;
            }
        });
    }
}
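Here the group holds only acceptor (server) channels and is named "vertx-acceptor-channels" purely to aid diagnostics; closing the group is how the server stops accepting connections. A minimal sketch of that close path, using ChannelGroup.newCloseFuture() to observe completion (class name illustrative, not vert.x code):

import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.channel.group.ChannelGroup;
import io.netty.channel.group.ChannelGroupFuture;
import io.netty.channel.group.DefaultChannelGroup;
import io.netty.util.concurrent.GlobalEventExecutor;

public class AcceptorGroupSketch {
    public static void main(String[] args) {
        ChannelGroup acceptors = new DefaultChannelGroup("acceptor-channels", GlobalEventExecutor.INSTANCE);
        EmbeddedChannel serverChannel = new EmbeddedChannel(); // stand-in for the bound channel
        acceptors.add(serverChannel);

        // Completes once every channel currently in the group has closed
        ChannelGroupFuture allClosed = acceptors.newCloseFuture();

        acceptors.close().awaitUninterruptibly();
        System.out.println(allClosed.isDone()); // true
        System.out.println(acceptors.name());   // acceptor-channels
    }
}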