Use of io.netty.channel.group.DefaultChannelGroup in project activemq-artemis by apache.
The class NettyAcceptor, method start():
@Override
public synchronized void start() throws Exception {
   if (channelClazz != null) {
      // Already started
      return;
   }

   String acceptorType;

   if (useInvm) {
      acceptorType = INVM_ACCEPTOR_TYPE;
      channelClazz = LocalServerChannel.class;
      eventLoopGroup = new DefaultEventLoopGroup();
   } else {
      if (remotingThreads == -1) {
         // Default to number of cores * 3
         remotingThreads = Runtime.getRuntime().availableProcessors() * 3;
      }

      if (useEpoll && CheckDependencies.isEpollAvailable()) {
         channelClazz = EpollServerSocketChannel.class;
         eventLoopGroup = new EpollEventLoopGroup(remotingThreads, AccessController.doPrivileged(new PrivilegedAction<ActiveMQThreadFactory>() {
            @Override
            public ActiveMQThreadFactory run() {
               return new ActiveMQThreadFactory("activemq-netty-threads", true, ClientSessionFactoryImpl.class.getClassLoader());
            }
         }));
         acceptorType = EPOLL_ACCEPTOR_TYPE;
         logger.debug("Acceptor using native epoll");
      } else if (useKQueue && CheckDependencies.isKQueueAvailable()) {
         channelClazz = KQueueServerSocketChannel.class;
         eventLoopGroup = new KQueueEventLoopGroup(remotingThreads, AccessController.doPrivileged(new PrivilegedAction<ActiveMQThreadFactory>() {
            @Override
            public ActiveMQThreadFactory run() {
               return new ActiveMQThreadFactory("activemq-netty-threads", true, ClientSessionFactoryImpl.class.getClassLoader());
            }
         }));
         acceptorType = KQUEUE_ACCEPTOR_TYPE;
         logger.debug("Acceptor using native kqueue");
      } else {
         channelClazz = NioServerSocketChannel.class;
         eventLoopGroup = new NioEventLoopGroup(remotingThreads, AccessController.doPrivileged(new PrivilegedAction<ActiveMQThreadFactory>() {
            @Override
            public ActiveMQThreadFactory run() {
               return new ActiveMQThreadFactory("activemq-netty-threads", true, ClientSessionFactoryImpl.class.getClassLoader());
            }
         }));
         acceptorType = NIO_ACCEPTOR_TYPE;
         logger.debug("Acceptor using nio");
      }
   }
   bootstrap = new ServerBootstrap();
   bootstrap.group(eventLoopGroup);
   bootstrap.channel(channelClazz);

   ChannelInitializer<Channel> factory = new ChannelInitializer<Channel>() {
      @Override
      public void initChannel(Channel channel) throws Exception {
         ChannelPipeline pipeline = channel.pipeline();
         if (sslEnabled) {
            final Pair<String, Integer> peerInfo = getPeerInfo(channel);
            try {
               pipeline.addLast("sni", new NettySNIHostnameHandler());
               pipeline.addLast("ssl", getSslHandler(channel.alloc(), peerInfo.getA(), peerInfo.getB()));
               pipeline.addLast("sslHandshakeExceptionHandler", new SslHandshakeExceptionHandler());
            } catch (Exception e) {
               Throwable rootCause = getRootCause(e);
               ActiveMQServerLogger.LOGGER.gettingSslHandlerFailed(channel.remoteAddress().toString(), rootCause.getClass().getName() + ": " + rootCause.getMessage());
               if (ActiveMQServerLogger.LOGGER.isDebugEnabled()) {
                  ActiveMQServerLogger.LOGGER.debug("Getting SSL handler failed", e);
               }
               throw e;
            }
         }
         pipeline.addLast(protocolHandler.getProtocolDecoder());
      }

      private Pair<String, Integer> getPeerInfo(Channel channel) {
         try {
            String[] peerInfo = channel.remoteAddress().toString().replace("/", "").split(":");
            return new Pair<>(peerInfo[0], Integer.parseInt(peerInfo[1]));
         } catch (Exception e) {
            logger.debug("Failed to parse peer info for SSL engine initialization", e);
         }
         return new Pair<>(null, 0);
      }
   };
   bootstrap.childHandler(factory);

   // Bind
   bootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
   if (tcpReceiveBufferSize != -1) {
      bootstrap.childOption(ChannelOption.SO_RCVBUF, tcpReceiveBufferSize);
   }
   if (tcpSendBufferSize != -1) {
      bootstrap.childOption(ChannelOption.SO_SNDBUF, tcpSendBufferSize);
   }
   final int writeBufferLowWaterMark = this.writeBufferLowWaterMark != -1 ? this.writeBufferLowWaterMark : WriteBufferWaterMark.DEFAULT.low();
   final int writeBufferHighWaterMark = this.writeBufferHighWaterMark != -1 ? this.writeBufferHighWaterMark : WriteBufferWaterMark.DEFAULT.high();
   final WriteBufferWaterMark writeBufferWaterMark = new WriteBufferWaterMark(writeBufferLowWaterMark, writeBufferHighWaterMark);
   bootstrap.childOption(ChannelOption.WRITE_BUFFER_WATER_MARK, writeBufferWaterMark);
   if (backlog != -1) {
      bootstrap.option(ChannelOption.SO_BACKLOG, backlog);
   }
   bootstrap.option(ChannelOption.SO_REUSEADDR, true);
   bootstrap.childOption(ChannelOption.SO_REUSEADDR, true);
   bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);

   channelGroup = new DefaultChannelGroup("activemq-accepted-channels", GlobalEventExecutor.INSTANCE);
   serverChannelGroup = new DefaultChannelGroup("activemq-acceptor-channels", GlobalEventExecutor.INSTANCE);

   if (httpUpgradeEnabled) {
      // The channel will be bound by the Web container and handed over after the
      // HTTP Upgrade handshake is successful.
   } else {
      startServerChannels();
      paused = false;
      if (notificationService != null) {
         TypedProperties props = new TypedProperties();
         props.putSimpleStringProperty(new SimpleString("factory"), new SimpleString(NettyAcceptorFactory.class.getName()));
         props.putSimpleStringProperty(new SimpleString("host"), new SimpleString(host));
         props.putIntProperty(new SimpleString("port"), actualPort);
         Notification notification = new Notification(null, CoreNotificationType.ACCEPTOR_STARTED, props);
         notificationService.sendNotification(notification);
      }
      ActiveMQServerLogger.LOGGER.startedAcceptor(acceptorType, host, actualPort, protocolsString);
   }

   if (batchDelay > 0) {
      flusher = new BatchFlusher();
      batchFlusherFuture = scheduledThreadPool.scheduleWithFixedDelay(flusher, batchDelay, batchDelay, TimeUnit.MILLISECONDS);
   }
}
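The two DefaultChannelGroup instances created above are what make shutdown tractable: closing a group closes every channel registered with it. A minimal sketch of how such groups are typically drained at stop time (simplified and illustrative, not the actual NettyAcceptor.stop() implementation):

// Sketch only: close the acceptor sockets first, then the accepted connections.
// ChannelGroup.close() returns a ChannelGroupFuture covering all members.
serverChannelGroup.close().awaitUninterruptibly();   // stop accepting new connections
channelGroup.close().awaitUninterruptibly(10_000);   // close active connections, 10s cap (illustrative)
eventLoopGroup.shutdownGracefully();                 // release the I/O threads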
Use of io.netty.channel.group.DefaultChannelGroup in project reactor-netty by reactor.
The class ConnectionPoolTests, method testClientWithChannelGroup:
@Test
void testClientWithChannelGroup() {
    HttpClient localClient1 =
            client.port(server1.port())
                  .channelGroup(new DefaultChannelGroup(GlobalEventExecutor.INSTANCE));
    HttpClient localClient2 =
            localClient1.channelGroup(new DefaultChannelGroup(GlobalEventExecutor.INSTANCE));
    checkResponsesAndChannelsStates("server1-ConnectionPoolTests", "server1-ConnectionPoolTests", localClient1, localClient2);
}
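Each channelGroup(...) call returns a new HttpClient copy bound to its own group, which is why the test can give two otherwise identical clients separate groups. A minimal standalone sketch of the same idea (the endpoint URL is hypothetical):

// Sketch: track a client's channels in a group, then close them all at once.
ChannelGroup group = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);
HttpClient tracked = HttpClient.create().channelGroup(group);

tracked.get()
       .uri("http://localhost:8080/hello")   // hypothetical endpoint
       .responseContent()
       .aggregate()
       .asString()
       .block();

System.out.println("channels tracked: " + group.size());  // ChannelGroup is a Set<Channel>
group.close().awaitUninterruptibly();                     // bulk-close every tracked channel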
Use of io.netty.channel.group.DefaultChannelGroup in project reactor-netty by reactor.
The class HttpServerTests, method testGracefulShutdown:
@Test
void testGracefulShutdown() throws Exception {
    CountDownLatch latch1 = new CountDownLatch(2);
    CountDownLatch latch2 = new CountDownLatch(2);
    CountDownLatch latch3 = new CountDownLatch(1);
    LoopResources loop = LoopResources.create("testGracefulShutdown");
    disposableServer =
            createServer().runOn(loop)
                          .doOnConnection(c -> {
                              c.onDispose().subscribe(null, null, latch2::countDown);
                              latch1.countDown();
                          })
                          .channelGroup(new DefaultChannelGroup(new DefaultEventExecutor()))
                          .route(r -> r.get("/delay500", (req, res) ->
                                           res.sendString(Mono.just("delay500").delayElement(Duration.ofMillis(500))))
                                       .get("/delay1000", (req, res) ->
                                           res.sendString(Mono.just("delay1000").delayElement(Duration.ofSeconds(1)))))
                          .bindNow(Duration.ofSeconds(30));

    HttpClient client = createClient(disposableServer::address);

    AtomicReference<String> result = new AtomicReference<>();
    Flux.just("/delay500", "/delay1000")
        .flatMap(s -> client.get()
                            .uri(s)
                            .responseContent()
                            .aggregate()
                            .asString())
        .collect(Collectors.joining())
        .subscribe(s -> {
            result.set(s);
            latch3.countDown();
        });

    assertThat(latch1.await(30, TimeUnit.SECONDS)).isTrue();

    // Stop accepting incoming requests, wait at most 3s for the active requests to finish
    disposableServer.disposeNow();

    assertThat(latch2.await(30, TimeUnit.SECONDS)).isTrue();

    // Dispose the event loop
    loop.disposeLater().block(Duration.ofSeconds(30));

    assertThat(latch3.await(30, TimeUnit.SECONDS)).isTrue();
    assertThat(result.get()).isNotNull().isEqualTo("delay500delay1000");
}
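The channel group is what turns disposeNow() into a graceful shutdown: the server first stops accepting connections, then waits (3 seconds by default, per the comment in the test) for channels in the group to finish their in-flight work before closing them. A stripped-down sketch of the same pattern (handler and timeout are illustrative):

// Sketch: graceful shutdown via a channel group (values are illustrative).
DisposableServer server =
        HttpServer.create()
                  .port(0)
                  .channelGroup(new DefaultChannelGroup(new DefaultEventExecutor()))
                  .handle((req, res) -> res.sendString(Mono.just("ok")))
                  .bindNow();

// Stops accepting new connections, then waits up to 10s for in-flight
// exchanges on tracked channels before force-closing them.
server.disposeNow(Duration.ofSeconds(10));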
Use of io.netty.channel.group.DefaultChannelGroup in project reactor-netty by reactor.
The class TcpServerTests, method testGracefulShutdown:
@Test
void testGracefulShutdown() throws Exception {
    CountDownLatch latch1 = new CountDownLatch(2);
    CountDownLatch latch2 = new CountDownLatch(2);
    CountDownLatch latch3 = new CountDownLatch(1);
    LoopResources loop = LoopResources.create("testGracefulShutdown");
    DisposableServer disposableServer =
            TcpServer.create()
                     .port(0)
                     .runOn(loop)
                     .doOnConnection(c -> {
                         c.onDispose().subscribe(null, null, latch2::countDown);
                         latch1.countDown();
                     })
                     .channelGroup(new DefaultChannelGroup(new DefaultEventExecutor()))
                     .handle((in, out) -> out.sendString(Mono.just("delay1000").delayElement(Duration.ofSeconds(1))))
                     .wiretap(true)
                     .bindNow(Duration.ofSeconds(30));

    TcpClient client = TcpClient.create()
                                .remoteAddress(disposableServer::address)
                                .wiretap(true);

    AtomicReference<String> result = new AtomicReference<>();
    Flux.merge(client.connect(), client.connect())
        .flatMap(conn -> conn.inbound().receive().asString())
        .collect(Collectors.joining())
        .subscribe(s -> {
            result.set(s);
            latch3.countDown();
        });

    assertThat(latch1.await(30, TimeUnit.SECONDS)).as("latch await").isTrue();

    // Stop accepting incoming requests, wait at most 3s for the active requests to finish
    disposableServer.disposeNow();

    // Dispose the event loop
    loop.disposeLater().block(Duration.ofSeconds(30));

    assertThat(latch2.await(30, TimeUnit.SECONDS)).as("latch await").isTrue();
    assertThat(latch3.await(30, TimeUnit.SECONDS)).as("latch await").isTrue();
    assertThat(result.get()).isNotNull().isEqualTo("delay1000delay1000");
}
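Both graceful-shutdown tests above build their group over a fresh DefaultEventExecutor and never shut it down, which is fine in a short-lived test but would leak a thread in long-running code. A small sketch of the fuller lifecycle (assuming you own the executor):

// Sketch: own the executor backing the channel group and release it with the group.
DefaultEventExecutor executor = new DefaultEventExecutor();
ChannelGroup group = new DefaultChannelGroup(executor);

// ... pass `group` to channelGroup(...) and run the server ...

group.close().awaitUninterruptibly();  // close any remaining tracked channels
executor.shutdownGracefully();         // stop the executor's thread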
Use of io.netty.channel.group.DefaultChannelGroup in project cdap by caskdata.
The class ServiceSocksProxy, method startUp:
@Override
protected void startUp() throws Exception {
  ServerBootstrap bootstrap = new ServerBootstrap();
  // We don't perform any blocking tasks in the proxy, only I/O relaying,
  // hence it doesn't need a large number of threads.
  eventLoopGroup = new NioEventLoopGroup(10, Threads.createDaemonThreadFactory("service-socks-proxy-%d"));
  // Create the channel group before binding so that initChannel() can never
  // observe it as null if a connection arrives immediately after bind.
  channelGroup = new DefaultChannelGroup(ImmediateEventExecutor.INSTANCE);
  bootstrap
    .group(eventLoopGroup)
    .channel(NioServerSocketChannel.class)
    .childHandler(new ChannelInitializer<SocketChannel>() {
      @Override
      protected void initChannel(SocketChannel ch) {
        channelGroup.add(ch);
        ch.pipeline()
          .addLast(new SocksPortUnificationServerHandler())
          .addLast(new ServiceSocksServerHandler(discoveryServiceClient, authenticator));
      }
    });

  Channel serverChannel = bootstrap.bind(InetAddress.getLoopbackAddress(), 0).sync().channel();
  bindAddress = (InetSocketAddress) serverChannel.localAddress();
  channelGroup.add(serverChannel);

  LOG.info("Runtime service socks proxy started on {}", bindAddress);
}
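ImmediateEventExecutor.INSTANCE is a reasonable choice here because the group is used only for bookkeeping and bulk close, not for scheduled work. A plausible counterpart for teardown (a sketch, not the actual CDAP shutDown() implementation):

// Sketch: tear down the proxy by closing every tracked channel
// (server socket included), then releasing the event loop threads.
@Override
protected void shutDown() throws Exception {
  channelGroup.close().awaitUninterruptibly();
  eventLoopGroup.shutdownGracefully().awaitUninterruptibly();
}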