use of io.netty.channel.WriteBufferWaterMark in project atomix by atomix.
the class NettyMessagingService method startAcceptingConnections.
private CompletableFuture<Void> startAcceptingConnections() {
    CompletableFuture<Void> future = new CompletableFuture<>();
    ServerBootstrap b = new ServerBootstrap();
    b.option(ChannelOption.SO_REUSEADDR, true);
    b.option(ChannelOption.SO_BACKLOG, 128);
    b.childOption(ChannelOption.WRITE_BUFFER_WATER_MARK, new WriteBufferWaterMark(8 * 1024, 32 * 1024));
    b.childOption(ChannelOption.SO_RCVBUF, 1024 * 1024);
    b.childOption(ChannelOption.SO_SNDBUF, 1024 * 1024);
    b.childOption(ChannelOption.SO_KEEPALIVE, true);
    b.childOption(ChannelOption.TCP_NODELAY, true);
    b.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    b.group(serverGroup, clientGroup);
    b.channel(serverChannelClass);
    if (enableNettyTls) {
        b.childHandler(new SslServerCommunicationChannelInitializer());
    } else {
        b.childHandler(new BasicChannelInitializer());
    }
    // Bind and start to accept incoming connections.
    b.bind(localEndpoint.port()).addListener(f -> {
        if (f.isSuccess()) {
            log.info("{} accepting incoming connections on port {}", localEndpoint.host(), localEndpoint.port());
            future.complete(null);
        } else {
            log.warn("{} failed to bind to port {} due to {}", localEndpoint.host(), localEndpoint.port(), f.cause());
            future.completeExceptionally(f.cause());
        }
    });
    return future;
}
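The 8 KiB low / 32 KiB high water marks above drive the channel's writability flag: once more than 32 KiB of data is queued in the outbound buffer, Channel.isWritable() starts returning false, and it flips back to true only after the queue drains below 8 KiB. A minimal sketch of a handler reacting to those transitions (the BackpressureHandler class and its pauseWrites/resumeWrites callbacks are illustrative, not part of Atomix):

import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandlerContext;

// Sketch: react to writability changes driven by the configured water marks.
public class BackpressureHandler extends ChannelDuplexHandler {

    @Override
    public void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception {
        if (ctx.channel().isWritable()) {
            // Outbound queue drained below the low water mark (8 KiB): resume producing.
            resumeWrites();
        } else {
            // Outbound queue exceeded the high water mark (32 KiB): stop producing.
            pauseWrites();
        }
        super.channelWritabilityChanged(ctx);
    }

    private void pauseWrites() {
        // Application-specific: stop pulling messages from the source.
    }

    private void resumeWrites() {
        // Application-specific: resume pulling messages.
    }
}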
use of io.netty.channel.WriteBufferWaterMark in project bookkeeper by apache.
the class PerChannelBookieClient method connect.
protected ChannelFuture connect() {
    final long startTime = MathUtils.nowInNano();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Connecting to bookie: {}", addr);
    }
    // Set up the client Bootstrap so we can create a new Channel connection to the bookie.
    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(eventLoopGroup);
    if (eventLoopGroup instanceof EpollEventLoopGroup) {
        bootstrap.channel(EpollSocketChannel.class);
    } else if (eventLoopGroup instanceof DefaultEventLoopGroup) {
        bootstrap.channel(LocalChannel.class);
    } else {
        bootstrap.channel(NioSocketChannel.class);
    }
    ByteBufAllocator allocator;
    if (this.conf.isNettyUsePooledBuffers()) {
        allocator = PooledByteBufAllocator.DEFAULT;
    } else {
        allocator = UnpooledByteBufAllocator.DEFAULT;
    }
    bootstrap.option(ChannelOption.ALLOCATOR, allocator);
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, conf.getClientConnectTimeoutMillis());
    bootstrap.option(ChannelOption.WRITE_BUFFER_WATER_MARK,
            new WriteBufferWaterMark(conf.getClientWriteBufferLowWaterMark(), conf.getClientWriteBufferHighWaterMark()));
    if (!(eventLoopGroup instanceof DefaultEventLoopGroup)) {
        bootstrap.option(ChannelOption.TCP_NODELAY, conf.getClientTcpNoDelay());
        bootstrap.option(ChannelOption.SO_KEEPALIVE, conf.getClientSockKeepalive());
        // If buffer sizes are 0, let the OS auto-tune them.
        if (conf.getClientSendBufferSize() > 0) {
            bootstrap.option(ChannelOption.SO_SNDBUF, conf.getClientSendBufferSize());
        }
        if (conf.getClientReceiveBufferSize() > 0) {
            bootstrap.option(ChannelOption.SO_RCVBUF, conf.getClientReceiveBufferSize());
        }
    }
    // In the netty pipeline, we need to split packets based on length, so we
    // use the {@link LengthFieldBasedFrameDecoder}. Other than that, all actions
    // are carried out in this class, e.g., making sense of received messages,
    // prepending the length to outgoing packets, etc.
    bootstrap.handler(new ChannelInitializer<Channel>() {

        @Override
        protected void initChannel(Channel ch) throws Exception {
            ChannelPipeline pipeline = ch.pipeline();
            pipeline.addLast("bytebufList", ByteBufList.ENCODER_WITH_SIZE);
            pipeline.addLast("lengthbasedframedecoder", new LengthFieldBasedFrameDecoder(maxFrameSize, 0, 4, 0, 4));
            pipeline.addLast("lengthprepender", new LengthFieldPrepender(4));
            pipeline.addLast("bookieProtoEncoder", new BookieProtoEncoding.RequestEncoder(extRegistry));
            pipeline.addLast("bookieProtoDecoder", new BookieProtoEncoding.ResponseDecoder(extRegistry, useV2WireProtocol));
            pipeline.addLast("authHandler", new AuthHandler.ClientSideHandler(authProviderFactory, txnIdGenerator, connectionPeer));
            pipeline.addLast("mainhandler", PerChannelBookieClient.this);
        }
    });
    SocketAddress bookieAddr = addr.getSocketAddress();
    if (eventLoopGroup instanceof DefaultEventLoopGroup) {
        bookieAddr = addr.getLocalAddress();
    }
    ChannelFuture future = bootstrap.connect(bookieAddr);
    future.addListener(new ConnectionFutureListener(startTime));
    return future;
}
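Here the low and high water marks come from BookKeeper's client configuration rather than hard-coded constants. Water marks are also not fixed at bootstrap time; Netty exposes them on a live channel through its ChannelConfig. A hedged sketch of reading and adjusting them at runtime (the tuneWaterMarks method, the slf4j-style LOG, and the 64 KiB / 256 KiB values are illustrative assumptions, not BookKeeper code):

import io.netty.channel.Channel;
import io.netty.channel.WriteBufferWaterMark;

// Sketch: inspect and adjust the water marks of an already-connected channel.
void tuneWaterMarks(Channel channel) {
    WriteBufferWaterMark current = channel.config().getWriteBufferWaterMark();
    LOG.info("low={} high={}", current.low(), current.high());
    // Raise both marks, e.g. for a bulk-transfer phase.
    channel.config().setWriteBufferWaterMark(new WriteBufferWaterMark(64 * 1024, 256 * 1024));
}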
use of io.netty.channel.WriteBufferWaterMark in project xian by happyyangyuan.
the class RpcNettyClient method lazyInit.
/**
 * @param nodeId the id of the node to which the connection should be initialized. This method is thread-safe because it acquires an internal lock.
 * @throws info.xiancloud.plugin.distribution.exception.ApplicationInstanceOfflineException if the destination node is offline, in which case the connection of course cannot be initialized.
 * @throws Exception on other unknown errors.
 */
private static void lazyInit(String nodeId) throws Exception {
    lock.lock();
    String host = null;
    int port = -1;
    try {
        if (channelAvailable(nodeId)) {
            LOG.debug(String.format("RpcClient: a persistent connection to %s already exists; no new connection is created.", nodeId));
            return;
        }
        LOG.info(String.format("RpcClient: creating a new persistent connection to %s...", nodeId));
        ApplicationInstance node = ApplicationRouter.singleton.getInstance(nodeId);
        // If the two nodes are deployed on the same host, use the loopback
        // address to avoid going through the switch/router.
        host = Objects.equals(node.getAddress(), EnvUtil.getLocalIp()) ? "127.0.0.1" : node.getAddress();
        port = node.getPort();
        final SslContext sslCtx;
        if (SSL) {
            sslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
        } else {
            sslCtx = null;
        }
        EventLoopGroup group = new NioEventLoopGroup(1);
        Bootstrap b = new Bootstrap();
        b.group(group)
         .option(ChannelOption.WRITE_BUFFER_WATER_MARK,
                 new WriteBufferWaterMark(10 * 1024 * 1024, // low water mark: 10 MiB
                                          20 * 1024 * 1024)) // high water mark: 20 MiB
         .channel(NioSocketChannel.class)
         .handler(new RpcNettyClientInitializer(sslCtx, nodeId))
         .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 100);
        Channel connectedChannel = b.connect(host, port).sync().channel();
        connectedChannel.closeFuture().addListener(future -> {
            group.shutdownGracefully();
            LOG.info("The EventLoopGroup has been terminated completely and all Channels that belong to the group have been closed.");
        });
        nodeId_to_connectedChannel_map.put(nodeId, connectedChannel);
        LOG.info(new JSONObject() {
            {
                put("toNodeId", nodeId);
                put("rpcRemoteAddress", connectedChannel.remoteAddress().toString());
                put("type", "rpcChannelConnected");
                put("description", String.format("RpcClient: persistent connection to %s established, remoteAddress=%s", nodeId, connectedChannel.remoteAddress()));
            }
        }.toJSONString());
    } catch (Throwable e) {
        throw new Exception(String.format("Failed to establish a persistent connection to remote node %s: host=%s, port=%s", nodeId, host, port), e);
    } finally {
        lock.unlock();
    }
}
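With a 10 MiB / 20 MiB window, a fast producer can queue a substantial backlog before the channel turns unwritable, so callers typically consult Channel.isWritable() before writing. A minimal sketch against the connected channel (sendOrQueue and queueForLater are illustrative names, not part of the xian project):

import io.netty.channel.Channel;

// Sketch: respect the configured water marks when writing RPC payloads.
void sendOrQueue(Channel channel, Object msg) {
    if (channel.isWritable()) {
        // Below the 20 MiB high water mark: safe to write.
        channel.writeAndFlush(msg);
    } else {
        // Above the high water mark: back off instead of growing the
        // outbound buffer without bound.
        queueForLater(msg);
    }
}

void queueForLater(Object msg) {
    // Application-specific buffering or upstream flow control.
}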
use of io.netty.channel.WriteBufferWaterMark in project xian by happyyangyuan.
the class RpcNettyServer method start.
private void start() throws Exception {
    if (Node.RPC_PORT < 0) {
        LOG.error("No rpc port is specified, rpc server starting failed.");
        return;
    }
    final SslContext sslCtx;
    if (SSL) {
        SelfSignedCertificate ssc = new SelfSignedCertificate();
        sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build();
    } else {
        sslCtx = null;
    }
    EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    EventLoopGroup workerGroup = new NioEventLoopGroup(1);
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup)
     .option(ChannelOption.WRITE_BUFFER_WATER_MARK,
             new WriteBufferWaterMark(10 * 1024 * 1024, // low water mark: 10 MiB
                                      20 * 1024 * 1024)) // high water mark: 20 MiB
     .channel(NioServerSocketChannel.class)
     .handler(new LoggingHandler(LogLevel.INFO))
     .childHandler(new RpcServerInitializer(sslCtx));
    parentChannel = b.bind(Node.RPC_PORT).sync().channel();
    parentChannel.closeFuture().addListener(future -> {
        workerGroup.shutdownGracefully();
        bossGroup.shutdownGracefully();
        LOG.info("The EventLoopGroup has been terminated completely and all Channels that belong to the group have been closed.");
    });
}
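One detail worth noting: this server sets WRITE_BUFFER_WATER_MARK through option(...), which on a ServerBootstrap configures the parent NioServerSocketChannel (the listening socket) rather than the accepted connections. The listening channel performs almost no writes, so the marks have little effect there; to apply them to each accepted connection, as the atomix and openflowplugin examples do, the setting would go through childOption(...):

// Applies the water marks to accepted child channels instead of the listening socket.
b.childOption(ChannelOption.WRITE_BUFFER_WATER_MARK,
        new WriteBufferWaterMark(10 * 1024 * 1024, 20 * 1024 * 1024));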
use of io.netty.channel.WriteBufferWaterMark in project openflowplugin by opendaylight.
the class TcpHandler method run.
/**
 * Starts server on selected port.
 */
@Override
public void run() {
    /*
     * We generally do not perform IO-unrelated tasks, so we want to have
     * all outstanding tasks completed before the executing thread goes
     * back into select.
     *
     * Any other setting means netty will measure the time it spent selecting
     * and spend roughly proportional time executing tasks.
     */
    // workerGroup.setIoRatio(100);
    final ChannelFuture f;
    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup)
                .channel(socketChannelClass)
                .handler(new LoggingHandler(LogLevel.DEBUG))
                .childHandler(channelInitializer)
                .option(ChannelOption.SO_BACKLOG, 128)
                .option(ChannelOption.SO_REUSEADDR, true)
                .childOption(ChannelOption.SO_KEEPALIVE, true)
                .childOption(ChannelOption.TCP_NODELAY, true)
                .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                .childOption(ChannelOption.WRITE_BUFFER_WATER_MARK,
                        new WriteBufferWaterMark(DEFAULT_WRITE_LOW_WATERMARK, DEFAULT_WRITE_HIGH_WATERMARK))
                .childOption(ChannelOption.WRITE_SPIN_COUNT, DEFAULT_WRITE_SPIN_COUNT);
        if (startupAddress != null) {
            f = bootstrap.bind(startupAddress.getHostAddress(), port).sync();
        } else {
            f = bootstrap.bind(port).sync();
        }
    } catch (InterruptedException e) {
        LOG.error("Interrupted while binding port {}", port, e);
        return;
    }
    try {
        InetSocketAddress isa = (InetSocketAddress) f.channel().localAddress();
        address = isa.getHostString();
        // Update port, as it may have been specified as 0.
        this.port = isa.getPort();
        LOG.debug("address from tcphandler: {}", address);
        isOnlineFuture.set(true);
        LOG.info("Switch listener started and ready to accept incoming tcp/tls connections on port: {}", port);
        f.channel().closeFuture().sync();
    } catch (InterruptedException e) {
        LOG.error("Interrupted while waiting for port {} shutdown", port, e);
    } finally {
        shutdown();
    }
}
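The handler re-reads the port after binding because binding to port 0 asks the OS to assign a free ephemeral port, which is only known from the channel's local address afterwards. A minimal standalone illustration of that step (the bootstrap variable is assumed to be a fully configured ServerBootstrap):

// Sketch: recover the OS-assigned port after binding to port 0.
ChannelFuture f = bootstrap.bind(0).sync();
InetSocketAddress bound = (InetSocketAddress) f.channel().localAddress();
int actualPort = bound.getPort(); // the ephemeral port selected by the OS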