use of org.apache.flink.shaded.netty4.io.netty.bootstrap.Bootstrap in project netty by netty.
the class NioSocketChannelTest method testChannelReRegisterRead.
private static void testChannelReRegisterRead(final boolean sameEventLoop) throws Exception {
    final EventLoopGroup group = new NioEventLoopGroup(2);
    final CountDownLatch latch = new CountDownLatch(1);
    // Just some random bytes
    byte[] bytes = new byte[1024];
    PlatformDependent.threadLocalRandom().nextBytes(bytes);
    Channel sc = null;
    Channel cc = null;
    ServerBootstrap b = new ServerBootstrap();
    try {
        b.group(group).channel(NioServerSocketChannel.class)
         .childOption(ChannelOption.SO_KEEPALIVE, true)
         .childHandler(new ChannelInitializer<Channel>() {
            @Override
            protected void initChannel(Channel ch) throws Exception {
                ChannelPipeline pipeline = ch.pipeline();
                pipeline.addLast(new SimpleChannelInboundHandler<ByteBuf>() {
                    @Override
                    protected void channelRead0(ChannelHandlerContext ctx, ByteBuf byteBuf) {
                        // We were able to read something from the Channel after re-registration.
                        latch.countDown();
                    }

                    @Override
                    public void channelActive(final ChannelHandlerContext ctx) throws Exception {
                        final EventLoop loop = group.next();
                        if (sameEventLoop) {
                            deregister(ctx, loop);
                        } else {
                            loop.execute(new Runnable() {
                                @Override
                                public void run() {
                                    deregister(ctx, loop);
                                }
                            });
                        }
                    }

                    private void deregister(ChannelHandlerContext ctx, final EventLoop loop) {
                        // As soon as the channel becomes active, re-register it with another
                        // EventLoop. After this is done we should still receive the data that
                        // was written to the channel.
                        ctx.deregister().addListener(new ChannelFutureListener() {
                            @Override
                            public void operationComplete(ChannelFuture cf) {
                                Channel channel = cf.channel();
                                assertNotSame(loop, channel.eventLoop());
                                group.next().register(channel);
                            }
                        });
                    }
                });
            }
        });
        sc = b.bind(0).syncUninterruptibly().channel();

        Bootstrap bootstrap = new Bootstrap();
        bootstrap.group(group).channel(NioSocketChannel.class);
        bootstrap.handler(new ChannelInboundHandlerAdapter());
        cc = bootstrap.connect(sc.localAddress()).syncUninterruptibly().channel();
        cc.writeAndFlush(Unpooled.wrappedBuffer(bytes)).syncUninterruptibly();
        latch.await();
    } finally {
        if (cc != null) {
            cc.close();
        }
        if (sc != null) {
            sc.close();
        }
        group.shutdownGracefully();
    }
}
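The essential step in this test is the hand-off between event loops. A minimal sketch of that deregister/re-register pattern in isolation, assuming channel is an already active Channel and group is a running NioEventLoopGroup:

// Sketch only: detach the channel from its current EventLoop, then attach it to another one.
// Data that arrives in the meantime is delivered once the new registration completes.
channel.deregister().addListener((ChannelFutureListener) f -> {
    if (f.isSuccess()) {
        group.next().register(f.channel());
    }
});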
use of org.apache.flink.shaded.netty4.io.netty.bootstrap.Bootstrap in project netty by netty.
the class FixedChannelPoolTest method testAcquireTimeout.
@Test(expected = TimeoutException.class)
public void testAcquireTimeout() throws Exception {
    EventLoopGroup group = new LocalEventLoopGroup();
    LocalAddress addr = new LocalAddress(LOCAL_ADDR_ID);
    Bootstrap cb = new Bootstrap();
    cb.remoteAddress(addr);
    cb.group(group).channel(LocalChannel.class);
    ServerBootstrap sb = new ServerBootstrap();
    sb.group(group).channel(LocalServerChannel.class).childHandler(new ChannelInitializer<LocalChannel>() {
        @Override
        public void initChannel(LocalChannel ch) throws Exception {
            ch.pipeline().addLast(new ChannelInboundHandlerAdapter());
        }
    });
    // Start server
    Channel sc = sb.bind(addr).syncUninterruptibly().channel();
    ChannelPoolHandler handler = new TestChannelPoolHandler();
    ChannelPool pool = new FixedChannelPool(cb, handler, ChannelHealthChecker.ACTIVE, AcquireTimeoutAction.FAIL, 500, 1, Integer.MAX_VALUE);
    Channel channel = pool.acquire().syncUninterruptibly().getNow();
    Future<Channel> future = pool.acquire();
    try {
        future.syncUninterruptibly();
    } finally {
        sc.close().syncUninterruptibly();
        channel.close().syncUninterruptibly();
        group.shutdownGracefully();
    }
}
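For contrast with the timeout path above, a minimal sketch of the ordinary acquire/use/release cycle against the same pool (the payload and the error handling are illustrative assumptions):

// Sketch: releasing the channel frees the pool's single permit, so a subsequent
// acquire() succeeds instead of timing out.
Channel ch = pool.acquire().syncUninterruptibly().getNow();
try {
    ch.writeAndFlush(Unpooled.wrappedBuffer(new byte[] { 1, 2, 3 })).syncUninterruptibly();
} finally {
    pool.release(ch).syncUninterruptibly();
}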
use of org.apache.flink.shaded.netty4.io.netty.bootstrap.Bootstrap in project pulsar by yahoo.
the class DiscoveryServiceTest method connectToService.
/**
 * Creates a client channel with a {@link ClientHandler} to connect to and communicate with the server.
 *
 * @param serviceUrl the discovery service URL to connect to
 * @param latch counted down by the {@link ClientHandler} once the expected response arrives
 * @param tls whether to secure the connection with TLS
 * @return the {@link NioEventLoopGroup} backing the client channel
 * @throws URISyntaxException if the service URL cannot be parsed
 */
public static NioEventLoopGroup connectToService(String serviceUrl, CountDownLatch latch, boolean tls) throws URISyntaxException {
    NioEventLoopGroup workerGroup = new NioEventLoopGroup();
    Bootstrap b = new Bootstrap();
    b.group(workerGroup);
    b.channel(NioSocketChannel.class);
    b.handler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel ch) throws Exception {
            if (tls) {
                SslContextBuilder builder = SslContextBuilder.forClient();
                builder.trustManager(InsecureTrustManagerFactory.INSTANCE);
                X509Certificate[] certificates = SecurityUtility.loadCertificatesFromPemFile(TLS_CLIENT_CERT_FILE_PATH);
                PrivateKey privateKey = SecurityUtility.loadPrivateKeyFromPemFile(TLS_CLIENT_KEY_FILE_PATH);
                builder.keyManager(privateKey, (X509Certificate[]) certificates);
                SslContext sslCtx = builder.build();
                ch.pipeline().addLast("tls", sslCtx.newHandler(ch.alloc()));
            }
            ch.pipeline().addLast(new ClientHandler(latch));
        }
    });
    URI uri = new URI(serviceUrl);
    InetSocketAddress serviceAddress = new InetSocketAddress(uri.getHost(), uri.getPort());
    b.connect(serviceAddress).addListener((ChannelFuture future) -> {
        if (!future.isSuccess()) {
            throw new IllegalStateException(future.cause());
        }
    });
    return workerGroup;
}
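A hypothetical call site for this helper in a test could look like the following; the service URL, timeout, and assertion are illustrative assumptions, not taken from the Pulsar test itself:

// Hypothetical usage of connectToService; URL and timeout are assumptions.
CountDownLatch latch = new CountDownLatch(1);
NioEventLoopGroup group = connectToService("pulsar://localhost:6650", latch, false);
try {
    // ClientHandler is expected to count the latch down once the server responds
    assertTrue(latch.await(10, TimeUnit.SECONDS));
} finally {
    group.shutdownGracefully();
}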
use of org.apache.flink.shaded.netty4.io.netty.bootstrap.Bootstrap in project cradle by BingLau7.
the class EchoClient method start.
void start() throws Exception {
    EventLoopGroup group = new NioEventLoopGroup();
    try {
        // Specify the EventLoopGroup that handles client events; an NIO implementation is required
        Bootstrap b = new Bootstrap();
        b.group(group)
         .channel(NioSocketChannel.class)
         .remoteAddress(new InetSocketAddress(host, port))
         .handler(new ChannelInitializer<SocketChannel>() {
             @Override
             protected void initChannel(SocketChannel ch) throws Exception {
                 ch.pipeline().addLast(new EchoClientHandler());
             }
         });
        // Connect to the remote peer and block until the connection completes
        ChannelFuture f = b.connect().sync();
        // Block until the Channel is closed
        f.channel().closeFuture().sync();
    } finally {
        // Shut down the event loop group and release all resources
        group.shutdownGracefully().sync();
    }
}
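The EchoClientHandler referenced above is not included in this snippet. A minimal sketch of what such a handler typically looks like, following the standard Netty echo-client example (the message text and logging are assumptions, not the project's exact class):

// Sketch of an echo client handler; details are illustrative.
@Sharable
public class EchoClientHandler extends SimpleChannelInboundHandler<ByteBuf> {

    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        // Send a message as soon as the connection is established
        ctx.writeAndFlush(Unpooled.copiedBuffer("Netty rocks!", CharsetUtil.UTF_8));
    }

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, ByteBuf in) {
        // Print whatever the server echoes back
        System.out.println("Client received: " + in.toString(CharsetUtil.UTF_8));
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        cause.printStackTrace();
        ctx.close();
    }
}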
use of org.apache.flink.shaded.netty4.io.netty.bootstrap.Bootstrap in project async-http-client by AsyncHttpClient.
the class NettyRequestSender method sendRequestWithNewChannel.
private <T> ListenableFuture<T> sendRequestWithNewChannel(Request request,
                                                          ProxyServer proxy,
                                                          NettyResponseFuture<T> future,
                                                          AsyncHandler<T> asyncHandler,
                                                          boolean performingNextRequest) {
    // some headers are only set when performing the first request
    HttpHeaders headers = future.getNettyRequest().getHttpRequest().headers();
    Realm realm = future.getRealm();
    Realm proxyRealm = future.getProxyRealm();
    requestFactory.addAuthorizationHeader(headers, perConnectionAuthorizationHeader(request, proxy, realm));
    requestFactory.setProxyAuthorizationHeader(headers, perConnectionProxyAuthorizationHeader(request, proxyRealm));
    future.setInAuth(realm != null && realm.isUsePreemptiveAuth() && realm.getScheme() != AuthScheme.NTLM);
    future.setInProxyAuth(proxyRealm != null && proxyRealm.isUsePreemptiveAuth() && proxyRealm.getScheme() != AuthScheme.NTLM);

    // Do not throw an exception when we need an extra connection for a redirect
    // FIXME why? This violates the max connection per host handling, right?
    Bootstrap bootstrap = channelManager.getBootstrap(request.getUri(), proxy);
    Object partitionKey = future.getPartitionKey();

    // we disable channel preemption when performing next requests
    final boolean acquireChannelLock = !performingNextRequest;

    try {
        // Do not throw an exception when we need an extra connection for a redirect.
        if (acquireChannelLock) {
            // if there's an exception here, channel wasn't preempted and resolve won't happen
            channelManager.acquireChannelLock(partitionKey);
        }
    } catch (Throwable t) {
        abort(null, future, getCause(t));
        // exit and don't try to resolve address
        return future;
    }

    scheduleRequestTimeout(future);

    RequestHostnameResolver.INSTANCE.resolve(request, proxy, asyncHandler).addListener(new SimpleFutureListener<List<InetSocketAddress>>() {
        @Override
        protected void onSuccess(List<InetSocketAddress> addresses) {
            NettyConnectListener<T> connectListener = new NettyConnectListener<>(future, NettyRequestSender.this, channelManager, acquireChannelLock, partitionKey);
            NettyChannelConnector connector = new NettyChannelConnector(request.getLocalAddress(), addresses, asyncHandler, clientState, config);
            if (!future.isDone()) {
                connector.connect(bootstrap, connectListener);
            } else if (acquireChannelLock) {
                channelManager.releaseChannelLock(partitionKey);
            }
        }

        @Override
        protected void onFailure(Throwable cause) {
            if (acquireChannelLock) {
                channelManager.releaseChannelLock(partitionKey);
            }
            abort(null, future, getCause(cause));
        }
    });
    return future;
}
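The channel-lock discipline above (acquire before the asynchronous hostname resolution, release on every path that does not hand ownership to the connect listener) can be illustrated generically with a plain Semaphore. This is a self-contained sketch of the pattern, not async-http-client's ChannelManager API; all names are illustrative.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;

// Generic sketch: acquire a permit before the async step, release it on every path
// that does not pass ownership onward.
public final class ChannelLockSketch {

    private final Semaphore permits = new Semaphore(1); // one in-flight connect per partition

    public CompletableFuture<Boolean> send(String host) {
        if (!permits.tryAcquire()) {
            // mirrors the abort(...) path: nothing was acquired, so there is nothing to release
            CompletableFuture<Boolean> failed = new CompletableFuture<>();
            failed.completeExceptionally(new IllegalStateException("no permit for " + host));
            return failed;
        }
        return CompletableFuture
                .supplyAsync(() -> "192.0.2.1")      // stands in for hostname resolution
                .handle((address, error) -> {
                    if (error != null) {
                        permits.release();           // resolution failed: release the permit
                        return false;
                    }
                    // resolution succeeded: in the real code ownership passes to the connect
                    // listener, which releases the permit later; the sketch releases it here
                    // to stay self-contained
                    permits.release();
                    return true;
                });
    }
}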