Use of org.apache.flink.shaded.netty4.io.netty.channel.epoll.EpollEventLoopGroup in project pravega by pravega.
The class AbstractConnectionListener, method startListening.
/**
* Initializes the connection listener internals and starts listening.
*/
public void startListening() {
final AtomicReference<SslContext> sslCtx = this.enableTls
        ? new AtomicReference<>(TLSHelper.newServerSslContext(pathToTlsCertFile, pathToTlsKeyFile, tlsProtocolVersion))
        : null;
boolean nio = false;
try {
bossGroup = new EpollEventLoopGroup(1);
workerGroup = new EpollEventLoopGroup();
} catch (ExceptionInInitializerError | UnsatisfiedLinkError | NoClassDefFoundError e) {
nio = true;
bossGroup = new NioEventLoopGroup(1);
workerGroup = new NioEventLoopGroup();
}
ServerBootstrap b = new ServerBootstrap();
b.group(bossGroup, workerGroup)
 .channel(nio ? NioServerSocketChannel.class : EpollServerSocketChannel.class)
 .option(ChannelOption.SO_BACKLOG, 100)
 .handler(new LoggingHandler(LogLevel.INFO))
 .childHandler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) {
ChannelPipeline p = ch.pipeline();
// Add SslHandler to the channel's pipeline, if TLS is enabled.
if (enableTls) {
SslHandler sslHandler = sslCtx.get().newHandler(ch.alloc());
// We add a name to SSL/TLS handler, unlike the other handlers added later, to make it
// easier to find and replace the handler.
p.addLast(TLSHelper.TLS_HANDLER_NAME, sslHandler);
}
// Configure the class-specific encoder stack and request processors.
ServerConnectionInboundHandler lsh = new ServerConnectionInboundHandler();
createEncodingStack(ch.remoteAddress().toString()).forEach(p::addLast);
lsh.setRequestProcessor(createRequestProcessor(new TrackedConnection(lsh, connectionTracker)));
p.addLast(lsh);
}
});
if (enableTls && enableTlsReload) {
enableTlsContextReload(sslCtx);
}
// Start the server.
serverChannel = b.bind(host, port).awaitUninterruptibly().channel();
if (healthServiceManager != null) {
healthServiceManager.register(new ConnectionListenerHealthContributor(this));
}
}
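
The try/catch above falls back to NIO when the epoll native transport cannot be loaded. The same decision can be made up front with Epoll.isAvailable(); the sketch below is a hypothetical helper illustrating that variant. It is not part of Pravega, and the imports use the plain io.netty packages rather than the shaded prefix shown in the heading.

import io.netty.channel.EventLoopGroup;
import io.netty.channel.ServerChannel;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollServerSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;

// Hypothetical helper: probe epoll availability once instead of catching linkage errors.
final class TransportSelector {

    static EventLoopGroup newEventLoopGroup(int threads) {
        // Epoll.isAvailable() is false on non-Linux platforms or when the native library is missing.
        return Epoll.isAvailable() ? new EpollEventLoopGroup(threads) : new NioEventLoopGroup(threads);
    }

    static Class<? extends ServerChannel> serverChannelClass() {
        return Epoll.isAvailable() ? EpollServerSocketChannel.class : NioServerSocketChannel.class;
    }
}

Whichever variant is used, the boss and worker groups created in startListening() should eventually be released with shutdownGracefully() when the listener is closed.
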
Use of org.apache.flink.shaded.netty4.io.netty.channel.epoll.EpollEventLoopGroup in project pravega by pravega.
The class ConnectionPoolingTest, method setUp.
@Before
public void setUp() throws Exception {
// Configure SSL.
port = TestUtils.getAvailableListenPort();
final SslContext sslCtx;
if (ssl) {
try {
sslCtx = SslContextBuilder.forServer(
        new File(SecurityConfigDefaults.TLS_SERVER_CERT_PATH),
        new File(SecurityConfigDefaults.TLS_SERVER_PRIVATE_KEY_PATH)).build();
} catch (SSLException e) {
throw new RuntimeException(e);
}
} else {
sslCtx = null;
}
boolean nio = false;
EventLoopGroup bossGroup;
EventLoopGroup workerGroup;
try {
bossGroup = new EpollEventLoopGroup(1);
workerGroup = new EpollEventLoopGroup();
} catch (ExceptionInInitializerError | UnsatisfiedLinkError | NoClassDefFoundError e) {
nio = true;
bossGroup = new NioEventLoopGroup(1);
workerGroup = new NioEventLoopGroup();
}
ServerBootstrap b = new ServerBootstrap();
b.group(bossGroup, workerGroup)
 .channel(nio ? NioServerSocketChannel.class : EpollServerSocketChannel.class)
 .option(ChannelOption.SO_BACKLOG, 100)
 .handler(new LoggingHandler(LogLevel.INFO))
 .childHandler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
if (sslCtx != null) {
SslHandler handler = sslCtx.newHandler(ch.alloc());
SSLEngine sslEngine = handler.engine();
SSLParameters sslParameters = sslEngine.getSSLParameters();
sslParameters.setEndpointIdentificationAlgorithm("LDAPS");
sslEngine.setSSLParameters(sslParameters);
p.addLast(handler);
}
p.addLast(
        new CommandEncoder(null, NO_OP_METRIC_NOTIFIER),
        new LengthFieldBasedFrameDecoder(MAX_WIRECOMMAND_SIZE, 4, 4),
        new CommandDecoder(),
        new EchoServerHandler());
}
});
// Start the server.
serverChannel = b.bind("localhost", port).awaitUninterruptibly().channel();
}
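
In this test the boss and worker groups are local variables, so the snippet alone cannot release them. Below is a minimal cleanup sketch, assuming the test promotes the groups and serverChannel to fields; the field and method names are illustrative, not taken from the Pravega test.

import org.junit.After;

// Hypothetical cleanup, assuming bossGroup, workerGroup, and serverChannel are fields of the test.
@After
public void tearDown() throws Exception {
    if (serverChannel != null) {
        serverChannel.close().syncUninterruptibly();
    }
    // shutdownGracefully() is asynchronous; waiting keeps the listen port from leaking into the next test.
    bossGroup.shutdownGracefully().syncUninterruptibly();
    workerGroup.shutdownGracefully().syncUninterruptibly();
}
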
Use of org.apache.flink.shaded.netty4.io.netty.channel.epoll.EpollEventLoopGroup in project beam by apache.
The class ManagedChannelFactory, method forDescriptor.
public ManagedChannel forDescriptor(ApiServiceDescriptor apiServiceDescriptor) {
ManagedChannelBuilder<?> channelBuilder;
switch(type) {
case EPOLL:
SocketAddress address = SocketAddressFactory.createFrom(apiServiceDescriptor.getUrl());
channelBuilder = NettyChannelBuilder.forAddress(address)
        .channelType(address instanceof DomainSocketAddress ? EpollDomainSocketChannel.class : EpollSocketChannel.class)
        .eventLoopGroup(new EpollEventLoopGroup());
break;
case DEFAULT:
channelBuilder = ManagedChannelBuilder.forTarget(apiServiceDescriptor.getUrl());
break;
case IN_PROCESS:
channelBuilder = InProcessChannelBuilder.forName(apiServiceDescriptor.getUrl());
break;
default:
throw new IllegalStateException("Unknown type " + type);
}
channelBuilder = channelBuilder.usePlaintext().maxInboundMessageSize(Integer.MAX_VALUE).intercept(interceptors);
if (directExecutor) {
channelBuilder = channelBuilder.directExecutor();
}
return channelBuilder.build();
}
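
A caller builds an ApiServiceDescriptor and hands it to the factory. The sketch below is illustrative only: it assumes a static createDefault() constructor as found in recent Beam releases, a hypothetical localhost endpoint, and the plain io.grpc.ManagedChannel type (depending on the Beam version, the channel type may come from Beam's vendored gRPC package instead).

import org.apache.beam.model.pipeline.v1.Endpoints.ApiServiceDescriptor;
import io.grpc.ManagedChannel;

// Hypothetical usage; the endpoint URL is an assumption. For the EPOLL type, the URL may
// also name a domain socket (e.g. a unix:// path handled by SocketAddressFactory.createFrom).
static ManagedChannel openChannel() {
    ApiServiceDescriptor descriptor = ApiServiceDescriptor.newBuilder()
            .setUrl("localhost:50051")
            .build();
    return ManagedChannelFactory.createDefault().forDescriptor(descriptor);
}

Note the design choice in forDescriptor: usePlaintext() means no TLS is negotiated on the channel, and maxInboundMessageSize(Integer.MAX_VALUE) removes gRPC's default inbound message size cap.
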
Use of org.apache.flink.shaded.netty4.io.netty.channel.epoll.EpollEventLoopGroup in project aerospike-client-java by aerospike.
The class SuiteAsync, method init.
@BeforeClass
public static void init() {
System.out.println("Begin AerospikeClient");
Args args = Args.Instance;
EventPolicy eventPolicy = new EventPolicy();
switch(args.eventLoopType) {
default:
case DIRECT_NIO:
{
eventLoops = new NioEventLoops(eventPolicy, 1);
break;
}
case NETTY_NIO:
{
EventLoopGroup group = new NioEventLoopGroup(1);
eventLoops = new NettyEventLoops(eventPolicy, group, args.eventLoopType);
break;
}
case NETTY_EPOLL:
{
EventLoopGroup group = new EpollEventLoopGroup(1);
eventLoops = new NettyEventLoops(eventPolicy, group, args.eventLoopType);
break;
}
case NETTY_KQUEUE:
{
EventLoopGroup group = new KQueueEventLoopGroup(1);
eventLoops = new NettyEventLoops(eventPolicy, group, args.eventLoopType);
break;
}
case NETTY_IOURING:
{
EventLoopGroup group = new IOUringEventLoopGroup(1);
eventLoops = new NettyEventLoops(eventPolicy, group, args.eventLoopType);
break;
}
}
try {
ClientPolicy policy = new ClientPolicy();
policy.eventLoops = eventLoops;
policy.user = args.user;
policy.password = args.password;
policy.authMode = args.authMode;
policy.tlsPolicy = args.tlsPolicy;
Host[] hosts = Host.parseHosts(args.host, args.port);
eventLoop = eventLoops.get(0);
client = new AerospikeClient(policy, hosts);
try {
args.setServerSpecific(client);
} catch (RuntimeException re) {
client.close();
throw re;
}
} catch (Exception e) {
eventLoops.close();
throw e;
}
}
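
Once init() has selected the event loops, the suite's tests issue asynchronous calls against them, and the client and loops eventually have to be closed in the reverse order of creation. The following is an illustrative sketch of both, not the suite's actual code; the namespace, set name, bin values, and listener handling are assumptions.

import com.aerospike.client.AerospikeException;
import com.aerospike.client.Bin;
import com.aerospike.client.Key;
import com.aerospike.client.listener.WriteListener;
import org.junit.AfterClass;

// Example async write dispatched on the event loop chosen above; names are illustrative.
static void asyncPutExample() {
    Key key = new Key("test", "demoset", "key1");
    client.put(eventLoop, new WriteListener() {
        @Override
        public void onSuccess(Key k) {
            System.out.println("wrote " + k);
        }

        @Override
        public void onFailure(AerospikeException e) {
            e.printStackTrace();
        }
    }, null, key, new Bin("bin1", "value1")); // null policy uses the client's default WritePolicy
}

// Hypothetical teardown mirroring init(): close the client before its event loops.
@AfterClass
public static void shutdown() {
    client.close();
    eventLoops.close();
}
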
Use of org.apache.flink.shaded.netty4.io.netty.channel.epoll.EpollEventLoopGroup in project riposte by Nike-Inc.
The class StreamingAsyncHttpClient, method getPoolMap.
protected ChannelPoolMap<InetSocketAddress, SimpleChannelPool> getPoolMap() {
ChannelPoolMap<InetSocketAddress, SimpleChannelPool> result = poolMap;
if (poolMap == null) {
/*
This method gets called for every downstream call, so we don't want to synchronize the whole method. But
it's easy for multiple threads to get here at the same time when the server starts up, so we need *some*
kind of protection around the creation of poolMap, hence the elaborate (but correct) double-checked
locking. Since poolMap is volatile this works, and the local variable "result" helps with speed during
the normal case where poolMap has already been initialized.
See https://en.wikipedia.org/wiki/Double-checked_locking
*/
synchronized (this) {
result = poolMap;
if (result == null) {
EventLoopGroup eventLoopGroup;
Class<? extends SocketChannel> channelClass;
if (Epoll.isAvailable()) {
logger.info("Creating channel pool. The epoll native transport is available. Using epoll instead of " + "NIO. proxy_router_using_native_epoll_transport=true");
eventLoopGroup = new EpollEventLoopGroup(0, createProxyRouterThreadFactory());
channelClass = EpollSocketChannel.class;
} else {
logger.info("Creating channel pool. The epoll native transport is NOT available or you are not running " + "on a compatible OS/architecture. Using NIO. " + "proxy_router_using_native_epoll_transport=false");
eventLoopGroup = new NioEventLoopGroup(0, createProxyRouterThreadFactory());
channelClass = NioSocketChannel.class;
}
result = new AbstractChannelPoolMap<InetSocketAddress, SimpleChannelPool>() {
@Override
protected SimpleChannelPool newPool(InetSocketAddress key) {
return new SimpleChannelPool(
        generateClientBootstrap(eventLoopGroup, channelClass).remoteAddress(key),
        new ChannelPoolHandlerImpl(),
        CHANNEL_HEALTH_CHECK_INSTANCE) {
@Override
public Future<Void> release(Channel channel, Promise<Void> promise) {
markChannelBrokenAndLogInfoIfHttpClientCodecStateIsNotZero(channel, "Releasing channel back to pool");
return super.release(channel, promise);
}
@Override
protected Channel pollChannel() {
Channel channel = super.pollChannel();
if (channel != null) {
markChannelBrokenAndLogInfoIfHttpClientCodecStateIsNotZero(channel, "Polling channel to be reused before healthcheck");
if (idleChannelTimeoutMillis > 0) {
/*
We have a channel that is about to be re-used, so disable the idle channel
timeout detector if it exists. By disabling it here we make sure that it is
effectively "gone" before the healthcheck happens, preventing race
conditions. Note that we can't call pipeline.remove() here because we may
not be in the pipeline's event loop, so calling pipeline.remove() could
lead to thread deadlock, but we can't call channel.eventLoop().execute()
because we need it disabled *now* before the healthcheck happens. The
pipeline preparation phase will remove it safely soon, and in the meantime
it will be disabled.
*/
ChannelPipeline pipeline = channel.pipeline();
ChannelHandler idleHandler = pipeline.get(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME);
if (idleHandler != null) {
((DownstreamIdleChannelTimeoutHandler) idleHandler).disableTimeoutHandling();
}
}
}
return channel;
}
@Override
protected boolean offerChannel(Channel channel) {
if (idleChannelTimeoutMillis > 0) {
// Add an idle channel timeout detector. This will be removed before the
// channel's reacquisition healthcheck runs (in pollChannel()), so we won't
// have a race condition where this channel is handed over for use but gets
// squashed right before it's about to be used.
// NOTE: Due to the semantics of pool.release() we're guaranteed to be in the
// channel's event loop, so there's no chance of a thread deadlock when
// messing with the pipeline.
channel.pipeline().addFirst(
        DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME,
        new DownstreamIdleChannelTimeoutHandler(idleChannelTimeoutMillis, () -> true, false,
                "StreamingAsyncHttpClientChannel-idle", null, null));
}
return super.offerChannel(channel);
}
};
}
};
poolMap = result;
}
}
}
return result;
}
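
Callers look a pool up by downstream address, acquire a channel, and hand it back when the response completes; the overridden release(), pollChannel(), and offerChannel() hooks above then take care of the codec-state check and the idle-timeout handler. A blocking sketch of that flow follows (the real riposte client does this asynchronously; the method and variable names here are illustrative):

import io.netty.channel.Channel;
import io.netty.channel.pool.SimpleChannelPool;
import java.net.InetSocketAddress;

// Hedged usage sketch; blocks for brevity rather than chaining futures as the real client does.
void sendOverPooledChannel(InetSocketAddress downstreamAddress) {
    SimpleChannelPool pool = getPoolMap().get(downstreamAddress);
    // acquire() either reuses an idle channel (after pollChannel() disables its idle-timeout
    // handler) or connects a new one via the bootstrap configured in newPool().
    Channel ch = pool.acquire().syncUninterruptibly().getNow();
    try {
        // ... write the proxied request on ch and wait for the response ...
    } finally {
        // release() runs the codec-state check, then offerChannel() re-arms the idle-timeout handler.
        pool.release(ch).syncUninterruptibly();
    }
}
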