use of org.apache.flink.shaded.netty4.io.netty.channel.socket.SocketChannel in project bgpcep by opendaylight.
the class PCEPDispatcherImpl method createServerBootstrap.
synchronized ServerBootstrap createServerBootstrap(final ChannelPipelineInitializer initializer) {
    final ServerBootstrap b = new ServerBootstrap();
    b.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(final SocketChannel ch) {
            initializer.initializeChannel(ch, new DefaultPromise<>(PCEPDispatcherImpl.this.executor));
        }
    });
    b.option(ChannelOption.SO_BACKLOG, SOCKET_BACKLOG_SIZE);
    b.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    if (Epoll.isAvailable()) {
        b.channel(EpollServerSocketChannel.class);
        b.childOption(EpollChannelOption.EPOLL_MODE, EpollMode.LEVEL_TRIGGERED);
    } else {
        b.channel(NioServerSocketChannel.class);
    }
    if (!this.keys.isEmpty()) {
        if (Epoll.isAvailable()) {
            // TCP MD5 signatures are only supported by the native epoll transport.
            b.option(EpollChannelOption.TCP_MD5SIG, this.keys);
        } else {
            throw new UnsupportedOperationException(Epoll.unavailabilityCause().getCause());
        }
    }
    // Make sure we are doing round-robin processing.
    b.childOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(1));
    if (b.config().group() == null) {
        b.group(this.bossGroup, this.workerGroup);
    }
    return b;
}
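A minimal sketch of how a caller might bind the bootstrap returned above; the initializer argument, the LOG field, and the error handling are illustrative, while 4189 is the IANA-assigned PCEP port.

final ServerBootstrap bootstrap = createServerBootstrap(initializer);
bootstrap.bind(new InetSocketAddress(4189)).addListener((ChannelFuture future) -> {
    if (!future.isSuccess()) {
        LOG.error("Failed to bind PCEP server", future.cause()); // LOG is a hypothetical logger
    }
});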
use of org.apache.flink.shaded.netty4.io.netty.channel.socket.SocketChannel in project incubator-pulsar by apache.
the class MockBrokerService method startMockBrokerService.
public void startMockBrokerService() throws Exception {
    ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("mock-pulsar-%s").build();
    final int numThreads = 2;
    final int maxMessageSize = 5 * 1024 * 1024;
    workerGroup = EventLoopUtil.newEventLoopGroup(numThreads, threadFactory);
    ServerBootstrap bootstrap = new ServerBootstrap();
    // The same group serves as both acceptor and worker group.
    bootstrap.group(workerGroup, workerGroup);
    bootstrap.channel(EventLoopUtil.getServerSocketChannelClass(workerGroup));
    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel ch) throws Exception {
            ch.pipeline().addLast("frameDecoder", new LengthFieldBasedFrameDecoder(maxMessageSize, 0, 4, 0, 4));
            ch.pipeline().addLast("handler", new MockServerCnx());
        }
    });
    // Bind and start to accept incoming connections.
    bootstrap.bind(brokerServicePort).sync();
}
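The server pipeline above strips a 4-byte length prefix from every frame. A minimal sketch of a client whose outbound framing matches it, built from Netty's stock handlers; the LoggingHandler merely stands in for a real protocol handler:

Bootstrap client = new Bootstrap();
client.group(new NioEventLoopGroup());
client.channel(NioSocketChannel.class);
client.handler(new ChannelInitializer<SocketChannel>() {
    @Override
    protected void initChannel(SocketChannel ch) {
        // LengthFieldPrepender(4) writes the 4-byte length prefix that the server's
        // LengthFieldBasedFrameDecoder(maxMessageSize, 0, 4, 0, 4) strips on arrival.
        ch.pipeline().addLast("frameEncoder", new LengthFieldPrepender(4));
        ch.pipeline().addLast("frameDecoder", new LengthFieldBasedFrameDecoder(5 * 1024 * 1024, 0, 4, 0, 4));
        ch.pipeline().addLast("logger", new LoggingHandler()); // placeholder for a real handler
    }
});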
use of org.apache.flink.shaded.netty4.io.netty.channel.socket.SocketChannel in project incubator-pulsar by apache.
the class DiscoveryServiceTest method connectToService.
/**
 * Creates a client channel to connect and communicate with the server.
 *
 * @param serviceUrl the broker service URL to connect to
 * @param latch latch handed to the {@link ClientHandler} so the test can await a server response
 * @param tls whether to add a TLS handler to the pipeline
 * @return the {@link NioEventLoopGroup} backing the client connection
 * @throws URISyntaxException if the service URL is malformed
 */
public static NioEventLoopGroup connectToService(String serviceUrl, CountDownLatch latch, boolean tls) throws URISyntaxException {
    NioEventLoopGroup workerGroup = new NioEventLoopGroup();
    Bootstrap b = new Bootstrap();
    b.group(workerGroup);
    b.channel(NioSocketChannel.class);
    b.handler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel ch) throws Exception {
            if (tls) {
                SslContextBuilder builder = SslContextBuilder.forClient();
                builder.trustManager(InsecureTrustManagerFactory.INSTANCE);
                X509Certificate[] certificates = SecurityUtility.loadCertificatesFromPemFile(TLS_CLIENT_CERT_FILE_PATH);
                PrivateKey privateKey = SecurityUtility.loadPrivateKeyFromPemFile(TLS_CLIENT_KEY_FILE_PATH);
                builder.keyManager(privateKey, certificates);
                SslContext sslCtx = builder.build();
                ch.pipeline().addLast("tls", sslCtx.newHandler(ch.alloc()));
            }
            ch.pipeline().addLast(new ClientHandler(latch));
        }
    });
    URI uri = new URI(serviceUrl);
    InetSocketAddress serviceAddress = new InetSocketAddress(uri.getHost(), uri.getPort());
    b.connect(serviceAddress).addListener((ChannelFuture future) -> {
        if (!future.isSuccess()) {
            throw new IllegalStateException(future.cause());
        }
    });
    return workerGroup;
}
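A sketch of how a test might drive this helper; the service URL and timeout are illustrative, and it assumes the ClientHandler counts the latch down once the server responds:

CountDownLatch latch = new CountDownLatch(1);
NioEventLoopGroup group = connectToService("pulsar://localhost:6650", latch, false);
try {
    // Assumption: ClientHandler calls latch.countDown() on a server response.
    assertTrue(latch.await(10, TimeUnit.SECONDS));
} finally {
    group.shutdownGracefully();
}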
use of org.apache.flink.shaded.netty4.io.netty.channel.socket.SocketChannel in project java-driver by datastax.
the class TimeoutStressTest method host_state_should_be_maintained_with_timeouts.
/**
 * Validates that under extreme timeout conditions the driver is able to properly maintain connection pools in
 * addition to not leaking connections.
 * <p/>
 * Does the following:
 * <ol>
 * <li>Creates a table and loads 30k rows in a single partition.</li>
 * <li>Sets the connection and read timeout {@link SocketOptions} to very low values.</li>
 * <li>Spawns workers that concurrently execute queries.</li>
 * <li>For some duration, repeatedly measures the number of open socket connections and warns if it exceeds the expected maximum.</li>
 * <li>After that duration, resets the {@link SocketOptions} to their defaults.</li>
 * <li>Waits 20 seconds for the reaper to remove old connections and restore the pools.</li>
 * <li>Ensures the pools are restored.</li>
 * <li>Shuts down the session and ensures that only 1 open connection remains.</li>
 * </ol>
 *
 * @test_category connection:connection_pool
 * @expected_result no connections leak and all host pools are maintained.
 * @jira_ticket JAVA-692
 * @since 2.0.10, 2.1.6
 */
@Test(groups = "stress")
public void host_state_should_be_maintained_with_timeouts() throws Exception {
    insertRecords();
    session().close();
    // Set very low timeouts.
    cluster().getConfiguration().getSocketOptions().setConnectTimeoutMillis(CONNECTION_TIMEOUT_IN_MS);
    cluster().getConfiguration().getSocketOptions().setReadTimeoutMillis(READ_TIMEOUT_IN_MS);
    Session newSession = cluster().connect(keyspace);
    PreparedStatement statement = newSession.prepare("select * from record where name=? limit 1000;");
    int workers = Runtime.getRuntime().availableProcessors();
    ExecutorService workerPool = Executors.newFixedThreadPool(workers,
            new ThreadFactoryBuilder().setNameFormat("timeout-stress-test-worker-%d").setDaemon(true).build());
    AtomicBoolean stopped = new AtomicBoolean(false);
    // Ensure that we never exceed MaxConnectionsPerHost * nodes + 1 control connection.
    int maxConnections = TestUtils.numberOfLocalCoreConnections(cluster()) * getContactPoints().size() + 1;
    try {
        Semaphore concurrentQueries = new Semaphore(CONCURRENT_QUERIES);
        for (int i = 0; i < workers; i++) {
            workerPool.submit(new TimeoutStressWorker(newSession, statement, concurrentQueries, stopped));
        }
        long startTime = System.currentTimeMillis();
        while (System.currentTimeMillis() - startTime < DURATION) {
            Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
            channelMonitor.report();
            // Some connections that are being closed may have had active requests which are delegated to the
            // reaper for cleanup later, so allow for a race between retrieving open connections and the reaper
            // closing them.
            Collection<SocketChannel> openChannels = channelMonitor.openChannels(getContactPointsWithPorts());
            if (openChannels.size() > maxConnections) {
                logger.warn("Number of open channels: {} exceeds maximum expected: {}. "
                        + "This could be because there are connections to be cleaned up in the reaper. Open channels: {}.",
                        openChannels.size(), maxConnections, openChannels);
            }
        }
    } finally {
        stopped.set(true);
        // Reset socket timeouts to allow the pool to recover.
        cluster().getConfiguration().getSocketOptions().setConnectTimeoutMillis(SocketOptions.DEFAULT_CONNECT_TIMEOUT_MILLIS);
        cluster().getConfiguration().getSocketOptions().setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS);
        logger.debug("Sleeping 20 seconds to allow connection reaper to clean up connections "
                + "and for the pools to recover.");
        Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS);
        Collection<SocketChannel> openChannels = channelMonitor.openChannels(getContactPointsWithPorts());
        assertThat(openChannels.size())
                .as("Number of open connections does not meet expected: %s", openChannels)
                .isLessThanOrEqualTo(maxConnections);
        // Each host should be in an up state.
        assertThat(cluster()).host(1).comesUpWithin(0, TimeUnit.SECONDS);
        assertThat(cluster()).host(2).comesUpWithin(0, TimeUnit.SECONDS);
        assertThat(cluster()).host(3).comesUpWithin(0, TimeUnit.SECONDS);
        newSession.close();
        openChannels = channelMonitor.openChannels(getContactPointsWithPorts());
        assertThat(openChannels.size())
                .as("Number of open connections does not meet expected: %s", openChannels)
                .isEqualTo(1);
        workerPool.shutdown();
    }
}
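The test leans on channelMonitor to count live sockets per endpoint. A hedged sketch of the bookkeeping such a monitor could perform; the driver's actual test utility may differ:

class ChannelBookkeeper {
    private final Set<SocketChannel> channels = Collections.newSetFromMap(new ConcurrentHashMap<>());

    void register(SocketChannel channel) {
        channels.add(channel);
        // Drop the channel from the set as soon as Netty reports it closed.
        channel.closeFuture().addListener(f -> channels.remove(channel));
    }

    Collection<SocketChannel> openChannels(Collection<InetSocketAddress> endpoints) {
        return channels.stream()
                .filter(ch -> ch.isOpen() && endpoints.contains(ch.remoteAddress()))
                .collect(Collectors.toList());
    }
}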
use of org.apache.flink.shaded.netty4.io.netty.channel.socket.SocketChannel in project java-driver by datastax.
the class NettyOptionsTest method should_invoke_netty_options_hooks.
private void should_invoke_netty_options_hooks(int hosts, int coreConnections) throws Exception {
    NettyOptions nettyOptions = mock(NettyOptions.class, CALLS_REAL_METHODS.get());
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    Timer timer = new HashedWheelTimer();
    doReturn(eventLoopGroup).when(nettyOptions).eventLoopGroup(any(ThreadFactory.class));
    doReturn(timer).when(nettyOptions).timer(any(ThreadFactory.class));
    final ChannelHandler handler = mock(ChannelHandler.class);
    doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            SocketChannel channel = (SocketChannel) invocation.getArguments()[0];
            channel.pipeline().addLast("test-handler", handler);
            return null;
        }
    }).when(nettyOptions).afterChannelInitialized(any(SocketChannel.class));
    Cluster cluster = register(Cluster.builder()
            .addContactPoints(getContactPoints().get(0))
            .withPort(ccm().getBinaryPort())
            .withPoolingOptions(new PoolingOptions().setConnectionsPerHost(HostDistance.LOCAL, coreConnections, coreConnections))
            .withNettyOptions(nettyOptions)
            .build());
    // when
    // force session creation to populate pools
    cluster.connect();
    int expectedNumberOfCalls = TestUtils.numberOfLocalCoreConnections(cluster) * hosts + 1;
    // If the driver supports a more recent protocol version than C*, the negotiation at startup
    // will open an additional connection for each protocol version tried.
    ProtocolVersion version = ProtocolVersion.NEWEST_SUPPORTED;
    ProtocolVersion usedVersion = ccm().getProtocolVersion();
    while (version != usedVersion && version != null) {
        version = version.getLowerSupported();
        expectedNumberOfCalls++;
    }
    cluster.close();
    // then
    verify(nettyOptions, times(1)).eventLoopGroup(any(ThreadFactory.class));
    verify(nettyOptions, times(1)).channelClass();
    verify(nettyOptions, times(1)).timer(any(ThreadFactory.class));
    // Per-connection hooks are called once per core connection per host, plus once for the
    // control connection and once for each protocol version tried during negotiation.
    verify(nettyOptions, times(expectedNumberOfCalls)).afterBootstrapInitialized(any(Bootstrap.class));
    verify(nettyOptions, times(expectedNumberOfCalls)).afterChannelInitialized(any(SocketChannel.class));
    verify(handler, times(expectedNumberOfCalls)).handlerAdded(any(ChannelHandlerContext.class));
    verify(handler, times(expectedNumberOfCalls)).handlerRemoved(any(ChannelHandlerContext.class));
    verify(nettyOptions, times(1)).onClusterClose(eventLoopGroup);
    verify(nettyOptions, times(1)).onClusterClose(timer);
    verifyNoMoreInteractions(nettyOptions);
}
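Outside a mock-based test, the same hook is typically exercised by subclassing NettyOptions. A minimal sketch; the contact point is illustrative and the LoggingHandler merely stands in for a custom handler:

Cluster cluster = Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withNettyOptions(new NettyOptions() {
            @Override
            public void afterChannelInitialized(SocketChannel channel) {
                // Invoked once per connection, after the driver has installed its own handlers.
                channel.pipeline().addLast("logging", new LoggingHandler());
            }
        })
        .build();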