use of io.netty.channel.socket.SocketChannel in project java-driver by datastax.
the class TimeoutStressTest method host_state_should_be_maintained_with_timeouts.
/**
 * Validates that under extreme timeout conditions the driver is able to properly maintain connection pools in
 * addition to not leaking connections.
 * <p/>
* Does the following:
* <ol>
* <li>Creates a table and loads 30k rows in a single partition.</li>
* <li>Sets the connection and read timeout {@link SocketOptions} to very low values.</li>
* <li>Spawns workers that concurrently execute queries.</li>
 * <li>For some duration, repeatedly measures the number of open socket connections and warns if it exceeds the expected maximum.</li>
 * <li>After the duration elapses, resets {@link SocketOptions} to defaults.</li>
 * <li>Waits 20 seconds for the reaper to remove old connections and restore pools.</li>
 * <li>Ensures pools are restored.</li>
 * <li>Shuts down the session and ensures that only 1 open connection remains.</li>
* </ol>
*
* @test_category connection:connection_pool
* @expected_result no connections leak and all host pools are maintained.
* @jira_ticket JAVA-692
* @since 2.0.10, 2.1.6
*/
@Test(groups = "stress")
public void host_state_should_be_maintained_with_timeouts() throws Exception {
    insertRecords();
    session().close();
    // Set very low timeouts.
    cluster().getConfiguration().getSocketOptions().setConnectTimeoutMillis(CONNECTION_TIMEOUT_IN_MS);
    cluster().getConfiguration().getSocketOptions().setReadTimeoutMillis(READ_TIMEOUT_IN_MS);
    Session newSession = cluster().connect(keyspace);
    PreparedStatement statement = newSession.prepare("select * from record where name=? limit 1000;");
    int workers = Runtime.getRuntime().availableProcessors();
    ExecutorService workerPool = Executors.newFixedThreadPool(workers,
            new ThreadFactoryBuilder().setNameFormat("timeout-stress-test-worker-%d").setDaemon(true).build());
    AtomicBoolean stopped = new AtomicBoolean(false);
    // Ensure that we never exceed MaxConnectionsPerHost * nodes + 1 control connection.
    int maxConnections = TestUtils.numberOfLocalCoreConnections(cluster()) * getContactPoints().size() + 1;
    try {
        Semaphore concurrentQueries = new Semaphore(CONCURRENT_QUERIES);
        for (int i = 0; i < workers; i++) {
            workerPool.submit(new TimeoutStressWorker(newSession, statement, concurrentQueries, stopped));
        }
        long startTime = System.currentTimeMillis();
        while (System.currentTimeMillis() - startTime < DURATION) {
            Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
            channelMonitor.report();
            // Some connections that are being closed may have had active requests which are delegated to the
            // reaper for cleanup later, so there can be a small, temporary discrepancy between the open
            // connections retrieved here and the maximum expected.
            Collection<SocketChannel> openChannels = channelMonitor.openChannels(getContactPointsWithPorts());
            if (openChannels.size() > maxConnections) {
                logger.warn("Number of open channels ({}) exceeds maximum expected ({}): {}. "
                        + "This could be because there are connections to be cleaned up in the reaper.",
                        openChannels.size(), maxConnections, openChannels);
            }
        }
    } finally {
        stopped.set(true);
        // Reset socket timeouts to allow pool to recover.
        cluster().getConfiguration().getSocketOptions().setConnectTimeoutMillis(SocketOptions.DEFAULT_CONNECT_TIMEOUT_MILLIS);
        cluster().getConfiguration().getSocketOptions().setReadTimeoutMillis(SocketOptions.DEFAULT_READ_TIMEOUT_MILLIS);
        logger.debug("Sleeping 20 seconds to allow connection reaper to clean up connections "
                + "and for the pools to recover.");
        Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS);
        Collection<SocketChannel> openChannels = channelMonitor.openChannels(getContactPointsWithPorts());
        assertThat(openChannels.size())
                .as("Number of open connections does not meet expected: %s", openChannels)
                .isLessThanOrEqualTo(maxConnections);
        // Each host should be in an up state.
        assertThat(cluster()).host(1).comesUpWithin(0, TimeUnit.SECONDS);
        assertThat(cluster()).host(2).comesUpWithin(0, TimeUnit.SECONDS);
        assertThat(cluster()).host(3).comesUpWithin(0, TimeUnit.SECONDS);
        newSession.close();
        openChannels = channelMonitor.openChannels(getContactPointsWithPorts());
        assertThat(openChannels.size())
                .as("Number of open connections does not meet expected: %s", openChannels)
                .isEqualTo(1);
        workerPool.shutdown();
    }
}
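The TimeoutStressWorker class submitted to the worker pool above is not shown on this page. The following is a hypothetical sketch of how such a worker could throttle concurrent asynchronous queries with the shared Semaphore and keep running until the stopped flag is set; the class shape, the bound value "testKey", and the Guava Futures/FutureCallback usage are assumptions, not the driver's actual test code.

static class TimeoutStressWorker implements Runnable {
    private final Session session;
    private final PreparedStatement statement;
    private final Semaphore concurrentQueries;
    private final AtomicBoolean stopped;

    TimeoutStressWorker(Session session, PreparedStatement statement,
                        Semaphore concurrentQueries, AtomicBoolean stopped) {
        this.session = session;
        this.statement = statement;
        this.concurrentQueries = concurrentQueries;
        this.stopped = stopped;
    }

    @Override
    public void run() {
        while (!stopped.get()) {
            try {
                // Bound the number of in-flight queries across all workers.
                concurrentQueries.acquire();
                // "testKey" is a placeholder partition key for the prepared statement above.
                ResultSetFuture future = session.executeAsync(statement.bind("testKey"));
                // Uses Guava's Futures/FutureCallback, as elsewhere in the driver's test utilities.
                Futures.addCallback(future, new FutureCallback<ResultSet>() {
                    @Override
                    public void onSuccess(ResultSet result) {
                        concurrentQueries.release();
                    }

                    @Override
                    public void onFailure(Throwable t) {
                        // Timeouts are expected under these socket options; release and keep going.
                        concurrentQueries.release();
                    }
                });
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }
        }
    }
}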
use of io.netty.channel.socket.SocketChannel in project java-driver by datastax.
the class NettyOptionsTest method should_invoke_netty_options_hooks.
private void should_invoke_netty_options_hooks(int hosts, int coreConnections) throws Exception {
    NettyOptions nettyOptions = mock(NettyOptions.class, CALLS_REAL_METHODS.get());
    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    Timer timer = new HashedWheelTimer();
    doReturn(eventLoopGroup).when(nettyOptions).eventLoopGroup(any(ThreadFactory.class));
    doReturn(timer).when(nettyOptions).timer(any(ThreadFactory.class));
    final ChannelHandler handler = mock(ChannelHandler.class);
    doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            SocketChannel channel = (SocketChannel) invocation.getArguments()[0];
            channel.pipeline().addLast("test-handler", handler);
            return null;
        }
    }).when(nettyOptions).afterChannelInitialized(any(SocketChannel.class));
    Cluster cluster = register(Cluster.builder()
            .addContactPoints(getContactPoints().get(0))
            .withPort(ccm().getBinaryPort())
            .withPoolingOptions(new PoolingOptions().setConnectionsPerHost(HostDistance.LOCAL, coreConnections, coreConnections))
            .withNettyOptions(nettyOptions)
            .build());
    // when
    // force session creation to populate pools
    cluster.connect();
    int expectedNumberOfCalls = TestUtils.numberOfLocalCoreConnections(cluster) * hosts + 1;
    // If the driver supports a more recent protocol version than C*, the negotiation at startup
    // will open an additional connection for each protocol version tried.
    ProtocolVersion version = ProtocolVersion.NEWEST_SUPPORTED;
    ProtocolVersion usedVersion = ccm().getProtocolVersion();
    while (version != usedVersion && version != null) {
        version = version.getLowerSupported();
        expectedNumberOfCalls++;
    }
    cluster.close();
    // then
    verify(nettyOptions, times(1)).eventLoopGroup(any(ThreadFactory.class));
    verify(nettyOptions, times(1)).channelClass();
    verify(nettyOptions, times(1)).timer(any(ThreadFactory.class));
    // per-connection hooks will be called coreConnections * hosts + 1 times:
    // the extra call is for the control connection
    verify(nettyOptions, times(expectedNumberOfCalls)).afterBootstrapInitialized(any(Bootstrap.class));
    verify(nettyOptions, times(expectedNumberOfCalls)).afterChannelInitialized(any(SocketChannel.class));
    verify(handler, times(expectedNumberOfCalls)).handlerAdded(any(ChannelHandlerContext.class));
    verify(handler, times(expectedNumberOfCalls)).handlerRemoved(any(ChannelHandlerContext.class));
    verify(nettyOptions, times(1)).onClusterClose(eventLoopGroup);
    verify(nettyOptions, times(1)).onClusterClose(timer);
    verifyNoMoreInteractions(nettyOptions);
}
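For context, the afterChannelInitialized hook that this test stubs and verifies is the extension point users override to touch every SocketChannel the driver opens. A minimal sketch of such a subclass, assuming NettyOptions exposes these hooks as overridable no-ops; the handler name "traffic-logger" and the contact point are placeholders, not values from this test:

public class CustomNettyOptions extends NettyOptions {

    @Override
    public void afterChannelInitialized(SocketChannel channel) {
        // Called once per connection, after the driver has set up its own pipeline.
        // Appends io.netty.handler.logging.LoggingHandler purely as an example.
        channel.pipeline().addLast("traffic-logger", new LoggingHandler(LogLevel.DEBUG));
    }
}

// Hypothetical usage:
Cluster cluster = Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withNettyOptions(new CustomNettyOptions())
        .build();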
use of io.netty.channel.socket.SocketChannel in project cosmic by MissionCriticalCloud.
the class NfsSecondaryStorageResource method startPostUploadServer.
private void startPostUploadServer() {
    final int PORT = 8210;
    final int NO_OF_WORKERS = 15;
    final EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    final EventLoopGroup workerGroup = new NioEventLoopGroup(NO_OF_WORKERS);
    final ServerBootstrap b = new ServerBootstrap();
    final NfsSecondaryStorageResource storageResource = this;
    b.group(bossGroup, workerGroup);
    b.channel(NioServerSocketChannel.class);
    b.handler(new LoggingHandler(LogLevel.INFO));
    b.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(final SocketChannel ch) throws Exception {
            final ChannelPipeline pipeline = ch.pipeline();
            pipeline.addLast(new HttpRequestDecoder());
            pipeline.addLast(new HttpResponseEncoder());
            pipeline.addLast(new HttpContentCompressor());
            pipeline.addLast(new HttpUploadServerHandler(storageResource));
        }
    });
    new Thread() {
        @Override
        public void run() {
            try {
                final Channel ch = b.bind(PORT).sync().channel();
                s_logger.info(String.format("Started post upload server on port %d with %d workers", PORT, NO_OF_WORKERS));
                ch.closeFuture().sync();
            } catch (final InterruptedException e) {
                s_logger.info("Failed to start post upload server");
                s_logger.debug("Exception while starting post upload server", e);
            } finally {
                bossGroup.shutdownGracefully();
                workerGroup.shutdownGracefully();
                s_logger.info("shutting down post upload server");
            }
        }
    }.start();
    s_logger.info("created a thread to start post upload server");
}
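HttpUploadServerHandler itself is not shown on this page. As a rough, hypothetical sketch (not cosmic's actual class), a handler sitting at the end of this pipeline would typically consume the HttpObject parts produced by the HttpRequestDecoder above and reply once the last chunk of the upload arrives:

class SimpleUploadHandler extends SimpleChannelInboundHandler<HttpObject> {

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) {
        if (msg instanceof HttpRequest) {
            // Inspect the request line and headers of the incoming upload.
        } else if (msg instanceof HttpContent) {
            // Stream or buffer the uploaded bytes; LastHttpContent marks the end of the body.
            if (msg instanceof LastHttpContent) {
                FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
                ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
            }
        }
    }
}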
use of io.netty.channel.socket.SocketChannel in project autobahn-java by crossbario.
the class NettyWebSocket method connect.
@Override
public void connect(ITransportHandler transportHandler, TransportOptions options) throws Exception {
    if (options == null) {
        if (mOptions == null) {
            options = new TransportOptions();
        } else {
            options = new TransportOptions();
            options.setAutoPingInterval(mOptions.getAutoPingInterval());
            options.setAutoPingTimeout(mOptions.getAutoPingTimeout());
            options.setMaxFramePayloadSize(mOptions.getMaxFramePayloadSize());
        }
    }
    URI uri = new URI(mUri);
    int port = validateURIAndGetPort(uri);
    String scheme = uri.getScheme();
    String host = uri.getHost();
    final SslContext sslContext = getSSLContext(scheme);
    WebSocketClientHandshaker handshaker = WebSocketClientHandshakerFactory.newHandshaker(
            uri, WebSocketVersion.V13, mSerializers, true, new DefaultHttpHeaders(),
            options.getMaxFramePayloadSize());
    mHandler = new NettyWebSocketClientHandler(handshaker, this, transportHandler);
    EventLoopGroup group = new NioEventLoopGroup();
    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(group);
    bootstrap.channel(NioSocketChannel.class);
    TransportOptions opt = options;
    bootstrap.handler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
            ChannelPipeline channelPipeline = ch.pipeline();
            if (sslContext != null) {
                channelPipeline.addLast(sslContext.newHandler(ch.alloc(), host, port));
            }
            channelPipeline.addLast(
                    new HttpClientCodec(),
                    new HttpObjectAggregator(8192),
                    WebSocketClientCompressionHandler.INSTANCE,
                    new IdleStateHandler(opt.getAutoPingInterval() + opt.getAutoPingTimeout(),
                            opt.getAutoPingInterval(), 0, TimeUnit.SECONDS),
                    mHandler);
        }
    });
    mChannel = bootstrap.connect(uri.getHost(), port).sync().channel();
    mHandler.getHandshakeFuture().sync();
}
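The helpers validateURIAndGetPort and getSSLContext are defined elsewhere in NettyWebSocket and are not shown on this page. The sketches below are plausible, hedged reconstructions of what they might do (the autobahn-java implementations may differ): fall back to the standard ws/wss ports when none is given, and build a client SslContext only for secure schemes.

private int validateURIAndGetPort(URI uri) {
    if (uri.getPort() != -1) {
        return uri.getPort();
    }
    // Assume the conventional defaults when no explicit port is present in the URI.
    return "wss".equalsIgnoreCase(uri.getScheme()) ? 443 : 80;
}

private SslContext getSSLContext(String scheme) throws SSLException {
    if (!"wss".equalsIgnoreCase(scheme)) {
        // Plain ws:// connections skip the SslHandler entirely (see the null check in initChannel).
        return null;
    }
    return SslContextBuilder.forClient().build();
}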
use of io.netty.channel.socket.SocketChannel in project bgpcep by opendaylight.
the class PCCDispatcherImpl method createClient.
@Override
@SuppressWarnings("unchecked")
public Future<PCEPSession> createClient(final InetSocketAddress remoteAddress, final long reconnectTime,
        final PCEPSessionListenerFactory listenerFactory, final PCEPSessionNegotiatorFactory negotiatorFactory,
        final KeyMapping keys, final InetSocketAddress localAddress, final BigInteger dbVersion) {
    final Bootstrap b = new Bootstrap();
    b.group(this.workerGroup);
    b.localAddress(localAddress);
    setChannelFactory(b, keys);
    b.option(ChannelOption.SO_KEEPALIVE, true);
    b.option(ChannelOption.SO_REUSEADDR, true);
    b.option(ChannelOption.RCVBUF_ALLOCATOR, new io.netty.channel.FixedRecvByteBufAllocator(1));
    final long retryTimer = reconnectTime == -1 ? 0 : reconnectTime;
    final PCCReconnectPromise promise = new PCCReconnectPromise(remoteAddress, (int) retryTimer, CONNECT_TIMEOUT, b);
    final ChannelInitializer<SocketChannel> channelInitializer = new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(final SocketChannel ch) {
            ch.pipeline().addLast(PCCDispatcherImpl.this.factory.getDecoders());
            ch.pipeline().addLast("negotiator", negotiatorFactory.getSessionNegotiator(
                    new PCEPSessionNegotiatorFactoryDependencies() {
                        @Override
                        public PCEPSessionListenerFactory getListenerFactory() {
                            return listenerFactory;
                        }

                        @Override
                        public PCEPPeerProposal getPeerProposal() {
                            return new PCCPeerProposal(dbVersion);
                        }
                    }, ch, promise));
            ch.pipeline().addLast(PCCDispatcherImpl.this.factory.getEncoders());
            ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
                @Override
                public void channelInactive(final ChannelHandlerContext ctx) {
                    if (promise.isCancelled()) {
                        return;
                    }
                    if (!promise.isInitialConnectFinished()) {
                        LOG.debug("Connection to {} was dropped during negotiation, reattempting", remoteAddress);
                        return;
                    }
                    LOG.debug("Reconnecting after connection to {} was dropped", remoteAddress);
                    PCCDispatcherImpl.this.createClient(remoteAddress, reconnectTime, listenerFactory,
                            negotiatorFactory, keys, localAddress, dbVersion);
                }
            });
        }
    };
    b.handler(channelInitializer);
    promise.connect();
    return promise;
}
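setChannelFactory is not shown above. A plausible sketch of what it might do, assuming KeyMapping behaves as a Map<InetAddress, byte[]> and that TCP MD5 signature support is only available through Netty's epoll transport; the real bgpcep helper may differ:

private static void setChannelFactory(final Bootstrap bootstrap, final KeyMapping keys) {
    // Default NIO transport when no TCP MD5 keys are configured.
    bootstrap.channel(NioSocketChannel.class);
    if (keys != null && !keys.isEmpty()) {
        if (!Epoll.isAvailable()) {
            throw new UnsupportedOperationException("TCP MD5 signatures require the epoll transport",
                    Epoll.unavailabilityCause());
        }
        // io.netty.channel.epoll.EpollChannelOption.TCP_MD5SIG takes a Map<InetAddress, byte[]>.
        bootstrap.channel(EpollSocketChannel.class);
        bootstrap.option(EpollChannelOption.TCP_MD5SIG, keys);
    }
}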