Use of org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory in the project bigbluebutton (by bigbluebutton), in the class Client, method connect:
/**
* Attempt to establish an authenticated connection to the nominated FreeSWITCH ESL server socket.
* This call will block, waiting for an authentication handshake to occur, or timeout after the
* supplied number of seconds.
*
* @param host can be either ip address or hostname
* @param port tcp port that server socket is listening on (set in event_socket_conf.xml)
* @param password server event socket is expecting (set in event_socket_conf.xml)
* @param timeoutSeconds number of seconds to wait for the server socket before aborting
*/
public void connect(String host, int port, String password, int timeoutSeconds) throws InboundConnectionFailure {
// If already connected, disconnect first
if (canSend()) {
close();
}
// Configure this client
ClientBootstrap bootstrap = new ClientBootstrap(new NioClientSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));
// Add ESL handler and factory
InboundClientHandler handler = new InboundClientHandler(password, protocolListener);
bootstrap.setPipelineFactory(new InboundPipelineFactory(handler));
// Attempt connection
ChannelFuture future = bootstrap.connect(new InetSocketAddress(host, port));
// Wait till attempt succeeds, fails or timeouts
if (!future.awaitUninterruptibly(timeoutSeconds, TimeUnit.SECONDS)) {
throw new InboundConnectionFailure("Timeout connecting to " + host + ":" + port);
}
// Did not timeout
channel = future.getChannel();
// But may have failed anyway
if (!future.isSuccess()) {
log.warn("Failed to connect to [{}:{}]", host, port);
log.warn(" * reason: {}", future.getCause());
channel = null;
bootstrap.releaseExternalResources();
throw new InboundConnectionFailure("Could not connect to " + host + ":" + port, future.getCause());
}
// Wait for the authentication handshake to call back
while (!authenticatorResponded.get()) {
try {
Thread.sleep(250);
} catch (InterruptedException e) {
// ignore
}
}
if (!authenticated) {
throw new InboundConnectionFailure("Authentication failed: " + authenticationResponse.getReplyText());
}
}
Use of org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory in the project databus (by linkedin), in the class TestDatabusRelayMain, method testClient:
/**
 * Opens a raw HTTP connection to the relay on {@code relayPort}, issues a /stream request built
 * from the supplied checkpoint, and hands all responses to {@code handler} until the server
 * closes the connection. Blocks until the channel is closed.
 *
 * @param relayPort port the relay's HTTP endpoint is listening on (localhost)
 * @param fetchSize value for the stream request's {@code size} parameter
 * @param scn       sequence number used to build the online-consumption checkpoint
 * @param handler   receives the relay's HTTP response events
 */
private void testClient(int relayPort, int fetchSize, long scn, HttpResponseHandler handler) throws Exception {
    Checkpoint ckpt = Checkpoint.createOnlineConsumptionCheckpoint(scn);
    //TODO why is this needed
    //ckpt.setCatchupSource("foo");
    String uristr = "/stream?sources=105&output=json&size=" + fetchSize + "&streamFromLatestScn=false&checkPoint=" + ckpt.toString();
    ClientBootstrap bootstrap = new ClientBootstrap(new NioClientSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));
    bootstrap.setPipelineFactory(new HttpClientPipelineFactory(handler));
    try {
        ChannelFuture future = bootstrap.connect(new InetSocketAddress("localhost", relayPort));
        Channel channel = future.awaitUninterruptibly().getChannel();
        Assert.assertTrue(future.isSuccess(), "Cannot connect to relay at localhost:" + relayPort);
        HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uristr);
        request.setHeader(HttpHeaders.Names.HOST, "localhost");
        channel.write(request);
        // Block until the relay closes the connection (end of stream)
        channel.getCloseFuture().awaitUninterruptibly();
    } finally {
        // Previously leaked: shut down the bootstrap's boss/worker thread pools even when
        // the connect assertion fails, so repeated test runs don't accumulate threads.
        bootstrap.releaseExternalResources();
    }
}
Use of org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory in the project neo4j (by neo4j), in the class Client, method start:
/**
 * Starts this client: builds the Netty client bootstrap (with named daemon boss/worker thread
 * pools) and a channel pool that lazily opens connections from {@code origin} to
 * {@code destination} on demand. Also wires up {@code resourcePoolReleaser} so that closing a
 * Response returns its channel to the pool.
 */
@Override
public void start() {
bootstrap = new ClientBootstrap(new NioClientSocketChannelFactory(newCachedThreadPool(daemon(getClass().getSimpleName() + "-boss@" + destination)), newCachedThreadPool(daemon(getClass().getSimpleName() + "-worker@" + destination))));
bootstrap.setPipelineFactory(this);
channelPool = new ResourcePool<ChannelContext>(maxUnusedChannels, new ResourcePool.CheckStrategy.TimeoutCheckStrategy(DEFAULT_CHECK_INTERVAL, Clocks.systemClock()), new LoggingResourcePoolMonitor(msgLog)) {
// Opens a fresh channel for the pool; called when no pooled channel is available.
@Override
protected ChannelContext create() {
msgLog.info(threadInfo() + "Trying to open a new channel from " + origin + " to " + destination, true);
// We must specify the origin address in case the server has multiple IPs per interface
ChannelFuture channelFuture = bootstrap.connect(destination, origin);
// Connect attempt is capped at 5 seconds; awaitUninterruptibly does not propagate interrupts.
channelFuture.awaitUninterruptibly(5, TimeUnit.SECONDS);
if (channelFuture.isSuccess()) {
msgLog.info(threadInfo() + "Opened a new channel from " + channelFuture.getChannel().getLocalAddress() + " to " + channelFuture.getChannel().getRemoteAddress());
return new ChannelContext(channelFuture.getChannel(), ChannelBuffers.dynamicBuffer(), ByteBuffer.allocate(1024 * 1024));
}
// Either the connect failed or the 5-second wait above elapsed; getCause() may be null on timeout.
Throwable cause = channelFuture.getCause();
String msg = Client.this.getClass().getSimpleName() + " could not connect from " + origin + " to " + destination;
msgLog.debug(msg, true);
throw traceComException(new ComException(msg, cause), "Client.start");
}
// A pooled channel is only reusable while its underlying connection is still open.
@Override
protected boolean isAlive(ChannelContext context) {
return context.channel().isConnected();
}
// Discards a pooled channel, closing the connection if it is still open.
@Override
protected void dispose(ChannelContext context) {
Channel channel = context.channel();
if (channel.isConnected()) {
msgLog.info(threadInfo() + "Closing: " + context + ". " + "Channel pool size is now " + currentSize());
channel.close();
}
}
// Log prefix identifying the calling thread, e.g. "Thread[12, worker-1] ".
private String threadInfo() {
return "Thread[" + Thread.currentThread().getId() + ", " + Thread.currentThread().getName() + "] ";
}
};
/*
 * This is here to couple the channel releasing to Response.close() itself and not
 * to TransactionStream.close() as it is implemented here. The reason is that a Response
 * that is returned without a TransactionStream will still hold the channel and should
 * release it eventually. Also, logically, closing the channel is not dependent on the
 * TransactionStream.
 */
resourcePoolReleaser = () -> {
if (channelPool != null) {
channelPool.release();
}
};
}
Use of org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory in the project camel (by apache), in the class NettyProducer, method setupTCPCommunication:
/**
 * Lazily initialises the TCP channel factory. Explicitly configured boss/worker pools from the
 * endpoint configuration are preferred; any pool not configured is created here and remembered
 * in {@code bossPool}/{@code workerPool} so it can be shut down when this producer stops.
 */
protected void setupTCPCommunication() throws Exception {
    if (channelFactory != null) {
        // Already initialised — nothing to do.
        return;
    }
    // Prefer an explicitly configured boss pool.
    BossPool bossPoolToUse = configuration.getBossPool();
    if (bossPoolToUse == null) {
        // Not shared: build a private pool that we must shut down when stopping.
        bossPool = new NettyClientBossPoolBuilder().withTimer(getEndpoint().getTimer()).withBossCount(configuration.getBossCount()).withName("NettyClientTCPBoss").build();
        bossPoolToUse = bossPool;
    }
    // Prefer an explicitly configured worker pool.
    WorkerPool workerPoolToUse = configuration.getWorkerPool();
    if (workerPoolToUse == null) {
        // Not shared: build a private pool that we must shut down when stopping.
        workerPool = new NettyWorkerPoolBuilder().withWorkerCount(configuration.getWorkerCount()).withName("NettyClientTCPWorker").build();
        workerPoolToUse = workerPool;
    }
    channelFactory = new NioClientSocketChannelFactory(bossPoolToUse, workerPoolToUse);
}
Use of org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory in the project pinpoint (by naver), in the class DefaultPinpointClientFactory, method createChannelFactory:
/**
 * Builds a Netty NIO client channel factory backed by dedicated boss and worker pools.
 * Both pools run on daemon threads named "Pinpoint-Client-Boss" / "Pinpoint-Client-Worker"
 * and keep the caller's thread names via {@link ThreadNameDeterminer#CURRENT}.
 *
 * @param bossCount   number of boss threads handling connect events
 * @param workerCount number of worker threads handling I/O
 * @param timer       timer shared by the boss pool
 * @return a channel factory using the freshly created pools
 */
private NioClientSocketChannelFactory createChannelFactory(int bossCount, int workerCount, Timer timer) {
    NioClientBossPool bossPool = new NioClientBossPool(
            Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Client-Boss", true)),
            bossCount, timer, ThreadNameDeterminer.CURRENT);
    NioWorkerPool workerPool = new NioWorkerPool(
            Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Client-Worker", true)),
            workerCount, ThreadNameDeterminer.CURRENT);
    return new NioClientSocketChannelFactory(bossPool, workerPool);
}
Aggregations