use of org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory in project NabAlive by jcheype.
the class NabaliveServer method start.
@PostConstruct
public void start() {
    logger.info("Starting server.");
    // Configure the server.
    bootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));
    // Set up the pipeline factory.
    bootstrap.setPipelineFactory(new ChannelPipelineFactory() {

        public ChannelPipeline getPipeline() throws Exception {
            // pipeline() is the static import of Channels.pipeline()
            ChannelPipeline pipeline = pipeline();
            // fire an all-idle event after 20 seconds with no reads or writes
            pipeline.addLast("timeout", new IdleStateHandler(timer, 0, 0, 20));
            pipeline.addLast("nabaliveServerHandler", nabaliveServerHandler);
            return pipeline;
        }
    });
    bootstrap.setOption("reuseAddress", true);
    // Bind and start to accept incoming connections.
    bind = bootstrap.bind(new InetSocketAddress(XMPP_PORT));
}
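Note that start() leaves three resources to reclaim on shutdown: the bound channel, the two thread pools owned by the bootstrap, and the timer driving the IdleStateHandler. A minimal sketch of the matching stop method, assuming the bind, bootstrap, and timer fields referenced above (illustrative, not the project's actual code):

@PreDestroy
public void stop() {
    logger.info("Stopping server.");
    if (bind != null) {
        // close the server channel so no new connections are accepted
        bind.close().awaitUninterruptibly();
    }
    // shut down both Executors handed to NioServerSocketChannelFactory
    bootstrap.releaseExternalResources();
    // the Timer backing the IdleStateHandler must be stopped separately
    timer.stop();
}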
use of org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory in project hadoop by apache.
the class SimpleTcpServer method run.
public void run() {
    // Configure the Server.
    ChannelFactory factory;
    if (workerCount == 0) {
        // Use default workers: 2 * the number of available processors
        factory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool());
    } else {
        factory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool(), workerCount);
    }
    server = new ServerBootstrap(factory);
    server.setPipelineFactory(new ChannelPipelineFactory() {

        @Override
        public ChannelPipeline getPipeline() throws Exception {
            return Channels.pipeline(RpcUtil.constructRpcFrameDecoder(), RpcUtil.STAGE_RPC_MESSAGE_PARSER, rpcProgram, RpcUtil.STAGE_RPC_TCP_RESPONSE);
        }
    });
    server.setOption("child.tcpNoDelay", true);
    server.setOption("child.keepAlive", true);
    server.setOption("child.reuseAddress", true);
    server.setOption("reuseAddress", true);
    // Listen to TCP port
    ch = server.bind(new InetSocketAddress(port));
    InetSocketAddress socketAddr = (InetSocketAddress) ch.getLocalAddress();
    boundPort = socketAddr.getPort();
    LOG.info("Started listening to TCP requests at port " + boundPort + " for " + rpcProgram + " with workerCount " + workerCount);
}
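Since run() reads the actual port back from the channel, binding to port 0 lets the OS assign a free ephemeral port, which is handy in tests. A hedged standalone sketch of that pattern (the no-op handler is a placeholder, not part of the Hadoop source):

ServerBootstrap b = new ServerBootstrap(new NioServerSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));
b.setPipelineFactory(new ChannelPipelineFactory() {

    @Override
    public ChannelPipeline getPipeline() {
        // placeholder handler; a real server would install its protocol handlers here
        return Channels.pipeline(new SimpleChannelUpstreamHandler());
    }
});
Channel ch = b.bind(new InetSocketAddress(0)); // port 0 = OS-assigned ephemeral port
int ephemeralPort = ((InetSocketAddress) ch.getLocalAddress()).getPort();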
use of org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory in project camel by apache.
the class SingleTCPNettyServerBootstrapFactory method startServerBootstrap.
protected void startServerBootstrap() {
    // prefer using explicitly configured thread pools
    BossPool bp = configuration.getBossPool();
    WorkerPool wp = configuration.getWorkerPool();
    if (bp == null) {
        // create a new pool which we should shut down when stopping, as it's not shared
        bossPool = new NettyServerBossPoolBuilder().withBossCount(configuration.getBossCount()).withName("NettyServerTCPBoss").build();
        bp = bossPool;
    }
    if (wp == null) {
        // create a new pool which we should shut down when stopping, as it's not shared
        workerPool = new NettyWorkerPoolBuilder().withWorkerCount(configuration.getWorkerCount()).withName("NettyServerTCPWorker").build();
        wp = workerPool;
    }
    channelFactory = new NioServerSocketChannelFactory(bp, wp);
    serverBootstrap = new ServerBootstrap(channelFactory);
    serverBootstrap.setOption("child.keepAlive", configuration.isKeepAlive());
    serverBootstrap.setOption("child.tcpNoDelay", configuration.isTcpNoDelay());
    serverBootstrap.setOption("reuseAddress", configuration.isReuseAddress());
    serverBootstrap.setOption("child.reuseAddress", configuration.isReuseAddress());
    serverBootstrap.setOption("child.connectTimeoutMillis", configuration.getConnectTimeout());
    if (configuration.getBacklog() > 0) {
        serverBootstrap.setOption("backlog", configuration.getBacklog());
    }
    // set any additional netty options
    if (configuration.getOptions() != null) {
        for (Map.Entry<String, Object> entry : configuration.getOptions().entrySet()) {
            serverBootstrap.setOption(entry.getKey(), entry.getValue());
        }
    }
    LOG.debug("Created ServerBootstrap {} with options: {}", serverBootstrap, serverBootstrap.getOptions());
    // set the pipeline factory, which creates the pipeline for each newly created channel
    serverBootstrap.setPipelineFactory(pipelineFactory);
    LOG.info("ServerBootstrap binding to {}:{}", configuration.getHost(), configuration.getPort());
    channel = serverBootstrap.bind(new InetSocketAddress(configuration.getHost(), configuration.getPort()));
    // keep track of all channels in use
    allChannels.add(channel);
}
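This factory distinguishes shared pools (bp/wp supplied by the configuration) from pools it built itself (the bossPool/workerPool fields), and only the latter should be torn down on stop. A hedged sketch of that shutdown path using the same fields (the actual Camel source may differ in details):

protected void stopServerBootstrap() {
    LOG.info("ServerBootstrap unbinding from {}:{}", configuration.getHost(), configuration.getPort());
    // close the acceptor channel plus every child channel we tracked
    allChannels.close().awaitUninterruptibly();
    if (channelFactory != null) {
        channelFactory.releaseExternalResources();
    }
    // shut down only the pools created above; shared pools belong to their owner
    if (bossPool != null) {
        bossPool.shutdown();
        bossPool = null;
    }
    if (workerPool != null) {
        workerPool.shutdown();
        workerPool = null;
    }
}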
use of org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory in project hadoop by apache.
the class ShuffleHandler method serviceInit.
@Override
protected void serviceInit(Configuration conf) throws Exception {
    manageOsCache = conf.getBoolean(SHUFFLE_MANAGE_OS_CACHE, DEFAULT_SHUFFLE_MANAGE_OS_CACHE);
    readaheadLength = conf.getInt(SHUFFLE_READAHEAD_BYTES, DEFAULT_SHUFFLE_READAHEAD_BYTES);
    maxShuffleConnections = conf.getInt(MAX_SHUFFLE_CONNECTIONS, DEFAULT_MAX_SHUFFLE_CONNECTIONS);
    int maxShuffleThreads = conf.getInt(MAX_SHUFFLE_THREADS, DEFAULT_MAX_SHUFFLE_THREADS);
    if (maxShuffleThreads == 0) {
        maxShuffleThreads = 2 * Runtime.getRuntime().availableProcessors();
    }
    shuffleBufferSize = conf.getInt(SHUFFLE_BUFFER_SIZE, DEFAULT_SHUFFLE_BUFFER_SIZE);
    shuffleTransferToAllowed = conf.getBoolean(SHUFFLE_TRANSFERTO_ALLOWED, (Shell.WINDOWS) ? WINDOWS_DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED : DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED);
    maxSessionOpenFiles = conf.getInt(SHUFFLE_MAX_SESSION_OPEN_FILES, DEFAULT_SHUFFLE_MAX_SESSION_OPEN_FILES);
    ThreadFactory bossFactory = new ThreadFactoryBuilder().setNameFormat("ShuffleHandler Netty Boss #%d").build();
    ThreadFactory workerFactory = new ThreadFactoryBuilder().setNameFormat("ShuffleHandler Netty Worker #%d").build();
    selector = new NioServerSocketChannelFactory(HadoopExecutors.newCachedThreadPool(bossFactory), HadoopExecutors.newCachedThreadPool(workerFactory), maxShuffleThreads);
    super.serviceInit(new Configuration(conf));
}
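serviceInit() only builds the channel factory; the selector field is consumed later when the service starts. A hedged sketch of that hand-off (the pipeline-factory field and channel group here are illustrative placeholders, not the exact Hadoop source):

@Override
protected void serviceStart() throws Exception {
    ServerBootstrap bootstrap = new ServerBootstrap(selector);
    bootstrap.setOption("child.keepAlive", true);
    // pipelineFact is a hypothetical ChannelPipelineFactory field set up elsewhere
    bootstrap.setPipelineFactory(pipelineFact);
    Channel ch = bootstrap.bind(new InetSocketAddress(port));
    // track the acceptor channel so it can be closed in serviceStop()
    accepted.add(ch);
    super.serviceStart();
}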
use of org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory in project opentsdb by OpenTSDB.
the class TSDMain method main.
public static void main(String[] args) throws IOException {
    Logger log = LoggerFactory.getLogger(TSDMain.class);
    log.info("Starting.");
    log.info(BuildData.revisionString());
    log.info(BuildData.buildString());
    try {
        // Release a FD we don't need.
        System.in.close();
    } catch (Exception e) {
        log.warn("Failed to close stdin", e);
    }
    final ArgP argp = new ArgP();
    CliOptions.addCommon(argp);
    argp.addOption("--port", "NUM", "TCP port to listen on.");
    argp.addOption("--bind", "ADDR", "Address to bind to (default: 0.0.0.0).");
    argp.addOption("--staticroot", "PATH", "Web root from which to serve static files (/s URLs).");
    argp.addOption("--cachedir", "PATH", "Directory under which to cache result of requests.");
    argp.addOption("--worker-threads", "NUM", "Number of async I/O workers (default: cpu * 2).");
    argp.addOption("--async-io", "true|false", "Use async NIO (default true) or traditional blocking I/O.");
    argp.addOption("--read-only", "true|false", "Set tsd.mode to ro (default false).");
    argp.addOption("--disable-ui", "true|false", "Set tsd.core.enable_ui to false (default true).");
    argp.addOption("--disable-api", "true|false", "Set tsd.core.enable_api to false (default true).");
    argp.addOption("--backlog", "NUM", "Size of connection attempt queue (default: 3072 or kernel" + " somaxconn).");
    argp.addOption("--max-connections", "NUM", "Maximum number of connections to accept.");
    argp.addOption("--flush-interval", "MSEC", "Maximum time for which a new data point can be buffered" + " (default: " + DEFAULT_FLUSH_INTERVAL + ").");
    argp.addOption("--statswport", "Force all stats to include the port");
    CliOptions.addAutoMetricFlag(argp);
    args = CliOptions.parse(argp, args);
    // null out the reference so the parsed array can be freed by the GC
    args = null;
    // get a config object
    Config config = CliOptions.getConfig(argp);
    // check for the required parameters
    try {
        if (config.getString("tsd.http.staticroot").isEmpty())
            usage(argp, "Missing static root directory", 1);
    } catch (NullPointerException npe) {
        usage(argp, "Missing static root directory", 1);
    }
    try {
        if (config.getString("tsd.http.cachedir").isEmpty())
            usage(argp, "Missing cache directory", 1);
    } catch (NullPointerException npe) {
        usage(argp, "Missing cache directory", 1);
    }
    try {
        if (!config.hasProperty("tsd.network.port"))
            usage(argp, "Missing network port", 1);
        config.getInt("tsd.network.port");
    } catch (NumberFormatException nfe) {
        usage(argp, "Invalid network port setting", 1);
    }
    // validate the cache and staticroot directories
    try {
        FileSystem.checkDirectory(config.getString("tsd.http.staticroot"), !Const.MUST_BE_WRITEABLE, Const.DONT_CREATE);
        FileSystem.checkDirectory(config.getString("tsd.http.cachedir"), Const.MUST_BE_WRITEABLE, Const.CREATE_IF_NEEDED);
    } catch (IllegalArgumentException e) {
        usage(argp, e.getMessage(), 3);
    }
    final ServerSocketChannelFactory factory;
    int connections_limit = 0;
    try {
        connections_limit = config.getInt("tsd.core.connections.limit");
    } catch (NumberFormatException nfe) {
        usage(argp, "Invalid connections limit", 1);
    }
    if (config.getBoolean("tsd.network.async_io")) {
        int workers = Runtime.getRuntime().availableProcessors() * 2;
        if (config.hasProperty("tsd.network.worker_threads")) {
            try {
                workers = config.getInt("tsd.network.worker_threads");
            } catch (NumberFormatException nfe) {
                usage(argp, "Invalid worker thread count", 1);
            }
        }
        final Executor executor = Executors.newCachedThreadPool();
        final NioServerBossPool boss_pool = new NioServerBossPool(executor, 1, new Threads.BossThreadNamer());
        final NioWorkerPool worker_pool = new NioWorkerPool(executor, workers, new Threads.WorkerThreadNamer());
        factory = new NioServerSocketChannelFactory(boss_pool, worker_pool);
    } else {
        factory = new OioServerSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool(), new Threads.PrependThreadNamer());
    }
    StartupPlugin startup = null;
    try {
        startup = loadStartupPlugins(config);
    } catch (IllegalArgumentException e) {
        usage(argp, e.getMessage(), 3);
    } catch (Exception e) {
        throw new RuntimeException("Initialization failed", e);
    }
    try {
        tsdb = new TSDB(config);
        if (startup != null) {
            tsdb.setStartupPlugin(startup);
        }
        tsdb.initializePlugins(true);
        if (config.getBoolean("tsd.storage.hbase.prefetch_meta")) {
            tsdb.preFetchHBaseMeta();
        }
        // Make sure we don't even start if we can't find our tables.
        tsdb.checkNecessaryTablesExist().joinUninterruptibly();
        registerShutdownHook();
        final ServerBootstrap server = new ServerBootstrap(factory);
        // This manager is capable of lazy init, but we force an init
        // here to fail fast.
        final RpcManager manager = RpcManager.instance(tsdb);
        server.setPipelineFactory(new PipelineFactory(tsdb, manager, connections_limit));
        if (config.hasProperty("tsd.network.backlog")) {
            server.setOption("backlog", config.getInt("tsd.network.backlog"));
        }
        server.setOption("child.tcpNoDelay", config.getBoolean("tsd.network.tcp_no_delay"));
        server.setOption("child.keepAlive", config.getBoolean("tsd.network.keep_alive"));
        server.setOption("reuseAddress", config.getBoolean("tsd.network.reuse_address"));
        // null is interpreted as the wildcard address.
        InetAddress bindAddress = null;
        if (config.hasProperty("tsd.network.bind")) {
            bindAddress = InetAddress.getByName(config.getString("tsd.network.bind"));
        }
        // we validated the network port config earlier
        final InetSocketAddress addr = new InetSocketAddress(bindAddress, config.getInt("tsd.network.port"));
        server.bind(addr);
        if (startup != null) {
            startup.setReady(tsdb);
        }
        log.info("Ready to serve on " + addr);
    } catch (Throwable e) {
        factory.releaseExternalResources();
        try {
            if (tsdb != null)
                tsdb.shutdown().joinUninterruptibly();
        } catch (Exception e2) {
            log.error("Failed to shutdown HBase client", e2);
        }
        throw new RuntimeException("Initialization failed", e);
    }
    // The server is now running in separate threads, so we can exit main().
}
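The registerShutdownHook() call above is what guarantees a clean HBase disconnect once main() returns and the JVM later exits; a hedged sketch of such a hook, assuming the static tsdb field (the actual OpenTSDB implementation may differ in details):

private static void registerShutdownHook() {
    final class TSDBShutdown extends Thread {

        TSDBShutdown() {
            super("TSDBShutdown");
        }

        @Override
        public void run() {
            try {
                if (tsdb != null) {
                    // flush buffered data points and close the HBase client
                    tsdb.shutdown().join();
                }
            } catch (Exception e) {
                LoggerFactory.getLogger(TSDBShutdown.class).error("Failed to shutdown cleanly", e);
            }
        }
    }
    Runtime.getRuntime().addShutdownHook(new TSDBShutdown());
}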