Use of org.jboss.netty.channel.socket.nio.NioServerBossPool in project tez by apache.
The class ShuffleHandler, method serviceInit.
@Override
protected void serviceInit(Configuration conf) throws Exception {
  manageOsCache = conf.getBoolean(SHUFFLE_MANAGE_OS_CACHE, DEFAULT_SHUFFLE_MANAGE_OS_CACHE);
  readaheadLength = conf.getInt(SHUFFLE_READAHEAD_BYTES, DEFAULT_SHUFFLE_READAHEAD_BYTES);
  maxShuffleConnections = conf.getInt(MAX_SHUFFLE_CONNECTIONS, DEFAULT_MAX_SHUFFLE_CONNECTIONS);
  int maxShuffleThreads = conf.getInt(MAX_SHUFFLE_THREADS, DEFAULT_MAX_SHUFFLE_THREADS);
  if (maxShuffleThreads == 0) {
    maxShuffleThreads = 2 * Runtime.getRuntime().availableProcessors();
  }
  shuffleBufferSize = conf.getInt(SHUFFLE_BUFFER_SIZE, DEFAULT_SHUFFLE_BUFFER_SIZE);
  shuffleTransferToAllowed = conf.getBoolean(SHUFFLE_TRANSFERTO_ALLOWED,
      (Shell.WINDOWS) ? WINDOWS_DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED : DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED);
  maxSessionOpenFiles = conf.getInt(SHUFFLE_MAX_SESSION_OPEN_FILES, DEFAULT_SHUFFLE_MAX_SESSION_OPEN_FILES);
  final String BOSS_THREAD_NAME_PREFIX = "Tez Shuffle Handler Boss #";
  NioServerBossPool bossPool = new NioServerBossPool(Executors.newCachedThreadPool(), 1, new ThreadNameDeterminer() {
    @Override
    public String determineThreadName(String currentThreadName, String proposedThreadName) throws Exception {
      return BOSS_THREAD_NAME_PREFIX + currentThreadName.substring(currentThreadName.lastIndexOf('-') + 1);
    }
  });
  final String WORKER_THREAD_NAME_PREFIX = "Tez Shuffle Handler Worker #";
  NioWorkerPool workerPool = new NioWorkerPool(Executors.newCachedThreadPool(), maxShuffleThreads, new ThreadNameDeterminer() {
    @Override
    public String determineThreadName(String currentThreadName, String proposedThreadName) throws Exception {
      return WORKER_THREAD_NAME_PREFIX + currentThreadName.substring(currentThreadName.lastIndexOf('-') + 1);
    }
  });
  selector = new NioServerSocketChannelFactory(bossPool, workerPool);
  super.serviceInit(new YarnConfiguration(conf));
}
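The Tez code stops once the channel factory is assigned to selector; the ServerBootstrap construction and bind happen elsewhere in ShuffleHandler. For reference, here is a minimal standalone sketch of how such a factory is typically plugged into a Netty 3.x ServerBootstrap; the port, the worker count, and the omission of a pipeline factory are illustrative assumptions, not Tez code.

import java.net.InetSocketAddress;
import java.util.concurrent.Executors;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.socket.nio.NioServerBossPool;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.channel.socket.nio.NioWorkerPool;

public class ShuffleBootstrapSketch {
  public static void main(String[] args) {
    // One boss thread accepts connections; the workers handle socket I/O.
    NioServerBossPool bossPool = new NioServerBossPool(Executors.newCachedThreadPool(), 1);
    NioWorkerPool workerPool = new NioWorkerPool(Executors.newCachedThreadPool(), 4);
    NioServerSocketChannelFactory factory = new NioServerSocketChannelFactory(bossPool, workerPool);
    ServerBootstrap bootstrap = new ServerBootstrap(factory);
    // A real server would install handlers via bootstrap.setPipelineFactory(...) before binding.
    bootstrap.bind(new InetSocketAddress(13562)); // hypothetical port
  }
}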
Use of org.jboss.netty.channel.socket.nio.NioServerBossPool in project opentsdb by OpenTSDB.
The class OpenTSDBMain, method launchTSD.
/**
 * Starts the TSD.
 * @param args The command line arguments
 */
private static void launchTSD(String[] args) {
  ConfigArgP cap = new ConfigArgP(args);
  Config config = cap.getConfig();
  ArgP argp = cap.getArgp();
  applyCommandLine(cap, argp);
  config.loadStaticVariables();
  // All options are now correctly set in config
  setJVMName(config.getInt("tsd.network.port"), config.getString("tsd.network.bind"));
  // Configure the logging
  if (config.hasProperty("tsd.logback.file")) {
    final String logBackFile = config.getString("tsd.logback.file");
    final String rollPattern = config.hasProperty("tsd.logback.rollpattern") ? config.getString("tsd.logback.rollpattern") : null;
    final boolean keepConsoleOpen = config.hasProperty("tsd.logback.console") ? config.getBoolean("tsd.logback.console") : false;
    log.info("\n\t===================================\n\tReconfiguring logback. Logging to file:\n\t{}\n\t===================================\n", logBackFile);
    setLogbackInternal(logBackFile, rollPattern, keepConsoleOpen);
  } else {
    final String logBackConfig;
    if (config.hasProperty("tsd.logback.config")) {
      logBackConfig = config.getString("tsd.logback.config");
    } else {
      logBackConfig = System.getProperty("tsd.logback.config", null);
    }
    if (logBackConfig != null && !logBackConfig.trim().isEmpty() && new File(logBackConfig.trim()).canRead()) {
      setLogbackExternal(logBackConfig.trim());
    }
  }
  if (config.auto_metric()) {
    log.info("\n\t==========================================\n\tAuto-Metric Enabled\n\t==========================================\n");
  } else {
    log.warn("\n\t==========================================\n\tAuto-Metric Disabled\n\t==========================================\n");
  }
  try {
    // Write the PID file
    writePid(config.getString("tsd.process.pid.file"), config.getBoolean("tsd.process.pid.ignore.existing"));
    // Export the UI content
    if (!config.getBoolean("tsd.ui.noexport")) {
      loadContent(config.getString("tsd.http.staticroot"));
    }
    // Create the cache dir if it does not exist
    File cacheDir = new File(config.getString("tsd.http.cachedir"));
    if (cacheDir.exists()) {
      if (!cacheDir.isDirectory()) {
        throw new IllegalArgumentException("The http cache directory [" + cacheDir + "] is not a directory, but a file, which is bad");
      }
    } else {
      if (!cacheDir.mkdirs()) {
        throw new IllegalArgumentException("Failed to create the http cache directory [" + cacheDir + "]");
      }
    }
  } catch (Exception ex) {
    log.error("Failed to process tsd configuration", ex);
    System.exit(-1);
  }
  // =====================================================================
  // Command line processing complete, ready to start TSD.
  // The code from here to the end of the method is an exact duplicate
  // of {@link TSDMain#main(String[])} once configuration is complete.
  // At the time of this writing, this is at line 123 starting with the
  // code: final ServerSocketChannelFactory factory;
  // =====================================================================
  log.info("Configuration complete. Starting TSDB");
  final ServerSocketChannelFactory factory;
  if (config.getBoolean("tsd.network.async_io")) {
    int workers = Runtime.getRuntime().availableProcessors() * 2;
    if (config.hasProperty("tsd.network.worker_threads")) {
      try {
        workers = config.getInt("tsd.network.worker_threads");
      } catch (NumberFormatException nfe) {
        usage(argp, "Invalid worker thread count", 1);
      }
    }
    final Executor executor = Executors.newCachedThreadPool();
    final NioServerBossPool boss_pool = new NioServerBossPool(executor, 1, new Threads.BossThreadNamer());
    final NioWorkerPool worker_pool = new NioWorkerPool(executor, workers, new Threads.WorkerThreadNamer());
    factory = new NioServerSocketChannelFactory(boss_pool, worker_pool);
  } else {
    factory = new OioServerSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool());
  }
  TSDB tsdb = null;
  try {
    tsdb = new TSDB(config);
    tsdb.initializePlugins(true);
    // Make sure we don't even start if we can't find our tables.
    tsdb.checkNecessaryTablesExist().joinUninterruptibly();
    registerShutdownHook(tsdb);
    final ServerBootstrap server = new ServerBootstrap(factory);
    server.setPipelineFactory(new PipelineFactory(tsdb));
    if (config.hasProperty("tsd.network.backlog")) {
      server.setOption("backlog", config.getInt("tsd.network.backlog"));
    }
    server.setOption("child.tcpNoDelay", config.getBoolean("tsd.network.tcp_no_delay"));
    server.setOption("child.keepAlive", config.getBoolean("tsd.network.keep_alive"));
    server.setOption("reuseAddress", config.getBoolean("tsd.network.reuse_address"));
    // null is interpreted as the wildcard address.
    InetAddress bindAddress = null;
    if (config.hasProperty("tsd.network.bind")) {
      bindAddress = InetAddress.getByName(config.getString("tsd.network.bind"));
    }
    // we validated the network port config earlier
    final InetSocketAddress addr = new InetSocketAddress(bindAddress, config.getInt("tsd.network.port"));
    server.bind(addr);
    log.info("Ready to serve on " + addr);
  } catch (Throwable e) {
    factory.releaseExternalResources();
    try {
      if (tsdb != null)
        tsdb.shutdown().joinUninterruptibly();
    } catch (Exception e2) {
      log.error("Failed to shutdown HBase client", e2);
    }
    throw new RuntimeException("Initialization failed", e);
  }
  // The server is now running in separate threads, we can exit main.
}
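The tsd.network.async_io branch above is where NioServerBossPool comes in: one NIO boss thread plus a sized worker pool when async I/O is enabled, otherwise a blocking OIO factory. A minimal sketch of that selection in isolation follows; the class and method names here are hypothetical, not OpenTSDB API.

import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import org.jboss.netty.channel.socket.ServerSocketChannelFactory;
import org.jboss.netty.channel.socket.nio.NioServerBossPool;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.channel.socket.nio.NioWorkerPool;
import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory;

final class ChannelFactorySketch {

  // Builds an NIO factory when asyncIo is true, otherwise a blocking OIO factory.
  static ServerSocketChannelFactory create(boolean asyncIo, int workers) {
    if (asyncIo) {
      // A single boss thread accepts connections; 'workers' threads perform the I/O.
      Executor executor = Executors.newCachedThreadPool();
      NioServerBossPool bossPool = new NioServerBossPool(executor, 1);
      NioWorkerPool workerPool = new NioWorkerPool(executor, workers);
      return new NioServerSocketChannelFactory(bossPool, workerPool);
    }
    // Blocking I/O: one thread per connection, no worker pool sizing.
    return new OioServerSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool());
  }
}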
Use of org.jboss.netty.channel.socket.nio.NioServerBossPool in project opentsdb by OpenTSDB.
The class TSDMain, method main.
public static void main(String[] args) throws IOException {
  Logger log = LoggerFactory.getLogger(TSDMain.class);
  log.info("Starting.");
  log.info(BuildData.revisionString());
  log.info(BuildData.buildString());
  try {
    // Release a FD we don't need.
    System.in.close();
  } catch (Exception e) {
    log.warn("Failed to close stdin", e);
  }
  final ArgP argp = new ArgP();
  CliOptions.addCommon(argp);
  argp.addOption("--port", "NUM", "TCP port to listen on.");
  argp.addOption("--bind", "ADDR", "Address to bind to (default: 0.0.0.0).");
  argp.addOption("--staticroot", "PATH", "Web root from which to serve static files (/s URLs).");
  argp.addOption("--cachedir", "PATH", "Directory under which to cache result of requests.");
  argp.addOption("--worker-threads", "NUM", "Number for async io workers (default: cpu * 2).");
  argp.addOption("--async-io", "true|false", "Use async NIO (default true) or traditional blocking io");
  argp.addOption("--read-only", "true|false", "Set tsd.mode to ro (default false)");
  argp.addOption("--disable-ui", "true|false", "Set tsd.core.enable_ui to false (default true)");
  argp.addOption("--disable-api", "true|false", "Set tsd.core.enable_api to false (default true)");
argp.addOption("--backlog", "NUM", "Size of connection attempt queue (default: 3072 or kernel" + " somaxconn.");
argp.addOption("--max-connections", "NUM", "Maximum number of connections to accept");
argp.addOption("--flush-interval", "MSEC", "Maximum time for which a new data point can be buffered" + " (default: " + DEFAULT_FLUSH_INTERVAL + ").");
argp.addOption("--statswport", "Force all stats to include the port");
CliOptions.addAutoMetricFlag(argp);
args = CliOptions.parse(argp, args);
// free().
args = null;
// get a config object
Config config = CliOptions.getConfig(argp);
// check for the required parameters
try {
if (config.getString("tsd.http.staticroot").isEmpty())
usage(argp, "Missing static root directory", 1);
} catch (NullPointerException npe) {
usage(argp, "Missing static root directory", 1);
}
try {
if (config.getString("tsd.http.cachedir").isEmpty())
usage(argp, "Missing cache directory", 1);
} catch (NullPointerException npe) {
usage(argp, "Missing cache directory", 1);
}
try {
if (!config.hasProperty("tsd.network.port"))
usage(argp, "Missing network port", 1);
config.getInt("tsd.network.port");
} catch (NumberFormatException nfe) {
usage(argp, "Invalid network port setting", 1);
}
// validate the cache and staticroot directories
try {
FileSystem.checkDirectory(config.getString("tsd.http.staticroot"), !Const.MUST_BE_WRITEABLE, Const.DONT_CREATE);
FileSystem.checkDirectory(config.getString("tsd.http.cachedir"), Const.MUST_BE_WRITEABLE, Const.CREATE_IF_NEEDED);
} catch (IllegalArgumentException e) {
usage(argp, e.getMessage(), 3);
}
final ServerSocketChannelFactory factory;
int connections_limit = 0;
try {
connections_limit = config.getInt("tsd.core.connections.limit");
} catch (NumberFormatException nfe) {
usage(argp, "Invalid connections limit", 1);
}
if (config.getBoolean("tsd.network.async_io")) {
int workers = Runtime.getRuntime().availableProcessors() * 2;
if (config.hasProperty("tsd.network.worker_threads")) {
try {
workers = config.getInt("tsd.network.worker_threads");
} catch (NumberFormatException nfe) {
usage(argp, "Invalid worker thread count", 1);
}
}
final Executor executor = Executors.newCachedThreadPool();
final NioServerBossPool boss_pool = new NioServerBossPool(executor, 1, new Threads.BossThreadNamer());
final NioWorkerPool worker_pool = new NioWorkerPool(executor, workers, new Threads.WorkerThreadNamer());
factory = new NioServerSocketChannelFactory(boss_pool, worker_pool);
} else {
factory = new OioServerSocketChannelFactory(Executors.newCachedThreadPool(), Executors.newCachedThreadPool(), new Threads.PrependThreadNamer());
}
StartupPlugin startup = null;
try {
startup = loadStartupPlugins(config);
} catch (IllegalArgumentException e) {
usage(argp, e.getMessage(), 3);
} catch (Exception e) {
throw new RuntimeException("Initialization failed", e);
}
try {
tsdb = new TSDB(config);
if (startup != null) {
tsdb.setStartupPlugin(startup);
}
tsdb.initializePlugins(true);
if (config.getBoolean("tsd.storage.hbase.prefetch_meta")) {
tsdb.preFetchHBaseMeta();
}
// Make sure we don't even start if we can't find our tables.
tsdb.checkNecessaryTablesExist().joinUninterruptibly();
registerShutdownHook();
final ServerBootstrap server = new ServerBootstrap(factory);
// This manager is capable of lazy init, but we force an init
// here to fail fast.
final RpcManager manager = RpcManager.instance(tsdb);
server.setPipelineFactory(new PipelineFactory(tsdb, manager, connections_limit));
if (config.hasProperty("tsd.network.backlog")) {
server.setOption("backlog", config.getInt("tsd.network.backlog"));
}
server.setOption("child.tcpNoDelay", config.getBoolean("tsd.network.tcp_no_delay"));
server.setOption("child.keepAlive", config.getBoolean("tsd.network.keep_alive"));
server.setOption("reuseAddress", config.getBoolean("tsd.network.reuse_address"));
// null is interpreted as the wildcard address.
InetAddress bindAddress = null;
if (config.hasProperty("tsd.network.bind")) {
bindAddress = InetAddress.getByName(config.getString("tsd.network.bind"));
}
// we validated the network port config earlier
final InetSocketAddress addr = new InetSocketAddress(bindAddress, config.getInt("tsd.network.port"));
server.bind(addr);
if (startup != null) {
startup.setReady(tsdb);
}
log.info("Ready to serve on " + addr);
} catch (Throwable e) {
factory.releaseExternalResources();
try {
if (tsdb != null)
tsdb.shutdown().joinUninterruptibly();
} catch (Exception e2) {
log.error("Failed to shutdown HBase client", e2);
}
throw new RuntimeException("Initialization failed", e);
}
// The server is now running in separate threads, we can exit main.
}
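Threads.BossThreadNamer and Threads.WorkerThreadNamer are OpenTSDB utility classes implementing Netty's ThreadNameDeterminer; their bodies are not shown in this snippet. A minimal determiner in the same spirit follows; the prefixing behaviour is an assumption for illustration, not copied from OpenTSDB.

import org.jboss.netty.util.ThreadNameDeterminer;

// Renames pool threads by stamping a fixed prefix onto the name Netty proposes.
final class PrefixingThreadNamer implements ThreadNameDeterminer {
  private final String prefix;

  PrefixingThreadNamer(String prefix) {
    this.prefix = prefix;
  }

  @Override
  public String determineThreadName(String currentThreadName, String proposedThreadName) {
    // Returning a non-null value renames the thread while it runs pool tasks.
    return prefix + " " + proposedThreadName;
  }
}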
Use of org.jboss.netty.channel.socket.nio.NioServerBossPool in project pinpoint by naver.
The class PinpointServerAcceptor, method createBootStrap.
private ServerBootstrap createBootStrap(int bossCount, int workerCount) {
  // profiler, collector
  ExecutorService boss = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Server-Boss", true));
  NioServerBossPool nioServerBossPool = new NioServerBossPool(boss, bossCount, ThreadNameDeterminer.CURRENT);
  ExecutorService worker = Executors.newCachedThreadPool(new PinpointThreadFactory("Pinpoint-Server-Worker", true));
  NioWorkerPool nioWorkerPool = new NioWorkerPool(worker, workerCount, ThreadNameDeterminer.CURRENT);
  NioServerSocketChannelFactory nioClientSocketChannelFactory = new NioServerSocketChannelFactory(nioServerBossPool, nioWorkerPool);
  return new ServerBootstrap(nioClientSocketChannelFactory);
}
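createBootStrap only builds the bootstrap; binding is left to the caller. A hypothetical caller fragment (not Pinpoint's actual accept logic; the options, address, and port are assumptions) might use it like this:

ServerBootstrap bootstrap = createBootStrap(1, 4); // 1 boss thread, 4 worker threads
bootstrap.setOption("child.tcpNoDelay", true);
bootstrap.setOption("child.keepAlive", true);
Channel serverChannel = bootstrap.bind(new InetSocketAddress("0.0.0.0", 9994)); // address and port are hypothetical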