Use of org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory in project opentsdb by OpenTSDB.
The class TSDMain, method main:
public static void main(String[] args) throws IOException {
  Logger log = LoggerFactory.getLogger(TSDMain.class);
  log.info("Starting.");
  log.info(BuildData.revisionString());
  log.info(BuildData.buildString());
  try {
    // Release a FD we don't need.
    System.in.close();
  } catch (Exception e) {
    log.warn("Failed to close stdin", e);
  }

  final ArgP argp = new ArgP();
  CliOptions.addCommon(argp);
  argp.addOption("--port", "NUM", "TCP port to listen on.");
  argp.addOption("--bind", "ADDR", "Address to bind to (default: 0.0.0.0).");
  argp.addOption("--staticroot", "PATH",
      "Web root from which to serve static files (/s URLs).");
  argp.addOption("--cachedir", "PATH",
      "Directory under which to cache result of requests.");
  argp.addOption("--worker-threads", "NUM",
      "Number for async io workers (default: cpu * 2).");
  argp.addOption("--async-io", "true|false",
      "Use async NIO (default true) or traditional blocking io");
  argp.addOption("--read-only", "true|false",
      "Set tsd.mode to ro (default false)");
  argp.addOption("--disable-ui", "true|false",
      "Set tsd.core.enable_ui to false (default true)");
  argp.addOption("--disable-api", "true|false",
      "Set tsd.core.enable_api to false (default true)");
  argp.addOption("--backlog", "NUM",
      "Size of connection attempt queue (default: 3072 or kernel somaxconn).");
  argp.addOption("--max-connections", "NUM",
      "Maximum number of connections to accept");
  argp.addOption("--flush-interval", "MSEC",
      "Maximum time for which a new data point can be buffered"
      + " (default: " + DEFAULT_FLUSH_INTERVAL + ").");
  argp.addOption("--statswport", "Force all stats to include the port");
  CliOptions.addAutoMetricFlag(argp);
  args = CliOptions.parse(argp, args);
  args = null;  // free().

  // get a config object
  Config config = CliOptions.getConfig(argp);

  // check for the required parameters
  try {
    if (config.getString("tsd.http.staticroot").isEmpty())
      usage(argp, "Missing static root directory", 1);
  } catch (NullPointerException npe) {
    usage(argp, "Missing static root directory", 1);
  }
  try {
    if (config.getString("tsd.http.cachedir").isEmpty())
      usage(argp, "Missing cache directory", 1);
  } catch (NullPointerException npe) {
    usage(argp, "Missing cache directory", 1);
  }
  try {
    if (!config.hasProperty("tsd.network.port"))
      usage(argp, "Missing network port", 1);
    config.getInt("tsd.network.port");
  } catch (NumberFormatException nfe) {
    usage(argp, "Invalid network port setting", 1);
  }

  // validate the cache and staticroot directories
  try {
    FileSystem.checkDirectory(config.getString("tsd.http.staticroot"),
        !Const.MUST_BE_WRITEABLE, Const.DONT_CREATE);
    FileSystem.checkDirectory(config.getString("tsd.http.cachedir"),
        Const.MUST_BE_WRITEABLE, Const.CREATE_IF_NEEDED);
  } catch (IllegalArgumentException e) {
    usage(argp, e.getMessage(), 3);
  }

  final ServerSocketChannelFactory factory;
  int connections_limit = 0;
  try {
    connections_limit = config.getInt("tsd.core.connections.limit");
  } catch (NumberFormatException nfe) {
    usage(argp, "Invalid connections limit", 1);
  }
  if (config.getBoolean("tsd.network.async_io")) {
    int workers = Runtime.getRuntime().availableProcessors() * 2;
    if (config.hasProperty("tsd.network.worker_threads")) {
      try {
        workers = config.getInt("tsd.network.worker_threads");
      } catch (NumberFormatException nfe) {
        usage(argp, "Invalid worker thread count", 1);
      }
    }
    final Executor executor = Executors.newCachedThreadPool();
    final NioServerBossPool boss_pool =
        new NioServerBossPool(executor, 1, new Threads.BossThreadNamer());
    final NioWorkerPool worker_pool =
        new NioWorkerPool(executor, workers, new Threads.WorkerThreadNamer());
    factory = new NioServerSocketChannelFactory(boss_pool, worker_pool);
  } else {
    factory = new OioServerSocketChannelFactory(
        Executors.newCachedThreadPool(), Executors.newCachedThreadPool(),
        new Threads.PrependThreadNamer());
  }

  StartupPlugin startup = null;
  try {
    startup = loadStartupPlugins(config);
  } catch (IllegalArgumentException e) {
    usage(argp, e.getMessage(), 3);
  } catch (Exception e) {
    throw new RuntimeException("Initialization failed", e);
  }

  try {
    tsdb = new TSDB(config);
    if (startup != null) {
      tsdb.setStartupPlugin(startup);
    }
    tsdb.initializePlugins(true);
    if (config.getBoolean("tsd.storage.hbase.prefetch_meta")) {
      tsdb.preFetchHBaseMeta();
    }
    // Make sure we don't even start if we can't find our tables.
    tsdb.checkNecessaryTablesExist().joinUninterruptibly();
    registerShutdownHook();
    final ServerBootstrap server = new ServerBootstrap(factory);
    // This manager is capable of lazy init, but we force an init
    // here to fail fast.
    final RpcManager manager = RpcManager.instance(tsdb);
    server.setPipelineFactory(new PipelineFactory(tsdb, manager, connections_limit));
    if (config.hasProperty("tsd.network.backlog")) {
      server.setOption("backlog", config.getInt("tsd.network.backlog"));
    }
    server.setOption("child.tcpNoDelay", config.getBoolean("tsd.network.tcp_no_delay"));
    server.setOption("child.keepAlive", config.getBoolean("tsd.network.keep_alive"));
    server.setOption("reuseAddress", config.getBoolean("tsd.network.reuse_address"));
    // null is interpreted as the wildcard address.
    InetAddress bindAddress = null;
    if (config.hasProperty("tsd.network.bind")) {
      bindAddress = InetAddress.getByName(config.getString("tsd.network.bind"));
    }
    // we validated the network port config earlier
    final InetSocketAddress addr =
        new InetSocketAddress(bindAddress, config.getInt("tsd.network.port"));
    server.bind(addr);
    if (startup != null) {
      startup.setReady(tsdb);
    }
    log.info("Ready to serve on " + addr);
  } catch (Throwable e) {
    factory.releaseExternalResources();
    try {
      if (tsdb != null)
        tsdb.shutdown().joinUninterruptibly();
    } catch (Exception e2) {
      log.error("Failed to shutdown HBase client", e2);
    }
    throw new RuntimeException("Initialization failed", e);
  }
  // The server is now running in separate threads, we can exit main.
}
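Stripped of the OpenTSDB-specific thread namers and config plumbing, the factory selection above boils down to the following sketch. It is a hedged illustration against the stock Netty 3.x API only; the class name, flag, and worker-count parameter are invented for the example.

// Hedged sketch (Netty 3.x): pick the NIO or blocking OIO transport the way
// TSDMain.main() does, minus the OpenTSDB-specific thread namers.
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import org.jboss.netty.channel.socket.ServerSocketChannelFactory;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory;

final class ChannelFactorySketch {
  static ServerSocketChannelFactory newFactory(boolean asyncIo, int workerThreads) {
    if (asyncIo) {
      Executor executor = Executors.newCachedThreadPool();
      // Boss threads accept connections; worker threads do the socket I/O.
      return new NioServerSocketChannelFactory(executor, executor, workerThreads);
    }
    // Blocking I/O: Netty dedicates one thread per accepted connection.
    return new OioServerSocketChannelFactory(
        Executors.newCachedThreadPool(), Executors.newCachedThreadPool());
  }
}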
Use of org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory in project load-balancer by RestComm.
The class AppServer, method start:
public void start() {
  ExecutorService executor = Executors.newCachedThreadPool();
  protocolObjects = new ProtocolObjects(name, "gov.nist", transport, false, false, true);
  if (!isSendResponse) {
    sipListener = new TestSipListener(isIpv6, port, lbSIPint, protocolObjects, false);
    sipListener.abortProcessing = true;
  } else if (!isDummy) {
    if (!isMediaFailure || !isFirstStart) {
      sipListener = new TestSipListener(isIpv6, port, lbSIPint, protocolObjects, false);
    } else {
      sipListener = new TestSipListener(isIpv6, port, lbSIPint, protocolObjects, false);
      sipListener.setRespondWithError(Response.SERVICE_UNAVAILABLE);
    }
  } else {
    sipListener = new TestSipListener(isIpv6, port + 1, lbSIPint, protocolObjects, false);
  }
  sipListener.appServer = this;
  try {
    sipProvider = sipListener.createProvider();
    sipProvider.addSipListener(sipListener);
    protocolObjects.start();
  } catch (Exception e) {
    e.printStackTrace();
  }

  // Generate the node that will be advertised to the load balancer.
  if (!isIpv6)
    node = new Node(name, "127.0.0.1");
  else
    node = new Node(name, "::1");
  node.getProperties().put(transport.toLowerCase() + "Port", "" + port);
  node.getProperties().put(Protocol.VERSION, version);
  node.getProperties().put(Protocol.SESSION_ID, "" + System.currentTimeMillis());
  node.getProperties().put(Protocol.HEARTBEAT_PORT, "" + heartbeatPort);

  // Heartbeat server: the same cached thread pool serves as boss and worker executor.
  nioServerSocketChannelFactory = new NioServerSocketChannelFactory(executor, executor);
  serverBootstrap = new ServerBootstrap(nioServerSocketChannelFactory);
  serverBootstrap.setPipelineFactory(new ServerPipelineFactory(this));
  serverChannel = serverBootstrap.bind(new InetSocketAddress(node.getIp(), heartbeatPort));
  logger.info("Heartbeat service listen on " + heartbeatAddress + ":" + heartbeatPort + " (Node's side)");

  // Start the client controller(s) that report to the load balancer(s).
  if (balancers == null) {
    clientController = new ClientController(this, lbAddress, lbPort, node, 5000, heartbeatPeriod, executor);
  } else {
    String[] lbs = balancers.split(",");
    clientControllers = new ClientController[lbs.length];
    for (int i = 0; i < lbs.length; i++) {
      if (!isIpv6)
        node = new Node(name, "127.0.0.1");
      else
        node = new Node(name, "::1");
      node.getProperties().put(transport.toLowerCase() + "Port", "" + port);
      node.getProperties().put(Protocol.VERSION, version);
      node.getProperties().put(Protocol.HEARTBEAT_PORT, "" + heartbeatPort);
      clientControllers[i] = new ClientController(this, lbs[i].split(":")[0],
          Integer.parseInt(lbs[i].split(":")[1]), node, 5000, heartbeatPeriod, executor);
      clientControllers[i].startClient();
    }
  }
  if (sendHeartbeat) {
    if (balancers == null)
      clientController.startClient();
  }
}
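ServerPipelineFactory above is a class of the load-balancer project. As a rough, hypothetical stand-in built only from the stock Netty 3.x API (the class name and handler body are invented; the real factory decodes the heartbeat protocol), it could look like:

// Hypothetical stand-in for the project's ServerPipelineFactory: one upstream
// handler per channel that, unlike the real one, simply ignores what it receives.
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;

public class HeartbeatPipelineFactorySketch implements ChannelPipelineFactory {
  @Override
  public ChannelPipeline getPipeline() throws Exception {
    ChannelPipeline pipeline = Channels.pipeline();
    pipeline.addLast("heartbeat-handler", new SimpleChannelUpstreamHandler() {
      @Override
      public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
        // The real handler would decode and act on the heartbeat payload here.
      }
    });
    return pipeline;
  }
}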
Use of org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory in project load-balancer by RestComm.
The class HeartbeatService, method start:
public void start() {
  serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(executor, executor));
  serverBootstrap.setPipelineFactory(new ServerPipelineFactory(this));
  serverChannel = serverBootstrap.bind(new InetSocketAddress(heartBeatIp, heartBeatPort));
  this.started.set(true);
  logger.info("Heartbeat service listen on " + heartBeatIp + ":" + heartBeatPort + " (Node's side)");
}
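The snippet covers startup only. A hedged sketch of the matching shutdown path, written as a hypothetical stop() against the same fields (serverChannel, serverBootstrap, started) and not taken from the project:

// Hypothetical stop() counterpart to start(): unbind the listener, then release
// the boss/worker threads owned by the NioServerSocketChannelFactory.
public void stop() {
  if (serverChannel != null) {
    serverChannel.close().awaitUninterruptibly();
  }
  serverBootstrap.releaseExternalResources();
  this.started.set(false);
}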
Use of org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory in project feeyo-hlsserver by variflight.
The class HttpServer, method startup:
public void startup(int port) {
  final int maxContentLength = 1024 * 1024 * 1024;
  bootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(bossExecutor, workerExecutor));
  bootstrap.setOption("connectTimeoutMillis", 10000);
  bootstrap.setOption("reuseAddress", true);  // kernel optimization
  bootstrap.setOption("keepAlive", true);     // for mobiles & our
  bootstrap.setOption("tcpNoDelay", true);    // better latency over
  bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
    @Override
    public ChannelPipeline getPipeline() throws Exception {
      ChannelPipeline p = Channels.pipeline();
      p.addLast("http-encoder", new HttpResponseEncoder());
      p.addLast("http-decoder", new HttpRequestDecoder());
      p.addLast("http-aggregator", new HttpChunkAggregator(maxContentLength));
      p.addLast("server-handler", new HttpServerRequestHandler());
      return p;
    }
  });
  channel = bootstrap.bind(new InetSocketAddress(port));
}
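HttpServerRequestHandler is a feeyo-hlsserver class. To show what the last pipeline stage sees once HttpChunkAggregator has reassembled a full request, here is a minimal hypothetical handler, not the project's implementation:

// Hypothetical final pipeline handler: after HttpChunkAggregator, messageReceived
// is invoked with complete HttpRequest objects and can write an HttpResponse back.
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
import org.jboss.netty.handler.codec.http.HttpResponse;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.jboss.netty.handler.codec.http.HttpVersion;
import org.jboss.netty.util.CharsetUtil;

public class SimpleOkHandler extends SimpleChannelUpstreamHandler {
  @Override
  public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
    HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
    response.setContent(ChannelBuffers.copiedBuffer("OK", CharsetUtil.UTF_8));
    e.getChannel().write(response);
  }
}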
Use of org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory in project socket.io-netty by ibdknox.
The class NSIOServer, method start:
public void start() {
  bootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(
      Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));
  // Set up the event pipeline factory.
  socketHandler = new WebSocketServerHandler(handler);
  bootstrap.setPipelineFactory(new WebSocketServerPipelineFactory(socketHandler));
  // Bind and start to accept incoming connections.
  this.serverChannel = bootstrap.bind(new InetSocketAddress(port));
  this.running = true;
  try {
    FlashPolicyServer.start();
  } catch (Exception e) {
    // TODO: this should not be a blanket Exception catch.
    System.out.println("You must run as sudo for flash policy server. X-Domain flash will not currently work.");
  }
  System.out.println("Server Started at port [" + port + "]");
}
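One caveat worth noting about the bind() call above: in Netty 3, ServerBootstrap.bind() throws a ChannelException if the port cannot be bound, and in that case the factory's thread pools should be released. A hedged helper sketch (the class and method names are invented):

// Hedged helper sketch: bind, or release the factory's external resources on failure.
// ServerBootstrap.bind() throws ChannelException when binding fails.
import java.net.InetSocketAddress;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelException;

final class BindGuardSketch {
  static Channel bindOrRelease(ServerBootstrap bootstrap, int port) {
    try {
      return bootstrap.bind(new InetSocketAddress(port));
    } catch (ChannelException e) {
      // Free the boss/worker threads if we cannot listen on the port.
      bootstrap.releaseExternalResources();
      throw e;
    }
  }
}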