Use of org.apache.thrift.server.TThreadPoolServer in project yyl_example by Relucent.
The class HelloServer, method getPoolServer.
/**
 * Thread-pool server model: uses standard blocking I/O and pre-creates a group of threads to handle requests.
 */
public static TServer getPoolServer(int port, HelloService.Processor<HelloServiceHandler> processor) throws TTransportException {
  TServerTransport transport = new TServerSocket(port);
  TServer server = new TThreadPoolServer(new TThreadPoolServer.Args(transport).processor(processor));
  return server;
}
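A minimal client sketch for such a server, assuming the Thrift-generated HelloService.Client stub from the same IDL (the host, the port 9090, and the sayHello method are illustrative assumptions, not part of the original example):

import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class HelloClient {
  public static void main(String[] args) throws Exception {
    // Plain blocking socket transport; matches the server's TServerSocket.
    TTransport transport = new TSocket("localhost", 9090);
    transport.open();
    try {
      // TThreadPoolServer defaults to TBinaryProtocol when no protocol factory is configured.
      HelloService.Client client = new HelloService.Client(new TBinaryProtocol(transport));
      // 'sayHello' is a hypothetical method; substitute whatever the IDL actually defines.
      System.out.println(client.sayHello("world"));
    } finally {
      transport.close();
    }
  }
}

TThreadPoolServer dedicates one pooled thread to each blocking client connection, so a plain TSocket on the client side is all that is needed.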
Use of org.apache.thrift.server.TThreadPoolServer in project zeppelin by apache.
The class RemoteInterpreterEventServer, method start.
public void start() throws IOException {
  Thread startingThread = new Thread() {
    @Override
    public void run() {
      try (TServerSocket tSocket = new TServerSocket(RemoteInterpreterUtils.findAvailablePort(portRange))) {
        port = tSocket.getServerSocket().getLocalPort();
        host = RemoteInterpreterUtils.findAvailableHostAddress();
        LOGGER.info("InterpreterEventServer is starting at {}:{}", host, port);
        RemoteInterpreterEventService.Processor<RemoteInterpreterEventServer> processor =
            new RemoteInterpreterEventService.Processor<>(RemoteInterpreterEventServer.this);
        thriftServer = new TThreadPoolServer(new TThreadPoolServer.Args(tSocket).processor(processor));
        thriftServer.serve();
      } catch (IOException | TTransportException e) {
        throw new RuntimeException("Fail to create TServerSocket", e);
      }
      LOGGER.info("ThriftServer-Thread finished");
    }
  };
  startingThread.start();
  long start = System.currentTimeMillis();
  while ((System.currentTimeMillis() - start) < 30 * 1000) {
    if (thriftServer != null && thriftServer.isServing()) {
      break;
    }
    try {
      Thread.sleep(500);
    } catch (InterruptedException e) {
      throw new IOException(e);
    }
  }
  // Fail if the server is still null (startup thread never created it) or is not serving yet.
  if (thriftServer == null || !thriftServer.isServing()) {
    throw new IOException("Fail to start InterpreterEventServer in 30 seconds.");
  }
  LOGGER.info("RemoteInterpreterEventServer is started");
  runner = new AppendOutputRunner(listener);
  appendFuture = appendService.scheduleWithFixedDelay(runner, 0, AppendOutputRunner.BUFFER_TIME_MS, TimeUnit.MILLISECONDS);
}
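The loop above polls isServing() every 500 ms. Thrift can also signal readiness through TServerEventHandler.preServe(), which the server invokes just before it begins accepting connections. A sketch of that alternative using a CountDownLatch (this is an assumption for illustration, not Zeppelin's actual code):

// Shared latch, released once the server is about to accept connections.
final CountDownLatch started = new CountDownLatch(1);

// Set immediately after constructing thriftServer, before calling serve().
thriftServer.setServerEventHandler(new TServerEventHandler() {
  @Override
  public void preServe() {
    // Called once, right before the accept loop starts.
    started.countDown();
  }
  @Override
  public ServerContext createContext(TProtocol input, TProtocol output) {
    return null;
  }
  @Override
  public void deleteContext(ServerContext ctx, TProtocol input, TProtocol output) {
  }
  @Override
  public void processContext(ServerContext ctx, TTransport input, TTransport output) {
  }
});

// In the waiting thread, this replaces the sleep loop:
if (!started.await(30, TimeUnit.SECONDS)) {
  throw new IOException("Fail to start InterpreterEventServer in 30 seconds.");
}

This trades the wake-every-500-ms polling for a single blocking wait that returns as soon as the server is ready.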
Use of org.apache.thrift.server.TThreadPoolServer in project dubbo by alibaba.
The class AbstractTest, method init.
protected void init() throws Exception {
  serverTransport = new TServerSocket(PORT);
  TBinaryProtocol.Factory bFactory = new TBinaryProtocol.Factory();
  server = new TThreadPoolServer(new TThreadPoolServer.Args(serverTransport)
      .inputProtocolFactory(bFactory)
      .outputProtocolFactory(bFactory)
      .inputTransportFactory(getTransportFactory())
      .outputTransportFactory(getTransportFactory())
      .processor(getProcessor()));
  Thread startThread = new Thread() {
    @Override
    public void run() {
      server.serve();
    }
  };
  startThread.setName("thrift-server");
  startThread.start();
  while (!server.isServing()) {
    Thread.sleep(100);
  }
  protocol = ExtensionLoader.getExtensionLoader(Protocol.class).getExtension(ThriftProtocol.NAME);
  invoker = protocol.refer(getInterface(), getUrl());
}
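A test fixture that starts a blocking server this way normally needs a matching teardown: TServer.stop() makes serve() return, and closing the server transport releases the port. A sketch of such a counterpart, with the method name destroy() assumed for illustration:

protected void destroy() throws Exception {
  if (server != null) {
    // stop() interrupts the accept loop, so serve() returns and the
    // "thrift-server" thread can exit.
    server.stop();
  }
  if (serverTransport != null) {
    // Release the listening port for the next test.
    serverTransport.close();
  }
}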
Use of org.apache.thrift.server.TThreadPoolServer in project hive by apache.
The class TestHiveMetastore, method newThriftServer.
private TServer newThriftServer(TServerSocket socket, int poolSize, HiveConf conf) throws Exception {
  HiveConf serverConf = new HiveConf(conf);
  serverConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:" + getDerbyPath() + ";create=true");
  baseHandler = HMS_HANDLER_CTOR.newInstance("new db based metaserver", serverConf);
  IHMSHandler handler = GET_BASE_HMS_HANDLER.invoke(serverConf, baseHandler, false);
  TThreadPoolServer.Args args = new TThreadPoolServer.Args(socket)
      .processor(new TSetIpAddressProcessor<>(handler))
      .transportFactory(new TTransportFactory())
      .protocolFactory(new TBinaryProtocol.Factory())
      .minWorkerThreads(poolSize)
      .maxWorkerThreads(poolSize);
  return new TThreadPoolServer(args);
}
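Because minWorkerThreads and maxWorkerThreads are both set to poolSize, the server runs a fixed-size pool. The same effect can be had by handing the server an explicit executor through Args.executorService(), as the HiveServer2 example below does; a sketch under that assumption (poolSize, socket, and handler as above):

// Equivalent fixed-size pool supplied explicitly instead of via
// minWorkerThreads/maxWorkerThreads.
ExecutorService pool = Executors.newFixedThreadPool(poolSize);
TThreadPoolServer.Args args = new TThreadPoolServer.Args(socket)
    .processor(new TSetIpAddressProcessor<>(handler))
    .executorService(pool);
return new TThreadPoolServer(args);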
Use of org.apache.thrift.server.TThreadPoolServer in project hive by apache.
The class ThriftBinaryCLIService, method initServer.
@Override
protected void initServer() {
  try {
    // Server thread pool
    String threadPoolName = "HiveServer2-Handler-Pool";
    ExecutorService executorService = new ThreadPoolExecutor(minWorkerThreads, maxWorkerThreads,
        workerKeepAliveTime, TimeUnit.SECONDS, new SynchronousQueue<>(),
        new ThreadFactoryWithGarbageCleanup(threadPoolName));
    // Thrift configs
    hiveAuthFactory = new HiveAuthFactory(hiveConf);
    TTransportFactory transportFactory = hiveAuthFactory.getAuthTransFactory();
    TProcessorFactory processorFactory = hiveAuthFactory.getAuthProcFactory(this);
    TServerSocket serverSocket = null;
    List<String> sslVersionBlacklist = new ArrayList<String>();
    for (String sslVersion : hiveConf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(",")) {
      sslVersionBlacklist.add(sslVersion);
    }
    if (!hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_USE_SSL)) {
      serverSocket = HiveAuthUtils.getServerSocket(hiveHost, portNum);
    } else {
      String keyStorePath = hiveConf.getVar(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH).trim();
      if (keyStorePath.isEmpty()) {
        throw new IllegalArgumentException(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH.varname
            + " Not configured for SSL connection");
      }
      String keyStorePassword = ShimLoader.getHadoopShims().getPassword(hiveConf,
          HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname);
      String keyStoreType = hiveConf.getVar(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_TYPE).trim();
      String keyStoreAlgorithm = hiveConf.getVar(ConfVars.HIVE_SERVER2_SSL_KEYMANAGERFACTORY_ALGORITHM).trim();
      String includeCiphersuites = hiveConf.getVar(ConfVars.HIVE_SERVER2_SSL_BINARY_INCLUDE_CIPHERSUITES).trim();
      serverSocket = HiveAuthUtils.getServerSSLSocket(hiveHost, portNum, keyStorePath, keyStorePassword,
          keyStoreType, keyStoreAlgorithm, sslVersionBlacklist, includeCiphersuites);
    }
    // Server args
    int maxMessageSize = hiveConf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE);
    int requestTimeout = (int) hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT, TimeUnit.SECONDS);
    int beBackoffSlotLength = (int) hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH, TimeUnit.MILLISECONDS);
    TThreadPoolServer.Args sargs = new TThreadPoolServer.Args(serverSocket)
        .processorFactory(processorFactory)
        .transportFactory(transportFactory)
        .protocolFactory(new TBinaryProtocol.Factory())
        .inputProtocolFactory(new TBinaryProtocol.Factory(true, true, maxMessageSize, maxMessageSize))
        .executorService(executorService);
    // TCP Server
    server = new TThreadPoolServer(sargs);
    server.setServerEventHandler(new TServerEventHandler() {

      @Override
      public ServerContext createContext(TProtocol input, TProtocol output) {
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
          metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS);
          metrics.incrementCounter(MetricsConstant.CUMULATIVE_CONNECTION_COUNT);
        }
        return new ThriftCLIServerContext();
      }

      /**
       * This is called by the Thrift server when the underlying client
       * connection is cleaned up by the server because the connection has
       * been closed.
       */
      @Override
      public void deleteContext(ServerContext serverContext, TProtocol input, TProtocol output) {
        Metrics metrics = MetricsFactory.getInstance();
        if (metrics != null) {
          metrics.decrementCounter(MetricsConstant.OPEN_CONNECTIONS);
        }
        final ThriftCLIServerContext context = (ThriftCLIServerContext) serverContext;
        final Optional<SessionHandle> sessionHandle = context.getSessionHandle();
        if (sessionHandle.isPresent()) {
          // Normally, the client should politely inform the server it is
          // closing its session with Hive before closing its network
          // connection. However, if the client connection dies for any reason
          // (load-balancer round-robin configuration, firewall kills
          // long-running sessions, bad client, failed client, timed-out
          // client, etc.) then the server will close the connection without
          // having properly cleaned up the Hive session (resources,
          // configuration, logging etc.). That needs to be cleaned up now.
          LOG.warn("Client connection bound to {} unexpectedly closed: closing this Hive session to release its resources. "
              + "The connection processed {} total messages during its lifetime of {}ms. Inspect the client connection "
              + "for time-out, firewall killing the connection, invalid load balancer configuration, etc.",
              sessionHandle, context.getMessagesProcessedCount(), context.getDuration().toMillis());
          try {
            final boolean close = cliService.getSessionManager().getSession(sessionHandle.get())
                .getHiveConf().getBoolVar(ConfVars.HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT);
            if (close) {
              cliService.closeSession(sessionHandle.get());
            } else {
              LOG.warn("Session not actually closed because configuration {} is set to false",
                  ConfVars.HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT.varname);
            }
          } catch (HiveSQLException e) {
            LOG.warn("Failed to close session", e);
          }
        } else {
          // The client never created a session on this connection, or was never
          // able to create one in the first place
          if (context.getSessionCount() == 0) {
            LOG.info("A client connection was closed before creating a Hive session. "
                + "Most likely it is a client that is connecting to this server then "
                + "immediately closing the socket (i.e., TCP health check or port scanner)");
          }
        }
      }

      @Override
      public void preServe() {
      }

      @Override
      public void processContext(ServerContext serverContext, TTransport input, TTransport output) {
        ThriftCLIServerContext context = (ThriftCLIServerContext) serverContext;
        currentServerContext.set(context);
        context.incMessagesProcessedCount();
      }
    });
    String msg = "Starting " + ThriftBinaryCLIService.class.getSimpleName() + " on port " + portNum
        + " with " + minWorkerThreads + "..." + maxWorkerThreads + " worker threads";
    LOG.info(msg);
  } catch (Exception e) {
    throw new RuntimeException("Failed to init thrift server", e);
  }
}
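Note that initServer() only builds and configures the TThreadPoolServer; the blocking serve() call happens later, on a dedicated thread. A sketch of that step, hedged rather than quoted from Hive:

@Override
public void run() {
  try {
    // serve() blocks until stop() is called, so it is run on its own thread.
    server.serve();
  } catch (Throwable t) {
    LOG.error("Error starting " + ThriftBinaryCLIService.class.getSimpleName(), t);
  }
}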