use of org.apache.thrift.server.TThreadedSelectorServer in project hbase by apache.
the class ThriftServerRunner method setupServer.
/**
* Sets up the Thrift TServer.
*/
private void setupServer() throws Exception {
// Construct correct ProtocolFactory
TProtocolFactory protocolFactory;
if (conf.getBoolean(COMPACT_CONF_KEY, false)) {
LOG.debug("Using compact protocol");
protocolFactory = new TCompactProtocol.Factory();
} else {
LOG.debug("Using binary protocol");
protocolFactory = new TBinaryProtocol.Factory();
}
final TProcessor p = new Hbase.Processor<>(handler);
ImplType implType = ImplType.getServerImpl(conf);
TProcessor processor = p;
// Construct correct TransportFactory
TTransportFactory transportFactory;
if (conf.getBoolean(FRAMED_CONF_KEY, false) || implType.isAlwaysFramed) {
if (qop != null) {
throw new RuntimeException("Thrift server authentication" + " doesn't work with framed transport yet");
}
transportFactory = new TFramedTransport.Factory(conf.getInt(MAX_FRAME_SIZE_CONF_KEY, 2) * 1024 * 1024);
LOG.debug("Using framed transport");
} else if (qop == null) {
transportFactory = new TTransportFactory();
} else {
// Extract the name from the principal
String name = SecurityUtil.getUserFromPrincipal(conf.get("hbase.thrift.kerberos.principal"));
Map<String, String> saslProperties = new HashMap<>();
saslProperties.put(Sasl.QOP, qop);
TSaslServerTransport.Factory saslFactory = new TSaslServerTransport.Factory();
saslFactory.addServerDefinition("GSSAPI", name, host, saslProperties, new SaslGssCallbackHandler() {
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
AuthorizeCallback ac = null;
for (Callback callback : callbacks) {
if (callback instanceof AuthorizeCallback) {
ac = (AuthorizeCallback) callback;
} else {
throw new UnsupportedCallbackException(callback, "Unrecognized SASL GSSAPI Callback");
}
}
if (ac != null) {
String authid = ac.getAuthenticationID();
String authzid = ac.getAuthorizationID();
if (!authid.equals(authzid)) {
ac.setAuthorized(false);
} else {
ac.setAuthorized(true);
String userName = SecurityUtil.getUserFromPrincipal(authzid);
LOG.info("Effective user: " + userName);
ac.setAuthorizedID(userName);
}
}
}
});
transportFactory = saslFactory;
// Create a processor wrapper, to get the caller
processor = new TProcessor() {
@Override
public boolean process(TProtocol inProt, TProtocol outProt) throws TException {
TSaslServerTransport saslServerTransport = (TSaslServerTransport) inProt.getTransport();
SaslServer saslServer = saslServerTransport.getSaslServer();
String principal = saslServer.getAuthorizationID();
hbaseHandler.setEffectiveUser(principal);
return p.process(inProt, outProt);
}
};
}
if (conf.get(BIND_CONF_KEY) != null && !implType.canSpecifyBindIP) {
LOG.error("Server types " + Joiner.on(", ").join(ImplType.serversThatCannotSpecifyBindIP()) + " don't support IP " + "address binding at the moment. See " + "https://issues.apache.org/jira/browse/HBASE-2155 for details.");
throw new RuntimeException("-" + BIND_CONF_KEY + " not supported with " + implType);
}
// Thrift's implementation uses '0' as a placeholder for 'use the default.'
int backlog = conf.getInt(BACKLOG_CONF_KEY, 0);
if (implType == ImplType.HS_HA || implType == ImplType.NONBLOCKING || implType == ImplType.THREADED_SELECTOR) {
InetAddress listenAddress = getBindAddress(conf);
TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(new InetSocketAddress(listenAddress, listenPort));
if (implType == ImplType.NONBLOCKING) {
TNonblockingServer.Args serverArgs = new TNonblockingServer.Args(serverTransport);
serverArgs.processor(processor).transportFactory(transportFactory).protocolFactory(protocolFactory);
tserver = new TNonblockingServer(serverArgs);
} else if (implType == ImplType.HS_HA) {
THsHaServer.Args serverArgs = new THsHaServer.Args(serverTransport);
CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<>(), metrics);
ExecutorService executorService = createExecutor(callQueue, serverArgs.getMaxWorkerThreads(), serverArgs.getMaxWorkerThreads());
serverArgs.executorService(executorService).processor(processor).transportFactory(transportFactory).protocolFactory(protocolFactory);
tserver = new THsHaServer(serverArgs);
} else {
// THREADED_SELECTOR
TThreadedSelectorServer.Args serverArgs = new HThreadedSelectorServerArgs(serverTransport, conf);
CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<>(), metrics);
ExecutorService executorService = createExecutor(callQueue, serverArgs.getWorkerThreads(), serverArgs.getWorkerThreads());
serverArgs.executorService(executorService).processor(processor).transportFactory(transportFactory).protocolFactory(protocolFactory);
tserver = new TThreadedSelectorServer(serverArgs);
}
LOG.info("starting HBase " + implType.simpleClassName() + " server on " + Integer.toString(listenPort));
} else if (implType == ImplType.THREAD_POOL) {
// Thread pool server. Get the IP address to bind to.
InetAddress listenAddress = getBindAddress(conf);
int readTimeout = conf.getInt(THRIFT_SERVER_SOCKET_READ_TIMEOUT_KEY, THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT);
TServerTransport serverTransport = new TServerSocket(new TServerSocket.ServerSocketTransportArgs().bindAddr(new InetSocketAddress(listenAddress, listenPort)).backlog(backlog).clientTimeout(readTimeout));
TBoundedThreadPoolServer.Args serverArgs = new TBoundedThreadPoolServer.Args(serverTransport, conf);
serverArgs.processor(processor).transportFactory(transportFactory).protocolFactory(protocolFactory);
LOG.info("starting " + ImplType.THREAD_POOL.simpleClassName() + " on " + listenAddress + ":" + Integer.toString(listenPort) + " with readTimeout " + readTimeout + "ms; " + serverArgs);
TBoundedThreadPoolServer tserver = new TBoundedThreadPoolServer(serverArgs, metrics);
this.tserver = tserver;
} else {
throw new AssertionError("Unsupported Thrift server implementation: " + implType.simpleClassName());
}
// A sanity check that we instantiated the right type of server.
if (tserver.getClass() != implType.serverClass) {
throw new AssertionError("Expected to create Thrift server class " + implType.serverClass.getName() + " but got " + tserver.getClass().getName());
}
registerFilters(conf);
}
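A minimal client-side sketch that could talk to the server configured above when the compact protocol and framed transport options are enabled. The host, port, and the getTableNames() smoke test are illustrative assumptions; Hbase.Client is the Thrift-generated counterpart of the Hbase.Processor used in setupServer.
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class HBaseThriftClientSketch {
    public static void main(String[] args) throws Exception {
        // Nonblocking/selector servers require a framed transport on the client side too.
        TTransport transport = new TFramedTransport(new TSocket("localhost", 9090));
        TProtocol protocol = new TCompactProtocol(transport);
        Hbase.Client client = new Hbase.Client(protocol);
        transport.open();
        try {
            // Simple smoke test: list the table names known to the cluster.
            for (ByteBuffer name : client.getTableNames()) {
                System.out.println(StandardCharsets.UTF_8.decode(name));
            }
        } finally {
            transport.close();
        }
    }
}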
use of org.apache.thrift.server.TThreadedSelectorServer in project buck by facebook.
the class ThriftCoordinatorServer method start.
public ThriftCoordinatorServer start() throws IOException {
synchronized (lock) {
try {
transport = new TNonblockingServerSocket(this.port);
} catch (TTransportException e) {
throw new ThriftException(e);
}
TThreadedSelectorServer.Args serverArgs = new TThreadedSelectorServer.Args(transport);
serverArgs.processor(processor);
server = new TThreadedSelectorServer(serverArgs);
serverThread = new Thread(() -> Preconditions.checkNotNull(server).serve());
serverThread.start();
}
return this;
}
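A hedged sketch of a matching shutdown path, reusing the lock, server, serverThread, and transport fields referenced in start() above; the real ThriftCoordinatorServer may stop its server differently.
public ThriftCoordinatorServer stop() throws IOException {
    synchronized (lock) {
        if (server != null) {
            // TServer.stop() asks the selector and accept threads to exit, so serve() returns.
            server.stop();
        }
        if (serverThread != null) {
            try {
                serverThread.join();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
        if (transport != null) {
            // Release the listening socket.
            transport.close();
        }
    }
    return this;
}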
use of org.apache.thrift.server.TThreadedSelectorServer in project hbase by apache.
the class ThriftServer method getTThreadedSelectorServer.
protected TServer getTThreadedSelectorServer(TNonblockingServerTransport serverTransport, TProtocolFactory protocolFactory, TProcessor processor, TTransportFactory transportFactory, InetSocketAddress inetSocketAddress) {
LOG.info("starting HBase ThreadedSelector Thrift server on " + inetSocketAddress.toString());
TThreadedSelectorServer.Args serverArgs = new HThreadedSelectorServerArgs(serverTransport, conf);
int queueSize = conf.getInt(TBoundedThreadPoolServer.MAX_QUEUED_REQUESTS_CONF_KEY, TBoundedThreadPoolServer.DEFAULT_MAX_QUEUED_REQUESTS);
CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<>(queueSize), metrics);
int workerThreads = conf.getInt(TBoundedThreadPoolServer.MAX_WORKER_THREADS_CONF_KEY, serverArgs.getWorkerThreads());
int selectorThreads = conf.getInt(THRIFT_SELECTOR_NUM, serverArgs.getSelectorThreads());
serverArgs.selectorThreads(selectorThreads);
ExecutorService executorService = createExecutor(callQueue, workerThreads, workerThreads);
serverArgs.executorService(executorService).processor(processor).transportFactory(transportFactory).protocolFactory(protocolFactory);
return new TThreadedSelectorServer(serverArgs);
}
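The createExecutor helper is not shown in this listing. A plausible sketch of what such a helper does, assuming java.util.concurrent plus Guava's com.google.common.util.concurrent.ThreadFactoryBuilder, is a bounded ThreadPoolExecutor that drains the CallQueue; the thread-name format and daemon flag below are assumptions, not the actual HBase code.
private ExecutorService createExecutorSketch(BlockingQueue<Runnable> callQueue, int minWorkers, int maxWorkers) {
    // Name the worker threads and mark them as daemons (illustrative choices).
    ThreadFactory threadFactory = new ThreadFactoryBuilder()
        .setDaemon(true)
        .setNameFormat("thrift-worker-%d")
        .build();
    // Workers pull queued Thrift calls directly from the bounded CallQueue.
    return new ThreadPoolExecutor(minWorkers, maxWorkers,
        Long.MAX_VALUE, TimeUnit.SECONDS, callQueue, threadFactory);
}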
use of org.apache.thrift.server.TThreadedSelectorServer in project dubbo by alibaba.
the class ThriftProtocol method getTServer.
private <T> TServer getTServer(T impl, Class<T> type, URL url) {
TThreadedSelectorServer.Args tArgs = null;
String typeName = type.getName();
TServer tserver;
if (typeName.endsWith(THRIFT_IFACE)) {
String processorClsName = typeName.substring(0, typeName.indexOf(THRIFT_IFACE)) + THRIFT_PROCESSOR;
try {
Class<?> clazz = Class.forName(processorClsName);
Constructor constructor = clazz.getConstructor(type);
TProcessor tprocessor = (TProcessor) constructor.newInstance(impl);
processor.registerProcessor(typeName, tprocessor);
tserver = SERVER_MAP.get(url.getAddress());
if (tserver == null) {
/**
 * Raise the backlog: by default only 50 pending connections are allowed.
 */
TNonblockingServerSocket.NonblockingAbstractServerSocketArgs args = new TNonblockingServerSocket.NonblockingAbstractServerSocketArgs();
/**
 * Allow up to 1000 pending connections.
 */
args.backlog(1000);
String bindIp = url.getParameter(Constants.BIND_IP_KEY, url.getHost());
if (url.getParameter(ANYHOST_KEY, false)) {
bindIp = ANYHOST_VALUE;
}
int bindPort = url.getParameter(Constants.BIND_PORT_KEY, url.getPort());
args.bindAddr(new InetSocketAddress(bindIp, bindPort));
/**
 * Client timeout: 10 seconds.
 */
args.clientTimeout(10000);
TNonblockingServerSocket transport = new TNonblockingServerSocket(args);
tArgs = new TThreadedSelectorServer.Args(transport);
tArgs.workerThreads(200);
tArgs.selectorThreads(4);
tArgs.acceptQueueSizePerThread(256);
tArgs.processor(processor);
tArgs.transportFactory(new TFramedTransport.Factory());
tArgs.protocolFactory(new TCompactProtocol.Factory());
} else {
// if the server is already starting, return and do nothing here
return null;
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
throw new RpcException("Fail to create nativethrift server(" + url + ") : " + e.getMessage(), e);
}
}
if (tArgs == null) {
logger.error("Fail to create native thrift server(" + url + ") due to null args");
throw new RpcException("Fail to create nativethrift server(" + url + ") due to null args");
}
tserver = new TThreadedSelectorServer(tArgs);
return tserver;
}
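Because the dubbo server above registers each generated processor on a shared multiplexing processor under its typeName, a plain Thrift client would use the same framed transport and compact protocol plus a TMultiplexedProtocol keyed by that name. The sketch below is an assumption-laden illustration: MyService, its Iface name, and the host/port are placeholders, not dubbo API.
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.protocol.TMultiplexedProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class DubboThriftClientSketch {
    public static void main(String[] args) throws Exception {
        // Framed transport and compact protocol mirror the server-side tArgs above.
        TTransport transport = new TFramedTransport(new TSocket("127.0.0.1", 20880));
        TProtocol protocol = new TCompactProtocol(transport);
        // The service name must match the typeName registered on the server,
        // i.e. the fully qualified Iface class name (placeholder here).
        TMultiplexedProtocol serviceProtocol =
                new TMultiplexedProtocol(protocol, "com.example.MyService$Iface");
        MyService.Client client = new MyService.Client(serviceProtocol); // generated client (assumed)
        transport.open();
        try {
            // ... invoke service methods here ...
        } finally {
            transport.close();
        }
    }
}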
use of org.apache.thrift.server.TThreadedSelectorServer in project tech by ffyyhh995511.
the class Test3 method main.
public static void main(String[] args2) {
try {
// Non-blocking server transport, used together with TFramedTransport
TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(7911);
// Bind the processor to the service implementation
TProcessor processor = new Hello.Processor<Hello.Iface>(new HelloImpl());
// Currently the most capable server mode Thrift provides; handles client requests concurrently
TThreadedSelectorServer.Args args = new TThreadedSelectorServer.Args(serverTransport);
args.processor(processor);
// Protocol factory: an efficient, compact binary encoding for data transfer
args.protocolFactory(new TCompactProtocol.Factory());
// Transport factory: non-blocking, transfers data frame by frame, similar to Java NIO
args.transportFactory(new TFramedTransport.Factory());
// Processor factory that always returns the same singleton processor instance
args.processorFactory(new TProcessorFactory(processor));
// Multiple selector threads, mainly responsible for client I/O
args.selectorThreads(2);
// Worker thread pool
ExecutorService pool = Executors.newFixedThreadPool(3);
args.executorService(pool);
TThreadedSelectorServer server = new TThreadedSelectorServer(args);
System.out.println("Starting server on port " + 7911 + "......");
server.serve();
} catch (Exception e) {
e.printStackTrace();
}
}
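A matching client sketch for Test3: it has to use a framed transport and the compact protocol because the server above is nonblocking and configured with TCompactProtocol. Hello.Client is the generated counterpart of the Hello.Processor used by Test3; the helloString call is a placeholder for whatever the Hello IDL actually defines.
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class Test3Client {
    public static void main(String[] args) throws Exception {
        // The server is nonblocking, so the client must frame its messages too.
        TTransport transport = new TFramedTransport(new TSocket("localhost", 7911));
        TProtocol protocol = new TCompactProtocol(transport);
        Hello.Client client = new Hello.Client(protocol);
        transport.open();
        try {
            // Placeholder call; substitute a method defined in the Hello IDL.
            System.out.println(client.helloString("thrift"));
        } finally {
            transport.close();
        }
    }
}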