use of org.apache.thrift.server.TThreadPoolServer in project hive by apache.
the class HiveMetaStore method startMetaStore.
/**
* Start Metastore based on a passed {@link HadoopThriftAuthBridge}
*
* @param port
* @param bridge
* @param conf
* configuration overrides
* @throws Throwable
*/
public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, Configuration conf,
    Lock startLock, Condition startCondition, AtomicBoolean startedServing) throws Throwable {
  try {
    isMetaStoreRemote = true;
    // Server will create new threads up to max as necessary. After an idle
    // period, it will destroy threads to keep the number of threads in the
    // pool to min.
    long maxMessageSize = MetastoreConf.getLongVar(conf, ConfVars.SERVER_MAX_MESSAGE_SIZE);
    int minWorkerThreads = MetastoreConf.getIntVar(conf, ConfVars.SERVER_MIN_THREADS);
    int maxWorkerThreads = MetastoreConf.getIntVar(conf, ConfVars.SERVER_MAX_THREADS);
    boolean tcpKeepAlive = MetastoreConf.getBoolVar(conf, ConfVars.TCP_KEEP_ALIVE);
    boolean useFramedTransport = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_FRAMED_TRANSPORT);
    boolean useCompactProtocol = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_COMPACT_PROTOCOL);
    boolean useSSL = MetastoreConf.getBoolVar(conf, ConfVars.USE_SSL);
    useSasl = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_SASL);
    if (useSasl) {
      // we are in secure mode. Login using keytab
      String kerberosName = SecurityUtil.getServerPrincipal(
          MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL), "0.0.0.0");
      String keyTabFile = MetastoreConf.getVar(conf, ConfVars.KERBEROS_KEYTAB_FILE);
      UserGroupInformation.loginUserFromKeytab(kerberosName, keyTabFile);
    }
    TProcessor processor;
    TTransportFactory transFactory;
    final TProtocolFactory protocolFactory;
    final TProtocolFactory inputProtoFactory;
    if (useCompactProtocol) {
      protocolFactory = new TCompactProtocol.Factory();
      inputProtoFactory = new TCompactProtocol.Factory(maxMessageSize, maxMessageSize);
    } else {
      protocolFactory = new TBinaryProtocol.Factory();
      inputProtoFactory = new TBinaryProtocol.Factory(true, true, maxMessageSize, maxMessageSize);
    }
    HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", conf, false);
    IHMSHandler handler = newRetryingHMSHandler(baseHandler, conf);
    // Initialize materializations invalidation cache
    MaterializationsInvalidationCache.get().init(conf, handler);
    TServerSocket serverSocket;
    if (useSasl) {
      // we are in secure mode.
      if (useFramedTransport) {
        throw new HiveMetaException("Framed transport is not supported with SASL enabled.");
      }
      saslServer = bridge.createServer(
          MetastoreConf.getVar(conf, ConfVars.KERBEROS_KEYTAB_FILE),
          MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL),
          MetastoreConf.getVar(conf, ConfVars.CLIENT_KERBEROS_PRINCIPAL));
      // Start delegation token manager
      delegationTokenManager = new MetastoreDelegationTokenManager();
      delegationTokenManager.startDelegationTokenSecretManager(conf, baseHandler,
          HadoopThriftAuthBridge.Server.ServerMode.METASTORE);
      saslServer.setSecretManager(delegationTokenManager.getSecretManager());
      transFactory = saslServer.createTransportFactory(MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
      processor = saslServer.wrapProcessor(new ThriftHiveMetastore.Processor<>(handler));
      LOG.info("Starting DB backed MetaStore Server in Secure Mode");
    } else {
      // we are in unsecure mode.
      if (MetastoreConf.getBoolVar(conf, ConfVars.EXECUTE_SET_UGI)) {
        transFactory = useFramedTransport
            ? new ChainedTTransportFactory(new TFramedTransport.Factory(), new TUGIContainingTransport.Factory())
            : new TUGIContainingTransport.Factory();
        processor = new TUGIBasedProcessor<>(handler);
        LOG.info("Starting DB backed MetaStore Server with SetUGI enabled");
      } else {
        transFactory = useFramedTransport ? new TFramedTransport.Factory() : new TTransportFactory();
        processor = new TSetIpAddressProcessor<>(handler);
        LOG.info("Starting DB backed MetaStore Server");
      }
    }
    if (!useSSL) {
      serverSocket = SecurityUtils.getServerSocket(null, port);
    } else {
      String keyStorePath = MetastoreConf.getVar(conf, ConfVars.SSL_KEYSTORE_PATH).trim();
      if (keyStorePath.isEmpty()) {
        throw new IllegalArgumentException(ConfVars.SSL_KEYSTORE_PATH.toString() + " Not configured for SSL connection");
      }
      String keyStorePassword = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_KEYSTORE_PASSWORD);
      // enable SSL support for HMS
      List<String> sslVersionBlacklist = new ArrayList<>();
      for (String sslVersion : MetastoreConf.getVar(conf, ConfVars.SSL_PROTOCOL_BLACKLIST).split(",")) {
        sslVersionBlacklist.add(sslVersion);
      }
      serverSocket = SecurityUtils.getServerSSLSocket(null, port, keyStorePath, keyStorePassword, sslVersionBlacklist);
    }
    if (tcpKeepAlive) {
      serverSocket = new TServerSocketKeepAlive(serverSocket);
    }
    // Metrics will have already been initialized if we're using them since HMSHandler
    // initializes them.
    openConnections = Metrics.getOrCreateGauge(MetricsConstants.OPEN_CONNECTIONS);
    TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverSocket)
        .processor(processor)
        .transportFactory(transFactory)
        .protocolFactory(protocolFactory)
        .inputProtocolFactory(inputProtoFactory)
        .minWorkerThreads(minWorkerThreads)
        .maxWorkerThreads(maxWorkerThreads);
    TServer tServer = new TThreadPoolServer(args);
    TServerEventHandler tServerEventHandler = new TServerEventHandler() {

      @Override
      public void preServe() {
      }

      @Override
      public ServerContext createContext(TProtocol tProtocol, TProtocol tProtocol1) {
        openConnections.incrementAndGet();
        return null;
      }

      @Override
      public void deleteContext(ServerContext serverContext, TProtocol tProtocol, TProtocol tProtocol1) {
        openConnections.decrementAndGet();
        // If the IMetaStoreClient#close was called, HMSHandler#shutdown would have already
        // cleaned up thread local RawStore. Otherwise, do it now.
        cleanupRawStore();
      }

      @Override
      public void processContext(ServerContext serverContext, TTransport tTransport, TTransport tTransport1) {
      }
    };
    tServer.setServerEventHandler(tServerEventHandler);
    HMSHandler.LOG.info("Started the new metaserver on port [" + port + "]...");
    HMSHandler.LOG.info("Options.minWorkerThreads = " + minWorkerThreads);
    HMSHandler.LOG.info("Options.maxWorkerThreads = " + maxWorkerThreads);
    HMSHandler.LOG.info("TCP keepalive = " + tcpKeepAlive);
    HMSHandler.LOG.info("Enable SSL = " + useSSL);
    if (startLock != null) {
      signalOtherThreadsToStart(tServer, startLock, startCondition, startedServing);
    }
    tServer.serve();
  } catch (Throwable x) {
    x.printStackTrace();
    HMSHandler.LOG.error(StringUtils.stringifyException(x));
    throw x;
  }
}
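A client that talks to the server started above has to mirror its transport and protocol choices. The following is only a minimal sketch for the simplest configuration (no SASL, no SSL, no framed transport, binary protocol); the host, port, and the direct use of the generated ThriftHiveMetastore.Client are illustrative rather than the project's usual client path, which normally goes through HiveMetaStoreClient.

import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class PlainMetastoreClientSketch {
    public static void main(String[] args) throws Exception {
        // Assumes the metastore was started without SASL, SSL, framed transport, or the compact
        // protocol -- otherwise the transport/protocol below must be changed to match.
        TTransport transport = new TSocket("localhost", 9083);   // host and port are assumptions
        transport.open();
        ThriftHiveMetastore.Client client =
                new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        // get_all_databases() is one of the thrift methods served by the HMSHandler above.
        System.out.println(client.get_all_databases());
        transport.close();
    }
}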
use of org.apache.thrift.server.TThreadPoolServer in project accumulo by apache.
the class TServerUtils method createSaslThreadPoolServer.
public static ServerAddress createSaslThreadPoolServer(HostAndPort address, TProcessor processor,
    TProtocolFactory protocolFactory, long socketTimeout, SaslServerConnectionParams params,
    final String serverName, String threadName, final int numThreads, final int numSTThreads,
    long timeBetweenThreadChecks) throws TTransportException {
  // We'd really prefer to use THsHaServer (or similar) to avoid the 1 RPC == 1 thread model of
  // TThreadPoolServer, but sadly that isn't possible: TSaslTransport needs to issue a handshake
  // when it open()'s, which fails when the server does an accept() to (presumably) wake up the
  // eventing system.
  log.info("Creating SASL thread pool thrift server listening on {}:{}", address.getHost(), address.getPort());
  TServerSocket transport = new TServerSocket(address.getPort(), (int) socketTimeout);
  String hostname, fqdn;
  try {
    hostname = InetAddress.getByName(address.getHost()).getCanonicalHostName();
    fqdn = InetAddress.getLocalHost().getCanonicalHostName();
  } catch (UnknownHostException e) {
    transport.close();
    throw new TTransportException(e);
  }
  // If we can't get a real hostname from the provided host, use the hostname from DNS for localhost
  if ("0.0.0.0".equals(hostname)) {
    hostname = fqdn;
  }
  // With SASL enabled, clients and servers must agree on the FQDN for the handshake to succeed,
  // so fail fast and point the user at their configuration if the hostname doesn't match.
  if (!hostname.equals(fqdn)) {
    log.error("Expected hostname of '{}' but got '{}'. Ensure the entries in the Accumulo hosts files (e.g. masters, tservers) are the FQDN for each host when using SASL.", fqdn, hostname);
    transport.close();
    throw new RuntimeException("SASL requires that the address the thrift server listens on is the same as the FQDN for this host");
  }
  final UserGroupInformation serverUser;
  try {
    serverUser = UserGroupInformation.getLoginUser();
  } catch (IOException e) {
    transport.close();
    throw new TTransportException(e);
  }
  log.debug("Logged in as {}, creating TSaslServerTransport factory with {}/{}", serverUser, params.getKerberosServerPrimary(), hostname);
  // Make the SASL transport factory with the instance and primary from the kerberos server principal,
  // the SASL properties, and the SASL callback handler from Hadoop to ensure the authorization ID is
  // the authentication ID. Despite the 'protocol' argument seeming to be useless, it *must* be the
  // primary of the server.
  TSaslServerTransport.Factory saslTransportFactory = new TSaslServerTransport.Factory();
  saslTransportFactory.addServerDefinition(ThriftUtil.GSSAPI, params.getKerberosServerPrimary(), hostname,
      params.getSaslProperties(), new SaslRpcServer.SaslGssCallbackHandler());
  if (null != params.getSecretManager()) {
    log.info("Adding DIGEST-MD5 server definition for delegation tokens");
    saslTransportFactory.addServerDefinition(ThriftUtil.DIGEST_MD5, params.getKerberosServerPrimary(), hostname,
        params.getSaslProperties(), new SaslServerDigestCallbackHandler(params.getSecretManager()));
  } else {
    log.info("SecretManager is null, not adding support for delegation token authentication");
  }
  // Make sure the TTransportFactory is performing a UGI.doAs
  TTransportFactory ugiTransportFactory = new UGIAssumingTransportFactory(saslTransportFactory, serverUser);
  if (address.getPort() == 0) {
    // If we chose a port dynamically, use it (along with the proper hostname)
    address = HostAndPort.fromParts(address.getHost(), transport.getServerSocket().getLocalPort());
    log.info("SASL thrift server bound on {}", address);
  }
  ThreadPoolExecutor pool = createSelfResizingThreadPool(serverName, numThreads, numSTThreads, timeBetweenThreadChecks);
  final TThreadPoolServer server = createTThreadPoolServer(transport, processor, ugiTransportFactory, protocolFactory, pool);
  return new ServerAddress(server, address);
}
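The createTThreadPoolServer(...) helper called above is not part of this excerpt. A plausible sketch, assuming it simply hands the pre-built thread pool to the Thrift server instead of letting TThreadPoolServer spin up its own workers, might be:

import java.util.concurrent.ExecutorService;
import org.apache.thrift.TProcessor;
import org.apache.thrift.protocol.TProtocolFactory;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TTransportFactory;

final class ThreadPoolServerSketch {
    // Hypothetical equivalent of the createTThreadPoolServer(...) helper used above:
    // configure TThreadPoolServer to run requests on the caller's self-resizing pool.
    static TThreadPoolServer createTThreadPoolServer(TServerSocket transport, TProcessor processor,
            TTransportFactory transportFactory, TProtocolFactory protocolFactory, ExecutorService pool) {
        TThreadPoolServer.Args options = new TThreadPoolServer.Args(transport)
                .processor(processor)
                .transportFactory(transportFactory)
                .protocolFactory(protocolFactory)
                .executorService(pool);   // reuse the pool created by the caller
        return new TThreadPoolServer(options);
    }
}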
use of org.apache.thrift.server.TThreadPoolServer in project accumulo by apache.
the class TServerUtils method createBlockingServer.
/**
* Creates a TThreadPoolServer for normal, unsecured operation. Useful for comparing performance against SSL or SASL transports.
*
* @param address
* Address to bind to
* @param processor
* TProcessor for the server
* @param maxMessageSize
* Maximum size of a Thrift message allowed
* @return A configured TThreadPoolServer and its bound address information
*/
public static ServerAddress createBlockingServer(HostAndPort address, TProcessor processor,
    TProtocolFactory protocolFactory, long maxMessageSize, String serverName, int numThreads,
    int numSimpleTimerThreads, long timeBetweenThreadChecks) throws TTransportException {
  TServerSocket transport = new TServerSocket(address.getPort());
  ThreadPoolExecutor pool = createSelfResizingThreadPool(serverName, numThreads, numSimpleTimerThreads, timeBetweenThreadChecks);
  TThreadPoolServer server = createTThreadPoolServer(transport, processor, ThriftUtil.transportFactory(maxMessageSize), protocolFactory, pool);
  if (address.getPort() == 0) {
    address = HostAndPort.fromParts(address.getHost(), transport.getServerSocket().getLocalPort());
    log.info("Blocking Server bound on {}", address);
  }
  return new ServerAddress(server, address);
}
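For comparison, a client for such a blocking server needs a transport that matches how the server enforces its message-size limit. The sketch below assumes ThriftUtil.transportFactory(maxMessageSize) wraps connections in a framed transport, so the client frames its socket the same way; the host, port, and frame size are placeholders.

import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;

public class BlockingServerClientSketch {
    public static void main(String[] args) throws Exception {
        int maxFrameSize = 1 * 1024 * 1024;   // should not exceed the server's maxMessageSize (assumed value)
        // Frame the socket to mirror the server-side transport factory; host and port are placeholders.
        TFramedTransport transport = new TFramedTransport(new TSocket("server.example.com", 9997), maxFrameSize);
        transport.open();
        TProtocol protocol = new TBinaryProtocol(transport);
        // A generated service client (e.g. SomeService.Client) would be constructed from 'protocol' here.
        transport.close();
    }
}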
use of org.apache.thrift.server.TThreadPoolServer in project dubbo by alibaba.
the class ServiceMethodNotFoundTest method init.
protected void init() throws Exception {
  TServerTransport serverTransport = new TServerSocket(PORT);
  DubboDemoImpl impl = new DubboDemoImpl();
  $__DemoStub.Processor processor = new $__DemoStub.Processor(impl);
  // for test: remove the "echoString" entry from the generated processor's processMap so the
  // server no longer knows about that method
  Field field = processor.getClass().getSuperclass().getDeclaredField("processMap");
  ReflectUtils.makeAccessible(field);
  Object obj = field.get(processor);
  if (obj instanceof Map) {
    ((Map) obj).remove("echoString");
  }
  // ~
  TBinaryProtocol.Factory bFactory = new TBinaryProtocol.Factory();
  MultiServiceProcessor wrapper = new MultiServiceProcessor();
  wrapper.addProcessor(Demo.class, processor);
  server = new TThreadPoolServer(new TThreadPoolServer.Args(serverTransport)
      .inputProtocolFactory(bFactory)
      .outputProtocolFactory(bFactory)
      .inputTransportFactory(getTransportFactory())
      .outputTransportFactory(getTransportFactory())
      .processor(wrapper));
  Thread startThread = new Thread() {

    @Override
    public void run() {
      server.serve();
    }
  };
  startThread.start();
  while (!server.isServing()) {
    Thread.sleep(100);
  }
}
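One caveat with the loop above: if the server never reaches the serving state, the test spins forever. A bounded variant of the wait, sketched as a small helper with an arbitrary timeout, could look like this:

// A bounded variant of the start-up wait: poll isServing() but fail after a deadline
// instead of spinning forever if the server never comes up. The timeout value is arbitrary.
private static void awaitServing(org.apache.thrift.server.TServer server, long timeoutMillis)
        throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!server.isServing()) {
        if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("Thrift server did not start within " + timeoutMillis + " ms");
        }
        Thread.sleep(100);
    }
}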
use of org.apache.thrift.server.TThreadPoolServer in project tech by ffyyhh995511.
the class Test1 method main.
/**
 * Write the server side and publish a service using blocking I/O with a worker thread pool.
 *
 * @param args
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
public static void main(String[] args) {
  try {
    // Set up the transport channel: a plain (blocking) server socket
    TServerTransport serverTransport = new TServerSocket(7911);
    // Use the compact binary protocol
    TProtocolFactory proFactory = new TCompactProtocol.Factory();
    // Set the processor to HelloImpl
    TProcessor processor = new Hello.Processor(new HelloImpl());
    // Create the server
    Args args2 = new Args(serverTransport);
    args2.protocolFactory(proFactory);
    args2.processor(processor);
    TServer server = new TThreadPoolServer(args2);
    System.out.println("Start server on port 7911...");
    server.serve();
  } catch (Exception e) {
    e.printStackTrace();
  }
}
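A matching client must use the same TCompactProtocol over a plain socket. The sketch below is hypothetical: a Hello.Client class follows from the Hello.Processor used above, but the helloString method name is only an assumed example of whatever the Hello IDL actually defines.

import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class HelloClientSketch {
    public static void main(String[] args) throws Exception {
        // Plain socket transport, matching the TServerSocket used by the server above
        TTransport transport = new TSocket("localhost", 7911);
        transport.open();
        // Must be TCompactProtocol because the server registered a TCompactProtocol.Factory
        TProtocol protocol = new TCompactProtocol(transport);
        Hello.Client client = new Hello.Client(protocol);
        // The method name below is hypothetical; use whatever the Hello IDL defines.
        System.out.println(client.helloString("thrift"));
        transport.close();
    }
}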
Aggregations