Use of org.apache.thrift.protocol.TProtocolFactory in project hive by apache: class DynamicSerDe, method initialize.
@Override
public void initialize(Configuration job, Properties tbl) throws SerDeException {
  try {
    String ddl = tbl.getProperty(serdeConstants.SERIALIZATION_DDL);
    // type_name used to be tbl.getProperty(META_TABLE_NAME).
    // However, now the value is DBName.TableName. To make it backward compatible,
    // we take the TableName part as type_name.
    //
    String tableName = tbl.getProperty(META_TABLE_NAME);
    int index = tableName.indexOf('.');
    if (index != -1) {
      type_name = tableName.substring(index + 1, tableName.length());
    } else {
      type_name = tableName;
    }
    String protoName = tbl.getProperty(serdeConstants.SERIALIZATION_FORMAT);
    if (protoName == null) {
      protoName = "org.apache.thrift.protocol.TBinaryProtocol";
    }
    // For backward compatibility
    protoName = protoName.replace("com.facebook.thrift.protocol", "org.apache.thrift.protocol");
    TProtocolFactory protFactory = TReflectionUtils.getProtocolFactoryByName(protoName);
    bos_ = new ByteStream.Output();
    bis_ = new ByteStream.Input();
    tios = new TIOStreamTransport(bis_, bos_);
    oprot_ = protFactory.getProtocol(tios);
    iprot_ = protFactory.getProtocol(tios);
    if (oprot_ instanceof org.apache.hadoop.hive.serde2.thrift.ConfigurableTProtocol) {
      ((ConfigurableTProtocol) oprot_).initialize(job, tbl);
    }
    if (iprot_ instanceof org.apache.hadoop.hive.serde2.thrift.ConfigurableTProtocol) {
      ((ConfigurableTProtocol) iprot_).initialize(job, tbl);
    }
    // in theory the include path should come from the configuration
    List<String> include_path = new ArrayList<String>();
    include_path.add(".");
    LOG.debug("ddl=" + ddl);
    parse_tree = new thrift_grammar(new ByteArrayInputStream(ddl.getBytes()), include_path, false);
    parse_tree.Start();
    bt = (DynamicSerDeStructBase) parse_tree.types.get(type_name);
    if (bt == null) {
      bt = (DynamicSerDeStructBase) parse_tree.tables.get(type_name);
    }
    if (bt == null) {
      throw new SerDeException("Could not lookup table type " + type_name + " in this ddl: " + ddl);
    }
    bt.initialize();
  } catch (Exception e) {
    System.err.println(StringUtils.stringifyException(e));
    throw new SerDeException(e);
  }
}
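For context, a minimal sketch of how this method might be driven, assuming the standard serde property keys and an illustrative DDL string and table name (none of this is taken from the Hive sources):

// Hedged sketch, not from the Hive sources: drives DynamicSerDe.initialize
// with hand-built table properties. The DDL and names are illustrative.
void initializeDynamicSerDeSketch() throws SerDeException {
  Properties tbl = new Properties();
  tbl.setProperty(serdeConstants.SERIALIZATION_DDL,
      "struct example { i32 id, string name }");
  // META_TABLE_NAME holds the DBName.TableName form mentioned in the comment
  // above, so type_name resolves to "example".
  tbl.setProperty(META_TABLE_NAME, "default.example");
  tbl.setProperty(serdeConstants.SERIALIZATION_FORMAT,
      "org.apache.thrift.protocol.TBinaryProtocol");
  DynamicSerDe serde = new DynamicSerDe();
  serde.initialize(new Configuration(), tbl);
}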
Use of org.apache.thrift.protocol.TProtocolFactory in project hive by apache: class ThriftHttpCLIService, method run.
/**
 * Configure Jetty to serve http requests. Example of a client connection URL:
 * http://localhost:10000/servlets/thrifths2/ A gateway may cause the actual target URL to differ,
 * e.g. http://gateway:port/hive2/servlets/thrifths2/
 */
@Override
public void run() {
  try {
    // HTTP Server
    httpServer = new org.eclipse.jetty.server.Server();
    // Server thread pool
    // Start with minWorkerThreads, expand till maxWorkerThreads and reject subsequent requests
    String threadPoolName = "HiveServer2-HttpHandler-Pool";
    ExecutorService executorService = new ThreadPoolExecutorWithOomHook(minWorkerThreads,
        maxWorkerThreads, workerKeepAliveTime, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
        new ThreadFactoryWithGarbageCleanup(threadPoolName), oomHook);
    ExecutorThreadPool threadPool = new ExecutorThreadPool(executorService);
    httpServer.setThreadPool(threadPool);
    // Connector configs
    SelectChannelConnector connector = new SelectChannelConnector();
    // Configure header size
    int requestHeaderSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_REQUEST_HEADER_SIZE);
    int responseHeaderSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_RESPONSE_HEADER_SIZE);
    connector.setRequestHeaderSize(requestHeaderSize);
    connector.setResponseHeaderSize(responseHeaderSize);
    boolean useSsl = hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_USE_SSL);
    String schemeName = useSsl ? "https" : "http";
    // Change connector if SSL is used
    if (useSsl) {
      String keyStorePath = hiveConf.getVar(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH).trim();
      String keyStorePassword = ShimLoader.getHadoopShims().getPassword(hiveConf,
          HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname);
      if (keyStorePath.isEmpty()) {
        throw new IllegalArgumentException(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH.varname
            + " Not configured for SSL connection");
      }
      SslContextFactory sslContextFactory = new SslContextFactory();
      String[] excludedProtocols = hiveConf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(",");
      LOG.info("HTTP Server SSL: adding excluded protocols: " + Arrays.toString(excludedProtocols));
      sslContextFactory.addExcludeProtocols(excludedProtocols);
      LOG.info("HTTP Server SSL: SslContextFactory.getExcludeProtocols = "
          + Arrays.toString(sslContextFactory.getExcludeProtocols()));
      sslContextFactory.setKeyStorePath(keyStorePath);
      sslContextFactory.setKeyStorePassword(keyStorePassword);
      connector = new SslSelectChannelConnector(sslContextFactory);
    }
    connector.setPort(portNum);
    // Linux:yes, Windows:no
    connector.setReuseAddress(true);
    int maxIdleTime = (int) hiveConf.getTimeVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME,
        TimeUnit.MILLISECONDS);
    connector.setMaxIdleTime(maxIdleTime);
    httpServer.addConnector(connector);
    // Thrift configs
    hiveAuthFactory = new HiveAuthFactory(hiveConf);
    TProcessor processor = new TCLIService.Processor<Iface>(this);
    TProtocolFactory protocolFactory = new TBinaryProtocol.Factory();
    // Set during the init phase of HiveServer2 if auth mode is kerberos
    // UGI for the hive/_HOST (kerberos) principal
    UserGroupInformation serviceUGI = cliService.getServiceUGI();
    // UGI for the http/_HOST (SPNego) principal
    UserGroupInformation httpUGI = cliService.getHttpUGI();
    String authType = hiveConf.getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION);
    TServlet thriftHttpServlet = new ThriftHttpServlet(processor, protocolFactory, authType,
        serviceUGI, httpUGI, hiveAuthFactory);
    // Context handler
    final ServletContextHandler context = new ServletContextHandler(ServletContextHandler.SESSIONS);
    context.setContextPath("/");
    if (hiveConf.getBoolean(ConfVars.HIVE_SERVER2_XSRF_FILTER_ENABLED.varname, false)) {
      // context.addFilter(Utils.getXSRFFilterHolder(null, null), "/",
      //     FilterMapping.REQUEST);
      // Filtering does not work here currently, doing filter in ThriftHttpServlet
      LOG.debug("XSRF filter enabled");
    } else {
      LOG.warn("XSRF filter disabled");
    }
    String httpPath = getHttpPath(hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_PATH));
    httpServer.setHandler(context);
    context.addServlet(new ServletHolder(thriftHttpServlet), httpPath);
    // TODO: check defaults: maxTimeout, keepalive, maxBodySize, bodyReceiveDuration, etc.
    // Finally, start the server
    httpServer.start();
    String msg = "Started " + ThriftHttpCLIService.class.getSimpleName() + " in " + schemeName
        + " mode on port " + portNum + " path=" + httpPath + " with " + minWorkerThreads + "..."
        + maxWorkerThreads + " worker threads";
    LOG.info(msg);
    httpServer.join();
  } catch (Throwable t) {
    LOG.error("Error starting HiveServer2: could not start "
        + ThriftHttpCLIService.class.getSimpleName(), t);
    System.exit(-1);
  }
}
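Below is a minimal client-side counterpart to the servlet configured above, speaking Thrift over HTTP. The URL, port, and path are illustrative assumptions, and the sketch presumes hive.server2.authentication=NOSASL since a bare THttpClient performs no authentication:

// Hedged sketch, not from the Hive sources: a TCLIService client for the
// HTTP endpoint above. URL/port/path are assumptions; real deployments take
// them from hive-site.xml.
void openSessionOverHttpSketch() throws TException {
  THttpClient transport = new THttpClient("http://localhost:10001/cliservice");
  transport.open();
  // TBinaryProtocol matches the server's TBinaryProtocol.Factory above.
  TProtocol protocol = new TBinaryProtocol(transport);
  TCLIService.Client client = new TCLIService.Client(protocol);
  TOpenSessionResp resp = client.OpenSession(new TOpenSessionReq());
  LOG.info("Opened session: " + resp.getSessionHandle());
  transport.close();
}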
Use of org.apache.thrift.protocol.TProtocolFactory in project hbase by apache: class ThriftServerRunner, method setupServer.
/**
 * Sets up the Thrift TServer.
 */
private void setupServer() throws Exception {
  // Construct correct ProtocolFactory
  TProtocolFactory protocolFactory;
  if (conf.getBoolean(COMPACT_CONF_KEY, false)) {
    LOG.debug("Using compact protocol");
    protocolFactory = new TCompactProtocol.Factory();
  } else {
    LOG.debug("Using binary protocol");
    protocolFactory = new TBinaryProtocol.Factory();
  }
  final TProcessor p = new Hbase.Processor<>(handler);
  ImplType implType = ImplType.getServerImpl(conf);
  TProcessor processor = p;
  // Construct correct TransportFactory
  TTransportFactory transportFactory;
  if (conf.getBoolean(FRAMED_CONF_KEY, false) || implType.isAlwaysFramed) {
    if (qop != null) {
      throw new RuntimeException("Thrift server authentication"
          + " doesn't work with framed transport yet");
    }
    transportFactory = new TFramedTransport.Factory(
        conf.getInt(MAX_FRAME_SIZE_CONF_KEY, 2) * 1024 * 1024);
    LOG.debug("Using framed transport");
  } else if (qop == null) {
    transportFactory = new TTransportFactory();
  } else {
    // Extract the name from the principal
    String name = SecurityUtil.getUserFromPrincipal(conf.get("hbase.thrift.kerberos.principal"));
    Map<String, String> saslProperties = new HashMap<>();
    saslProperties.put(Sasl.QOP, qop);
    TSaslServerTransport.Factory saslFactory = new TSaslServerTransport.Factory();
    saslFactory.addServerDefinition("GSSAPI", name, host, saslProperties,
        new SaslGssCallbackHandler() {
          @Override
          public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
            AuthorizeCallback ac = null;
            for (Callback callback : callbacks) {
              if (callback instanceof AuthorizeCallback) {
                ac = (AuthorizeCallback) callback;
              } else {
                throw new UnsupportedCallbackException(callback, "Unrecognized SASL GSSAPI Callback");
              }
            }
            if (ac != null) {
              String authid = ac.getAuthenticationID();
              String authzid = ac.getAuthorizationID();
              if (!authid.equals(authzid)) {
                ac.setAuthorized(false);
              } else {
                ac.setAuthorized(true);
                String userName = SecurityUtil.getUserFromPrincipal(authzid);
                LOG.info("Effective user: " + userName);
                ac.setAuthorizedID(userName);
              }
            }
          }
        });
    transportFactory = saslFactory;
    // Create a processor wrapper, to get the caller
    processor = new TProcessor() {
      @Override
      public boolean process(TProtocol inProt, TProtocol outProt) throws TException {
        TSaslServerTransport saslServerTransport = (TSaslServerTransport) inProt.getTransport();
        SaslServer saslServer = saslServerTransport.getSaslServer();
        String principal = saslServer.getAuthorizationID();
        hbaseHandler.setEffectiveUser(principal);
        return p.process(inProt, outProt);
      }
    };
  }
  if (conf.get(BIND_CONF_KEY) != null && !implType.canSpecifyBindIP) {
    LOG.error("Server types " + Joiner.on(", ").join(ImplType.serversThatCannotSpecifyBindIP())
        + " don't support IP address binding at the moment. See "
        + "https://issues.apache.org/jira/browse/HBASE-2155 for details.");
    throw new RuntimeException("-" + BIND_CONF_KEY + " not supported with " + implType);
  }
  // Thrift's implementation uses '0' as a placeholder for 'use the default.'
  int backlog = conf.getInt(BACKLOG_CONF_KEY, 0);
  if (implType == ImplType.HS_HA || implType == ImplType.NONBLOCKING
      || implType == ImplType.THREADED_SELECTOR) {
    InetAddress listenAddress = getBindAddress(conf);
    TNonblockingServerTransport serverTransport =
        new TNonblockingServerSocket(new InetSocketAddress(listenAddress, listenPort));
    if (implType == ImplType.NONBLOCKING) {
      TNonblockingServer.Args serverArgs = new TNonblockingServer.Args(serverTransport);
      serverArgs.processor(processor).transportFactory(transportFactory)
          .protocolFactory(protocolFactory);
      tserver = new TNonblockingServer(serverArgs);
    } else if (implType == ImplType.HS_HA) {
      THsHaServer.Args serverArgs = new THsHaServer.Args(serverTransport);
      CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<>(), metrics);
      ExecutorService executorService = createExecutor(callQueue,
          serverArgs.getMaxWorkerThreads(), serverArgs.getMaxWorkerThreads());
      serverArgs.executorService(executorService).processor(processor)
          .transportFactory(transportFactory).protocolFactory(protocolFactory);
      tserver = new THsHaServer(serverArgs);
    } else {
      // THREADED_SELECTOR
      TThreadedSelectorServer.Args serverArgs = new HThreadedSelectorServerArgs(serverTransport, conf);
      CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<>(), metrics);
      ExecutorService executorService = createExecutor(callQueue,
          serverArgs.getWorkerThreads(), serverArgs.getWorkerThreads());
      serverArgs.executorService(executorService).processor(processor)
          .transportFactory(transportFactory).protocolFactory(protocolFactory);
      tserver = new TThreadedSelectorServer(serverArgs);
    }
    LOG.info("starting HBase " + implType.simpleClassName() + " server on "
        + Integer.toString(listenPort));
  } else if (implType == ImplType.THREAD_POOL) {
    // Thread pool server. Get the IP address to bind to.
    InetAddress listenAddress = getBindAddress(conf);
    int readTimeout = conf.getInt(THRIFT_SERVER_SOCKET_READ_TIMEOUT_KEY,
        THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT);
    TServerTransport serverTransport = new TServerSocket(
        new TServerSocket.ServerSocketTransportArgs()
            .bindAddr(new InetSocketAddress(listenAddress, listenPort))
            .backlog(backlog)
            .clientTimeout(readTimeout));
    TBoundedThreadPoolServer.Args serverArgs = new TBoundedThreadPoolServer.Args(serverTransport, conf);
    serverArgs.processor(processor).transportFactory(transportFactory)
        .protocolFactory(protocolFactory);
    LOG.info("starting " + ImplType.THREAD_POOL.simpleClassName() + " on " + listenAddress + ":"
        + Integer.toString(listenPort) + " with readTimeout " + readTimeout + "ms; " + serverArgs);
    TBoundedThreadPoolServer tserver = new TBoundedThreadPoolServer(serverArgs, metrics);
    this.tserver = tserver;
  } else {
    throw new AssertionError("Unsupported Thrift server implementation: "
        + implType.simpleClassName());
  }
  // A sanity check that we instantiated the right type of server.
  if (tserver.getClass() != implType.serverClass) {
    throw new AssertionError("Expected to create Thrift server class "
        + implType.serverClass.getName() + " but got " + tserver.getClass().getName());
  }
  registerFilters(conf);
}
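A minimal client sketch matching the server above when both COMPACT_CONF_KEY and FRAMED_CONF_KEY are set. The host, port, and frame size are illustrative assumptions; the frame size simply mirrors the 2 MB default visible in the server code:

// Hedged sketch, not from the HBase sources: a client whose transport and
// protocol mirror the compact + framed configuration above.
void framedCompactClientSketch() throws TException {
  TSocket socket = new TSocket("thrift-host", 9090); // illustrative host/port
  // 2 MB max frame, mirroring the server's MAX_FRAME_SIZE_CONF_KEY default.
  TTransport transport = new TFramedTransport(socket, 2 * 1024 * 1024);
  TProtocol protocol = new TCompactProtocol(transport);
  Hbase.Client client = new Hbase.Client(protocol);
  transport.open();
  // ... issue calls such as client.getTableNames(), then:
  transport.close();
}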
Use of org.apache.thrift.protocol.TProtocolFactory in project druid by druid-io: class ThriftDeserialization, method detectAndDeserialize.
/**
 * Deserializes a byte array into a thrift object.
 * <p>
 * Supports the binary, compact and JSON protocols; the byte array may or
 * may not be Base64-encoded.
 *
 * @param bytes the byte array to deserialize
 * @param thriftObj the thrift object to populate
 *
 * @return the populated thrift object
 */
public static <T extends TBase> T detectAndDeserialize(final byte[] bytes, final T thriftObj) throws TException {
  Preconditions.checkNotNull(thriftObj);
  try {
    final byte[] src = decodeB64IfNeeded(bytes);
    final TProtocolFactory protocolFactory = TProtocolUtil.guessProtocolFactory(src, null);
    Preconditions.checkNotNull(protocolFactory);
    if (protocolFactory instanceof TCompactProtocol.Factory) {
      DESERIALIZER_COMPACT.get().deserialize(thriftObj, src);
    } else if (protocolFactory instanceof TBinaryProtocol.Factory) {
      DESERIALIZER_BINARY.get().deserialize(thriftObj, src);
    } else {
      DESERIALIZER_JSON.get().deserialize(thriftObj, src);
    }
  } catch (final IllegalArgumentException e) {
    throw new TException(e);
  }
  return thriftObj;
}
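A sketch of a round trip through this method. MyStruct and its setId field are hypothetical stand-ins for any generated TBase type; TProtocolUtil.guessProtocolFactory detects the compact encoding and routes to the compact deserializer:

// Hedged sketch, not from the Druid sources: round-tripping a thrift object
// through detectAndDeserialize. MyStruct is a hypothetical generated type.
void roundTripSketch() throws TException {
  MyStruct original = new MyStruct();
  original.setId(42); // hypothetical field
  byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(original);
  // The protocol is guessed from the bytes; Base64-encoded input also works.
  MyStruct decoded = ThriftDeserialization.detectAndDeserialize(bytes, new MyStruct());
}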
Use of org.apache.thrift.protocol.TProtocolFactory in project pinpoint by naver: class LegacyAgentStatMapperTest, method createResultForLegacyWith_AGENT_STAT_CF_STATISTICS_V1.
private Result createResultForLegacyWith_AGENT_STAT_CF_STATISTICS_V1() throws TException {
  final TAgentStat agentStat = new TAgentStat();
  final TJvmGc gc = new TJvmGc();
  gc.setType(GC_TYPE);
  gc.setJvmGcOldCount(GC_OLD_COUNT);
  gc.setJvmGcOldTime(GC_OLD_TIME);
  gc.setJvmMemoryHeapUsed(HEAP_USED);
  gc.setJvmMemoryHeapMax(HEAP_MAX);
  gc.setJvmMemoryNonHeapUsed(NON_HEAP_USED);
  gc.setJvmMemoryNonHeapMax(NON_HEAP_MAX);
  agentStat.setGc(gc);
  final TProtocolFactory factory = new TCompactProtocol.Factory();
  final TSerializer serializer = new TSerializer(factory);
  final byte[] qualifier = AGENT_STAT_CF_STATISTICS_V1;
  final byte[] value = serializer.serialize(agentStat);
  return Result.create(Arrays.asList(createCell(qualifier, value)));
}
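For completeness, a sketch of the read side of this helper, decoding the compact-encoded TAgentStat back out of the cell value; the method name is illustrative:

// Hedged sketch, not from the Pinpoint sources: deserializes the value
// produced above using the same TCompactProtocol factory.
private TAgentStat deserializeAgentStatSketch(byte[] value) throws TException {
  TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());
  TAgentStat decoded = new TAgentStat();
  deserializer.deserialize(decoded, value);
  return decoded; // decoded.getGc() carries the GC fields set above
}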