Use of org.apache.thrift.transport.TTransport in project dubbo by alibaba: class ThriftCodecTest, method testDecodeReplyResponse.
@Test
public void testDecodeReplyResponse() throws Exception {
    URL url = URL.valueOf(ThriftProtocol.NAME + "://127.0.0.1:40880/" + Demo.Iface.class.getName());
    Channel channel = new MockedChannel(url);
    RandomAccessByteArrayOutputStream bos = new RandomAccessByteArrayOutputStream(128);
    Request request = createRequest();
    DefaultFuture future = new DefaultFuture(channel, request, 10);
    TMessage message = new TMessage("echoString", TMessageType.REPLY, ThriftCodec.getSeqId());
    Demo.echoString_result methodResult = new Demo.echoString_result();
    methodResult.success = "Hello, World!";
    TTransport transport = new TIOStreamTransport(bos);
    TBinaryProtocol protocol = new TBinaryProtocol(transport);
    int messageLength, headerLength;
    // prepare: write the dubbo-thrift header
    protocol.writeI16(ThriftCodec.MAGIC);
    // placeholder for the message length, patched below
    protocol.writeI32(Integer.MAX_VALUE);
    // placeholder for the header length, patched below
    protocol.writeI16(Short.MAX_VALUE);
    protocol.writeByte(ThriftCodec.VERSION);
    protocol.writeString(Demo.Iface.class.getName());
    protocol.writeI64(request.getId());
    protocol.getTransport().flush();
    headerLength = bos.size();
    // write the thrift reply message body
    protocol.writeMessageBegin(message);
    methodResult.write(protocol);
    protocol.writeMessageEnd();
    protocol.getTransport().flush();
    int oldIndex = messageLength = bos.size();
    try {
        // patch the real message and header lengths into the placeholders
        bos.setWriteIndex(ThriftCodec.MESSAGE_LENGTH_INDEX);
        protocol.writeI32(messageLength);
        bos.setWriteIndex(ThriftCodec.MESSAGE_HEADER_LENGTH_INDEX);
        protocol.writeI16((short) (0xffff & headerLength));
    } finally {
        bos.setWriteIndex(oldIndex);
    }
    // decode: leave 4 bytes for the frame size prefix and copy the payload after it
    byte[] buf = new byte[4 + bos.size()];
    System.arraycopy(bos.toByteArray(), 0, buf, 4, bos.size());
    ChannelBuffer bis = ChannelBuffers.wrappedBuffer(buf);
    Object obj = codec.decode((Channel) null, bis);
    Assert.assertNotNull(obj);
    Assert.assertEquals(true, obj instanceof Response);
    Response response = (Response) obj;
    Assert.assertEquals(request.getId(), response.getId());
    Assert.assertTrue(response.getResult() instanceof RpcResult);
    RpcResult result = (RpcResult) response.getResult();
    Assert.assertTrue(result.getResult() instanceof String);
    Assert.assertEquals(methodResult.success, result.getResult());
}
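The header written above mirrors the on-the-wire layout the dubbo ThriftCodec expects: a 2-byte magic, a 4-byte message-length placeholder, a 2-byte header-length placeholder, a version byte, the service name, and the request id, with the two placeholders patched in place once the real sizes are known. A minimal read-side sketch using the same Thrift primitives (the ByteArrayInputStream wrapping is only for illustration and is not part of the dubbo codec):

// Sketch: read the header fields back in the order the test wrote them.
TTransport in = new TIOStreamTransport(new ByteArrayInputStream(bos.toByteArray()));
TBinaryProtocol reader = new TBinaryProtocol(in);
short magic = reader.readI16();       // ThriftCodec.MAGIC
int messageLen = reader.readI32();    // patched message length
short headerLen = reader.readI16();   // patched header length
byte version = reader.readByte();     // ThriftCodec.VERSION
String serviceName = reader.readString();
long requestId = reader.readI64();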
Use of org.apache.thrift.transport.TTransport in project dubbo by alibaba: class ThriftCodecTest, method testDecodeExceptionResponse.
@Test
public void testDecodeExceptionResponse() throws Exception {
    URL url = URL.valueOf(ThriftProtocol.NAME + "://127.0.0.1:40880/" + Demo.class.getName());
    Channel channel = new MockedChannel(url);
    RandomAccessByteArrayOutputStream bos = new RandomAccessByteArrayOutputStream(128);
    Request request = createRequest();
    DefaultFuture future = new DefaultFuture(channel, request, 10);
    TMessage message = new TMessage("echoString", TMessageType.EXCEPTION, ThriftCodec.getSeqId());
    TTransport transport = new TIOStreamTransport(bos);
    TBinaryProtocol protocol = new TBinaryProtocol(transport);
    TApplicationException exception = new TApplicationException();
    int messageLength, headerLength;
    // prepare: write the dubbo-thrift header
    protocol.writeI16(ThriftCodec.MAGIC);
    // placeholder for the message length, patched below
    protocol.writeI32(Integer.MAX_VALUE);
    // placeholder for the header length, patched below
    protocol.writeI16(Short.MAX_VALUE);
    protocol.writeByte(ThriftCodec.VERSION);
    protocol.writeString(Demo.class.getName());
    protocol.writeI64(request.getId());
    protocol.getTransport().flush();
    headerLength = bos.size();
    // write the thrift exception message body
    protocol.writeMessageBegin(message);
    exception.write(protocol);
    protocol.writeMessageEnd();
    protocol.getTransport().flush();
    int oldIndex = messageLength = bos.size();
    try {
        // patch the real message and header lengths into the placeholders
        bos.setWriteIndex(ThriftCodec.MESSAGE_LENGTH_INDEX);
        protocol.writeI32(messageLength);
        bos.setWriteIndex(ThriftCodec.MESSAGE_HEADER_LENGTH_INDEX);
        protocol.writeI16((short) (0xffff & headerLength));
    } finally {
        bos.setWriteIndex(oldIndex);
    }
    // decode: frame the payload and run it through the codec
    ChannelBuffer bis = ChannelBuffers.wrappedBuffer(encodeFrame(bos.toByteArray()));
    Object obj = codec.decode((Channel) null, bis);
    Assert.assertNotNull(obj);
    Assert.assertTrue(obj instanceof Response);
    Response response = (Response) obj;
    Assert.assertTrue(response.getResult() instanceof RpcResult);
    RpcResult result = (RpcResult) response.getResult();
    Assert.assertTrue(result.hasException());
    Assert.assertTrue(result.getException() instanceof RpcException);
}
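Unlike the previous test, this one frames the payload with an encodeFrame helper instead of hand-copying it into an offset buffer. The helper's body is not part of this listing; a plausible sketch, assuming it simply prepends the 4-byte big-endian length that a framed transport would write (hypothetical, named after the call above):

// Hypothetical sketch of the encodeFrame helper referenced above:
// a 4-byte big-endian length prefix followed by the thrift payload.
private static byte[] encodeFrame(byte[] content) {
    byte[] framed = new byte[4 + content.length];
    TFramedTransport.encodeFrameSize(content.length, framed); // writes the length into the first 4 bytes
    System.arraycopy(content, 0, framed, 4, content.length);
    return framed;
}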
Use of org.apache.thrift.transport.TTransport in project hive by apache: class HiveMetaStore, method startMetaStore.
/**
 * Start Metastore based on a passed {@link HadoopThriftAuthBridge}.
 *
 * @param port
 *          the port to listen on
 * @param bridge
 *          the Hadoop Thrift authentication bridge
 * @param conf
 *          configuration overrides
 * @throws Throwable
 */
public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, HiveConf conf,
        Lock startLock, Condition startCondition, AtomicBoolean startedServing) throws Throwable {
    try {
        isMetaStoreRemote = true;
        // Server will create new threads up to max as necessary. After an idle
        // period, it will destroy threads to keep the number of threads in the
        // pool to min.
        long maxMessageSize = conf.getLongVar(HiveConf.ConfVars.METASTORESERVERMAXMESSAGESIZE);
        int minWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS);
        int maxWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS);
        boolean tcpKeepAlive = conf.getBoolVar(HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE);
        boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);
        boolean useCompactProtocol = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_COMPACT_PROTOCOL);
        boolean useSSL = conf.getBoolVar(ConfVars.HIVE_METASTORE_USE_SSL);
        useSasl = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL);
        TProcessor processor;
        TTransportFactory transFactory;
        final TProtocolFactory protocolFactory;
        final TProtocolFactory inputProtoFactory;
        if (useCompactProtocol) {
            protocolFactory = new TCompactProtocol.Factory();
            inputProtoFactory = new TCompactProtocol.Factory(maxMessageSize, maxMessageSize);
        } else {
            protocolFactory = new TBinaryProtocol.Factory();
            inputProtoFactory = new TBinaryProtocol.Factory(true, true, maxMessageSize, maxMessageSize);
        }
        HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", conf, false);
        IHMSHandler handler = newRetryingHMSHandler(baseHandler, conf);
        TServerSocket serverSocket = null;
        if (useSasl) {
            // we are in secure mode.
            if (useFramedTransport) {
                throw new HiveMetaException("Framed transport is not supported with SASL enabled.");
            }
            saslServer = bridge.createServer(
                    conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE),
                    conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL));
            // Start delegation token manager
            delegationTokenManager = new HiveDelegationTokenManager();
            delegationTokenManager.startDelegationTokenSecretManager(conf, baseHandler, ServerMode.METASTORE);
            saslServer.setSecretManager(delegationTokenManager.getSecretManager());
            transFactory = saslServer.createTransportFactory(MetaStoreUtils.getMetaStoreSaslProperties(conf));
            processor = saslServer.wrapProcessor(new ThriftHiveMetastore.Processor<IHMSHandler>(handler));
            serverSocket = HiveAuthUtils.getServerSocket(null, port);
            LOG.info("Starting DB backed MetaStore Server in Secure Mode");
        } else {
            // we are in unsecure mode.
            if (conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)) {
                transFactory = useFramedTransport
                        ? new ChainedTTransportFactory(new TFramedTransport.Factory(), new TUGIContainingTransport.Factory())
                        : new TUGIContainingTransport.Factory();
                processor = new TUGIBasedProcessor<IHMSHandler>(handler);
                LOG.info("Starting DB backed MetaStore Server with SetUGI enabled");
            } else {
                transFactory = useFramedTransport ? new TFramedTransport.Factory() : new TTransportFactory();
                processor = new TSetIpAddressProcessor<IHMSHandler>(handler);
                LOG.info("Starting DB backed MetaStore Server");
            }
            // enable SSL support for HMS
            List<String> sslVersionBlacklist = new ArrayList<String>();
            for (String sslVersion : conf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(",")) {
                sslVersionBlacklist.add(sslVersion);
            }
            if (!useSSL) {
                serverSocket = HiveAuthUtils.getServerSocket(null, port);
            } else {
                String keyStorePath = conf.getVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH).trim();
                if (keyStorePath.isEmpty()) {
                    throw new IllegalArgumentException(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH.varname
                            + " Not configured for SSL connection");
                }
                String keyStorePassword = ShimLoader.getHadoopShims().getPassword(conf,
                        HiveConf.ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD.varname);
                serverSocket = HiveAuthUtils.getServerSSLSocket(null, port, keyStorePath, keyStorePassword, sslVersionBlacklist);
            }
        }
        if (tcpKeepAlive) {
            serverSocket = new TServerSocketKeepAlive(serverSocket);
        }
        TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverSocket)
                .processor(processor)
                .transportFactory(transFactory)
                .protocolFactory(protocolFactory)
                .inputProtocolFactory(inputProtoFactory)
                .minWorkerThreads(minWorkerThreads)
                .maxWorkerThreads(maxWorkerThreads);
        TServer tServer = new TThreadPoolServer(args);
        TServerEventHandler tServerEventHandler = new TServerEventHandler() {

            @Override
            public void preServe() {
            }

            @Override
            public ServerContext createContext(TProtocol tProtocol, TProtocol tProtocol1) {
                try {
                    Metrics metrics = MetricsFactory.getInstance();
                    if (metrics != null) {
                        metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS);
                    }
                } catch (Exception e) {
                    LOG.warn("Error Reporting Metastore open connection to Metrics system", e);
                }
                return null;
            }

            @Override
            public void deleteContext(ServerContext serverContext, TProtocol tProtocol, TProtocol tProtocol1) {
                try {
                    Metrics metrics = MetricsFactory.getInstance();
                    if (metrics != null) {
                        metrics.decrementCounter(MetricsConstant.OPEN_CONNECTIONS);
                    }
                } catch (Exception e) {
                    LOG.warn("Error Reporting Metastore close connection to Metrics system", e);
                }
                // If the IMetaStoreClient#close was called, HMSHandler#shutdown would have already
                // cleaned up thread local RawStore. Otherwise, do it now.
                cleanupRawStore();
            }

            @Override
            public void processContext(ServerContext serverContext, TTransport tTransport, TTransport tTransport1) {
            }
        };
        tServer.setServerEventHandler(tServerEventHandler);
        HMSHandler.LOG.info("Started the new metaserver on port [" + port + "]...");
        HMSHandler.LOG.info("Options.minWorkerThreads = " + minWorkerThreads);
        HMSHandler.LOG.info("Options.maxWorkerThreads = " + maxWorkerThreads);
        HMSHandler.LOG.info("TCP keepalive = " + tcpKeepAlive);
        if (startLock != null) {
            signalOtherThreadsToStart(tServer, startLock, startCondition, startedServing);
        }
        tServer.serve();
    } catch (Throwable x) {
        x.printStackTrace();
        HMSHandler.LOG.error(StringUtils.stringifyException(x));
        throw x;
    }
}
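Stripped of the Hive-specific SASL, SSL, and UGI wiring, the bootstrap above is the standard libthrift TThreadPoolServer pattern: choose a processor, a transport factory, and a protocol factory, bind a TServerSocket, and serve. A minimal sketch of that core; EchoService and EchoHandler are placeholder names for a generated service and its implementation, not Hive classes:

// Minimal TThreadPoolServer sketch (placeholder EchoService, not a Hive class).
TServerSocket serverSocket = new TServerSocket(9083);
TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverSocket)
        .processor(new EchoService.Processor<>(new EchoHandler()))  // placeholder processor
        .transportFactory(new TTransportFactory())                  // raw, unframed transport
        .protocolFactory(new TBinaryProtocol.Factory())
        .minWorkerThreads(5)
        .maxWorkerThreads(100);
new TThreadPoolServer(args).serve();  // blocks the calling thread until stop() is called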
Use of org.apache.thrift.transport.TTransport in project hive by apache: class TSetIpAddressProcessor, method setIpAddress.
protected void setIpAddress(final TProtocol in) {
    TTransport transport = in.getTransport();
    TSocket tSocket = getUnderlyingSocketFromTransport(transport);
    if (tSocket == null) {
        LOGGER.warn("Unknown Transport, cannot determine ipAddress");
    } else {
        THREAD_LOCAL_IP_ADDRESS.set(tSocket.getSocket().getInetAddress().getHostAddress());
    }
}
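The getUnderlyingSocketFromTransport helper is not shown in this listing. A plausible sketch, assuming SASL transports may wrap the socket and anything else yields no usable peer address; the body below is an assumption, not the Hive source:

// Hypothetical: unwrap SASL layers until a TSocket is found, otherwise give up.
private TSocket getUnderlyingSocketFromTransport(TTransport transport) {
    if (transport instanceof TSaslServerTransport) {
        transport = ((TSaslServerTransport) transport).getUnderlyingTransport();
    } else if (transport instanceof TSaslClientTransport) {
        transport = ((TSaslClientTransport) transport).getUnderlyingTransport();
    }
    return transport instanceof TSocket ? (TSocket) transport : null;
}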
Use of org.apache.thrift.transport.TTransport in project hbase by apache: class TestThriftServerCmdLine, method talkToThriftServer.
private void talkToThriftServer() throws Exception {
    TSocket sock = new TSocket(InetAddress.getLocalHost().getHostName(), port);
    TTransport transport = sock;
    if (specifyFramed || implType.isAlwaysFramed) {
        transport = new TFramedTransport(transport);
    }
    sock.open();
    try {
        TProtocol prot;
        if (specifyCompact) {
            prot = new TCompactProtocol(transport);
        } else {
            prot = new TBinaryProtocol(transport);
        }
        Hbase.Client client = new Hbase.Client(prot);
        if (!tableCreated) {
            TestThriftServer.createTestTables(client);
            tableCreated = true;
        }
        TestThriftServer.checkTableList(client);
    } finally {
        sock.close();
    }
}
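The client has to mirror the server's choices exactly: a framed server needs TFramedTransport on the client, and compact versus binary protocol must match as well, otherwise reads fail with opaque transport errors. A slightly tidier variant of the same setup using try-with-resources, assuming a libthrift version in which TTransport implements Closeable; the table-creation step is omitted:

// Same client setup, closing the underlying socket automatically.
try (TTransport transport = new TFramedTransport(
        new TSocket(InetAddress.getLocalHost().getHostName(), port))) {
    transport.open();  // opens the wrapped TSocket
    Hbase.Client client = new Hbase.Client(new TBinaryProtocol(transport));
    TestThriftServer.checkTableList(client);
}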