
Example 6 with TProtocolFactory

use of org.apache.thrift.protocol.TProtocolFactory in project hbase by apache.

the class ThriftServerRunner method setupHTTPServer.

private void setupHTTPServer() throws IOException {
    TProtocolFactory protocolFactory = new TBinaryProtocol.Factory();
    TProcessor processor = new Hbase.Processor<>(handler);
    TServlet thriftHttpServlet = new ThriftHttpServlet(processor, protocolFactory, realUser, conf, hbaseHandler, securityEnabled, doAsEnabled);
    // Set the default max thread number to 100 to limit
    // the number of concurrent requests, so that the Thrift HTTP server doesn't OOM easily.
    // Jetty sets the default max thread number to 250 if we don't set it.
    //
    // Our default min thread number of 2 is the same as Jetty's.
    int minThreads = conf.getInt(HTTP_MIN_THREADS, 2);
    int maxThreads = conf.getInt(HTTP_MAX_THREADS, 100);
    QueuedThreadPool threadPool = new QueuedThreadPool(maxThreads);
    threadPool.setMinThreads(minThreads);
    httpServer = new Server(threadPool);
    // Context handler
    ServletContextHandler ctxHandler = new ServletContextHandler(httpServer, "/", ServletContextHandler.SESSIONS);
    ctxHandler.addServlet(new ServletHolder(thriftHttpServlet), "/*");
    // set up Jetty and run the embedded server
    HttpConfiguration httpConfig = new HttpConfiguration();
    httpConfig.setSecureScheme("https");
    httpConfig.setSecurePort(listenPort);
    httpConfig.setHeaderCacheSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
    httpConfig.setRequestHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
    httpConfig.setResponseHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
    httpConfig.setSendServerVersion(false);
    httpConfig.setSendDateHeader(false);
    ServerConnector serverConnector;
    if (conf.getBoolean(THRIFT_SSL_ENABLED, false)) {
        HttpConfiguration httpsConfig = new HttpConfiguration(httpConfig);
        httpsConfig.addCustomizer(new SecureRequestCustomizer());
        SslContextFactory sslCtxFactory = new SslContextFactory();
        String keystore = conf.get(THRIFT_SSL_KEYSTORE_STORE);
        String password = HBaseConfiguration.getPassword(conf, THRIFT_SSL_KEYSTORE_PASSWORD, null);
        String keyPassword = HBaseConfiguration.getPassword(conf, THRIFT_SSL_KEYSTORE_KEYPASSWORD, password);
        sslCtxFactory.setKeyStorePath(keystore);
        sslCtxFactory.setKeyStorePassword(password);
        sslCtxFactory.setKeyManagerPassword(keyPassword);
        String[] excludeCiphers = conf.getStrings(THRIFT_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY);
        if (excludeCiphers.length != 0) {
            sslCtxFactory.setExcludeCipherSuites(excludeCiphers);
        }
        String[] includeCiphers = conf.getStrings(THRIFT_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY);
        if (includeCiphers.length != 0) {
            sslCtxFactory.setIncludeCipherSuites(includeCiphers);
        }
        // Disable SSLv3 by default due to "Poodle" Vulnerability - CVE-2014-3566
        String[] excludeProtocols = conf.getStrings(THRIFT_SSL_EXCLUDE_PROTOCOLS, "SSLv3");
        if (excludeProtocols.length != 0) {
            sslCtxFactory.setExcludeProtocols(excludeProtocols);
        }
        String[] includeProtocols = conf.getStrings(THRIFT_SSL_INCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY);
        if (includeProtocols.length != 0) {
            sslCtxFactory.setIncludeProtocols(includeProtocols);
        }
        serverConnector = new ServerConnector(httpServer, new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), new HttpConnectionFactory(httpsConfig));
    } else {
        serverConnector = new ServerConnector(httpServer, new HttpConnectionFactory(httpConfig));
    }
    serverConnector.setPort(listenPort);
    String host = getBindAddress(conf).getHostAddress();
    serverConnector.setHost(host);
    httpServer.addConnector(serverConnector);
    httpServer.setStopAtShutdown(true);
    if (doAsEnabled) {
        ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    }
    LOG.info("Starting Thrift HTTP Server on " + Integer.toString(listenPort));
}
Also used : TProtocolFactory(org.apache.thrift.protocol.TProtocolFactory) TProcessor(org.apache.thrift.TProcessor) TThreadedSelectorServer(org.apache.thrift.server.TThreadedSelectorServer) TServer(org.apache.thrift.server.TServer) THsHaServer(org.apache.thrift.server.THsHaServer) TNonblockingServer(org.apache.thrift.server.TNonblockingServer) SaslServer(javax.security.sasl.SaslServer) ServletHolder(org.eclipse.jetty.servlet.ServletHolder) LogFactory(org.apache.commons.logging.LogFactory) TTransportFactory(org.apache.thrift.transport.TTransportFactory) SslContextFactory(org.eclipse.jetty.util.ssl.SslContextFactory) TServlet(org.apache.thrift.server.TServlet) QueuedThreadPool(org.eclipse.jetty.util.thread.QueuedThreadPool) ServletContextHandler(org.eclipse.jetty.servlet.ServletContextHandler)
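
Stripped of the HBase-specific handler, SSL, and thread-pool wiring, the core pattern in this example is a TProtocolFactory paired with a TServlet mounted on an embedded Jetty server. A minimal, self-contained sketch of that pattern follows; the class and method names are illustrative, and any generated Thrift service processor can be passed in.

import org.apache.thrift.TProcessor;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TProtocolFactory;
import org.apache.thrift.server.TServlet;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;

public class ThriftOverHttpSketch {
    // Serve any Thrift TProcessor over HTTP; the caller supplies a generated
    // processor (e.g. Hbase.Processor) and the port to listen on.
    public static Server start(TProcessor processor, int port) throws Exception {
        TProtocolFactory protocolFactory = new TBinaryProtocol.Factory();
        TServlet thriftServlet = new TServlet(processor, protocolFactory);
        Server server = new Server(port);
        ServletContextHandler ctx =
            new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS);
        ctx.addServlet(new ServletHolder(thriftServlet), "/*");
        // Start serving Thrift-over-HTTP on /*; the caller can join() or stop().
        server.start();
        return server;
    }
}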

Example 7 with TProtocolFactory

use of org.apache.thrift.protocol.TProtocolFactory in project hbase by apache.

the class ThriftServer method run.

@Override
public int run(String[] args) throws Exception {
    final Configuration conf = getConf();
    TServer server = null;
    Options options = getOptions();
    CommandLine cmd = parseArguments(conf, options, args);
    int workerThreads = 0;
    int selectorThreads = 0;
    // use unbounded queue by default
    int maxCallQueueSize = -1;
    /**
     * This is to please both bin/hbase and bin/hbase-daemon. hbase-daemon provides "start" and "stop"
     * arguments; hbase should print the help if no argument is provided.
     */
    List<?> argList = cmd.getArgList();
    if (cmd.hasOption("help") || !argList.contains("start") || argList.contains("stop")) {
        printUsage();
        return 1;
    }
    // Get address to bind
    String bindAddress;
    if (cmd.hasOption("bind")) {
        bindAddress = cmd.getOptionValue("bind");
        conf.set("hbase.thrift.info.bindAddress", bindAddress);
    } else {
        bindAddress = conf.get("hbase.thrift.info.bindAddress");
    }
    // Get read timeout
    int readTimeout = THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT;
    if (cmd.hasOption(READ_TIMEOUT_OPTION)) {
        try {
            readTimeout = Integer.parseInt(cmd.getOptionValue(READ_TIMEOUT_OPTION));
        } catch (NumberFormatException e) {
            throw new RuntimeException("Could not parse the value provided for the timeout option", e);
        }
    } else {
        readTimeout = conf.getInt(THRIFT_SERVER_SOCKET_READ_TIMEOUT_KEY, THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT);
    }
    // Get port to bind to
    int listenPort = 0;
    try {
        if (cmd.hasOption("port")) {
            listenPort = Integer.parseInt(cmd.getOptionValue("port"));
        } else {
            listenPort = conf.getInt("hbase.regionserver.thrift.port", DEFAULT_LISTEN_PORT);
        }
    } catch (NumberFormatException e) {
        throw new RuntimeException("Could not parse the value provided for the port option", e);
    }
    // Thrift's implementation uses '0' as a placeholder for 'use the default.'
    int backlog = conf.getInt(BACKLOG_CONF_KEY, 0);
    // Local hostname and user name,
    // used only if QOP is configured.
    String host = null;
    String name = null;
    UserProvider userProvider = UserProvider.instantiate(conf);
    // login the server principal (if using secure Hadoop)
    boolean securityEnabled = userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled();
    if (securityEnabled) {
        host = Strings.domainNamePointerToHostName(DNS.getDefaultHost(conf.get("hbase.thrift.dns.interface", "default"), conf.get("hbase.thrift.dns.nameserver", "default")));
        userProvider.login("hbase.thrift.keytab.file", "hbase.thrift.kerberos.principal", host);
    }
    UserGroupInformation realUser = userProvider.getCurrent().getUGI();
    String stringQop = conf.get(THRIFT_QOP_KEY);
    SaslUtil.QualityOfProtection qop = null;
    if (stringQop != null) {
        qop = SaslUtil.getQop(stringQop);
        if (!securityEnabled) {
            throw new IOException("Thrift server must" + " run in secure mode to support authentication");
        }
        // Extract the name from the principal
        name = SecurityUtil.getUserFromPrincipal(conf.get("hbase.thrift.kerberos.principal"));
    }
    boolean nonblocking = cmd.hasOption("nonblocking");
    boolean hsha = cmd.hasOption("hsha");
    boolean selector = cmd.hasOption("selector");
    ThriftMetrics metrics = new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.TWO);
    final JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(conf, metrics.getSource());
    String implType = "threadpool";
    if (nonblocking) {
        implType = "nonblocking";
    } else if (hsha) {
        implType = "hsha";
    } else if (selector) {
        implType = "selector";
    }
    conf.set("hbase.regionserver.thrift.server.type", implType);
    conf.setInt("hbase.regionserver.thrift.port", listenPort);
    registerFilters(conf);
    // Construct correct ProtocolFactory
    boolean compact = cmd.hasOption("compact") || conf.getBoolean("hbase.regionserver.thrift.compact", false);
    TProtocolFactory protocolFactory = getTProtocolFactory(compact);
    final ThriftHBaseServiceHandler hbaseHandler = new ThriftHBaseServiceHandler(conf, userProvider);
    THBaseService.Iface handler = ThriftHBaseServiceHandler.newInstance(hbaseHandler, metrics);
    final THBaseService.Processor p = new THBaseService.Processor(handler);
    conf.setBoolean("hbase.regionserver.thrift.compact", compact);
    TProcessor processor = p;
    boolean framed = cmd.hasOption("framed") || conf.getBoolean("hbase.regionserver.thrift.framed", false) || nonblocking || hsha;
    TTransportFactory transportFactory = getTTransportFactory(qop, name, host, framed, conf.getInt("hbase.regionserver.thrift.framed.max_frame_size_in_mb", 2) * 1024 * 1024);
    InetSocketAddress inetSocketAddress = bindToPort(bindAddress, listenPort);
    conf.setBoolean("hbase.regionserver.thrift.framed", framed);
    if (qop != null) {
        // Create a processor wrapper, to get the caller
        processor = new TProcessor() {

            @Override
            public boolean process(TProtocol inProt, TProtocol outProt) throws TException {
                TSaslServerTransport saslServerTransport = (TSaslServerTransport) inProt.getTransport();
                SaslServer saslServer = saslServerTransport.getSaslServer();
                String principal = saslServer.getAuthorizationID();
                hbaseHandler.setEffectiveUser(principal);
                return p.process(inProt, outProt);
            }
        };
    }
    if (cmd.hasOption("w")) {
        workerThreads = Integer.parseInt(cmd.getOptionValue("w"));
    }
    if (cmd.hasOption("s")) {
        selectorThreads = Integer.parseInt(cmd.getOptionValue("s"));
    }
    if (cmd.hasOption("q")) {
        maxCallQueueSize = Integer.parseInt(cmd.getOptionValue("q"));
    }
    // Check for a user-defined info server port setting; if present, override the conf
    try {
        if (cmd.hasOption("infoport")) {
            String val = cmd.getOptionValue("infoport");
            conf.setInt("hbase.thrift.info.port", Integer.parseInt(val));
            log.debug("Web UI port set to " + val);
        }
    } catch (NumberFormatException e) {
        log.error("Could not parse the value provided for the infoport option", e);
        printUsage();
        System.exit(1);
    }
    // Put up info server.
    int port = conf.getInt("hbase.thrift.info.port", 9095);
    if (port >= 0) {
        conf.setLong("startcode", System.currentTimeMillis());
        String a = conf.get("hbase.thrift.info.bindAddress", "0.0.0.0");
        InfoServer infoServer = new InfoServer("thrift", a, port, false, conf);
        infoServer.setAttribute("hbase.conf", conf);
        infoServer.start();
    }
    if (nonblocking) {
        server = getTNonBlockingServer(protocolFactory, processor, transportFactory, inetSocketAddress);
    } else if (hsha) {
        server = getTHsHaServer(protocolFactory, processor, transportFactory, workerThreads, maxCallQueueSize, inetSocketAddress, metrics);
    } else if (selector) {
        server = getTThreadedSelectorServer(protocolFactory, processor, transportFactory, workerThreads, selectorThreads, maxCallQueueSize, inetSocketAddress, metrics);
    } else {
        server = getTThreadPoolServer(protocolFactory, processor, transportFactory, workerThreads, inetSocketAddress, backlog, readTimeout, metrics);
    }
    final TServer tserver = server;
    realUser.doAs(new PrivilegedAction<Object>() {

        @Override
        public Object run() {
            pauseMonitor.start();
            try {
                tserver.serve();
                return null;
            } finally {
                pauseMonitor.stop();
            }
        }
    });
    // When tserver.stop() eventually happens, we'll get here.
    return 0;
}
Also used : TException(org.apache.thrift.TException) Options(org.apache.commons.cli.Options) TProtocolFactory(org.apache.thrift.protocol.TProtocolFactory) TProcessor(org.apache.thrift.TProcessor) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) TServer(org.apache.thrift.server.TServer) InetSocketAddress(java.net.InetSocketAddress) SaslServer(javax.security.sasl.SaslServer) JvmPauseMonitor(org.apache.hadoop.hbase.util.JvmPauseMonitor) UserProvider(org.apache.hadoop.hbase.security.UserProvider) TProtocol(org.apache.thrift.protocol.TProtocol) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) THBaseService(org.apache.hadoop.hbase.thrift2.generated.THBaseService) IOException(java.io.IOException) TTransportFactory(org.apache.thrift.transport.TTransportFactory) TSaslServerTransport(org.apache.thrift.transport.TSaslServerTransport) CommandLine(org.apache.commons.cli.CommandLine) ThriftMetrics(org.apache.hadoop.hbase.thrift.ThriftMetrics) InfoServer(org.apache.hadoop.hbase.http.InfoServer) SaslUtil(org.apache.hadoop.hbase.security.SaslUtil)
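
The call to getTProtocolFactory(compact) above chooses the wire protocol for the server. A plausible sketch of that selection is shown here; the body is an assumption inferred from the call site, not the HBase source.

import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.protocol.TProtocolFactory;

final class ProtocolFactorySketch {
    // Compact protocol trades a little CPU for smaller messages on the wire;
    // binary protocol remains the default.
    static TProtocolFactory getTProtocolFactory(boolean isCompact) {
        return isCompact ? new TCompactProtocol.Factory() : new TBinaryProtocol.Factory();
    }
}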

Example 8 with TProtocolFactory

use of org.apache.thrift.protocol.TProtocolFactory in project hive by apache.

the class ThriftDeserializer method initialize.

@Override
public void initialize(Configuration job, Properties tbl) throws SerDeException {
    try {
        // Both the class name and the protocol name are table properties;
        // the only hardwired assumption is that records are fixed on a
        // per-table basis.
        String className = tbl.getProperty(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_CLASS);
        Class<?> recordClass = job.getClassByName(className);
        String protoName = tbl.getProperty(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT);
        if (protoName == null) {
            protoName = "TBinaryProtocol";
        }
        // For backward compatibility
        protoName = protoName.replace("com.facebook.thrift.protocol", "org.apache.thrift.protocol");
        TProtocolFactory tp = TReflectionUtils.getProtocolFactoryByName(protoName);
        tsd = new ThriftByteStreamTypedSerDe(recordClass, tp, tp);
    } catch (Exception e) {
        throw new SerDeException(e);
    }
}
Also used : TProtocolFactory(org.apache.thrift.protocol.TProtocolFactory) SerDeException(org.apache.hadoop.hive.serde2.SerDeException)
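
TReflectionUtils.getProtocolFactoryByName resolves the protocol name taken from the table properties into a TProtocolFactory. As a rough illustration of the idea (not the actual Hive implementation), a protocol name such as "TBinaryProtocol" can be resolved by instantiating the protocol's nested Factory class reflectively:

import org.apache.thrift.protocol.TProtocolFactory;

final class ProtocolByNameSketch {
    // "TBinaryProtocol" -> org.apache.thrift.protocol.TBinaryProtocol$Factory
    static TProtocolFactory factoryFor(String protocolName) throws Exception {
        String className = protocolName.contains(".")
            ? protocolName
            : "org.apache.thrift.protocol." + protocolName;
        Class<?> factoryClass = Class.forName(className + "$Factory");
        return (TProtocolFactory) factoryClass.getDeclaredConstructor().newInstance();
    }
}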

Example 9 with TProtocolFactory

use of org.apache.thrift.protocol.TProtocolFactory in project hive by apache.

the class HiveMetaStore method startMetaStore.

/**
   * Start Metastore based on a passed {@link HadoopThriftAuthBridge}.
   *
   * @param port
   *          port to listen on
   * @param bridge
   *          Hadoop Thrift authentication bridge
   * @param conf
   *          configuration overrides
   * @throws Throwable
   */
public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, HiveConf conf, Lock startLock, Condition startCondition, AtomicBoolean startedServing) throws Throwable {
    try {
        isMetaStoreRemote = true;
        // Server will create new threads up to max as necessary. After an idle
        // period, it will destroy threads to keep the number of threads in the
        // pool to min.
        long maxMessageSize = conf.getLongVar(HiveConf.ConfVars.METASTORESERVERMAXMESSAGESIZE);
        int minWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS);
        int maxWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS);
        boolean tcpKeepAlive = conf.getBoolVar(HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE);
        boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);
        boolean useCompactProtocol = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_COMPACT_PROTOCOL);
        boolean useSSL = conf.getBoolVar(ConfVars.HIVE_METASTORE_USE_SSL);
        useSasl = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL);
        TProcessor processor;
        TTransportFactory transFactory;
        final TProtocolFactory protocolFactory;
        final TProtocolFactory inputProtoFactory;
        if (useCompactProtocol) {
            protocolFactory = new TCompactProtocol.Factory();
            inputProtoFactory = new TCompactProtocol.Factory(maxMessageSize, maxMessageSize);
        } else {
            protocolFactory = new TBinaryProtocol.Factory();
            inputProtoFactory = new TBinaryProtocol.Factory(true, true, maxMessageSize, maxMessageSize);
        }
        HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", conf, false);
        IHMSHandler handler = newRetryingHMSHandler(baseHandler, conf);
        TServerSocket serverSocket = null;
        if (useSasl) {
            // we are in secure mode.
            if (useFramedTransport) {
                throw new HiveMetaException("Framed transport is not supported with SASL enabled.");
            }
            saslServer = bridge.createServer(conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE), conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL));
            // Start delegation token manager
            delegationTokenManager = new HiveDelegationTokenManager();
            delegationTokenManager.startDelegationTokenSecretManager(conf, baseHandler, ServerMode.METASTORE);
            saslServer.setSecretManager(delegationTokenManager.getSecretManager());
            transFactory = saslServer.createTransportFactory(MetaStoreUtils.getMetaStoreSaslProperties(conf));
            processor = saslServer.wrapProcessor(new ThriftHiveMetastore.Processor<IHMSHandler>(handler));
            serverSocket = HiveAuthUtils.getServerSocket(null, port);
            LOG.info("Starting DB backed MetaStore Server in Secure Mode");
        } else {
            // we are in unsecure mode.
            if (conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)) {
                transFactory = useFramedTransport ? new ChainedTTransportFactory(new TFramedTransport.Factory(), new TUGIContainingTransport.Factory()) : new TUGIContainingTransport.Factory();
                processor = new TUGIBasedProcessor<IHMSHandler>(handler);
                LOG.info("Starting DB backed MetaStore Server with SetUGI enabled");
            } else {
                transFactory = useFramedTransport ? new TFramedTransport.Factory() : new TTransportFactory();
                processor = new TSetIpAddressProcessor<IHMSHandler>(handler);
                LOG.info("Starting DB backed MetaStore Server");
            }
            // enable SSL support for HMS
            List<String> sslVersionBlacklist = new ArrayList<String>();
            for (String sslVersion : conf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(",")) {
                sslVersionBlacklist.add(sslVersion);
            }
            if (!useSSL) {
                serverSocket = HiveAuthUtils.getServerSocket(null, port);
            } else {
                String keyStorePath = conf.getVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH).trim();
                if (keyStorePath.isEmpty()) {
                    throw new IllegalArgumentException(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH.varname + " Not configured for SSL connection");
                }
                String keyStorePassword = ShimLoader.getHadoopShims().getPassword(conf, HiveConf.ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD.varname);
                serverSocket = HiveAuthUtils.getServerSSLSocket(null, port, keyStorePath, keyStorePassword, sslVersionBlacklist);
            }
        }
        if (tcpKeepAlive) {
            serverSocket = new TServerSocketKeepAlive(serverSocket);
        }
        TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverSocket).processor(processor).transportFactory(transFactory).protocolFactory(protocolFactory).inputProtocolFactory(inputProtoFactory).minWorkerThreads(minWorkerThreads).maxWorkerThreads(maxWorkerThreads);
        TServer tServer = new TThreadPoolServer(args);
        TServerEventHandler tServerEventHandler = new TServerEventHandler() {

            @Override
            public void preServe() {
            }

            @Override
            public ServerContext createContext(TProtocol tProtocol, TProtocol tProtocol1) {
                try {
                    Metrics metrics = MetricsFactory.getInstance();
                    if (metrics != null) {
                        metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS);
                    }
                } catch (Exception e) {
                    LOG.warn("Error Reporting Metastore open connection to Metrics system", e);
                }
                return null;
            }

            @Override
            public void deleteContext(ServerContext serverContext, TProtocol tProtocol, TProtocol tProtocol1) {
                try {
                    Metrics metrics = MetricsFactory.getInstance();
                    if (metrics != null) {
                        metrics.decrementCounter(MetricsConstant.OPEN_CONNECTIONS);
                    }
                } catch (Exception e) {
                    LOG.warn("Error Reporting Metastore close connection to Metrics system", e);
                }
                // If the IMetaStoreClient#close was called, HMSHandler#shutdown would have already
                // cleaned up thread local RawStore. Otherwise, do it now.
                cleanupRawStore();
            }

            @Override
            public void processContext(ServerContext serverContext, TTransport tTransport, TTransport tTransport1) {
            }
        };
        tServer.setServerEventHandler(tServerEventHandler);
        HMSHandler.LOG.info("Started the new metaserver on port [" + port + "]...");
        HMSHandler.LOG.info("Options.minWorkerThreads = " + minWorkerThreads);
        HMSHandler.LOG.info("Options.maxWorkerThreads = " + maxWorkerThreads);
        HMSHandler.LOG.info("TCP keepalive = " + tcpKeepAlive);
        if (startLock != null) {
            signalOtherThreadsToStart(tServer, startLock, startCondition, startedServing);
        }
        tServer.serve();
    } catch (Throwable x) {
        x.printStackTrace();
        HMSHandler.LOG.error(StringUtils.stringifyException(x));
        throw x;
    }
}
Also used : TProtocolFactory(org.apache.thrift.protocol.TProtocolFactory) TProcessor(org.apache.thrift.TProcessor) TServerEventHandler(org.apache.thrift.server.TServerEventHandler) TServer(org.apache.thrift.server.TServer) ArrayList(java.util.ArrayList) LoggerFactory(org.slf4j.LoggerFactory) TTransportFactory(org.apache.thrift.transport.TTransportFactory) MetricsFactory(org.apache.hadoop.hive.common.metrics.common.MetricsFactory) TCompactProtocol(org.apache.thrift.protocol.TCompactProtocol) TServerSocket(org.apache.thrift.transport.TServerSocket) Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) TProtocol(org.apache.thrift.protocol.TProtocol) TFramedTransport(org.apache.thrift.transport.TFramedTransport) TUGIContainingTransport(org.apache.hadoop.hive.thrift.TUGIContainingTransport) JDOException(javax.jdo.JDOException) LogInitializationException(org.apache.hadoop.hive.common.LogUtils.LogInitializationException) TException(org.apache.thrift.TException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) TBinaryProtocol(org.apache.thrift.protocol.TBinaryProtocol) ServerContext(org.apache.thrift.server.ServerContext) HiveDelegationTokenManager(org.apache.hadoop.hive.thrift.HiveDelegationTokenManager) TTransport(org.apache.thrift.transport.TTransport) TThreadPoolServer(org.apache.thrift.server.TThreadPoolServer)
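
The essential wiring in startMetaStore is a TThreadPoolServer built from a processor, a transport factory, and the chosen protocol factories. Below is a minimal sketch of the same pattern with the Hive-specific pieces (SASL, SSL, UGI handling) removed; the thread counts are illustrative defaults, not Hive's.

import org.apache.thrift.TProcessor;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TTransportFactory;

final class ThreadPoolServerSketch {
    static TServer build(TProcessor processor, int port) throws Exception {
        TServerSocket serverSocket = new TServerSocket(port);
        TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverSocket)
            .processor(processor)
            // raw (unframed) transport and the default binary wire protocol
            .transportFactory(new TTransportFactory())
            .protocolFactory(new TBinaryProtocol.Factory())
            .minWorkerThreads(5)
            .maxWorkerThreads(100);
        return new TThreadPoolServer(args);   // caller invokes serve() to block
    }
}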

Aggregations

TProtocolFactory (org.apache.thrift.protocol.TProtocolFactory): 9 usages
TProcessor (org.apache.thrift.TProcessor): 5 usages
SaslServer (javax.security.sasl.SaslServer): 3 usages
SerDeException (org.apache.hadoop.hive.serde2.SerDeException): 3 usages
TException (org.apache.thrift.TException): 3 usages
TBinaryProtocol (org.apache.thrift.protocol.TBinaryProtocol): 3 usages
TProtocol (org.apache.thrift.protocol.TProtocol): 3 usages
TServer (org.apache.thrift.server.TServer): 3 usages
TTransportFactory (org.apache.thrift.transport.TTransportFactory): 3 usages
IOException (java.io.IOException): 2 usages
InetSocketAddress (java.net.InetSocketAddress): 2 usages
ArrayList (java.util.ArrayList): 2 usages
ExecutorService (java.util.concurrent.ExecutorService): 2 usages
LogFactory (org.apache.commons.logging.LogFactory): 2 usages
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 2 usages
TCompactProtocol (org.apache.thrift.protocol.TCompactProtocol): 2 usages
THsHaServer (org.apache.thrift.server.THsHaServer): 2 usages
TNonblockingServer (org.apache.thrift.server.TNonblockingServer): 2 usages
TServlet (org.apache.thrift.server.TServlet): 2 usages
TThreadedSelectorServer (org.apache.thrift.server.TThreadedSelectorServer): 2 usages