Example 1 with InfoServer

Use of org.apache.hadoop.hbase.http.InfoServer in project hbase by apache.

From the class HRegionServer, method putUpWebUI:

/**
   * Puts up the web UI.
   * @return the final port -- may differ from the port we started with
   * @throws IOException if the info server could not be started
   */
private int putUpWebUI() throws IOException {
    int port = this.conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT);
    String addr = this.conf.get("hbase.regionserver.info.bindAddress", "0.0.0.0");
    if (this instanceof HMaster) {
        port = conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT);
        addr = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0");
    }
    // -1 is for disabling info server
    if (port < 0)
        return port;
    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
        String msg = "Failed to start http info server. Address " + addr + " does not belong to this host. Correct configuration parameter: " + "hbase.regionserver.info.bindAddress";
        LOG.error(msg);
        throw new IOException(msg);
    }
    // check if auto port bind enabled
    boolean auto = this.conf.getBoolean(HConstants.REGIONSERVER_INFO_PORT_AUTO, false);
    while (true) {
        try {
            this.infoServer = new InfoServer(getProcessName(), addr, port, false, this.conf);
            infoServer.addServlet("dump", "/dump", getDumpServlet());
            configureInfoServer();
            this.infoServer.start();
            break;
        } catch (BindException e) {
            if (!auto) {
                // Auto bind disabled: rethrow the BindException.
                LOG.error("Failed binding http info server to port: " + port);
                throw e;
            }
            // auto bind enabled, try to use another port
            LOG.info("Failed binding http info server to port: " + port);
            port++;
        }
    }
    port = this.infoServer.getPort();
    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, port);
    int masterInfoPort = conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT);
    conf.setInt("hbase.master.info.port.orig", masterInfoPort);
    conf.setInt(HConstants.MASTER_INFO_PORT, port);
    return port;
}
Also used : HMaster(org.apache.hadoop.hbase.master.HMaster) BindException(java.net.BindException) InfoServer(org.apache.hadoop.hbase.http.InfoServer) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException)
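
The pattern above (construct the InfoServer, try to start it, and walk to the next port on BindException when auto bind is enabled) can be lifted into a standalone helper. The sketch below is a minimal illustration of that retry loop, assuming only the InfoServer API already used above (the five-argument constructor, start(), getPort()); the process name, bind address, and port 16030 passed in main are illustrative values, not defaults taken from this page.

import java.io.IOException;
import java.net.BindException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.http.InfoServer;

public class InfoServerStartSketch {

    // Starts an InfoServer on the requested port; when autoBind is true, a
    // BindException moves us to the next port, mirroring putUpWebUI above.
    static InfoServer startInfoServer(String processName, String bindAddress, int port,
            boolean autoBind, Configuration conf) throws IOException {
        while (true) {
            try {
                InfoServer infoServer = new InfoServer(processName, bindAddress, port, false, conf);
                infoServer.start();
                return infoServer;
            } catch (BindException e) {
                if (!autoBind) {
                    // Auto bind disabled: surface the failure to the caller.
                    throw e;
                }
                // Auto bind enabled: try the next port.
                port++;
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        InfoServer infoServer = startInfoServer("regionserver", "0.0.0.0", 16030, true, conf);
        System.out.println("Info server is listening on port " + infoServer.getPort());
    }
}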

Example 2 with InfoServer

Use of org.apache.hadoop.hbase.http.InfoServer in project hbase by apache.

From the class ThriftServer, method doMain:

/**
   * Starts up or shuts down the Thrift server, depending on the arguments.
   * @param args command-line arguments
   */
void doMain(final String[] args) throws Exception {
    processOptions(args);
    serverRunner = new ThriftServerRunner(conf);
    // Put up info server.
    int port = conf.getInt("hbase.thrift.info.port", 9095);
    if (port >= 0) {
        conf.setLong("startcode", System.currentTimeMillis());
        String a = conf.get("hbase.thrift.info.bindAddress", "0.0.0.0");
        infoServer = new InfoServer("thrift", a, port, false, conf);
        infoServer.setAttribute("hbase.conf", conf);
        infoServer.start();
    }
    serverRunner.run();
}
Also used : InfoServer(org.apache.hadoop.hbase.http.InfoServer)
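
Since doMain only puts up the InfoServer when hbase.thrift.info.port is non-negative, the web UI can be relocated or switched off entirely from configuration. A minimal sketch, using only the property names that appear in the example above; the alternate port 9096 and the loopback bind address are illustrative choices.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ThriftInfoUiConfigSketch {

    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Move the Thrift web UI off the default port 9095 ...
        conf.setInt("hbase.thrift.info.port", 9096);

        // ... or disable it: doMain skips the InfoServer when the port is negative.
        // conf.setInt("hbase.thrift.info.port", -1);

        // Bind the UI to the loopback interface instead of all interfaces.
        conf.set("hbase.thrift.info.bindAddress", "127.0.0.1");

        System.out.println("hbase.thrift.info.port = " + conf.getInt("hbase.thrift.info.port", 9095));
    }
}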

Example 3 with InfoServer

Use of org.apache.hadoop.hbase.http.InfoServer in project hbase by apache.

From the class RESTServer, method main:

/**
   * The main method for the HBase rest server.
   * @param args command-line arguments
   * @throws Exception exception
   */
public static void main(String[] args) throws Exception {
    LOG.info("***** STARTING service '" + RESTServer.class.getSimpleName() + "' *****");
    VersionInfo.logVersion();
    Configuration conf = HBaseConfiguration.create();
    UserProvider userProvider = UserProvider.instantiate(conf);
    Pair<FilterHolder, Class<? extends ServletContainer>> pair = loginServerPrincipal(userProvider, conf);
    FilterHolder authFilter = pair.getFirst();
    RESTServlet servlet = RESTServlet.getInstance(conf, userProvider);
    parseCommandLine(args, servlet);
    // set up the Jersey servlet container for Jetty
    ResourceConfig application = new ResourceConfig().packages("org.apache.hadoop.hbase.rest").register(Jackson1Feature.class);
    ServletHolder sh = new ServletHolder(new ServletContainer(application));
    // Set the default max thread number to 100 to limit
    // the number of concurrent requests, so that the REST server doesn't OOM easily.
    // Jetty sets the default max thread number to 250 if we don't set it.
    //
    // Our default min thread number of 2 is the same as that used by Jetty.
    int maxThreads = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREADS_MAX, 100);
    int minThreads = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREADS_MIN, 2);
    // Use the default queue (unbounded with Jetty 9.3) if the queue size is negative; otherwise use a
    // bounded {@link ArrayBlockingQueue} with the given size
    int queueSize = servlet.getConfiguration().getInt(REST_THREAD_POOL_TASK_QUEUE_SIZE, -1);
    int idleTimeout = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000);
    QueuedThreadPool threadPool = queueSize > 0 ? new QueuedThreadPool(maxThreads, minThreads, idleTimeout, new ArrayBlockingQueue<>(queueSize)) : new QueuedThreadPool(maxThreads, minThreads, idleTimeout);
    Server server = new Server(threadPool);
    // Setup JMX
    MBeanContainer mbContainer = new MBeanContainer(ManagementFactory.getPlatformMBeanServer());
    server.addEventListener(mbContainer);
    server.addBean(mbContainer);
    String host = servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0");
    int servicePort = servlet.getConfiguration().getInt("hbase.rest.port", 8080);
    HttpConfiguration httpConfig = new HttpConfiguration();
    httpConfig.setSecureScheme("https");
    httpConfig.setSecurePort(servicePort);
    httpConfig.setSendServerVersion(false);
    httpConfig.setSendDateHeader(false);
    ServerConnector serverConnector;
    if (conf.getBoolean(REST_SSL_ENABLED, false)) {
        HttpConfiguration httpsConfig = new HttpConfiguration(httpConfig);
        httpsConfig.addCustomizer(new SecureRequestCustomizer());
        SslContextFactory sslCtxFactory = new SslContextFactory();
        String keystore = conf.get(REST_SSL_KEYSTORE_STORE);
        String password = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_PASSWORD, null);
        String keyPassword = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_KEYPASSWORD, password);
        sslCtxFactory.setKeyStorePath(keystore);
        sslCtxFactory.setKeyStorePassword(password);
        sslCtxFactory.setKeyManagerPassword(keyPassword);
        String[] excludeCiphers = servlet.getConfiguration().getStrings(REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY);
        if (excludeCiphers.length != 0) {
            sslCtxFactory.setExcludeCipherSuites(excludeCiphers);
        }
        String[] includeCiphers = servlet.getConfiguration().getStrings(REST_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY);
        if (includeCiphers.length != 0) {
            sslCtxFactory.setIncludeCipherSuites(includeCiphers);
        }
        String[] excludeProtocols = servlet.getConfiguration().getStrings(REST_SSL_EXCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY);
        if (excludeProtocols.length != 0) {
            sslCtxFactory.setExcludeProtocols(excludeProtocols);
        }
        String[] includeProtocols = servlet.getConfiguration().getStrings(REST_SSL_INCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY);
        if (includeProtocols.length != 0) {
            sslCtxFactory.setIncludeProtocols(includeProtocols);
        }
        serverConnector = new ServerConnector(server, new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), new HttpConnectionFactory(httpsConfig));
    } else {
        serverConnector = new ServerConnector(server, new HttpConnectionFactory(httpConfig));
    }
    int acceptQueueSize = servlet.getConfiguration().getInt(REST_CONNECTOR_ACCEPT_QUEUE_SIZE, -1);
    if (acceptQueueSize >= 0) {
        serverConnector.setAcceptQueueSize(acceptQueueSize);
    }
    serverConnector.setPort(servicePort);
    serverConnector.setHost(host);
    server.addConnector(serverConnector);
    server.setStopAtShutdown(true);
    // set up context
    ServletContextHandler ctxHandler = new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS);
    ctxHandler.addServlet(sh, PATH_SPEC_ANY);
    if (authFilter != null) {
        ctxHandler.addFilter(authFilter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST));
    }
    // Load filters from configuration.
    String[] filterClasses = servlet.getConfiguration().getStrings(FILTER_CLASSES, ArrayUtils.EMPTY_STRING_ARRAY);
    for (String filter : filterClasses) {
        filter = filter.trim();
        ctxHandler.addFilter(filter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST));
    }
    addCSRFFilter(ctxHandler, conf);
    HttpServerUtil.constrainHttpMethods(ctxHandler);
    // Put up info server.
    int port = conf.getInt("hbase.rest.info.port", 8085);
    if (port >= 0) {
        conf.setLong("startcode", System.currentTimeMillis());
        String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0");
        InfoServer infoServer = new InfoServer("rest", a, port, false, conf);
        infoServer.setAttribute("hbase.conf", conf);
        infoServer.start();
    }
    // start server
    server.start();
    server.join();
    LOG.info("***** STOPPING service '" + RESTServer.class.getSimpleName() + "' *****");
}
Also used : FilterHolder(org.eclipse.jetty.servlet.FilterHolder) HttpConfiguration(org.eclipse.jetty.server.HttpConfiguration) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) InfoServer(org.apache.hadoop.hbase.http.InfoServer) Server(org.eclipse.jetty.server.Server) ServletHolder(org.eclipse.jetty.servlet.ServletHolder) SslConnectionFactory(org.eclipse.jetty.server.SslConnectionFactory) ServerConnector(org.eclipse.jetty.server.ServerConnector) SslContextFactory(org.eclipse.jetty.util.ssl.SslContextFactory) ArrayBlockingQueue(java.util.concurrent.ArrayBlockingQueue) UserProvider(org.apache.hadoop.hbase.security.UserProvider) QueuedThreadPool(org.eclipse.jetty.util.thread.QueuedThreadPool) MBeanContainer(org.eclipse.jetty.jmx.MBeanContainer) ResourceConfig(org.glassfish.jersey.server.ResourceConfig) SecureRequestCustomizer(org.eclipse.jetty.server.SecureRequestCustomizer) HttpConnectionFactory(org.eclipse.jetty.server.HttpConnectionFactory) ServletContainer(org.glassfish.jersey.servlet.ServletContainer) ServletContextHandler(org.eclipse.jetty.servlet.ServletContextHandler)
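
The thread-pool wiring in main is worth isolating: a bounded ArrayBlockingQueue when a positive queue size is configured, otherwise Jetty's default (unbounded in Jetty 9.3) queue. Below is a minimal sketch of that selection, assuming the Jetty 9 QueuedThreadPool constructors used in the example; the values passed in main are the defaults quoted in the comments above.

import java.util.concurrent.ArrayBlockingQueue;

import org.eclipse.jetty.util.thread.QueuedThreadPool;

public class RestThreadPoolSketch {

    // Mirrors the selection in RESTServer.main: a bounded queue only when a
    // positive queue size is configured, otherwise Jetty's default queue.
    static QueuedThreadPool buildThreadPool(int maxThreads, int minThreads,
            int idleTimeoutMs, int queueSize) {
        return queueSize > 0
            ? new QueuedThreadPool(maxThreads, minThreads, idleTimeoutMs, new ArrayBlockingQueue<>(queueSize))
            : new QueuedThreadPool(maxThreads, minThreads, idleTimeoutMs);
    }

    public static void main(String[] args) {
        // Defaults from the example: 100 max threads, 2 min threads, 60 s idle
        // timeout, and a negative queue size, i.e. the unbounded default queue.
        QueuedThreadPool pool = buildThreadPool(100, 2, 60000, -1);
        System.out.println("maxThreads=" + pool.getMaxThreads() + ", minThreads=" + pool.getMinThreads());
    }
}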

Example 4 with InfoServer

Use of org.apache.hadoop.hbase.http.InfoServer in project hbase by apache.

From the class HMaster, method putUpJettyServer:

// Returns the actual info port; -1 means the info server is disabled.
private int putUpJettyServer() throws IOException {
    if (!conf.getBoolean("hbase.master.infoserver.redirect", true)) {
        return -1;
    }
    final int infoPort = conf.getInt("hbase.master.info.port.orig", HConstants.DEFAULT_MASTER_INFOPORT);
    // -1 is for disabling info server, so no redirecting
    if (infoPort < 0 || infoServer == null) {
        return -1;
    }
    if (infoPort == infoServer.getPort()) {
        return infoPort;
    }
    final String addr = conf.get("hbase.master.info.bindAddress", "0.0.0.0");
    if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) {
        String msg = "Failed to start redirecting jetty server. Address " + addr + " does not belong to this host. Correct configuration parameter: " + "hbase.master.info.bindAddress";
        LOG.error(msg);
        throw new IOException(msg);
    }
    // TODO I'm pretty sure we could just add another binding to the InfoServer run by
    // the RegionServer and have it run the RedirectServlet instead of standing up
    // a second entire stack here.
    masterJettyServer = new Server();
    final ServerConnector connector = new ServerConnector(masterJettyServer);
    connector.setHost(addr);
    connector.setPort(infoPort);
    masterJettyServer.addConnector(connector);
    masterJettyServer.setStopAtShutdown(true);
    final String redirectHostname = shouldUseThisHostnameInstead() ? useThisHostnameInstead : null;
    final RedirectServlet redirect = new RedirectServlet(infoServer, redirectHostname);
    final WebAppContext context = new WebAppContext(null, "/", null, null, null, null, WebAppContext.NO_SESSIONS);
    context.addServlet(new ServletHolder(redirect), "/*");
    context.setServer(masterJettyServer);
    try {
        masterJettyServer.start();
    } catch (Exception e) {
        throw new IOException("Failed to start redirecting jetty server", e);
    }
    return connector.getLocalPort();
}
Also used : ServerConnector(org.eclipse.jetty.server.ServerConnector) WebAppContext(org.eclipse.jetty.webapp.WebAppContext) Server(org.eclipse.jetty.server.Server) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) InfoServer(org.apache.hadoop.hbase.http.InfoServer) RpcServer(org.apache.hadoop.hbase.ipc.RpcServer) ServletHolder(org.eclipse.jetty.servlet.ServletHolder) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) BypassCoprocessorException(org.apache.hadoop.hbase.coprocessor.BypassCoprocessorException) CoordinatedStateException(org.apache.hadoop.hbase.CoordinatedStateException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) MergeRegionException(org.apache.hadoop.hbase.exceptions.MergeRegionException) InvocationTargetException(java.lang.reflect.InvocationTargetException) UnknownHostException(java.net.UnknownHostException) TableNotDisabledException(org.apache.hadoop.hbase.TableNotDisabledException) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) ServletException(javax.servlet.ServletException) PleaseHoldException(org.apache.hadoop.hbase.PleaseHoldException) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) UnknownRegionException(org.apache.hadoop.hbase.UnknownRegionException) MasterNotRunningException(org.apache.hadoop.hbase.MasterNotRunningException) KeeperException(org.apache.zookeeper.KeeperException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException)
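
putUpJettyServer stands up a second, bare Jetty instance whose only job is to redirect requests on the old master info port to the shared InfoServer. The sketch below imitates that shape with a hypothetical SimpleRedirectServlet in place of HBase's internal RedirectServlet, and uses a ServletContextHandler rather than the WebAppContext seen above; the ports 16010 and 16030 are illustrative.

import java.io.IOException;

import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;

public class RedirectOnlyJettySketch {

    // Hypothetical stand-in for HMaster's RedirectServlet: every request is
    // bounced to the real info server.
    static class SimpleRedirectServlet extends HttpServlet {
        private final String targetBase;

        SimpleRedirectServlet(String targetBase) {
            this.targetBase = targetBase;
        }

        @Override
        protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
            resp.sendRedirect(targetBase + req.getRequestURI());
        }
    }

    public static void main(String[] args) throws Exception {
        // Listen on the "original" info port and redirect everything elsewhere.
        Server server = new Server();
        ServerConnector connector = new ServerConnector(server);
        connector.setHost("0.0.0.0");
        connector.setPort(16010);
        server.addConnector(connector);
        server.setStopAtShutdown(true);

        ServletContextHandler context = new ServletContextHandler(server, "/", ServletContextHandler.NO_SESSIONS);
        context.addServlet(new ServletHolder(new SimpleRedirectServlet("http://localhost:16030")), "/*");

        server.start();
        server.join();
    }
}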

Example 5 with InfoServer

Use of org.apache.hadoop.hbase.http.InfoServer in project hbase by apache.

From the class ThriftServer, method run:

@Override
public int run(String[] args) throws Exception {
    final Configuration conf = getConf();
    TServer server = null;
    Options options = getOptions();
    CommandLine cmd = parseArguments(conf, options, args);
    int workerThreads = 0;
    int selectorThreads = 0;
    // use unbounded queue by default
    int maxCallQueueSize = -1;
    /**
     * This is to please both bin/hbase and bin/hbase-daemon. hbase-daemon provides "start" and "stop" arguments;
     * hbase should print the help if no argument is provided.
     */
    List<?> argList = cmd.getArgList();
    if (cmd.hasOption("help") || !argList.contains("start") || argList.contains("stop")) {
        printUsage();
        return 1;
    }
    // Get address to bind
    String bindAddress;
    if (cmd.hasOption("bind")) {
        bindAddress = cmd.getOptionValue("bind");
        conf.set("hbase.thrift.info.bindAddress", bindAddress);
    } else {
        bindAddress = conf.get("hbase.thrift.info.bindAddress");
    }
    // Get read timeout
    int readTimeout = THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT;
    if (cmd.hasOption(READ_TIMEOUT_OPTION)) {
        try {
            readTimeout = Integer.parseInt(cmd.getOptionValue(READ_TIMEOUT_OPTION));
        } catch (NumberFormatException e) {
            throw new RuntimeException("Could not parse the value provided for the timeout option", e);
        }
    } else {
        readTimeout = conf.getInt(THRIFT_SERVER_SOCKET_READ_TIMEOUT_KEY, THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT);
    }
    // Get port to bind to
    int listenPort = 0;
    try {
        if (cmd.hasOption("port")) {
            listenPort = Integer.parseInt(cmd.getOptionValue("port"));
        } else {
            listenPort = conf.getInt("hbase.regionserver.thrift.port", DEFAULT_LISTEN_PORT);
        }
    } catch (NumberFormatException e) {
        throw new RuntimeException("Could not parse the value provided for the port option", e);
    }
    // Thrift's implementation uses '0' as a placeholder for 'use the default.'
    int backlog = conf.getInt(BACKLOG_CONF_KEY, 0);
    // Local hostname and user name,
    // used only if QOP is configured.
    String host = null;
    String name = null;
    UserProvider userProvider = UserProvider.instantiate(conf);
    // login the server principal (if using secure Hadoop)
    boolean securityEnabled = userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled();
    if (securityEnabled) {
        host = Strings.domainNamePointerToHostName(DNS.getDefaultHost(conf.get("hbase.thrift.dns.interface", "default"), conf.get("hbase.thrift.dns.nameserver", "default")));
        userProvider.login("hbase.thrift.keytab.file", "hbase.thrift.kerberos.principal", host);
    }
    UserGroupInformation realUser = userProvider.getCurrent().getUGI();
    String stringQop = conf.get(THRIFT_QOP_KEY);
    SaslUtil.QualityOfProtection qop = null;
    if (stringQop != null) {
        qop = SaslUtil.getQop(stringQop);
        if (!securityEnabled) {
            throw new IOException("Thrift server must" + " run in secure mode to support authentication");
        }
        // Extract the name from the principal
        name = SecurityUtil.getUserFromPrincipal(conf.get("hbase.thrift.kerberos.principal"));
    }
    boolean nonblocking = cmd.hasOption("nonblocking");
    boolean hsha = cmd.hasOption("hsha");
    boolean selector = cmd.hasOption("selector");
    ThriftMetrics metrics = new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.TWO);
    final JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(conf, metrics.getSource());
    String implType = "threadpool";
    if (nonblocking) {
        implType = "nonblocking";
    } else if (hsha) {
        implType = "hsha";
    } else if (selector) {
        implType = "selector";
    }
    conf.set("hbase.regionserver.thrift.server.type", implType);
    conf.setInt("hbase.regionserver.thrift.port", listenPort);
    registerFilters(conf);
    // Construct the correct ProtocolFactory
    boolean compact = cmd.hasOption("compact") || conf.getBoolean("hbase.regionserver.thrift.compact", false);
    TProtocolFactory protocolFactory = getTProtocolFactory(compact);
    final ThriftHBaseServiceHandler hbaseHandler = new ThriftHBaseServiceHandler(conf, userProvider);
    THBaseService.Iface handler = ThriftHBaseServiceHandler.newInstance(hbaseHandler, metrics);
    final THBaseService.Processor p = new THBaseService.Processor(handler);
    conf.setBoolean("hbase.regionserver.thrift.compact", compact);
    TProcessor processor = p;
    boolean framed = cmd.hasOption("framed") || conf.getBoolean("hbase.regionserver.thrift.framed", false) || nonblocking || hsha;
    TTransportFactory transportFactory = getTTransportFactory(qop, name, host, framed, conf.getInt("hbase.regionserver.thrift.framed.max_frame_size_in_mb", 2) * 1024 * 1024);
    InetSocketAddress inetSocketAddress = bindToPort(bindAddress, listenPort);
    conf.setBoolean("hbase.regionserver.thrift.framed", framed);
    if (qop != null) {
        // Create a processor wrapper to capture the caller's principal
        processor = new TProcessor() {

            @Override
            public boolean process(TProtocol inProt, TProtocol outProt) throws TException {
                TSaslServerTransport saslServerTransport = (TSaslServerTransport) inProt.getTransport();
                SaslServer saslServer = saslServerTransport.getSaslServer();
                String principal = saslServer.getAuthorizationID();
                hbaseHandler.setEffectiveUser(principal);
                return p.process(inProt, outProt);
            }
        };
    }
    if (cmd.hasOption("w")) {
        workerThreads = Integer.parseInt(cmd.getOptionValue("w"));
    }
    if (cmd.hasOption("s")) {
        selectorThreads = Integer.parseInt(cmd.getOptionValue("s"));
    }
    if (cmd.hasOption("q")) {
        maxCallQueueSize = Integer.parseInt(cmd.getOptionValue("q"));
    }
    // Check for a user-defined info server port setting and, if present, override the conf
    try {
        if (cmd.hasOption("infoport")) {
            String val = cmd.getOptionValue("infoport");
            conf.setInt("hbase.thrift.info.port", Integer.parseInt(val));
            log.debug("Web UI port set to " + val);
        }
    } catch (NumberFormatException e) {
        log.error("Could not parse the value provided for the infoport option", e);
        printUsage();
        System.exit(1);
    }
    // Put up info server.
    int port = conf.getInt("hbase.thrift.info.port", 9095);
    if (port >= 0) {
        conf.setLong("startcode", System.currentTimeMillis());
        String a = conf.get("hbase.thrift.info.bindAddress", "0.0.0.0");
        InfoServer infoServer = new InfoServer("thrift", a, port, false, conf);
        infoServer.setAttribute("hbase.conf", conf);
        infoServer.start();
    }
    if (nonblocking) {
        server = getTNonBlockingServer(protocolFactory, processor, transportFactory, inetSocketAddress);
    } else if (hsha) {
        server = getTHsHaServer(protocolFactory, processor, transportFactory, workerThreads, maxCallQueueSize, inetSocketAddress, metrics);
    } else if (selector) {
        server = getTThreadedSelectorServer(protocolFactory, processor, transportFactory, workerThreads, selectorThreads, maxCallQueueSize, inetSocketAddress, metrics);
    } else {
        server = getTThreadPoolServer(protocolFactory, processor, transportFactory, workerThreads, inetSocketAddress, backlog, readTimeout, metrics);
    }
    final TServer tserver = server;
    realUser.doAs(new PrivilegedAction<Object>() {

        @Override
        public Object run() {
            pauseMonitor.start();
            try {
                tserver.serve();
                return null;
            } finally {
                pauseMonitor.stop();
            }
        }
    });
    // When tserver.stop() eventually happens, we'll get here.
    return 0;
}
Also used : TException(org.apache.thrift.TException) Options(org.apache.commons.cli.Options) TProtocolFactory(org.apache.thrift.protocol.TProtocolFactory) TProcessor(org.apache.thrift.TProcessor) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) TServer(org.apache.thrift.server.TServer) InetSocketAddress(java.net.InetSocketAddress) SaslServer(javax.security.sasl.SaslServer) JvmPauseMonitor(org.apache.hadoop.hbase.util.JvmPauseMonitor) UserProvider(org.apache.hadoop.hbase.security.UserProvider) TProtocol(org.apache.thrift.protocol.TProtocol) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) THBaseService(org.apache.hadoop.hbase.thrift2.generated.THBaseService) IOException(java.io.IOException) TTransportFactory(org.apache.thrift.transport.TTransportFactory) TSaslServerTransport(org.apache.thrift.transport.TSaslServerTransport) CommandLine(org.apache.commons.cli.CommandLine) ThriftMetrics(org.apache.hadoop.hbase.thrift.ThriftMetrics) InfoServer(org.apache.hadoop.hbase.http.InfoServer) SaslUtil(org.apache.hadoop.hbase.security.SaslUtil)
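
run derives the Thrift server implementation from mutually exclusive command-line flags, with the plain thread-pool server as the fallback. A minimal sketch of that precedence, assuming commons-cli's DefaultParser (the example itself only shows getOptions/parseArguments); the option descriptions are paraphrased, not copied from ThriftServer.

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;

public class ThriftImplTypeSketch {

    // Mirrors the precedence in ThriftServer.run: nonblocking, then hsha,
    // then selector, falling back to the thread-pool server.
    static String chooseImplType(boolean nonblocking, boolean hsha, boolean selector) {
        if (nonblocking) {
            return "nonblocking";
        } else if (hsha) {
            return "hsha";
        } else if (selector) {
            return "selector";
        }
        return "threadpool";
    }

    public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.addOption("nonblocking", false, "use the non-blocking server");
        options.addOption("hsha", false, "use the half-sync/half-async server");
        options.addOption("selector", false, "use the threaded-selector server");

        CommandLine cmd = new DefaultParser().parse(options, args);
        String implType = chooseImplType(cmd.hasOption("nonblocking"),
                cmd.hasOption("hsha"), cmd.hasOption("selector"));
        System.out.println("hbase.regionserver.thrift.server.type = " + implType);
    }
}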

Aggregations

InfoServer (org.apache.hadoop.hbase.http.InfoServer) -- 5
IOException (java.io.IOException) -- 3
InterruptedIOException (java.io.InterruptedIOException) -- 2
Configuration (org.apache.hadoop.conf.Configuration) -- 2
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration) -- 2
UserProvider (org.apache.hadoop.hbase.security.UserProvider) -- 2
Server (org.eclipse.jetty.server.Server) -- 2
ServerConnector (org.eclipse.jetty.server.ServerConnector) -- 2
ServletHolder (org.eclipse.jetty.servlet.ServletHolder) -- 2
InvocationTargetException (java.lang.reflect.InvocationTargetException) -- 1
BindException (java.net.BindException) -- 1
InetSocketAddress (java.net.InetSocketAddress) -- 1
UnknownHostException (java.net.UnknownHostException) -- 1
ArrayBlockingQueue (java.util.concurrent.ArrayBlockingQueue) -- 1
SaslServer (javax.security.sasl.SaslServer) -- 1
ServletException (javax.servlet.ServletException) -- 1
CommandLine (org.apache.commons.cli.CommandLine) -- 1
Options (org.apache.commons.cli.Options) -- 1
CoordinatedStateException (org.apache.hadoop.hbase.CoordinatedStateException) -- 1
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) -- 1