Search in sources:

Example 1 with Server

Use of org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server in project hbase by apache.

From the class RESTServer, method run.

/**
 * Runs the REST server.
 */
public synchronized void run() throws Exception {
    Pair<FilterHolder, Class<? extends ServletContainer>> pair = loginServerPrincipal(userProvider, conf);
    FilterHolder authFilter = pair.getFirst();
    Class<? extends ServletContainer> containerClass = pair.getSecond();
    RESTServlet servlet = RESTServlet.getInstance(conf, userProvider);
    // set up the Jersey servlet container for Jetty
    ResourceConfig application = new ResourceConfig().packages("org.apache.hadoop.hbase.rest").register(JacksonJaxbJsonProvider.class);
    // Using our custom ServletContainer is tremendously important. This is what makes sure the
    // UGI.doAs() is done for the remoteUser, and calls are not made as the REST server itself.
    ServletContainer servletContainer = ReflectionUtils.newInstance(containerClass, application);
    ServletHolder sh = new ServletHolder(servletContainer);
    // Set the default max thread number to 100 to limit the number of concurrent
    // requests so that the REST server doesn't OOM easily. Jetty would otherwise
    // default the max thread number to 250.
    //
    // Our default min thread number of 2 is the same as Jetty's.
    int maxThreads = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREADS_MAX, 100);
    int minThreads = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREADS_MIN, 2);
    // Use the default queue (unbounded with Jetty 9.3) when the configured queue size is not
    // positive; otherwise use a bounded {@link ArrayBlockingQueue} of the given size.
    int queueSize = servlet.getConfiguration().getInt(REST_THREAD_POOL_TASK_QUEUE_SIZE, -1);
    int idleTimeout = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000);
    QueuedThreadPool threadPool = queueSize > 0 ? new QueuedThreadPool(maxThreads, minThreads, idleTimeout, new ArrayBlockingQueue<>(queueSize)) : new QueuedThreadPool(maxThreads, minThreads, idleTimeout);
    this.server = new Server(threadPool);
    // Setup JMX
    MBeanContainer mbContainer = new MBeanContainer(ManagementFactory.getPlatformMBeanServer());
    server.addEventListener(mbContainer);
    server.addBean(mbContainer);
    String host = servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0");
    int servicePort = servlet.getConfiguration().getInt("hbase.rest.port", 8080);
    int httpHeaderCacheSize = servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE, DEFAULT_HTTP_HEADER_CACHE_SIZE);
    HttpConfiguration httpConfig = new HttpConfiguration();
    httpConfig.setSecureScheme("https");
    httpConfig.setSecurePort(servicePort);
    httpConfig.setHeaderCacheSize(httpHeaderCacheSize);
    httpConfig.setRequestHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
    httpConfig.setResponseHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
    httpConfig.setSendServerVersion(false);
    httpConfig.setSendDateHeader(false);
    ServerConnector serverConnector;
    if (conf.getBoolean(REST_SSL_ENABLED, false)) {
        HttpConfiguration httpsConfig = new HttpConfiguration(httpConfig);
        httpsConfig.addCustomizer(new SecureRequestCustomizer());
        SslContextFactory sslCtxFactory = new SslContextFactory();
        String keystore = conf.get(REST_SSL_KEYSTORE_STORE);
        String keystoreType = conf.get(REST_SSL_KEYSTORE_TYPE);
        String password = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_PASSWORD, null);
        String keyPassword = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_KEYPASSWORD, password);
        sslCtxFactory.setKeyStorePath(keystore);
        if (StringUtils.isNotBlank(keystoreType)) {
            sslCtxFactory.setKeyStoreType(keystoreType);
        }
        sslCtxFactory.setKeyStorePassword(password);
        sslCtxFactory.setKeyManagerPassword(keyPassword);
        String trustStore = conf.get(REST_SSL_TRUSTSTORE_STORE);
        if (StringUtils.isNotBlank(trustStore)) {
            sslCtxFactory.setTrustStorePath(trustStore);
        }
        String trustStorePassword = HBaseConfiguration.getPassword(conf, REST_SSL_TRUSTSTORE_PASSWORD, null);
        if (StringUtils.isNotBlank(trustStorePassword)) {
            sslCtxFactory.setTrustStorePassword(trustStorePassword);
        }
        String trustStoreType = conf.get(REST_SSL_TRUSTSTORE_TYPE);
        if (StringUtils.isNotBlank(trustStoreType)) {
            sslCtxFactory.setTrustStoreType(trustStoreType);
        }
        String[] excludeCiphers = servlet.getConfiguration().getStrings(REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY);
        if (excludeCiphers.length != 0) {
            sslCtxFactory.setExcludeCipherSuites(excludeCiphers);
        }
        String[] includeCiphers = servlet.getConfiguration().getStrings(REST_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY);
        if (includeCiphers.length != 0) {
            sslCtxFactory.setIncludeCipherSuites(includeCiphers);
        }
        String[] excludeProtocols = servlet.getConfiguration().getStrings(REST_SSL_EXCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY);
        if (excludeProtocols.length != 0) {
            sslCtxFactory.setExcludeProtocols(excludeProtocols);
        }
        String[] includeProtocols = servlet.getConfiguration().getStrings(REST_SSL_INCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY);
        if (includeProtocols.length != 0) {
            sslCtxFactory.setIncludeProtocols(includeProtocols);
        }
        serverConnector = new ServerConnector(server, new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), new HttpConnectionFactory(httpsConfig));
    } else {
        serverConnector = new ServerConnector(server, new HttpConnectionFactory(httpConfig));
    }
    int acceptQueueSize = servlet.getConfiguration().getInt(REST_CONNECTOR_ACCEPT_QUEUE_SIZE, -1);
    if (acceptQueueSize >= 0) {
        serverConnector.setAcceptQueueSize(acceptQueueSize);
    }
    serverConnector.setPort(servicePort);
    serverConnector.setHost(host);
    server.addConnector(serverConnector);
    server.setStopAtShutdown(true);
    // set up context
    ServletContextHandler ctxHandler = new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS);
    ctxHandler.addServlet(sh, PATH_SPEC_ANY);
    if (authFilter != null) {
        ctxHandler.addFilter(authFilter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST));
    }
    // Load filters from configuration.
    String[] filterClasses = servlet.getConfiguration().getStrings(FILTER_CLASSES, GzipFilter.class.getName());
    for (String filter : filterClasses) {
        filter = filter.trim();
        ctxHandler.addFilter(filter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST));
    }
    addCSRFFilter(ctxHandler, conf);
    addClickjackingPreventionFilter(ctxHandler, conf);
    addSecurityHeadersFilter(ctxHandler, conf);
    HttpServerUtil.constrainHttpMethods(ctxHandler, servlet.getConfiguration().getBoolean(REST_HTTP_ALLOW_OPTIONS_METHOD, REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT));
    // Put up info server.
    int port = conf.getInt("hbase.rest.info.port", 8085);
    if (port >= 0) {
        conf.setLong("startcode", EnvironmentEdgeManager.currentTime());
        String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0");
        this.infoServer = new InfoServer("rest", a, port, false, conf);
        this.infoServer.setAttribute("hbase.conf", conf);
        this.infoServer.start();
    }
    // start server
    server.start();
}
Also used : FilterHolder(org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.FilterHolder) SecureRequestCustomizer(org.apache.hbase.thirdparty.org.eclipse.jetty.server.SecureRequestCustomizer) Server(org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server) InfoServer(org.apache.hadoop.hbase.http.InfoServer) HttpConnectionFactory(org.apache.hbase.thirdparty.org.eclipse.jetty.server.HttpConnectionFactory) ServletHolder(org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder) HttpConfiguration(org.apache.hbase.thirdparty.org.eclipse.jetty.server.HttpConfiguration) SslConnectionFactory(org.apache.hbase.thirdparty.org.eclipse.jetty.server.SslConnectionFactory) GzipFilter(org.apache.hadoop.hbase.rest.filter.GzipFilter) ServerConnector(org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector) SslContextFactory(org.apache.hbase.thirdparty.org.eclipse.jetty.util.ssl.SslContextFactory) ArrayBlockingQueue(java.util.concurrent.ArrayBlockingQueue) QueuedThreadPool(org.apache.hbase.thirdparty.org.eclipse.jetty.util.thread.QueuedThreadPool) ServletContainer(org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer) MBeanContainer(org.apache.hbase.thirdparty.org.eclipse.jetty.jmx.MBeanContainer) ResourceConfig(org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig) ServletContextHandler(org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletContextHandler)
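
For comparison, the bounded-thread-pool-plus-connector pattern above can be reproduced against stock Jetty. A minimal sketch, assuming plain org.eclipse.jetty packages (HBase uses the shaded thirdparty equivalents) and made-up port and sizing values:

import java.util.concurrent.ArrayBlockingQueue;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.util.thread.QueuedThreadPool;

public class BoundedJettySketch {
    public static void main(String[] args) throws Exception {
        // Cap concurrency the way RESTServer does: a bounded pool plus a bounded task queue,
        // so overload shows up as rejected work instead of an OOM.
        QueuedThreadPool threadPool =
            new QueuedThreadPool(100, 2, 60_000, new ArrayBlockingQueue<>(128));
        Server server = new Server(threadPool);

        HttpConfiguration httpConfig = new HttpConfiguration();
        // Same hardening as the RESTServer code: don't advertise the server version.
        httpConfig.setSendServerVersion(false);
        ServerConnector connector =
            new ServerConnector(server, new HttpConnectionFactory(httpConfig));
        connector.setPort(8080); // made-up port for the sketch
        server.addConnector(connector);

        server.setStopAtShutdown(true);
        server.start();
        server.join();
    }
}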

Example 2 with Server

Use of org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server in project hbase by apache.

From the class HMaster, method finishActiveMasterInitialization.

/**
 * Finish initialization of HMaster after becoming the primary master.
 * <p/>
 * The startup order is a bit complicated but very important, do not change it unless you know
 * what you are doing.
 * <ol>
 * <li>Initialize file system based components - file system manager, wal manager, table
 * descriptors, etc</li>
 * <li>Publish cluster id</li>
 * <li>Here comes the most complicated part - initialize server manager, assignment manager and
 * region server tracker
 * <ol type='i'>
 * <li>Create server manager</li>
 * <li>Create master local region</li>
 * <li>Create procedure executor, load the procedures, but do not start workers. We will start it
 * later after we finish scheduling SCPs to avoid scheduling duplicated SCPs for the same
 * server</li>
 * <li>Create assignment manager and start it, load the meta region state, but do not load data
 * from meta region</li>
 * <li>Start region server tracker, construct the online servers set and find out dead servers and
 * schedule SCP for them. The online servers will be constructed by scanning zk, and we will also
 * scan the wal directory to find out possible live region servers, and the differences between
 * these two sets are the dead servers</li>
 * </ol>
 * </li>
 * <li>If this is a new deploy, schedule an InitMetaProcedure to initialize meta</li>
 * <li>Start necessary service threads - balancer, catalog janitor, executor services, and also
 * the procedure executor, etc. Notice that the balancer must be created first as assignment
 * manager may use it when assigning regions.</li>
 * <li>Wait for meta to be initialized if necessary, start table state manager.</li>
 * <li>Wait for enough region servers to check-in</li>
 * <li>Let assignment manager load data from meta and construct region states</li>
 * <li>Start all other things such as chore services, etc</li>
 * </ol>
 * <p/>
 * Notice that now we will not schedule a special procedure to make meta online (unless this is
 * the first time, where meta has not been created yet); we will rely on SCP to bring meta online.
 */
private void finishActiveMasterInitialization(MonitoredTask status) throws IOException, InterruptedException, KeeperException, ReplicationException {
    /*
     * We are active master now... go initialize components we need to run.
     */
    status.setStatus("Initializing Master file system");
    this.masterActiveTime = EnvironmentEdgeManager.currentTime();
    // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
    // always initialize the MemStoreLAB as we use a region to store data in master now, see
    // localStore.
    initializeMemStoreChunkCreator(null);
    this.fileSystemManager = new MasterFileSystem(conf);
    this.walManager = new MasterWalManager(this);
    // warm-up HTDs cache on master initialization
    if (preLoadTableDescriptors) {
        status.setStatus("Pre-loading table descriptors");
        this.tableDescriptors.getAll();
    }
    // Publish cluster ID; set it in Master too. The superclass RegionServer does this later but
    // only after it has checked in with the Master. At least a few tests ask Master for clusterId
    // before it has called its run method and before RegionServer has done the reportForDuty.
    ClusterId clusterId = fileSystemManager.getClusterId();
    status.setStatus("Publishing Cluster ID " + clusterId + " in ZooKeeper");
    ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
    this.clusterId = clusterId.toString();
    // Precaution: write the hbck1 lock file to fence out any running hbck1 instances; this
    // behavior can be disabled by setting hbase.write.hbck1.lock.file to false.
    if (this.conf.getBoolean("hbase.write.hbck1.lock.file", true)) {
        Pair<Path, FSDataOutputStream> result = null;
        try {
            result = HBaseFsck.checkAndMarkRunningHbck(this.conf, HBaseFsck.createLockRetryCounterFactory(this.conf).create());
        } finally {
            if (result != null) {
                Closeables.close(result.getSecond(), true);
            }
        }
    }
    status.setStatus("Initialize ServerManager and schedule SCP for crash servers");
    // The below two managers must be created before loading procedures, as they will be used during
    // loading.
    this.serverManager = createServerManager(this);
    this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);
    if (!conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) {
        this.splitWALManager = new SplitWALManager(this);
    }
    // initialize master local region
    masterRegion = MasterRegionFactory.create(this);
    tryMigrateMetaLocationsFromZooKeeper();
    createProcedureExecutor();
    Map<Class<?>, List<Procedure<MasterProcedureEnv>>> procsByType = procedureExecutor.getActiveProceduresNoCopy().stream().collect(Collectors.groupingBy(p -> p.getClass()));
    // Create Assignment Manager
    this.assignmentManager = createAssignmentManager(this, masterRegion);
    this.assignmentManager.start();
    // TODO: TRSP can perform as the sub procedure for other procedures, so even if it is marked as
    // completed, it could still be in the procedure list. This is a bit strange but is another
    // story, need to verify the implementation for ProcedureExecutor and ProcedureStore.
    List<TransitRegionStateProcedure> ritList = procsByType.getOrDefault(TransitRegionStateProcedure.class, Collections.emptyList()).stream().filter(p -> !p.isFinished()).map(p -> (TransitRegionStateProcedure) p).collect(Collectors.toList());
    this.assignmentManager.setupRIT(ritList);
    // Start RegionServerTracker with listing of servers found with existing SCPs -- these should
    // be registered in the deadServers set -- and with the list of servernames out on the
    // filesystem that COULD BE 'alive' (we'll schedule SCPs for each and let SCP figure it out).
    // We also pass dirs that are already 'splitting'... so we can do some checks down in tracker.
    // TODO: Generate the splitting and live Set in one pass instead of two as we currently do.
    this.regionServerTracker.upgrade(procsByType.getOrDefault(ServerCrashProcedure.class, Collections.emptyList()).stream().map(p -> (ServerCrashProcedure) p).map(p -> p.getServerName()).collect(Collectors.toSet()), walManager.getLiveServersFromWALDir(), walManager.getSplittingServersFromWALDir());
    // This manager must be accessed AFTER hbase:meta is confirmed online.
    this.tableStateManager = new TableStateManager(this);
    status.setStatus("Initializing ZK system trackers");
    initializeZKBasedSystemTrackers();
    status.setStatus("Loading last flushed sequence id of regions");
    try {
        this.serverManager.loadLastFlushedSequenceIds();
    } catch (IOException e) {
        LOG.info("Failed to load last flushed sequence id of regions" + " from file system", e);
    }
    // Set ourselves as active Master now our claim has succeeded up in zk.
    this.activeMaster = true;
    // Start the Zombie master detector after setting master as active, see HBASE-21535
    Thread zombieDetector = new Thread(new MasterInitializationMonitor(this), "ActiveMasterInitializationMonitor-" + EnvironmentEdgeManager.currentTime());
    zombieDetector.setDaemon(true);
    zombieDetector.start();
    if (!maintenanceMode) {
        // Quota support is on by default; skip it only if the user specifically asks not to
        // load this Observer.
        if (QuotaUtil.isQuotaEnabled(conf)) {
            updateConfigurationForQuotasObserver(conf);
        }
        // initialize master side coprocessors before we start handling requests
        status.setStatus("Initializing master coprocessors");
        this.cpHost = new MasterCoprocessorHost(this, this.conf);
    } else {
        // start an in process region server for carrying system regions
        maintenanceRegionServer = JVMClusterUtil.createRegionServerThread(getConfiguration(), HRegionServer.class, 0);
        maintenanceRegionServer.start();
    }
    // Checking if meta needs initializing.
    status.setStatus("Initializing meta table if this is a new deploy");
    InitMetaProcedure initMetaProc = null;
    // Print out state of hbase:meta on startup; helps debugging.
    if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) {
        Optional<InitMetaProcedure> optProc = procedureExecutor.getProcedures().stream().filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny();
        initMetaProc = optProc.orElseGet(() -> {
            // schedule an init meta procedure if meta has not been deployed yet
            InitMetaProcedure temp = new InitMetaProcedure();
            procedureExecutor.submitProcedure(temp);
            return temp;
        });
    }
    // initialize load balancer
    this.balancer.setMasterServices(this);
    this.balancer.initialize();
    this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
    // start up all service threads.
    status.setStatus("Initializing master service threads");
    startServiceThreads();
    // wait for meta to be initialized after we start the procedure executor
    if (initMetaProc != null) {
        initMetaProc.await();
    }
    // Wake up this server to check in
    sleeper.skipSleepCycle();
    // Wait for region servers to report in.
    // With this as part of master initialization, it precludes our being able to start a single
    // server that is both Master and RegionServer. Needs more thought. TODO.
    String statusStr = "Wait for region servers to report in";
    status.setStatus(statusStr);
    LOG.info(Objects.toString(status));
    waitForRegionServers(status);
    // Check if master is shutting down because of an issue initializing region servers or the balancer.
    if (isStopped()) {
        return;
    }
    status.setStatus("Starting assignment manager");
    // The below cannot make progress without hbase:meta being online. Meta on-lining happens in
    // the background as procedures run, so wait here until meta is for sure
    // available. That's what waitForMetaOnline does.
    if (!waitForMetaOnline()) {
        return;
    }
    TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME);
    final ColumnFamilyDescriptor tableFamilyDesc = metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY);
    final ColumnFamilyDescriptor replBarrierFamilyDesc = metaDescriptor.getColumnFamily(HConstants.REPLICATION_BARRIER_FAMILY);
    this.assignmentManager.joinCluster();
    // The below depends on hbase:meta being online.
    this.assignmentManager.processOfflineRegions();
    // this must be called after the above processOfflineRegions to prevent race
    this.assignmentManager.wakeMetaLoadedEvent();
    // If a meta replica count is configured, reconcile it with the region replication in the
    // meta TableDescriptor, updating the descriptor directly first.
    if (conf.get(HConstants.META_REPLICAS_NUM) != null) {
        int replicasNumInConf = conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM);
        TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME);
        if (metaDesc.getRegionReplication() != replicasNumInConf) {
            // it is possible that we already have some replicas before upgrading, so we must set the
            // region replication number in meta TableDescriptor directly first, without creating a
            // ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas.
            int existingReplicasCount = assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size();
            if (existingReplicasCount > metaDesc.getRegionReplication()) {
                LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount);
                metaDesc = TableDescriptorBuilder.newBuilder(metaDesc).setRegionReplication(existingReplicasCount).build();
                tableDescriptors.update(metaDesc);
            }
            // check again, and issue a ModifyTableProcedure if needed
            if (metaDesc.getRegionReplication() != replicasNumInConf) {
                LOG.info("The {} config is {} while the replica count in TableDescriptor is {}" + " for hbase:meta, altering...", HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication());
                procedureExecutor.submitProcedure(new ModifyTableProcedure(procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc).setRegionReplication(replicasNumInConf).build(), null, metaDesc, false));
            }
        }
    }
    // Initialize after meta is up as below scans meta
    FavoredNodesManager fnm = getFavoredNodesManager();
    if (fnm != null) {
        fnm.initializeFromMeta();
    }
    // set cluster status again after user regions are assigned
    this.balancer.updateClusterMetrics(getClusterMetricsWithoutCoprocessor());
    // Start balancer and meta catalog janitor after meta and regions have been assigned.
    status.setStatus("Starting balancer and catalog janitor");
    this.clusterStatusChore = new ClusterStatusChore(this, balancer);
    getChoreService().scheduleChore(clusterStatusChore);
    this.balancerChore = new BalancerChore(this);
    if (!disableBalancerChoreForTest) {
        getChoreService().scheduleChore(balancerChore);
    }
    if (regionNormalizerManager != null) {
        getChoreService().scheduleChore(regionNormalizerManager.getRegionNormalizerChore());
    }
    this.catalogJanitorChore = new CatalogJanitor(this);
    getChoreService().scheduleChore(catalogJanitorChore);
    this.hbckChore = new HbckChore(this);
    getChoreService().scheduleChore(hbckChore);
    this.serverManager.startChore();
    // Only for rolling upgrade, where we need to migrate the data in namespace table to meta table.
    if (!waitForNamespaceOnline()) {
        return;
    }
    status.setStatus("Starting cluster schema service");
    try {
        initClusterSchemaService();
    } catch (IllegalStateException e) {
        if (e.getCause() != null && e.getCause() instanceof NoSuchColumnFamilyException && tableFamilyDesc == null && replBarrierFamilyDesc == null) {
            LOG.info("ClusterSchema service could not be initialized. This is " + "expected during HBase 1 to 2 upgrade", e);
        } else {
            throw e;
        }
    }
    if (this.cpHost != null) {
        try {
            this.cpHost.preMasterInitialization();
        } catch (IOException e) {
            LOG.error("Coprocessor preMasterInitialization() hook failed", e);
        }
    }
    status.markComplete("Initialization successful");
    LOG.info(String.format("Master has completed initialization %.3fsec", (EnvironmentEdgeManager.currentTime() - masterActiveTime) / 1000.0f));
    this.masterFinishedInitializationTime = EnvironmentEdgeManager.currentTime();
    configurationManager.registerObserver(this.balancer);
    configurationManager.registerObserver(this.hfileCleanerPool);
    configurationManager.registerObserver(this.logCleanerPool);
    configurationManager.registerObserver(this.hfileCleaner);
    configurationManager.registerObserver(this.logCleaner);
    configurationManager.registerObserver(this.regionsRecoveryConfigManager);
    // Set master as 'initialized'.
    setInitialized(true);
    if (tableFamilyDesc == null && replBarrierFamilyDesc == null) {
        // create missing CFs in meta table after master is set to 'initialized'.
        createMissingCFsInMetaDuringUpgrade(metaDescriptor);
        // Aborting here restarts the active master, and the chore/executor
        // services will be started during master init phase.
        throw new PleaseRestartMasterException("Aborting active master after missing" + " CFs are successfully added in meta. Subsequent active master " + "initialization should be uninterrupted");
    }
    if (maintenanceMode) {
        LOG.info("Detected repair mode, skipping final initialization steps.");
        return;
    }
    assignmentManager.checkIfShouldMoveSystemRegionAsync();
    status.setStatus("Starting quota manager");
    initQuotaManager();
    if (QuotaUtil.isQuotaEnabled(conf)) {
        // Create the quota snapshot notifier
        spaceQuotaSnapshotNotifier = createQuotaSnapshotNotifier();
        spaceQuotaSnapshotNotifier.initialize(getConnection());
        this.quotaObserverChore = new QuotaObserverChore(this, getMasterMetrics());
        // Start the chore to read the region FS space reports and act on them
        getChoreService().scheduleChore(quotaObserverChore);
        this.snapshotQuotaChore = new SnapshotQuotaObserverChore(this, getMasterMetrics());
        // Start the chore to read snapshots and add their usage to table/NS quotas
        getChoreService().scheduleChore(snapshotQuotaChore);
    }
    final SlowLogMasterService slowLogMasterService = new SlowLogMasterService(conf, this);
    slowLogMasterService.init();
    // Clear dead servers that have the same host name and port as an online server, because we
    // do not remove a dead server that matches a region server trying to check in before
    // master initialization completes. See HBASE-5916.
    this.serverManager.clearDeadServersWithSameHostNameAndPortOfOnlineServer();
    // Check and set the znode ACLs if needed in case we are overtaking a non-secure configuration
    status.setStatus("Checking ZNode ACLs");
    zooKeeper.checkAndSetZNodeAcls();
    status.setStatus("Initializing MOB Cleaner");
    initMobCleaner();
    status.setStatus("Calling postStartMaster coprocessors");
    if (this.cpHost != null) {
        // don't let cp initialization errors kill the master
        try {
            this.cpHost.postStartMaster();
        } catch (IOException ioe) {
            LOG.error("Coprocessor postStartMaster() hook failed", ioe);
        }
    }
    zombieDetector.interrupt();
    /*
     * After the master has started up, let's do balancer post-startup initialization. Since this
     * runs in the activeMasterManager thread, it should be fine.
     */
    long start = EnvironmentEdgeManager.currentTime();
    this.balancer.postMasterStartupInitialize();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Balancer post startup initialization complete, took " + ((EnvironmentEdgeManager.currentTime() - start) / 1000) + " seconds");
    }
    this.rollingUpgradeChore = new RollingUpgradeChore(this);
    getChoreService().scheduleChore(rollingUpgradeChore);
}
Also used : DisablePeerProcedure(org.apache.hadoop.hbase.master.replication.DisablePeerProcedure) LockManager(org.apache.hadoop.hbase.master.locking.LockManager) UserProvider(org.apache.hadoop.hbase.security.UserProvider) Server(org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server) RSGroupAdminEndpoint(org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint) GetRegionInfoResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) StringUtils(org.apache.commons.lang3.StringUtils) RetryCounterFactory(org.apache.hadoop.hbase.util.RetryCounterFactory) EnableTableProcedure(org.apache.hadoop.hbase.master.procedure.EnableTableProcedure) AddPeerProcedure(org.apache.hadoop.hbase.master.replication.AddPeerProcedure) TableDescriptorChecker(org.apache.hadoop.hbase.util.TableDescriptorChecker) Future(java.util.concurrent.Future) ProcedureExecutor(org.apache.hadoop.hbase.procedure2.ProcedureExecutor) NamedQueueRecorder(org.apache.hadoop.hbase.namequeues.NamedQueueRecorder) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) MobFileCleanerChore(org.apache.hadoop.hbase.mob.MobFileCleanerChore) ZNodePaths(org.apache.hadoop.hbase.zookeeper.ZNodePaths) ModifyTableProcedure(org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure) Closeables(org.apache.hbase.thirdparty.com.google.common.io.Closeables) EnumSet(java.util.EnumSet) MetaTableLocator(org.apache.hadoop.hbase.zookeeper.MetaTableLocator) Pair(org.apache.hadoop.hbase.util.Pair) MasterRegionFactory(org.apache.hadoop.hbase.master.region.MasterRegionFactory) CommonFSUtils(org.apache.hadoop.hbase.util.CommonFSUtils) CellBuilderType(org.apache.hadoop.hbase.CellBuilderType) WebAppContext(org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext) ModifyColumnFamilyStoreFileTrackerProcedure(org.apache.hadoop.hbase.regionserver.storefiletracker.ModifyColumnFamilyStoreFileTrackerProcedure) ModifyTableStoreFileTrackerProcedure(org.apache.hadoop.hbase.regionserver.storefiletracker.ModifyTableStoreFileTrackerProcedure) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) ExecutorType(org.apache.hadoop.hbase.executor.ExecutorType) ReplicationPeerDescription(org.apache.hadoop.hbase.replication.ReplicationPeerDescription) BaseLoadBalancer(org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer) RSGroupBasedLoadBalancer(org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer) ReplicationLoadSource(org.apache.hadoop.hbase.replication.ReplicationLoadSource) HBASE_SPLIT_WAL_COORDINATED_BY_ZK(org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK) Descriptors(org.apache.hbase.thirdparty.com.google.protobuf.Descriptors) Constructor(java.lang.reflect.Constructor) ProcedureStoreListener(org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener) ReopenTableRegionsProcedure(org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure) TaskMonitor(org.apache.hadoop.hbase.monitoring.TaskMonitor) ServerTask(org.apache.hadoop.hbase.ServerTask) Option(org.apache.hadoop.hbase.ClusterMetrics.Option) TableName(org.apache.hadoop.hbase.TableName) ServletHolder(org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder) LoadBalancerTracker(org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker) Service(org.apache.hbase.thirdparty.com.google.protobuf.Service) Sets(org.apache.hbase.thirdparty.com.google.common.collect.Sets) 
IOException(java.io.IOException) ServerConnector(org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector) SlowLogMasterService(org.apache.hadoop.hbase.master.slowlog.SlowLogMasterService) MasterAddressSyncer(org.apache.hadoop.hbase.master.zksyncer.MasterAddressSyncer) UnknownHostException(java.net.UnknownHostException) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) ExecutionException(java.util.concurrent.ExecutionException) RSGroupInfoManager(org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager) TableNotDisabledException(org.apache.hadoop.hbase.TableNotDisabledException) RemoteProcedureException(org.apache.hadoop.hbase.procedure2.RemoteProcedureException) RegionStateStore(org.apache.hadoop.hbase.master.assignment.RegionStateStore) NoSuchColumnFamilyException(org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask) SpaceQuotaSnapshotNotifier(org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier) CoprocessorHost(org.apache.hadoop.hbase.coprocessor.CoprocessorHost) MasterStatusServlet(org.apache.hadoop.hbase.master.http.MasterStatusServlet) TruncateTableProcedure(org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure) MASTER_HOSTNAME_KEY(org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY) EnablePeerProcedure(org.apache.hadoop.hbase.master.replication.EnablePeerProcedure) MasterProcedureManagerHost(org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) PleaseHoldException(org.apache.hadoop.hbase.PleaseHoldException) CellBuilderFactory(org.apache.hadoop.hbase.CellBuilderFactory) MergeTableRegionsProcedure(org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Maps(org.apache.hbase.thirdparty.com.google.common.collect.Maps) CatalogJanitor(org.apache.hadoop.hbase.master.janitor.CatalogJanitor) ReplicationPeerManager(org.apache.hadoop.hbase.master.replication.ReplicationPeerManager) Collection(java.util.Collection) HBaseInterfaceAudience(org.apache.hadoop.hbase.HBaseInterfaceAudience) ZKWatcher(org.apache.hadoop.hbase.zookeeper.ZKWatcher) Collectors(java.util.stream.Collectors) SnapshotQuotaObserverChore(org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore) Objects(java.util.Objects) FutureUtils(org.apache.hadoop.hbase.util.FutureUtils) UnknownRegionException(org.apache.hadoop.hbase.UnknownRegionException) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) MasterAddressTracker(org.apache.hadoop.hbase.zookeeper.MasterAddressTracker) LogCleaner(org.apache.hadoop.hbase.master.cleaner.LogCleaner) RollingUpgradeChore(org.apache.hadoop.hbase.master.migrate.RollingUpgradeChore) MasterStoppedException(org.apache.hadoop.hbase.exceptions.MasterStoppedException) HConstants(org.apache.hadoop.hbase.HConstants) QuotaTableUtil(org.apache.hadoop.hbase.quotas.QuotaTableUtil) HBASE_MASTER_LOGCLEANER_PLUGINS(org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS) QuotaUtil(org.apache.hadoop.hbase.quotas.QuotaUtil) TransitPeerSyncReplicationStateProcedure(org.apache.hadoop.hbase.master.replication.TransitPeerSyncReplicationStateProcedure) LinkedList(java.util.LinkedList) RemovePeerProcedure(org.apache.hadoop.hbase.master.replication.RemovePeerProcedure) 
DeleteNamespaceProcedure(org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure) Threads(org.apache.hadoop.hbase.util.Threads) BalanceSwitchMode(org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode) MasterQuotaManager(org.apache.hadoop.hbase.quotas.MasterQuotaManager) Bytes(org.apache.hadoop.hbase.util.Bytes) RegionStates(org.apache.hadoop.hbase.master.assignment.RegionStates) Logger(org.slf4j.Logger) KeeperException(org.apache.zookeeper.KeeperException) DeleteTableProcedure(org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure) BalanceRequest(org.apache.hadoop.hbase.client.BalanceRequest) MasterSwitchType(org.apache.hadoop.hbase.client.MasterSwitchType) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) ProcedureSyncWait(org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait) MaintenanceLoadBalancer(org.apache.hadoop.hbase.master.balancer.MaintenanceLoadBalancer) Lists(org.apache.hbase.thirdparty.com.google.common.collect.Lists) RequestConverter(org.apache.hadoop.hbase.shaded.protobuf.RequestConverter) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) ZKClusterId(org.apache.hadoop.hbase.zookeeper.ZKClusterId) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) TransitRegionStateProcedure(org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure) HBaseServerBase(org.apache.hadoop.hbase.HBaseServerBase) ServerCrashProcedure(org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure) Comparator(java.util.Comparator) MasterDumpServlet(org.apache.hadoop.hbase.master.http.MasterDumpServlet) Arrays(java.util.Arrays) NormalizeTableFilterParams(org.apache.hadoop.hbase.client.NormalizeTableFilterParams) UpdatePeerConfigProcedure(org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure) ProcedureStore(org.apache.hadoop.hbase.procedure2.store.ProcedureStore) InetAddress(java.net.InetAddress) InvalidFamilyOperationException(org.apache.hadoop.hbase.InvalidFamilyOperationException) SpaceViolationPolicy(org.apache.hadoop.hbase.quotas.SpaceViolationPolicy) MasterProcedureScheduler(org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler) SecurityConstants(org.apache.hadoop.hbase.security.SecurityConstants) RegionNormalizerManager(org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager) MasterProcedureConstants(org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants) Cell(org.apache.hadoop.hbase.Cell) SpaceQuotaSnapshot(org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot) HttpServlet(javax.servlet.http.HttpServlet) ZKUtil(org.apache.hadoop.hbase.zookeeper.ZKUtil) SnapshotCleanupTracker(org.apache.hadoop.hbase.zookeeper.SnapshotCleanupTracker) Set(java.util.Set) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) ReplicationHFileCleaner(org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors) InvocationTargetException(java.lang.reflect.InvocationTargetException) IdLock(org.apache.hadoop.hbase.util.IdLock) ClusterMetricsBuilder(org.apache.hadoop.hbase.ClusterMetricsBuilder) NonceProcedureRunnable(org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable) ClusterId(org.apache.hadoop.hbase.ClusterId) Superusers(org.apache.hadoop.hbase.security.Superusers) AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager) 
ServerTaskBuilder(org.apache.hadoop.hbase.ServerTaskBuilder) DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK(org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK) AbstractPeerProcedure(org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) InterruptedIOException(java.io.InterruptedIOException) RegionStateNode(org.apache.hadoop.hbase.master.assignment.RegionStateNode) ArrayList(java.util.ArrayList) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) MasterProcedureUtil(org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil) HFileArchiveUtil(org.apache.hadoop.hbase.util.HFileArchiveUtil) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) SnapshotManager(org.apache.hadoop.hbase.master.snapshot.SnapshotManager) InfoServer(org.apache.hadoop.hbase.http.InfoServer) JVMClusterUtil(org.apache.hadoop.hbase.util.JVMClusterUtil) ServerName(org.apache.hadoop.hbase.ServerName) SyncReplicationState(org.apache.hadoop.hbase.replication.SyncReplicationState) RetryCounter(org.apache.hadoop.hbase.util.RetryCounter) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) RegionStatesCount(org.apache.hadoop.hbase.client.RegionStatesCount) Scan(org.apache.hadoop.hbase.client.Scan) HBaseFsck(org.apache.hadoop.hbase.util.HBaseFsck) HttpServer(org.apache.hadoop.hbase.http.HttpServer) BalanceResponse(org.apache.hadoop.hbase.client.BalanceResponse) MasterFlushTableProcedureManager(org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager) SpaceQuotaStatus(org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus) CatalogFamilyFormat(org.apache.hadoop.hbase.CatalogFamilyFormat) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) RegionNormalizerTracker(org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker) LoggerFactory(org.slf4j.LoggerFactory) TimeoutException(java.util.concurrent.TimeoutException) MasterRedirectServlet(org.apache.hadoop.hbase.master.http.MasterRedirectServlet) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) ClusterStatusChore(org.apache.hadoop.hbase.master.balancer.ClusterStatusChore) ReplicationPeerNotFoundException(org.apache.hadoop.hbase.ReplicationPeerNotFoundException) DirScanPool(org.apache.hadoop.hbase.master.cleaner.DirScanPool) RemoteProcedure(org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure) MetaLocationSyncer(org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer) ReplicationBarrierCleaner(org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner) RegionProcedureStore(org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore) SyncReplicationReplayWALManager(org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager) Path(org.apache.hadoop.fs.Path) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) CoprocessorRpcUtils(org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils) RestrictedApi(com.google.errorprone.annotations.RestrictedApi) MasterQuotasObserver(org.apache.hadoop.hbase.quotas.MasterQuotasObserver) QuotaObserverChore(org.apache.hadoop.hbase.quotas.QuotaObserverChore) InetSocketAddress(java.net.InetSocketAddress) List(java.util.List) CompactionState(org.apache.hadoop.hbase.client.CompactionState) BalancerChore(org.apache.hadoop.hbase.master.balancer.BalancerChore) PleaseRestartMasterException(org.apache.hadoop.hbase.PleaseRestartMasterException) 
SpaceQuotaSnapshotNotifierFactory(org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory) EnvironmentEdgeManager(org.apache.hadoop.hbase.util.EnvironmentEdgeManager) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) MetaTableAccessor(org.apache.hadoop.hbase.MetaTableAccessor) RegionInfoBuilder(org.apache.hadoop.hbase.client.RegionInfoBuilder) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) InitMetaProcedure(org.apache.hadoop.hbase.master.procedure.InitMetaProcedure) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription) MasterNotRunningException(org.apache.hadoop.hbase.MasterNotRunningException) ModifyRegionUtils(org.apache.hadoop.hbase.util.ModifyRegionUtils) NamespaceDescriptor(org.apache.hadoop.hbase.NamespaceDescriptor) HashMap(java.util.HashMap) TableState(org.apache.hadoop.hbase.client.TableState) LoadBalancerFactory(org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory) LockedResource(org.apache.hadoop.hbase.procedure2.LockedResource) SnapshotCleanerChore(org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore) RegionNormalizerFactory(org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory) RpcServer(org.apache.hadoop.hbase.ipc.RpcServer) ReplicationUtils(org.apache.hadoop.hbase.replication.ReplicationUtils) MasterRegion(org.apache.hadoop.hbase.master.region.MasterRegion) RSGroupUtil(org.apache.hadoop.hbase.rsgroup.RSGroupUtil) Iterator(java.util.Iterator) HBaseMarkers(org.apache.hadoop.hbase.log.HBaseMarkers) ProcedurePrepareLatch(org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch) VersionInfo(org.apache.hadoop.hbase.util.VersionInfo) Put(org.apache.hadoop.hbase.client.Put) FavoredNodesManager(org.apache.hadoop.hbase.favored.FavoredNodesManager) CreateTableProcedure(org.apache.hadoop.hbase.master.procedure.CreateTableProcedure) ProcedureEvent(org.apache.hadoop.hbase.procedure2.ProcedureEvent) MemoryBoundedLogMessageBuffer(org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer) TimeUnit(java.util.concurrent.TimeUnit) ReplicationLogCleaner(org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner) DisableTableProcedure(org.apache.hadoop.hbase.master.procedure.DisableTableProcedure) Addressing(org.apache.hadoop.hbase.util.Addressing) Collections(java.util.Collections) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) MobFileCompactionChore(org.apache.hadoop.hbase.mob.MobFileCompactionChore)
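
The stream pipeline near the top of the method (groupingBy concrete class, then filtering the unfinished procedures of one type) is worth isolating. A minimal, self-contained sketch of the same idiom, with hypothetical Proc, TransitProc, and CrashProc types standing in for HBase's procedure classes:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupByClassSketch {
    interface Proc { boolean isFinished(); }
    static class TransitProc implements Proc { public boolean isFinished() { return false; } }
    static class CrashProc implements Proc { public boolean isFinished() { return true; } }

    public static void main(String[] args) {
        List<Proc> active = List.of(new TransitProc(), new CrashProc(), new TransitProc());
        // Bucket active procedures by their concrete class, as procsByType does above.
        Map<Class<?>, List<Proc>> byType =
            active.stream().collect(Collectors.groupingBy(p -> p.getClass()));
        // Pull out the unfinished procedures of one type, as the ritList construction does.
        List<TransitProc> rit = byType.getOrDefault(TransitProc.class, List.of()).stream()
            .filter(p -> !p.isFinished())
            .map(p -> (TransitProc) p)
            .collect(Collectors.toList());
        System.out.println(rit.size()); // prints 2
    }
}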

Example 3 with Server

Use of org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server in project hbase by apache.

From the class MockHttpApiRule, method before.

@Override
protected void before() throws Exception {
    handler = new MockHandler();
    server = new Server();
    final ServerConnector http = new ServerConnector(server);
    http.setHost("localhost");
    server.addConnector(http);
    server.setStopAtShutdown(true);
    server.setHandler(handler);
    server.setRequestLog(buildRequestLog());
    server.start();
}
Also used : ServerConnector(org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector) Server(org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server)
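
Since before() never sets a port, a test has to ask Jetty which ephemeral port it bound. A minimal sketch, assuming stock org.eclipse.jetty packages rather than the shaded ones:

import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;

public class EphemeralPortSketch {
    public static void main(String[] args) throws Exception {
        Server server = new Server();
        ServerConnector http = new ServerConnector(server);
        http.setHost("localhost"); // no setPort(...): Jetty picks an ephemeral port
        server.addConnector(http);
        server.start();
        // Valid only after start(); this is the port tests would aim requests at.
        int port = http.getLocalPort();
        System.out.println("listening on " + port);
        server.stop();
    }
}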

Example 4 with Server

Use of org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server in project hbase by apache.

From the class ThriftServer, method setupServer.

/**
 * Sets up the Thrift TServer.
 */
protected void setupServer() throws Exception {
    // Construct correct ProtocolFactory
    TProtocolFactory protocolFactory = getProtocolFactory();
    ImplType implType = ImplType.getServerImpl(conf);
    TProcessor processorToUse = processor;
    // Construct correct TransportFactory
    TTransportFactory transportFactory;
    if (conf.getBoolean(FRAMED_CONF_KEY, FRAMED_CONF_DEFAULT) || implType.isAlwaysFramed) {
        if (qop != null) {
            throw new RuntimeException("Thrift server authentication" + " doesn't work with framed transport yet");
        }
        transportFactory = new TFramedTransport.Factory(conf.getInt(MAX_FRAME_SIZE_CONF_KEY, MAX_FRAME_SIZE_CONF_DEFAULT) * 1024 * 1024);
        LOG.debug("Using framed transport");
    } else if (qop == null) {
        transportFactory = new TTransportFactory();
    } else {
        // Extract the name from the principal
        String thriftKerberosPrincipal = conf.get(THRIFT_KERBEROS_PRINCIPAL_KEY);
        if (thriftKerberosPrincipal == null) {
            throw new IllegalArgumentException(THRIFT_KERBEROS_PRINCIPAL_KEY + " cannot be null");
        }
        String name = SecurityUtil.getUserFromPrincipal(thriftKerberosPrincipal);
        Map<String, String> saslProperties = SaslUtil.initSaslProperties(qop.name());
        TSaslServerTransport.Factory saslFactory = new TSaslServerTransport.Factory();
        saslFactory.addServerDefinition("GSSAPI", name, host, saslProperties, new SaslRpcServer.SaslGssCallbackHandler() {

            @Override
            public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
                AuthorizeCallback ac = null;
                for (Callback callback : callbacks) {
                    if (callback instanceof AuthorizeCallback) {
                        ac = (AuthorizeCallback) callback;
                    } else {
                        throw new UnsupportedCallbackException(callback, "Unrecognized SASL GSSAPI Callback");
                    }
                }
                if (ac != null) {
                    String authid = ac.getAuthenticationID();
                    String authzid = ac.getAuthorizationID();
                    if (!authid.equals(authzid)) {
                        ac.setAuthorized(false);
                    } else {
                        ac.setAuthorized(true);
                        String userName = SecurityUtil.getUserFromPrincipal(authzid);
                        LOG.info("Effective user: {}", userName);
                        ac.setAuthorizedID(userName);
                    }
                }
            }
        });
        transportFactory = saslFactory;
        // Create a processor wrapper, to get the caller
        processorToUse = (inProt, outProt) -> {
            TSaslServerTransport saslServerTransport = (TSaslServerTransport) inProt.getTransport();
            SaslServer saslServer = saslServerTransport.getSaslServer();
            String principal = saslServer.getAuthorizationID();
            hbaseServiceHandler.setEffectiveUser(principal);
            processor.process(inProt, outProt);
        };
    }
    if (conf.get(BIND_CONF_KEY) != null && !implType.canSpecifyBindIP) {
        LOG.error("Server types {} don't support IP address binding at the moment. See " + "https://issues.apache.org/jira/browse/HBASE-2155 for details.", Joiner.on(", ").join(ImplType.serversThatCannotSpecifyBindIP()));
        throw new RuntimeException("-" + BIND_CONF_KEY + " not supported with " + implType);
    }
    InetSocketAddress inetSocketAddress = new InetSocketAddress(getBindAddress(conf), listenPort);
    if (implType == ImplType.HS_HA || implType == ImplType.NONBLOCKING || implType == ImplType.THREADED_SELECTOR) {
        TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(inetSocketAddress);
        if (implType == ImplType.NONBLOCKING) {
            tserver = getTNonBlockingServer(serverTransport, protocolFactory, processorToUse, transportFactory, inetSocketAddress);
        } else if (implType == ImplType.HS_HA) {
            tserver = getTHsHaServer(serverTransport, protocolFactory, processorToUse, transportFactory, inetSocketAddress);
        } else {
            // THREADED_SELECTOR
            tserver = getTThreadedSelectorServer(serverTransport, protocolFactory, processorToUse, transportFactory, inetSocketAddress);
        }
        LOG.info("starting HBase {} server on {}", implType.simpleClassName(), Integer.toString(listenPort));
    } else if (implType == ImplType.THREAD_POOL) {
        this.tserver = getTThreadPoolServer(protocolFactory, processorToUse, transportFactory, inetSocketAddress);
    } else {
        throw new AssertionError("Unsupported Thrift server implementation: " + implType.simpleClassName());
    }
    // A sanity check that we instantiated the right type of server.
    if (tserver.getClass() != implType.serverClass) {
        throw new AssertionError("Expected to create Thrift server class " + implType.serverClass.getName() + " but got " + tserver.getClass().getName());
    }
}
Also used : THRIFT_SERVER_SOCKET_READ_TIMEOUT_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SERVER_SOCKET_READ_TIMEOUT_KEY) UserProvider(org.apache.hadoop.hbase.security.UserProvider) Server(org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server) TThreadedSelectorServer(org.apache.thrift.server.TThreadedSelectorServer) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) UnsupportedCallbackException(javax.security.auth.callback.UnsupportedCallbackException) SslConnectionFactory(org.apache.hbase.thirdparty.org.eclipse.jetty.server.SslConnectionFactory) TServer(org.apache.thrift.server.TServer) TBinaryProtocol(org.apache.thrift.protocol.TBinaryProtocol) THRIFT_SUPPORT_PROXYUSER_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SUPPORT_PROXYUSER_KEY) InetAddress(java.net.InetAddress) THRIFT_INFO_SERVER_BINDING_ADDRESS(org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_BINDING_ADDRESS) HttpConfiguration(org.apache.hbase.thirdparty.org.eclipse.jetty.server.HttpConfiguration) THRIFT_KEYTAB_FILE_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_KEYTAB_FILE_KEY) ProxyUsers(org.apache.hadoop.security.authorize.ProxyUsers) DefaultParser(org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser) MIN_WORKERS_OPTION(org.apache.hadoop.hbase.thrift.Constants.MIN_WORKERS_OPTION) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) ParseFilter(org.apache.hadoop.hbase.filter.ParseFilter) BACKLOG_CONF_DEAFULT(org.apache.hadoop.hbase.thrift.Constants.BACKLOG_CONF_DEAFULT) THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SERVER_SOCKET_READ_TIMEOUT_DEFAULT) TCompactProtocol(org.apache.thrift.protocol.TCompactProtocol) Joiner(org.apache.hbase.thirdparty.com.google.common.base.Joiner) HTTP_MAX_THREADS_KEY(org.apache.hadoop.hbase.thrift.Constants.HTTP_MAX_THREADS_KEY) HTTP_MIN_THREADS_KEY(org.apache.hadoop.hbase.thrift.Constants.HTTP_MIN_THREADS_KEY) HttpServerUtil(org.apache.hadoop.hbase.http.HttpServerUtil) ExitCodeException(org.apache.hadoop.util.Shell.ExitCodeException) HttpConnectionFactory(org.apache.hbase.thirdparty.org.eclipse.jetty.server.HttpConnectionFactory) TNonblockingServerTransport(org.apache.thrift.transport.TNonblockingServerTransport) THRIFT_INFO_SERVER_BINDING_ADDRESS_DEFAULT(org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_BINDING_ADDRESS_DEFAULT) BlockingQueue(java.util.concurrent.BlockingQueue) READ_TIMEOUT_OPTION(org.apache.hadoop.hbase.thrift.Constants.READ_TIMEOUT_OPTION) Strings(org.apache.hadoop.hbase.util.Strings) MAX_FRAME_SIZE_CONF_DEFAULT(org.apache.hadoop.hbase.thrift.Constants.MAX_FRAME_SIZE_CONF_DEFAULT) JvmPauseMonitor(org.apache.hadoop.hbase.util.JvmPauseMonitor) QueuedThreadPool(org.apache.hbase.thirdparty.org.eclipse.jetty.util.thread.QueuedThreadPool) COMPACT_CONF_KEY(org.apache.hadoop.hbase.thrift.Constants.COMPACT_CONF_KEY) Callback(javax.security.auth.callback.Callback) TProcessor(org.apache.thrift.TProcessor) MAX_FRAME_SIZE_CONF_KEY(org.apache.hadoop.hbase.thrift.Constants.MAX_FRAME_SIZE_CONF_KEY) TServlet(org.apache.thrift.server.TServlet) Options(org.apache.hbase.thirdparty.org.apache.commons.cli.Options) DEFAULT_HTTP_MAX_HEADER_SIZE(org.apache.hadoop.hbase.thrift.Constants.DEFAULT_HTTP_MAX_HEADER_SIZE) ServletContextHandler(org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletContextHandler) COMPACT_CONF_DEFAULT(org.apache.hadoop.hbase.thrift.Constants.COMPACT_CONF_DEFAULT) 
USE_HTTP_CONF_KEY(org.apache.hadoop.hbase.thrift.Constants.USE_HTTP_CONF_KEY) HTTP_MAX_THREADS_KEY_DEFAULT(org.apache.hadoop.hbase.thrift.Constants.HTTP_MAX_THREADS_KEY_DEFAULT) FRAMED_CONF_DEFAULT(org.apache.hadoop.hbase.thrift.Constants.FRAMED_CONF_DEFAULT) SaslUtil(org.apache.hadoop.hbase.security.SaslUtil) TSaslServerTransport(org.apache.thrift.transport.TSaslServerTransport) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) THRIFT_SSL_KEYSTORE_PASSWORD_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_KEYSTORE_PASSWORD_KEY) InfoServer(org.apache.hadoop.hbase.http.InfoServer) PORT_OPTION(org.apache.hadoop.hbase.thrift.Constants.PORT_OPTION) FRAMED_OPTION(org.apache.hadoop.hbase.thrift.Constants.FRAMED_OPTION) ServletHolder(org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder) KEEP_ALIVE_SEC_OPTION(org.apache.hadoop.hbase.thrift.Constants.KEEP_ALIVE_SEC_OPTION) IOException(java.io.IOException) HelpFormatter(org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter) HttpVersion(org.apache.hbase.thirdparty.org.eclipse.jetty.http.HttpVersion) THRIFT_SPNEGO_KEYTAB_FILE_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SPNEGO_KEYTAB_FILE_KEY) ServerConnector(org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector) TProtocolFactory(org.apache.thrift.protocol.TProtocolFactory) UnknownHostException(java.net.UnknownHostException) SELECTOR_NUM_OPTION(org.apache.hadoop.hbase.thrift.Constants.SELECTOR_NUM_OPTION) CommandLineParser(org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser) HTTP_MIN_THREADS_KEY_DEFAULT(org.apache.hadoop.hbase.thrift.Constants.HTTP_MIN_THREADS_KEY_DEFAULT) THRIFT_SSL_KEYSTORE_TYPE_DEFAULT(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_KEYSTORE_TYPE_DEFAULT) CommandLine(org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine) THRIFT_SSL_EXCLUDE_PROTOCOLS_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_EXCLUDE_PROTOCOLS_KEY) INFOPORT_OPTION(org.apache.hadoop.hbase.thrift.Constants.INFOPORT_OPTION) THsHaServer(org.apache.thrift.server.THsHaServer) THRIFT_INFO_SERVER_PORT(org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_PORT) LoggerFactory(org.slf4j.LoggerFactory) PORT_CONF_KEY(org.apache.hadoop.hbase.thrift.Constants.PORT_CONF_KEY) Splitter(org.apache.hbase.thirdparty.com.google.common.base.Splitter) COMPACT_OPTION(org.apache.hadoop.hbase.thrift.Constants.COMPACT_OPTION) THRIFT_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT(org.apache.hadoop.hbase.thrift.Constants.THRIFT_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT) THRIFT_SSL_INCLUDE_PROTOCOLS_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_INCLUDE_PROTOCOLS_KEY) SslContextFactory(org.apache.hbase.thirdparty.org.eclipse.jetty.util.ssl.SslContextFactory) FRAMED_CONF_KEY(org.apache.hadoop.hbase.thrift.Constants.FRAMED_CONF_KEY) Configured(org.apache.hadoop.conf.Configured) Hbase(org.apache.hadoop.hbase.thrift.generated.Hbase) TFramedTransport(org.apache.thrift.transport.layered.TFramedTransport) THRIFT_SSL_KEYSTORE_KEYPASSWORD_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_KEYSTORE_KEYPASSWORD_KEY) THRIFT_FILTERS(org.apache.hadoop.hbase.thrift.Constants.THRIFT_FILTERS) TNonblockingServer(org.apache.thrift.server.TNonblockingServer) DEFAULT_BIND_ADDR(org.apache.hadoop.hbase.thrift.Constants.DEFAULT_BIND_ADDR) HBaseInterfaceAudience(org.apache.hadoop.hbase.HBaseInterfaceAudience) PrivilegedAction(java.security.PrivilegedAction) InetSocketAddress(java.net.InetSocketAddress) 
LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Tool(org.apache.hadoop.util.Tool) THRIFT_SSL_KEYSTORE_STORE_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_KEYSTORE_STORE_KEY) TServerTransport(org.apache.thrift.transport.TServerTransport) DEFAULT_LISTEN_PORT(org.apache.hadoop.hbase.thrift.Constants.DEFAULT_LISTEN_PORT) List(java.util.List) MAX_WORKERS_OPTION(org.apache.hadoop.hbase.thrift.Constants.MAX_WORKERS_OPTION) AuthorizeCallback(javax.security.sasl.AuthorizeCallback) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) EnvironmentEdgeManager(org.apache.hadoop.hbase.util.EnvironmentEdgeManager) MAX_QUEUE_SIZE_OPTION(org.apache.hadoop.hbase.thrift.Constants.MAX_QUEUE_SIZE_OPTION) THRIFT_SSL_KEYSTORE_TYPE_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_KEYSTORE_TYPE_KEY) TTransportFactory(org.apache.thrift.transport.TTransportFactory) DNS(org.apache.hadoop.hbase.util.DNS) THRIFT_KERBEROS_PRINCIPAL_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_KERBEROS_PRINCIPAL_KEY) THRIFT_DNS_NAMESERVER_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_DNS_NAMESERVER_KEY) THRIFT_SPNEGO_PRINCIPAL_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SPNEGO_PRINCIPAL_KEY) THRIFT_SSL_INCLUDE_CIPHER_SUITES_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_INCLUDE_CIPHER_SUITES_KEY) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) THRIFT_INFO_SERVER_PORT_DEFAULT(org.apache.hadoop.hbase.thrift.Constants.THRIFT_INFO_SERVER_PORT_DEFAULT) ArrayUtils(org.apache.commons.lang3.ArrayUtils) SaslRpcServer(org.apache.hadoop.security.SaslRpcServer) TNonblockingServerSocket(org.apache.thrift.transport.TNonblockingServerSocket) THRIFT_SSL_ENABLED_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_ENABLED_KEY) SecurityUtil(org.apache.hadoop.hbase.security.SecurityUtil) THRIFT_QOP_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_QOP_KEY) THRIFT_HTTP_ALLOW_OPTIONS_METHOD(org.apache.hadoop.hbase.thrift.Constants.THRIFT_HTTP_ALLOW_OPTIONS_METHOD) ExecutorService(java.util.concurrent.ExecutorService) THRIFT_SSL_EXCLUDE_CIPHER_SUITES_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_EXCLUDE_CIPHER_SUITES_KEY) Logger(org.slf4j.Logger) HBaseMarkers(org.apache.hadoop.hbase.log.HBaseMarkers) VersionInfo(org.apache.hadoop.hbase.util.VersionInfo) THRIFT_DNS_INTERFACE_KEY(org.apache.hadoop.hbase.thrift.Constants.THRIFT_DNS_INTERFACE_KEY) ToolRunner(org.apache.hadoop.util.ToolRunner) THRIFT_SELECTOR_NUM(org.apache.hadoop.hbase.thrift.Constants.THRIFT_SELECTOR_NUM) TimeUnit(java.util.concurrent.TimeUnit) TServerSocket(org.apache.thrift.transport.TServerSocket) SecureRequestCustomizer(org.apache.hbase.thirdparty.org.eclipse.jetty.server.SecureRequestCustomizer) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) BIND_OPTION(org.apache.hadoop.hbase.thrift.Constants.BIND_OPTION) BACKLOG_CONF_KEY(org.apache.hadoop.hbase.thrift.Constants.BACKLOG_CONF_KEY) BIND_CONF_KEY(org.apache.hadoop.hbase.thrift.Constants.BIND_CONF_KEY) SaslServer(javax.security.sasl.SaslServer)

Example 5 with Server

use of org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server in project hbase by apache.

the class ThriftServer method setupHTTPServer.

/**
 * Set up an HTTP server using Jetty to serve calls from THttpClient
 *
 * @throws IOException if the HTTP server cannot be set up
 */
protected void setupHTTPServer() throws IOException {
    TProtocolFactory protocolFactory = new TBinaryProtocol.Factory();
    TServlet thriftHttpServlet = createTServlet(protocolFactory);
    // Set the default max thread number to 100 to limit
    // the number of concurrent requests so that the Thrift HTTP server doesn't OOM easily.
    // Jetty sets the default max thread number to 250 if we don't set it.
    //
    // Our default min thread number 2 is the same as that used by Jetty.
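    // If the HTTP-specific thread keys are unset, fall back to the binary
    // server's TBoundedThreadPoolServer worker-thread settings before the defaults.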
    int minThreads = conf.getInt(HTTP_MIN_THREADS_KEY, conf.getInt(TBoundedThreadPoolServer.MIN_WORKER_THREADS_CONF_KEY, HTTP_MIN_THREADS_KEY_DEFAULT));
    int maxThreads = conf.getInt(HTTP_MAX_THREADS_KEY, conf.getInt(TBoundedThreadPoolServer.MAX_WORKER_THREADS_CONF_KEY, HTTP_MAX_THREADS_KEY_DEFAULT));
    QueuedThreadPool threadPool = new QueuedThreadPool(maxThreads);
    threadPool.setMinThreads(minThreads);
    httpServer = new Server(threadPool);
    // Context handler
    ServletContextHandler ctxHandler = new ServletContextHandler(httpServer, "/", ServletContextHandler.SESSIONS);
    ctxHandler.addServlet(new ServletHolder(thriftHttpServlet), "/*");
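    // Reject TRACE requests (and OPTIONS too, unless explicitly allowed) at the context level.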
    HttpServerUtil.constrainHttpMethods(ctxHandler, conf.getBoolean(THRIFT_HTTP_ALLOW_OPTIONS_METHOD, THRIFT_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT));
    // set up Jetty and run the embedded server
    HttpConfiguration httpConfig = new HttpConfiguration();
    httpConfig.setSecureScheme("https");
    httpConfig.setSecurePort(listenPort);
    httpConfig.setHeaderCacheSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
    httpConfig.setRequestHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
    httpConfig.setResponseHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
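    // Avoid advertising the Jetty version or emitting a Date header in responses.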
    httpConfig.setSendServerVersion(false);
    httpConfig.setSendDateHeader(false);
    ServerConnector serverConnector;
    if (conf.getBoolean(THRIFT_SSL_ENABLED_KEY, false)) {
        HttpConfiguration httpsConfig = new HttpConfiguration(httpConfig);
        httpsConfig.addCustomizer(new SecureRequestCustomizer());
        SslContextFactory sslCtxFactory = new SslContextFactory();
        String keystore = conf.get(THRIFT_SSL_KEYSTORE_STORE_KEY);
        String password = HBaseConfiguration.getPassword(conf, THRIFT_SSL_KEYSTORE_PASSWORD_KEY, null);
        String keyPassword = HBaseConfiguration.getPassword(conf, THRIFT_SSL_KEYSTORE_KEYPASSWORD_KEY, password);
        sslCtxFactory.setKeyStorePath(keystore);
        sslCtxFactory.setKeyStorePassword(password);
        sslCtxFactory.setKeyManagerPassword(keyPassword);
        sslCtxFactory.setKeyStoreType(conf.get(THRIFT_SSL_KEYSTORE_TYPE_KEY, THRIFT_SSL_KEYSTORE_TYPE_DEFAULT));
        String[] excludeCiphers = conf.getStrings(THRIFT_SSL_EXCLUDE_CIPHER_SUITES_KEY, ArrayUtils.EMPTY_STRING_ARRAY);
        if (excludeCiphers.length != 0) {
            sslCtxFactory.setExcludeCipherSuites(excludeCiphers);
        }
        String[] includeCiphers = conf.getStrings(THRIFT_SSL_INCLUDE_CIPHER_SUITES_KEY, ArrayUtils.EMPTY_STRING_ARRAY);
        if (includeCiphers.length != 0) {
            sslCtxFactory.setIncludeCipherSuites(includeCiphers);
        }
        // Disable SSLv3 by default due to "Poodle" Vulnerability - CVE-2014-3566
        String[] excludeProtocols = conf.getStrings(THRIFT_SSL_EXCLUDE_PROTOCOLS_KEY, "SSLv3");
        if (excludeProtocols.length != 0) {
            sslCtxFactory.setExcludeProtocols(excludeProtocols);
        }
        String[] includeProtocols = conf.getStrings(THRIFT_SSL_INCLUDE_PROTOCOLS_KEY, ArrayUtils.EMPTY_STRING_ARRAY);
        if (includeProtocols.length != 0) {
            sslCtxFactory.setIncludeProtocols(includeProtocols);
        }
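        // Chain TLS in front of HTTP/1.1: the SslConnectionFactory decrypts the
        // stream, then hands it to the HttpConnectionFactory built from httpsConfig.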
        serverConnector = new ServerConnector(httpServer, new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), new HttpConnectionFactory(httpsConfig));
    } else {
        serverConnector = new ServerConnector(httpServer, new HttpConnectionFactory(httpConfig));
    }
    serverConnector.setPort(listenPort);
    serverConnector.setHost(getBindAddress(conf).getHostAddress());
    httpServer.addConnector(serverConnector);
    httpServer.setStopAtShutdown(true);
    if (doAsEnabled) {
        ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    }
    LOG.info("Starting Thrift HTTP Server on {}", listenPort);
}
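For context, a minimal client-side sketch of calling a server set up this way; this is not from the HBase source, and the localhost endpoint with port 9090 (HBase's default Thrift listen port) is an assumption. Since setupHTTPServer installs TBinaryProtocol.Factory, the client must also speak TBinaryProtocol:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.THttpClient;

public class ThriftHttpClientSketch {
    public static void main(String[] args) throws Exception {
        // Assumed endpoint; adjust host and port to the server's bind address.
        THttpClient transport = new THttpClient("http://localhost:9090");
        transport.open();
        try {
            // Must match the TBinaryProtocol.Factory installed by the server.
            Hbase.Client client = new Hbase.Client(new TBinaryProtocol(transport));
            for (ByteBuffer table : client.getTableNames()) {
                System.out.println(StandardCharsets.UTF_8.decode(table).toString());
            }
        } finally {
            transport.close();
        }
    }
}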
Also used : TProtocolFactory(org.apache.thrift.protocol.TProtocolFactory) SecureRequestCustomizer(org.apache.hbase.thirdparty.org.eclipse.jetty.server.SecureRequestCustomizer) Server(org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server) TThreadedSelectorServer(org.apache.thrift.server.TThreadedSelectorServer) TServer(org.apache.thrift.server.TServer) InfoServer(org.apache.hadoop.hbase.http.InfoServer) THsHaServer(org.apache.thrift.server.THsHaServer) TNonblockingServer(org.apache.thrift.server.TNonblockingServer) SaslRpcServer(org.apache.hadoop.security.SaslRpcServer) SaslServer(javax.security.sasl.SaslServer) HttpConnectionFactory(org.apache.hbase.thirdparty.org.eclipse.jetty.server.HttpConnectionFactory) ServletHolder(org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder) SslConnectionFactory(org.apache.hbase.thirdparty.org.eclipse.jetty.server.SslConnectionFactory) LoggerFactory(org.slf4j.LoggerFactory) SslContextFactory(org.apache.hbase.thirdparty.org.eclipse.jetty.util.ssl.SslContextFactory) TTransportFactory(org.apache.thrift.transport.TTransportFactory) HttpConfiguration(org.apache.hbase.thirdparty.org.eclipse.jetty.server.HttpConfiguration) TServlet(org.apache.thrift.server.TServlet) ServerConnector(org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector) QueuedThreadPool(org.apache.hbase.thirdparty.org.eclipse.jetty.util.thread.QueuedThreadPool) ServletContextHandler(org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletContextHandler)
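To exercise the SSL branch above, the relevant keys can be set on the Configuration before the server starts. A hedged sketch, reusing only the Constants already imported by this example (the keystore path and password are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_ENABLED_KEY;
import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_EXCLUDE_PROTOCOLS_KEY;
import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_KEYSTORE_PASSWORD_KEY;
import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_SSL_KEYSTORE_STORE_KEY;

public class ThriftSslConfigSketch {
    public static Configuration sslEnabledConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean(THRIFT_SSL_ENABLED_KEY, true);
        // Placeholder keystore path; point this at a real JKS/PKCS12 store.
        conf.set(THRIFT_SSL_KEYSTORE_STORE_KEY, "/etc/hbase/conf/thrift-keystore.jks");
        // Read via HBaseConfiguration.getPassword(), so a Hadoop credential
        // provider can supply this instead of plain text.
        conf.set(THRIFT_SSL_KEYSTORE_PASSWORD_KEY, "changeit");
        // Widen the default exclusion (SSLv3) to also drop TLSv1.
        conf.setStrings(THRIFT_SSL_EXCLUDE_PROTOCOLS_KEY, "SSLv3", "TLSv1");
        return conf;
    }
}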

Aggregations

Server (org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server) 6
ServerConnector (org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector) 6
InfoServer (org.apache.hadoop.hbase.http.InfoServer) 5
ServletHolder (org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder) 5
IOException (java.io.IOException) 3
UnknownHostException (java.net.UnknownHostException) 3
HttpConfiguration (org.apache.hbase.thirdparty.org.eclipse.jetty.server.HttpConfiguration) 3
HttpConnectionFactory (org.apache.hbase.thirdparty.org.eclipse.jetty.server.HttpConnectionFactory) 3
SecureRequestCustomizer (org.apache.hbase.thirdparty.org.eclipse.jetty.server.SecureRequestCustomizer) 3
SslConnectionFactory (org.apache.hbase.thirdparty.org.eclipse.jetty.server.SslConnectionFactory) 3
ServletContextHandler (org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletContextHandler) 3
SslContextFactory (org.apache.hbase.thirdparty.org.eclipse.jetty.util.ssl.SslContextFactory) 3
QueuedThreadPool (org.apache.hbase.thirdparty.org.eclipse.jetty.util.thread.QueuedThreadPool) 3
LoggerFactory (org.slf4j.LoggerFactory) 3
InterruptedIOException (java.io.InterruptedIOException) 2
InvocationTargetException (java.lang.reflect.InvocationTargetException) 2
InetAddress (java.net.InetAddress) 2
InetSocketAddress (java.net.InetSocketAddress) 2
List (java.util.List) 2
Map (java.util.Map) 2