Example 1 with ExecutorStatusChore

Use of org.apache.hadoop.hbase.ExecutorStatusChore in project hbase by apache.

From the class HRegionServer, method startServices.

/**
 * Start maintenance threads, server, worker, and lease checker threads.
 * Start all threads we need to run. This is called after we've successfully
 * registered with the Master.
 * Install an UncaughtExceptionHandler that calls abort on the RegionServer if
 * we get an unhandled exception. We cannot set the handler on all threads.
 * The server's internal Listener thread is off limits. For the server, if an
 * OOME occurs, it waits a while and then retries. Meanwhile, a flush or a
 * compaction that tries to run should trigger the same critical condition and
 * the shutdown will run. On its way out, this server will shut down the
 * Server. Leases are somewhere in between: the lease manager has an internal
 * thread that, while it inherits from Chore, keeps its own stop mechanism and
 * so must be stopped by this hosting server. The worker logs the exception
 * and exits.
 */
private void startServices() throws IOException {
    if (!isStopped() && !isAborted()) {
        initializeThreads();
    }
    this.secureBulkLoadManager = new SecureBulkLoadManager(this.conf, asyncClusterConnection);
    this.secureBulkLoadManager.start();
    // Health checker thread.
    if (isHealthCheckerConfigured()) {
        int sleepTime = this.conf.getInt(HConstants.HEALTH_CHORE_WAKE_FREQ, HConstants.DEFAULT_THREAD_WAKE_FREQUENCY);
        healthCheckChore = new HealthCheckChore(sleepTime, this, getConfiguration());
    }
    // Executor status collect thread.
    if (this.conf.getBoolean(HConstants.EXECUTOR_STATUS_COLLECT_ENABLED, HConstants.DEFAULT_EXECUTOR_STATUS_COLLECT_ENABLED)) {
        int sleepTime = this.conf.getInt(ExecutorStatusChore.WAKE_FREQ, ExecutorStatusChore.DEFAULT_WAKE_FREQ);
        executorStatusChore = new ExecutorStatusChore(sleepTime, this, this.getExecutorService(), this.metricsRegionServer.getMetricsSource());
    }
    this.walRoller = new LogRoller(this);
    this.flushThroughputController = FlushThroughputControllerFactory.create(this, conf);
    this.procedureResultReporter = new RemoteProcedureResultReporter(this);
    // Create the CompactedFileDischarger chore executorService. This chore helps to
    // remove the compacted files that will no longer be used in reads.
    // Default is 2 mins. The default value for TTLCleaner is 5 mins so we set this to
    // 2 mins so that compacted files can be archived before the TTLCleaner runs
    int cleanerInterval = conf.getInt("hbase.hfile.compaction.discharger.interval", 2 * 60 * 1000);
    this.compactedFileDischarger = new CompactedHFilesDischarger(cleanerInterval, this, this);
    choreService.scheduleChore(compactedFileDischarger);
    // Start executor services
    final int openRegionThreads = conf.getInt("hbase.regionserver.executor.openregion.threads", 3);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_OPEN_REGION).setCorePoolSize(openRegionThreads));
    final int openMetaThreads = conf.getInt("hbase.regionserver.executor.openmeta.threads", 1);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_OPEN_META).setCorePoolSize(openMetaThreads));
    final int openPriorityRegionThreads = conf.getInt("hbase.regionserver.executor.openpriorityregion.threads", 3);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_OPEN_PRIORITY_REGION).setCorePoolSize(openPriorityRegionThreads));
    final int closeRegionThreads = conf.getInt("hbase.regionserver.executor.closeregion.threads", 3);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_CLOSE_REGION).setCorePoolSize(closeRegionThreads));
    final int closeMetaThreads = conf.getInt("hbase.regionserver.executor.closemeta.threads", 1);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_CLOSE_META).setCorePoolSize(closeMetaThreads));
    if (conf.getBoolean(StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE, false)) {
        final int storeScannerParallelSeekThreads = conf.getInt("hbase.storescanner.parallel.seek.threads", 10);
        executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_PARALLEL_SEEK).setCorePoolSize(storeScannerParallelSeekThreads).setAllowCoreThreadTimeout(true));
    }
    final int logReplayOpsThreads = conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_LOG_REPLAY_OPS).setCorePoolSize(logReplayOpsThreads).setAllowCoreThreadTimeout(true));
    // Start the threads for compacted files discharger
    final int compactionDischargerThreads = conf.getInt(CompactionConfiguration.HBASE_HFILE_COMPACTION_DISCHARGER_THREAD_COUNT, 10);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_COMPACTED_FILES_DISCHARGER).setCorePoolSize(compactionDischargerThreads));
    if (ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled(conf)) {
        final int regionReplicaFlushThreads = conf.getInt("hbase.regionserver.region.replica.flusher.threads", conf.getInt("hbase.regionserver.executor.openregion.threads", 3));
        executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_REGION_REPLICA_FLUSH_OPS).setCorePoolSize(regionReplicaFlushThreads));
    }
    final int refreshPeerThreads = conf.getInt("hbase.regionserver.executor.refresh.peer.threads", 2);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_REFRESH_PEER).setCorePoolSize(refreshPeerThreads));
    final int replaySyncReplicationWALThreads = conf.getInt("hbase.regionserver.executor.replay.sync.replication.wal.threads", 1);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_REPLAY_SYNC_REPLICATION_WAL).setCorePoolSize(replaySyncReplicationWALThreads));
    final int switchRpcThrottleThreads = conf.getInt("hbase.regionserver.executor.switch.rpc.throttle.threads", 1);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_SWITCH_RPC_THROTTLE).setCorePoolSize(switchRpcThrottleThreads));
    final int claimReplicationQueueThreads = conf.getInt("hbase.regionserver.executor.claim.replication.queue.threads", 1);
    executorService.startExecutorService(executorService.new ExecutorConfig().setExecutorType(ExecutorType.RS_CLAIM_REPLICATION_QUEUE).setCorePoolSize(claimReplicationQueueThreads));
    Threads.setDaemonThreadRunning(this.walRoller, getName() + ".logRoller", uncaughtExceptionHandler);
    if (this.cacheFlusher != null) {
        this.cacheFlusher.start(uncaughtExceptionHandler);
    }
    Threads.setDaemonThreadRunning(this.procedureResultReporter, getName() + ".procedureResultReporter", uncaughtExceptionHandler);
    if (this.compactionChecker != null) {
        choreService.scheduleChore(compactionChecker);
    }
    if (this.periodicFlusher != null) {
        choreService.scheduleChore(periodicFlusher);
    }
    if (this.healthCheckChore != null) {
        choreService.scheduleChore(healthCheckChore);
    }
    if (this.executorStatusChore != null) {
        choreService.scheduleChore(executorStatusChore);
    }
    if (this.nonceManagerChore != null) {
        choreService.scheduleChore(nonceManagerChore);
    }
    if (this.storefileRefresher != null) {
        choreService.scheduleChore(storefileRefresher);
    }
    if (this.fsUtilizationChore != null) {
        choreService.scheduleChore(fsUtilizationChore);
    }
    if (this.slowLogTableOpsChore != null) {
        choreService.scheduleChore(slowLogTableOpsChore);
    }
    if (this.brokenStoreFileCleaner != null) {
        choreService.scheduleChore(brokenStoreFileCleaner);
    }
    // Leases is not a Thread. Internally it runs a daemon thread. If it gets
    // an unhandled exception, it will just exit.
    Threads.setDaemonThreadRunning(this.leaseManager, getName() + ".leaseChecker", uncaughtExceptionHandler);
    // Create the log splitting worker and start it
    // set a smaller retries to fast fail otherwise splitlogworker could be blocked for
    // quite a while inside Connection layer. The worker won't be available for other
    // tasks even after current task is preempted after a split task times out.
    Configuration sinkConf = HBaseConfiguration.create(conf);
    // 8 retries take about 23 seconds
    sinkConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
        conf.getInt("hbase.log.replay.retries.number", 8));
    // default 30 seconds
    sinkConf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
        conf.getInt("hbase.log.replay.rpc.timeout", 30000));
    sinkConf.setInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER, 1);
    if (this.csm != null && conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) {
        // SplitLogWorker needs csm. If none, don't start this.
        this.splitLogWorker = new SplitLogWorker(sinkConf, this, this, walFactory);
        splitLogWorker.start();
        LOG.debug("SplitLogWorker started");
    }
    // Memstore services.
    startHeapMemoryManager();
    // Call it after starting HeapMemoryManager.
    initializeMemStoreChunkCreator(hMemManager);
}
Also used : ExecutorStatusChore(org.apache.hadoop.hbase.ExecutorStatusChore) Configuration(org.apache.hadoop.conf.Configuration) CompactionConfiguration(org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) HealthCheckChore(org.apache.hadoop.hbase.HealthCheckChore)
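
Every chore wired up in startServices() above follows the same pattern: construct it with a wake interval and the region server acting as its Stoppable, then hand it to the shared ChoreService, which drives its chore() method periodically. The sketch below isolates that pattern outside HRegionServer. It is a minimal, hedged illustration: ChoreService, ScheduledChore and Stoppable are real HBase types (assuming hbase-common is on the classpath), while HeartbeatChore and SimpleStopper are hypothetical names invented here.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSchedulingSketch {

    // Hypothetical stand-in for the region server, which is the Stoppable
    // passed to HealthCheckChore and ExecutorStatusChore in startServices().
    static final class SimpleStopper implements Stoppable {
        private volatile boolean stopped = false;
        @Override
        public void stop(String why) { stopped = true; }
        @Override
        public boolean isStopped() { return stopped; }
    }

    // Minimal chore: ChoreService calls chore() roughly every `period` ms,
    // the same way it drives the chores scheduled above.
    static final class HeartbeatChore extends ScheduledChore {
        HeartbeatChore(Stoppable stopper, int period) {
            super("heartbeatChore", stopper, period);
        }
        @Override
        protected void chore() {
            System.out.println("chore tick at " + System.currentTimeMillis());
        }
    }

    public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new SimpleStopper();
        ChoreService choreService = new ChoreService("sketch");
        choreService.scheduleChore(new HeartbeatChore(stopper, 1000));
        Thread.sleep(5000); // let the chore tick a few times
        stopper.stop("demo finished");
        choreService.shutdown(); // stops the chore thread pool
    }
}

In HRegionServer, the server itself plays the SimpleStopper role, and the wake interval comes from configuration keys such as ExecutorStatusChore.WAKE_FREQ, as shown above.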

Example 2 with ExecutorStatusChore

Use of org.apache.hadoop.hbase.ExecutorStatusChore in project hbase by apache.

From the class TestRSChoresScheduled, method testDefaultScheduledChores.

@Test
public void testDefaultScheduledChores() throws Exception {
    // test if compactedHFilesDischarger chore is scheduled by default in HRegionServer init
    TestChoreField<CompactedHFilesDischarger> compactedHFilesDischargerTestChoreField = new TestChoreField<>();
    CompactedHFilesDischarger compactedHFilesDischarger = compactedHFilesDischargerTestChoreField.getChoreObj("compactedFileDischarger");
    compactedHFilesDischargerTestChoreField.testIfChoreScheduled(compactedHFilesDischarger);
    // test if compactionChecker chore is scheduled by default in HRegionServer init
    TestChoreField<ScheduledChore> compactionCheckerTestChoreField = new TestChoreField<>();
    ScheduledChore compactionChecker = compactionCheckerTestChoreField.getChoreObj("compactionChecker");
    compactionCheckerTestChoreField.testIfChoreScheduled(compactionChecker);
    // test if periodicFlusher chore is scheduled by default in HRegionServer init
    TestChoreField<ScheduledChore> periodicMemstoreFlusherTestChoreField = new TestChoreField<>();
    ScheduledChore periodicFlusher = periodicMemstoreFlusherTestChoreField.getChoreObj("periodicFlusher");
    periodicMemstoreFlusherTestChoreField.testIfChoreScheduled(periodicFlusher);
    // test if nonceManager chore is scheduled by default in HRegionServer init
    TestChoreField<ScheduledChore> nonceManagerTestChoreField = new TestChoreField<>();
    ScheduledChore nonceManagerChore = nonceManagerTestChoreField.getChoreObj("nonceManagerChore");
    nonceManagerTestChoreField.testIfChoreScheduled(nonceManagerChore);
    // test if executorStatusChore chore is scheduled by default in HRegionServer init
    TestChoreField<ExecutorStatusChore> executorStatusChoreTestChoreField = new TestChoreField<>();
    ExecutorStatusChore executorStatusChore = executorStatusChoreTestChoreField.getChoreObj("executorStatusChore");
    executorStatusChoreTestChoreField.testIfChoreScheduled(executorStatusChore);
}
Also used : ExecutorStatusChore(org.apache.hadoop.hbase.ExecutorStatusChore) ScheduledChore(org.apache.hadoop.hbase.ScheduledChore) Test(org.junit.Test)
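
The TestChoreField helper used above is defined inside TestRSChoresScheduled and is not shown on this page. As a hedged reconstruction only: it is plausibly a small reflection wrapper that reads the named private chore field off the running HRegionServer and asserts that the server's ChoreService has scheduled it. In the sketch below, passing the server into the constructor is an assumption made for self-containment; the real test most likely reads a shared mini-cluster field. getChoreService() and isChoreScheduled() are existing HBase APIs.

import java.lang.reflect.Field;

import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.junit.Assert;

// Hypothetical reconstruction of the TestChoreField helper referenced above.
class TestChoreField<E extends ScheduledChore> {

    private final HRegionServer hRegionServer;

    TestChoreField(HRegionServer hRegionServer) {
        this.hRegionServer = hRegionServer;
    }

    // Pull the named chore field (e.g. "executorStatusChore") out of the
    // running region server via reflection. Assumes the field is declared
    // directly on HRegionServer, as the startServices() code above suggests.
    @SuppressWarnings("unchecked")
    E getChoreObj(String fieldName) throws Exception {
        Field choreField = HRegionServer.class.getDeclaredField(fieldName);
        choreField.setAccessible(true);
        return (E) choreField.get(hRegionServer);
    }

    // Assert the chore exists and is scheduled on the server's ChoreService.
    void testIfChoreScheduled(E chore) {
        Assert.assertNotNull(chore);
        Assert.assertTrue(hRegionServer.getChoreService().isChoreScheduled(chore));
    }
}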

Aggregations

ExecutorStatusChore (org.apache.hadoop.hbase.ExecutorStatusChore)2 Configuration (org.apache.hadoop.conf.Configuration)1 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)1 HealthCheckChore (org.apache.hadoop.hbase.HealthCheckChore)1 ScheduledChore (org.apache.hadoop.hbase.ScheduledChore)1 CompactionConfiguration (org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration)1 Test (org.junit.Test)1