Search in sources :

Example 1 with SnapshotCleanerChore

Use of org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore in the Apache HBase project.

From the class HMaster, method startServiceThreads:

/*
   * Start up all services. If any of these threads gets an unhandled exception
   * then they just die with a logged message.  This should be fine because
   * in general, we do not expect the master to get such unhandled exceptions
   *  as OOMEs; it should be lightly loaded. See what HRegionServer does if
   *  need to install an unexpected exception handler.
   */
private void startServiceThreads() throws IOException {
    // Start the executor service pools. Each pool size is read from configuration
    // with an HConstants/SnapshotManager default as fallback.
    startExecutor(ExecutorType.MASTER_OPEN_REGION,
        conf.getInt(HConstants.MASTER_OPEN_REGION_THREADS, HConstants.MASTER_OPEN_REGION_THREADS_DEFAULT), false);
    startExecutor(ExecutorType.MASTER_CLOSE_REGION,
        conf.getInt(HConstants.MASTER_CLOSE_REGION_THREADS, HConstants.MASTER_CLOSE_REGION_THREADS_DEFAULT), false);
    startExecutor(ExecutorType.MASTER_SERVER_OPERATIONS,
        conf.getInt(HConstants.MASTER_SERVER_OPERATIONS_THREADS, HConstants.MASTER_SERVER_OPERATIONS_THREADS_DEFAULT), false);
    startExecutor(ExecutorType.MASTER_META_SERVER_OPERATIONS,
        conf.getInt(HConstants.MASTER_META_SERVER_OPERATIONS_THREADS, HConstants.MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT), false);
    startExecutor(ExecutorType.M_LOG_REPLAY_OPS,
        conf.getInt(HConstants.MASTER_LOG_REPLAY_OPS_THREADS, HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT), false);
    // Snapshot and merge pools allow idle core threads to time out (the original
    // startup calls set setAllowCoreThreadTimeout(true) for these two only).
    startExecutor(ExecutorType.MASTER_SNAPSHOT_OPERATIONS,
        conf.getInt(SnapshotManager.SNAPSHOT_POOL_THREADS_KEY, SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT), true);
    startExecutor(ExecutorType.MASTER_MERGE_OPERATIONS,
        conf.getInt(HConstants.MASTER_MERGE_DISPATCH_THREADS, HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT), true);
    // We depend on there being only one instance of this executor running
    // at a time. To do concurrency, would need fencing of enable/disable of
    // tables.
    // Any time changing this maxThreads to > 1, pls see the comment at
    // AccessController#postCompletedCreateTableAction
    startExecutor(ExecutorType.MASTER_TABLE_OPERATIONS, 1, false);
    startProcedureExecutor();
    // Create log cleaner thread pool
    logCleanerPool = DirScanPool.getLogCleanerScanPool(conf);
    // Parameters handed to both cleaner chores; MASTER maps to this master instance.
    Map<String, Object> params = new HashMap<>();
    params.put(MASTER, this);
    // Start log cleaner thread; the same interval drives the HFile cleaner below.
    int cleanerInterval = conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL);
    this.logCleaner = new LogCleaner(cleanerInterval, this, conf, getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(), logCleanerPool, params);
    getChoreService().scheduleChore(logCleaner);
    // start the hfile archive cleaner thread
    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    // Create archive cleaner thread pool
    hfileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf);
    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem().getFileSystem(), archiveDir, hfileCleanerPool, params);
    getChoreService().scheduleChore(hfileCleaner);
    // Regions Reopen based on very high storeFileRefCount is considered enabled
    // only if hbase.regions.recovery.store.file.ref.count has value > 0
    final int maxStoreFileRefCount = conf.getInt(HConstants.STORE_FILE_REF_COUNT_THRESHOLD, HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD);
    if (maxStoreFileRefCount > 0) {
        this.regionsRecoveryChore = new RegionsRecoveryChore(this, conf, this);
        getChoreService().scheduleChore(this.regionsRecoveryChore);
    } else {
        LOG.info("Reopening regions with very high storeFileRefCount is disabled. " + "Provide threshold value > 0 for {} to enable it.", HConstants.STORE_FILE_REF_COUNT_THRESHOLD);
    }
    // Watches config changes so the recovery chore can be (de)activated at runtime
    // (NOTE(review): inferred from the name — confirm against RegionsRecoveryConfigManager).
    this.regionsRecoveryConfigManager = new RegionsRecoveryConfigManager(this);
    replicationBarrierCleaner = new ReplicationBarrierCleaner(conf, this, getConnection(), replicationPeerManager);
    getChoreService().scheduleChore(replicationBarrierCleaner);
    // The snapshot cleaner chore is always constructed but only scheduled when the
    // cleanup tracker says cleanup is enabled.
    final boolean isSnapshotChoreEnabled = this.snapshotCleanupTracker.isSnapshotCleanupEnabled();
    this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager());
    if (isSnapshotChoreEnabled) {
        getChoreService().scheduleChore(this.snapshotCleanerChore);
    } else {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Snapshot Cleaner Chore is disabled. Not starting up the chore..");
        }
    }
    serviceStarted = true;
    if (LOG.isTraceEnabled()) {
        LOG.trace("Started service threads");
    }
}

/**
 * Starts one master executor pool of the given type with the given core size.
 * Extracted from the seven near-identical startup statements above.
 *
 * @param type the executor pool to start
 * @param corePoolSize number of core threads for the pool
 * @param allowCoreThreadTimeout when true, also calls setAllowCoreThreadTimeout(true)
 *   on the config (when false the flag is left untouched, exactly as the original code did)
 */
private void startExecutor(ExecutorType type, int corePoolSize, boolean allowCoreThreadTimeout) {
    if (allowCoreThreadTimeout) {
        executorService.startExecutorService(executorService.new ExecutorConfig()
            .setExecutorType(type).setCorePoolSize(corePoolSize).setAllowCoreThreadTimeout(true));
    } else {
        executorService.startExecutorService(executorService.new ExecutorConfig()
            .setExecutorType(type).setCorePoolSize(corePoolSize));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) ReplicationHFileCleaner(org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) RSGroupAdminEndpoint(org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint) LogCleaner(org.apache.hadoop.hbase.master.cleaner.LogCleaner) ReplicationLogCleaner(org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner) SnapshotCleanerChore(org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore) ReplicationBarrierCleaner(org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner)

Aggregations

HashMap (java.util.HashMap)1 Path (org.apache.hadoop.fs.Path)1 HFileCleaner (org.apache.hadoop.hbase.master.cleaner.HFileCleaner)1 LogCleaner (org.apache.hadoop.hbase.master.cleaner.LogCleaner)1 ReplicationBarrierCleaner (org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner)1 SnapshotCleanerChore (org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore)1 ReplicationHFileCleaner (org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner)1 ReplicationLogCleaner (org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner)1 RSGroupAdminEndpoint (org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint)1