Search in sources :

Example 1 with ReplicationBarrierCleaner

Use of org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner in project hbase by apache.

From the class HMaster, method startServiceThreads:

/*
 * Starts every master-side service: the executor pools, the procedure
 * executor, and the background cleaner/recovery chores. Threads started
 * here die with a logged message on unhandled exceptions; the master is
 * lightly loaded, so OOME-style failures are not expected. See
 * HRegionServer if an explicit unexpected-exception handler is ever needed.
 */
private void startServiceThreads() throws IOException {
    // Spin up the executor pools, sizing each one from configuration.
    executorService.startExecutorService(executorService.new ExecutorConfig()
        .setExecutorType(ExecutorType.MASTER_OPEN_REGION)
        .setCorePoolSize(conf.getInt(HConstants.MASTER_OPEN_REGION_THREADS,
            HConstants.MASTER_OPEN_REGION_THREADS_DEFAULT)));
    executorService.startExecutorService(executorService.new ExecutorConfig()
        .setExecutorType(ExecutorType.MASTER_CLOSE_REGION)
        .setCorePoolSize(conf.getInt(HConstants.MASTER_CLOSE_REGION_THREADS,
            HConstants.MASTER_CLOSE_REGION_THREADS_DEFAULT)));
    executorService.startExecutorService(executorService.new ExecutorConfig()
        .setExecutorType(ExecutorType.MASTER_SERVER_OPERATIONS)
        .setCorePoolSize(conf.getInt(HConstants.MASTER_SERVER_OPERATIONS_THREADS,
            HConstants.MASTER_SERVER_OPERATIONS_THREADS_DEFAULT)));
    executorService.startExecutorService(executorService.new ExecutorConfig()
        .setExecutorType(ExecutorType.MASTER_META_SERVER_OPERATIONS)
        .setCorePoolSize(conf.getInt(HConstants.MASTER_META_SERVER_OPERATIONS_THREADS,
            HConstants.MASTER_META_SERVER_OPERATIONS_THREADS_DEFAULT)));
    executorService.startExecutorService(executorService.new ExecutorConfig()
        .setExecutorType(ExecutorType.M_LOG_REPLAY_OPS)
        .setCorePoolSize(conf.getInt(HConstants.MASTER_LOG_REPLAY_OPS_THREADS,
            HConstants.MASTER_LOG_REPLAY_OPS_THREADS_DEFAULT)));
    // Snapshot and merge pools allow idle core threads to time out.
    executorService.startExecutorService(executorService.new ExecutorConfig()
        .setExecutorType(ExecutorType.MASTER_SNAPSHOT_OPERATIONS)
        .setCorePoolSize(conf.getInt(SnapshotManager.SNAPSHOT_POOL_THREADS_KEY,
            SnapshotManager.SNAPSHOT_POOL_THREADS_DEFAULT))
        .setAllowCoreThreadTimeout(true));
    executorService.startExecutorService(executorService.new ExecutorConfig()
        .setExecutorType(ExecutorType.MASTER_MERGE_OPERATIONS)
        .setCorePoolSize(conf.getInt(HConstants.MASTER_MERGE_DISPATCH_THREADS,
            HConstants.MASTER_MERGE_DISPATCH_THREADS_DEFAULT))
        .setAllowCoreThreadTimeout(true));
    // We depend on there being only one instance of this executor running
    // at a time. To do concurrency, would need fencing of enable/disable of
    // tables.
    // Any time changing this maxThreads to > 1, pls see the comment at
    // AccessController#postCompletedCreateTableAction
    executorService.startExecutorService(executorService.new ExecutorConfig()
        .setExecutorType(ExecutorType.MASTER_TABLE_OPERATIONS)
        .setCorePoolSize(1));
    startProcedureExecutor();
    // Build the scan pool and shared parameters for the log cleaner chore.
    logCleanerPool = DirScanPool.getLogCleanerScanPool(conf);
    Map<String, Object> cleanerParams = new HashMap<>();
    cleanerParams.put(MASTER, this);
    // Both cleaner chores share the same wake-up interval.
    int cleanerInterval =
        conf.getInt(HBASE_MASTER_CLEANER_INTERVAL, DEFAULT_HBASE_MASTER_CLEANER_INTERVAL);
    this.logCleaner = new LogCleaner(cleanerInterval, this, conf,
        getMasterWalManager().getFileSystem(), getMasterWalManager().getOldLogDir(),
        logCleanerPool, cleanerParams);
    getChoreService().scheduleChore(logCleaner);
    // HFile archive cleaner: own scan pool, cleaning under the archive dir.
    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    hfileCleanerPool = DirScanPool.getHFileCleanerScanPool(conf);
    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf,
        getMasterFileSystem().getFileSystem(), archiveDir, hfileCleanerPool, cleanerParams);
    getChoreService().scheduleChore(hfileCleaner);
    // Region reopening on very high storeFileRefCount is enabled only when
    // hbase.regions.recovery.store.file.ref.count is configured > 0.
    final int maxStoreFileRefCount = conf.getInt(HConstants.STORE_FILE_REF_COUNT_THRESHOLD,
        HConstants.DEFAULT_STORE_FILE_REF_COUNT_THRESHOLD);
    if (maxStoreFileRefCount <= 0) {
        LOG.info("Reopening regions with very high storeFileRefCount is disabled. "
            + "Provide threshold value > 0 for {} to enable it.",
            HConstants.STORE_FILE_REF_COUNT_THRESHOLD);
    } else {
        this.regionsRecoveryChore = new RegionsRecoveryChore(this, conf, this);
        getChoreService().scheduleChore(this.regionsRecoveryChore);
    }
    this.regionsRecoveryConfigManager = new RegionsRecoveryConfigManager(this);
    replicationBarrierCleaner =
        new ReplicationBarrierCleaner(conf, this, getConnection(), replicationPeerManager);
    getChoreService().scheduleChore(replicationBarrierCleaner);
    // Snapshot cleanup chore is created either way but scheduled only if the
    // cluster-wide tracker says it is enabled.
    final boolean isSnapshotChoreEnabled = this.snapshotCleanupTracker.isSnapshotCleanupEnabled();
    this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager());
    if (isSnapshotChoreEnabled) {
        getChoreService().scheduleChore(this.snapshotCleanerChore);
    } else if (LOG.isTraceEnabled()) {
        LOG.trace("Snapshot Cleaner Chore is disabled. Not starting up the chore..");
    }
    serviceStarted = true;
    if (LOG.isTraceEnabled()) {
        LOG.trace("Started service threads");
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) ReplicationHFileCleaner(org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) RSGroupAdminEndpoint(org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint) LogCleaner(org.apache.hadoop.hbase.master.cleaner.LogCleaner) ReplicationLogCleaner(org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner) SnapshotCleanerChore(org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore) ReplicationBarrierCleaner(org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner)

Example 2 with ReplicationBarrierCleaner

Use of org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner in project hbase by apache.

From the class TestMasterChoreScheduled, method testDefaultScheduledChores:

@Test
public void testDefaultScheduledChores() {
    // Each chore below is fetched by its HMaster field name and verified to
    // have been scheduled during default HMaster initialization.
    TestChoreField<LogCleaner> logCleanerField = new TestChoreField<>();
    logCleanerField.testIfChoreScheduled(logCleanerField.getChoreObj("logCleaner"));

    TestChoreField<HFileCleaner> hfileCleanerField = new TestChoreField<>();
    hfileCleanerField.testIfChoreScheduled(hfileCleanerField.getChoreObj("hfileCleaner"));

    TestChoreField<ReplicationBarrierCleaner> barrierCleanerField = new TestChoreField<>();
    barrierCleanerField
        .testIfChoreScheduled(barrierCleanerField.getChoreObj("replicationBarrierCleaner"));

    TestChoreField<ClusterStatusChore> clusterStatusField = new TestChoreField<>();
    clusterStatusField.testIfChoreScheduled(clusterStatusField.getChoreObj("clusterStatusChore"));

    TestChoreField<BalancerChore> balancerField = new TestChoreField<>();
    balancerField.testIfChoreScheduled(balancerField.getChoreObj("balancerChore"));

    // The normalizer chore is owned by the RegionNormalizerManager rather than
    // being a direct HMaster field, so it is obtained via the manager.
    ScheduledChore normalizerChore =
        hMaster.getRegionNormalizerManager().getRegionNormalizerChore();
    new TestChoreField<ScheduledChore>().testIfChoreScheduled(normalizerChore);

    TestChoreField<CatalogJanitor> catalogJanitorField = new TestChoreField<>();
    catalogJanitorField.testIfChoreScheduled(catalogJanitorField.getChoreObj("catalogJanitorChore"));

    TestChoreField<HbckChore> hbckField = new TestChoreField<>();
    hbckField.testIfChoreScheduled(hbckField.getChoreObj("hbckChore"));
}
Also used : BalancerChore(org.apache.hadoop.hbase.master.balancer.BalancerChore) CatalogJanitor(org.apache.hadoop.hbase.master.janitor.CatalogJanitor) ClusterStatusChore(org.apache.hadoop.hbase.master.balancer.ClusterStatusChore) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) LogCleaner(org.apache.hadoop.hbase.master.cleaner.LogCleaner) ReplicationBarrierCleaner(org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner) ScheduledChore(org.apache.hadoop.hbase.ScheduledChore) Test(org.junit.Test)

Aggregations

HFileCleaner (org.apache.hadoop.hbase.master.cleaner.HFileCleaner)2 LogCleaner (org.apache.hadoop.hbase.master.cleaner.LogCleaner)2 ReplicationBarrierCleaner (org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner)2 HashMap (java.util.HashMap)1 Path (org.apache.hadoop.fs.Path)1 ScheduledChore (org.apache.hadoop.hbase.ScheduledChore)1 BalancerChore (org.apache.hadoop.hbase.master.balancer.BalancerChore)1 ClusterStatusChore (org.apache.hadoop.hbase.master.balancer.ClusterStatusChore)1 SnapshotCleanerChore (org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore)1 CatalogJanitor (org.apache.hadoop.hbase.master.janitor.CatalogJanitor)1 ReplicationHFileCleaner (org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner)1 ReplicationLogCleaner (org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner)1 RSGroupAdminEndpoint (org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint)1 Test (org.junit.Test)1