Example 11 with ThreadFactoryBuilder

Use of org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.

The class BulkLoadHFilesTool, method createExecutorService.

// Initialize a fixed-size thread pool of daemon threads; idle core threads may time out.
private ExecutorService createExecutorService() {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>(),
        new ThreadFactoryBuilder().setNameFormat("BulkLoadHFilesTool-%1$d").setDaemon(true).build());
    pool.allowCoreThreadTimeOut(true);
    return pool;
}
Also used : ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor)
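For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of the same idea using the standard Guava ThreadFactoryBuilder (the hbase-thirdparty class is a relocated copy with the same API); the class name and thread count below are illustrative, not part of the HBase source:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class DaemonPoolSketch {
    public static void main(String[] args) throws Exception {
        int nrThreads = 4; // stand-in for the tool's configured thread count
        ThreadPoolExecutor pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(),
            new ThreadFactoryBuilder()
                .setNameFormat("BulkLoadHFilesTool-%1$d") // %1$d is a per-factory counter
                .setDaemon(true) // daemon threads never block JVM exit
                .build());
        // Without this, core threads would idle forever even with an empty queue.
        pool.allowCoreThreadTimeOut(true);
        pool.submit(() -> System.out.println("running on " + Thread.currentThread().getName()));
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}

The combination of setDaemon(true) and allowCoreThreadTimeOut(true) means a forgotten pool can neither keep the JVM alive nor hold idle threads indefinitely.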

Example 12 with ThreadFactoryBuilder

Use of org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.

The class TestServerRemoteProcedure, method setUp.

@Before
public void setUp() throws Exception {
    util = new HBaseTestingUtil();
    // Log anything that escapes the executor's threads instead of losing it silently.
    this.executor = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder()
        .setUncaughtExceptionHandler((t, e) -> LOG.warn("Uncaught: ", e)).build());
    master = new MockMasterServices(util.getConfiguration(), this.regionsToRegionServers);
    rsDispatcher = new MockRSProcedureDispatcher(master);
    rsDispatcher.setMockRsExecutor(new NoopRSExecutor());
    master.start(2, rsDispatcher);
    am = master.getAssignmentManager();
    master.getServerManager().getOnlineServersList().stream()
        .forEach(serverName -> am.getRegionStates().getOrCreateServer(serverName));
}
Also used : AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager) SortedSet(java.util.SortedSet) MockMasterServices(org.apache.hadoop.hbase.master.assignment.MockMasterServices) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) LoggerFactory(org.slf4j.LoggerFactory) TimeoutException(java.util.concurrent.TimeoutException) SWITCH_RPC_THROTTLE(org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType.SWITCH_RPC_THROTTLE) Future(java.util.concurrent.Future) TestName(org.junit.rules.TestName) ProcedureStateSerializer(org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer) After(org.junit.After) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) MasterServices(org.apache.hadoop.hbase.master.MasterServices) Assert.fail(org.junit.Assert.fail) OpenRegionProcedure(org.apache.hadoop.hbase.master.assignment.OpenRegionProcedure) ClassRule(org.junit.ClassRule) ExpectedException(org.junit.rules.ExpectedException) ExecutorService(java.util.concurrent.ExecutorService) ServerName(org.apache.hadoop.hbase.ServerName) Bytes(org.apache.hadoop.hbase.util.Bytes) Before(org.junit.Before) TableName(org.apache.hadoop.hbase.TableName) Logger(org.slf4j.Logger) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) MediumTests(org.apache.hadoop.hbase.testclassification.MediumTests) RemoteProcedureDispatcher(org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher) Set(java.util.Set) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) IOException(java.io.IOException) Test(org.junit.Test) NavigableMap(java.util.NavigableMap) Category(org.junit.experimental.categories.Category) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) Executors(java.util.concurrent.Executors) TimeUnit(java.util.concurrent.TimeUnit) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) Rule(org.junit.Rule) AdminProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos) TransitRegionStateProcedure(org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure) RemoteProcedureException(org.apache.hadoop.hbase.procedure2.RemoteProcedureException) MasterTests(org.apache.hadoop.hbase.testclassification.MasterTests) Optional(java.util.Optional) Assert(org.junit.Assert) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) RegionInfoBuilder(org.apache.hadoop.hbase.client.RegionInfoBuilder)
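One subtlety of the setUncaughtExceptionHandler call above: the handler only sees throwables that escape a task started with execute(); submit() wraps the task in a FutureTask, which captures the exception in the returned Future instead. A minimal sketch with plain Guava and hypothetical names:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class UncaughtHandlerSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder()
            .setNameFormat("remote-proc-%d")
            // Fires for exceptions thrown out of execute()d tasks. A submit()ted
            // task would instead deliver the exception via Future.get().
            .setUncaughtExceptionHandler(
                (t, e) -> System.err.println("Uncaught in " + t.getName() + ": " + e))
            .build());
        executor.execute(() -> { throw new IllegalStateException("boom"); });
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    }
}

Note that with a scheduled executor like the one in the test, even execute() routes tasks through a future-wrapping ScheduledFutureTask, so exceptions tend to be captured rather than reaching the handler; the LOG-based handler is best read as a safety net rather than the primary error channel.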

Example 13 with ThreadFactoryBuilder

Use of org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.

The class TestHBaseFsckMOB, method setUpBeforeClass.

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MasterSyncCoprocessor.class.getName());
    conf.setInt("hbase.regionserver.handler.count", 2);
    conf.setInt("hbase.regionserver.metahandler.count", 30);
    conf.setInt("hbase.htable.threads.max", POOL_SIZE);
    conf.setInt("hbase.hconnection.threads.max", 2 * POOL_SIZE);
    conf.setInt("hbase.hbck.close.timeout", 2 * REGION_ONLINE_TIMEOUT);
    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 8 * REGION_ONLINE_TIMEOUT);
    TEST_UTIL.startMiniCluster(1);
    tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS,
        new SynchronousQueue<>(), new ThreadFactoryBuilder().setNameFormat("testhbck-pool-%d")
            .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
    hbfsckExecutorService = new ScheduledThreadPoolExecutor(POOL_SIZE);
    AssignmentManager assignmentManager = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
    regionStates = assignmentManager.getRegionStates();
    connection = TEST_UTIL.getConnection();
    admin = connection.getAdmin();
    admin.balancerSwitch(false, true);
    TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
}
Also used : ScheduledThreadPoolExecutor(java.util.concurrent.ScheduledThreadPoolExecutor) SynchronousQueue(java.util.concurrent.SynchronousQueue) AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) BeforeClass(org.junit.BeforeClass)
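The tableExecutorService above pairs a SynchronousQueue with a core size of 1 and a maximum of POOL_SIZE: the queue buffers nothing, so each submission is handed directly to a thread, growing the pool up to the maximum and rejecting anything beyond it. A standalone sketch of that behavior (the pool size and sleep are placeholders):

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class HandoffPoolSketch {
    public static void main(String[] args) throws Exception {
        int poolSize = 8; // stand-in for the test's POOL_SIZE constant
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, poolSize, 60, TimeUnit.SECONDS,
            // SynchronousQueue holds no tasks: each execute() must hand its task
            // straight to a thread, spawning new ones up to the maximum.
            new SynchronousQueue<>(),
            new ThreadFactoryBuilder().setNameFormat("testhbck-pool-%d").setDaemon(true).build());
        for (int i = 0; i < poolSize; i++) {
            pool.execute(() -> {
                try { Thread.sleep(100); } catch (InterruptedException ignored) { }
            });
        }
        // A further execute() while all threads are busy would be rejected
        // (default AbortPolicy), since the queue cannot buffer it.
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}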

Example 14 with ThreadFactoryBuilder

Use of org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.

The class HBaseInterClusterReplicationEndpoint, method init.

@Override
public void init(Context context) throws IOException {
    super.init(context);
    decorateConf();
    this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 300);
    this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier", maxRetriesMultiplier);
    // A Replicator job is bound by the RPC timeout. We will wait this long for all Replicator
    // tasks to terminate when doStop() is called.
    long maxTerminationWaitMultiplier = this.conf.getLong("replication.source.maxterminationmultiplier", DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER);
    this.maxTerminationWait = maxTerminationWaitMultiplier * this.conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
    this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000);
    this.metrics = context.getMetrics();
    // per sink thread pool
    this.maxThreads = this.conf.getInt(HConstants.REPLICATION_SOURCE_MAXTHREADS_KEY, HConstants.REPLICATION_SOURCE_MAXTHREADS_DEFAULT);
    this.exec = Threads.getBoundedCachedThreadPool(maxThreads, 60, TimeUnit.SECONDS,
        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("SinkThread-%d").build());
    this.abortable = ctx.getAbortable();
    // Set the size limit for replication RPCs to 95% of the max request size.
    // We could do with less slop if we have an accurate estimate of encoded size. Being
    // conservative for now.
    this.replicationRpcLimit = (int) (0.95 * conf.getLong(RpcServer.MAX_REQUEST_SIZE, RpcServer.DEFAULT_MAX_REQUEST_SIZE));
    this.dropOnDeletedTables = this.conf.getBoolean(REPLICATION_DROP_ON_DELETED_TABLE_KEY, false);
    this.dropOnDeletedColumnFamilies = this.conf.getBoolean(REPLICATION_DROP_ON_DELETED_COLUMN_FAMILY_KEY, false);
    this.replicationBulkLoadDataEnabled = conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);
    if (this.replicationBulkLoadDataEnabled) {
        replicationClusterId = this.conf.get(HConstants.REPLICATION_CLUSTER_ID);
    }
    // Construct base namespace directory and hfile archive directory path
    Path rootDir = CommonFSUtils.getRootDir(conf);
    Path baseNSDir = new Path(HConstants.BASE_NAMESPACE_DIR);
    baseNamespaceDir = new Path(rootDir, baseNSDir);
    hfileArchiveDir = new Path(rootDir, new Path(HConstants.HFILE_ARCHIVE_DIRECTORY, baseNSDir));
    isSerial = context.getPeerConfig().isSerial();
}
Also used : Path(org.apache.hadoop.fs.Path) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder)
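Threads.getBoundedCachedThreadPool is an HBase utility rather than a JDK method. The helper below is a hypothetical plain-JDK stand-in showing the usual shape of such a pool, not the actual HBase implementation:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class BoundedCachedPoolSketch {
    // Hypothetical stand-in: a pool that grows to maxThreads, queues the rest,
    // and reclaims idle threads after keepAlive.
    static ThreadPoolExecutor boundedCachedPool(int maxThreads, long keepAlive, TimeUnit unit,
            ThreadFactory factory) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(maxThreads, maxThreads, keepAlive, unit,
            new LinkedBlockingQueue<>(), factory);
        pool.allowCoreThreadTimeOut(true); // lets the "cached" pool shrink when idle
        return pool;
    }

    public static void main(String[] args) throws Exception {
        ThreadPoolExecutor exec = boundedCachedPool(4, 60, TimeUnit.SECONDS,
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("SinkThread-%d").build());
        exec.submit(() -> System.out.println(Thread.currentThread().getName()));
        exec.shutdown();
        exec.awaitTermination(10, TimeUnit.SECONDS);
    }
}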

Example 15 with ThreadFactoryBuilder

Use of org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.

The class SplitTableRegionProcedure, method splitStoreFiles.

/**
 * Split the parent region's store files, creating reference files for the two daughter regions.
 * @param env MasterProcedureEnv
 */
private Pair<List<Path>, List<Path>> splitStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs) throws IOException {
    final Configuration conf = env.getMasterConfiguration();
    TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion, and finally checks for any exception.
    //
    // Note: From HBASE-26187, splitStoreFiles now creates daughter region dirs straight under the
    // table dir. In case of failure, the proc would go through this again; already existing
    // region dirs and split files would just be ignored, and new split files should get created.
    int nbFiles = 0;
    final Map<String, Collection<StoreFileInfo>> files = new HashMap<String, Collection<StoreFileInfo>>(htd.getColumnFamilyCount());
    for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
        String family = cfd.getNameAsString();
        StoreFileTracker tracker = StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, cfd, regionFs);
        Collection<StoreFileInfo> sfis = tracker.load();
        if (sfis == null) {
            continue;
        }
        Collection<StoreFileInfo> filteredSfis = null;
        for (StoreFileInfo sfi : sfis) {
            // Reference files are not splittable; skip them (presumed leftovers awaiting archiving).
            if (sfi.isReference()) {
                LOG.info("Skipping split of " + sfi + "; presuming ready for archiving.");
                continue;
            }
            if (filteredSfis == null) {
                filteredSfis = new ArrayList<StoreFileInfo>(sfis.size());
                files.put(family, filteredSfis);
            }
            filteredSfis.add(sfi);
            nbFiles++;
        }
    }
    if (nbFiles == 0) {
        // No files need to be split.
        return new Pair<>(Collections.emptyList(), Collections.emptyList());
    }
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT)), nbFiles);
    LOG.info("pid=" + getProcId() + " splitting " + nbFiles + " storefiles, region=" + getParentRegion().getShortNameToLog() + ", threads=" + maxThreads);
    final ExecutorService threadPool = Executors.newFixedThreadPool(maxThreads,
        new ThreadFactoryBuilder().setNameFormat("StoreFileSplitter-pool-%d").setDaemon(true)
            .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
    final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles);
    // Split each store file.
    for (Map.Entry<String, Collection<StoreFileInfo>> e : files.entrySet()) {
        byte[] familyName = Bytes.toBytes(e.getKey());
        final ColumnFamilyDescriptor hcd = htd.getColumnFamily(familyName);
        final Collection<StoreFileInfo> storeFiles = e.getValue();
        if (storeFiles != null && storeFiles.size() > 0) {
            final Configuration storeConfiguration = StoreUtils.createStoreConfiguration(env.getMasterConfiguration(), htd, hcd);
            for (StoreFileInfo storeFileInfo : storeFiles) {
                // As this procedure is running on the master, use CacheConfig.DISABLED,
                // meaning don't cache any blocks.
                // We also need to pass through a suitable CompoundConfiguration as if this
                // is running in a regionserver's Store context, or we might not be able
                // to read the hfiles.
                storeFileInfo.setConf(storeConfiguration);
                StoreFileSplitter sfs = new StoreFileSplitter(regionFs, familyName, new HStoreFile(storeFileInfo, hcd.getBloomFilterType(), CacheConfig.DISABLED));
                futures.add(threadPool.submit(sfs));
            }
        }
    }
    // Shutdown the pool
    threadPool.shutdown();
    // Wait for all the tasks to finish.
    // When splits ran on the RegionServer, the how-long-to-wait configuration was named
    // hbase.regionserver.fileSplitTimeout. If it is set, its value is used.
    long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", conf.getLong("hbase.regionserver.fileSplitTimeout", 600000));
    try {
        boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException("Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
    List<Path> daughterA = new ArrayList<>();
    List<Path> daughterB = new ArrayList<>();
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            if (p.getFirst() != null) {
                daughterA.add(p.getFirst());
            }
            if (p.getSecond() != null) {
                daughterB.add(p.getSecond());
            }
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("pid=" + getProcId() + " split storefiles for region " + getParentRegion().getShortNameToLog() + " Daughter A: " + daughterA + " storefiles, Daughter B: " + daughterB + " storefiles.");
    }
    return new Pair<>(daughterA, daughterB);
}
Also used : InterruptedIOException(java.io.InterruptedIOException) Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) StoreFileTracker(org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker) ExecutionException(java.util.concurrent.ExecutionException) Pair(org.apache.hadoop.hbase.util.Pair) Path(org.apache.hadoop.fs.Path) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) ExecutorService(java.util.concurrent.ExecutorService) Collection(java.util.Collection) Future(java.util.concurrent.Future) HStoreFile(org.apache.hadoop.hbase.regionserver.HStoreFile) Map(java.util.Map) StoreFileInfo(org.apache.hadoop.hbase.regionserver.StoreFileInfo)
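The submit-then-drain structure of splitStoreFiles (a fixed pool sized to the work, shutdown(), a bounded awaitTermination(), then a pass over the futures to surface failures) is reusable on its own. A condensed sketch with placeholder tasks standing in for the store file splits:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class SplitPoolSketch {
    public static void main(String[] args) throws Exception {
        int nbTasks = 10; // stand-in for the number of store files to split
        ExecutorService pool = Executors.newFixedThreadPool(Math.min(4, nbTasks),
            new ThreadFactoryBuilder().setNameFormat("StoreFileSplitter-pool-%d")
                .setDaemon(true).build());
        // Submit all work up front and keep the futures for later inspection.
        List<Future<Integer>> futures = new ArrayList<>(nbTasks);
        for (int i = 0; i < nbTasks; i++) {
            final int id = i;
            futures.add(pool.submit(() -> id * 2)); // placeholder for splitting one file
        }
        // shutdown() stops intake; awaitTermination() bounds the total wait.
        pool.shutdown();
        if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // timed out: interrupt whatever is still running
            throw new IllegalStateException("tasks took too long");
        }
        // Only now read the futures, so any task failure surfaces here.
        for (Future<Integer> f : futures) {
            try {
                f.get();
            } catch (ExecutionException e) {
                throw new IllegalStateException(e.getCause());
            }
        }
    }
}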

Aggregations

ThreadFactoryBuilder (org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder): 25
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 9
ExecutorService (java.util.concurrent.ExecutorService): 7
IOException (java.io.IOException): 6
Configuration (org.apache.hadoop.conf.Configuration): 6
Future (java.util.concurrent.Future): 5
TableName (org.apache.hadoop.hbase.TableName): 5
BeforeClass (org.junit.BeforeClass): 5
ExecutionException (java.util.concurrent.ExecutionException): 4
Executors (java.util.concurrent.Executors): 4
TimeUnit (java.util.concurrent.TimeUnit): 4
InterruptedIOException (java.io.InterruptedIOException): 3
Arrays (java.util.Arrays): 3
Path (org.apache.hadoop.fs.Path): 3
Test (org.junit.Test): 3
ArrayList (java.util.ArrayList): 2
NavigableMap (java.util.NavigableMap): 2
Random (java.util.Random): 2
Set (java.util.Set): 2
SortedSet (java.util.SortedSet): 2