Example 6 with ThreadFactoryBuilder

Use of org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.

From the class TestAsyncTableGetMultiThreaded, method test.

@Test
public void test() throws Exception {
    LOG.info("====== Test started ======");
    int numThreads = 7;
    AtomicBoolean stop = new AtomicBoolean(false);
    ExecutorService executor = Executors.newFixedThreadPool(numThreads, new ThreadFactoryBuilder().setNameFormat("TestAsyncGet-pool-%d").setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
    List<Future<?>> futures = new ArrayList<>();
    IntStream.range(0, numThreads).forEach(i -> futures.add(executor.submit(() -> {
        run(stop);
        return null;
    })));
    LOG.info("====== Scheduled {} read threads ======", numThreads);
    Collections.shuffle(Arrays.asList(SPLIT_KEYS), new Random(123));
    Admin admin = TEST_UTIL.getAdmin();
    for (byte[] splitPoint : SPLIT_KEYS) {
        int oldRegionCount = admin.getRegions(TABLE_NAME).size();
        LOG.info("====== Splitting at {} ======, region count before splitting is {}", Bytes.toStringBinary(splitPoint), oldRegionCount);
        admin.split(TABLE_NAME, splitPoint);
        TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {

            @Override
            public boolean evaluate() throws Exception {
                return TEST_UTIL.getMiniHBaseCluster().getRegions(TABLE_NAME).size() > oldRegionCount;
            }

            @Override
            public String explainFailure() throws Exception {
                return "Split has not finished yet";
            }
        });
        List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(TABLE_NAME);
        LOG.info("====== Split at {} ======, region count after splitting is {}", Bytes.toStringBinary(splitPoint), regions.size());
        for (HRegion region : regions) {
            LOG.info("====== Compact {} ======", region.getRegionInfo());
            region.compact(true);
        }
        for (HRegion region : regions) {
            // Wait for the compaction to complete and for compacted-file references to be cleaned up
            LOG.info("====== Waiting for compaction on {} ======", region.getRegionInfo());
            RetryCounter retrier = new RetryCounter(30, 1, TimeUnit.SECONDS);
            for (; ; ) {
                try {
                    if (admin.getCompactionStateForRegion(region.getRegionInfo().getRegionName()) == CompactionState.NONE) {
                        break;
                    }
                } catch (IOException e) {
                    LOG.warn("Failed to query");
                }
                if (!retrier.shouldRetry()) {
                    throw new IOException("Can not finish compaction in time after attempt " + retrier.getAttemptTimes() + " times");
                }
                retrier.sleepUntilNextRetry();
            }
            LOG.info("====== Compaction on {} finished, close and archive compacted files ======", region.getRegionInfo());
            region.getStores().get(0).closeAndArchiveCompactedFiles();
            LOG.info("====== Close and archive compacted files on {} done ======", region.getRegionInfo());
        }
        Thread.sleep(5000);
        LOG.info("====== Balancing cluster ======");
        admin.balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(true).build());
        LOG.info("====== Balance cluster done ======");
        Thread.sleep(5000);
        ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
        ServerName newMetaServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream().map(t -> t.getRegionServer().getServerName()).filter(s -> !s.equals(metaServer)).findAny().get();
        LOG.info("====== Moving meta from {} to {} ======", metaServer, newMetaServer);
        admin.move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), newMetaServer);
        LOG.info("====== Move meta done ======");
        Thread.sleep(5000);
    }
    List<LogEntry> balancerDecisionRecords = admin.getLogEntries(null, "BALANCER_DECISION", ServerType.MASTER, 2, null);
    Assert.assertEquals(2, balancerDecisionRecords.size());
    LOG.info("====== Read test finished, shutdown thread pool ======");
    stop.set(true);
    executor.shutdown();
    for (int i = 0; i < numThreads; i++) {
        LOG.info("====== Waiting for {} threads to finish, remaining {} ======", numThreads, numThreads - i);
        futures.get(i).get();
    }
    LOG.info("====== Test test finished ======");
}
Also used : IntStream(java.util.stream.IntStream) Arrays(java.util.Arrays) BeforeClass(org.junit.BeforeClass) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) LoggerFactory(org.slf4j.LoggerFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Random(java.util.Random) ClientTests(org.apache.hadoop.hbase.testclassification.ClientTests) ArrayList(java.util.ArrayList) Future(java.util.concurrent.Future) Closeables(org.apache.hbase.thirdparty.com.google.common.io.Closeables) ClassRule(org.junit.ClassRule) ExecutorService(java.util.concurrent.ExecutorService) ServerName(org.apache.hadoop.hbase.ServerName) Threads(org.apache.hadoop.hbase.util.Threads) MemoryCompactionPolicy(org.apache.hadoop.hbase.MemoryCompactionPolicy) Bytes(org.apache.hadoop.hbase.util.Bytes) TableName(org.apache.hadoop.hbase.TableName) AfterClass(org.junit.AfterClass) CompactingMemStore(org.apache.hadoop.hbase.regionserver.CompactingMemStore) Logger(org.slf4j.Logger) HBaseTestingUtil(org.apache.hadoop.hbase.HBaseTestingUtil) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RetryCounter(org.apache.hadoop.hbase.util.RetryCounter) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) LargeTests(org.apache.hadoop.hbase.testclassification.LargeTests) IOException(java.io.IOException) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) MAX_BUFFER_COUNT_KEY(org.apache.hadoop.hbase.io.ByteBuffAllocator.MAX_BUFFER_COUNT_KEY) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) HBASE_CLIENT_META_OPERATION_TIMEOUT(org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT) ExplainingPredicate(org.apache.hadoop.hbase.Waiter.ExplainingPredicate) Assert(org.junit.Assert) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals)
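
Distilled, the threading pattern above is small. The sketch below (using upstream Guava's ThreadFactoryBuilder, which the hbase-thirdparty class simply relocates; pool name and sizes are illustrative) shows the same shape: a named daemon pool with an uncaught-exception handler, workers gated on an AtomicBoolean, and futures joined at the end so a worker failure fails the caller.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class NamedDaemonPoolSketch {
    public static void main(String[] args) throws Exception {
        AtomicBoolean stop = new AtomicBoolean(false);
        // Named daemon threads keep jstack output readable and never block JVM exit.
        ExecutorService executor = Executors.newFixedThreadPool(4,
            new ThreadFactoryBuilder().setNameFormat("reader-pool-%d").setDaemon(true)
                .setUncaughtExceptionHandler((t, e) -> e.printStackTrace()).build());
        List<Future<?>> futures = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            futures.add(executor.submit(() -> {
                while (!stop.get()) {
                    Thread.sleep(10); // stand-in for one unit of read work
                }
                return null;
            }));
        }
        // ... drive splits / compactions / moves here while the readers run ...
        stop.set(true);
        executor.shutdown();
        for (Future<?> f : futures) {
            f.get(); // rethrows any worker failure as an ExecutionException
        }
    }
}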

Example 7 with ThreadFactoryBuilder

Use of org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.

From the class TestAsyncFSWALRollStuck, method setUp.

@BeforeClass
public static void setUp() throws Exception {
    Configuration conf = UTIL.getConfiguration();
    conf.setClass(AsyncFSWALProvider.WRITER_IMPL, TestAsyncWriter.class, AsyncWriter.class);
    // set a very small size so we will reach the batch size when writing out a single edit
    conf.setLong(AsyncFSWAL.WAL_BATCH_SIZE, 1);
    TN = TableName.valueOf("test");
    RI = RegionInfoBuilder.newBuilder(TN).build();
    MVCC = new MultiVersionConcurrencyControl();
    EXECUTOR = Executors.newScheduledThreadPool(2, new ThreadFactoryBuilder().setDaemon(true).build());
    Path rootDir = UTIL.getDataTestDir();
    ROLL_EXEC = Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true).build());
    WALActionsListener listener = new WALActionsListener() {

        @Override
        public void logRollRequested(RollRequestReason reason) {
            ROLL_EXEC.execute(() -> {
                try {
                    WAL.rollWriter();
                } catch (Exception e) {
                    LOG.warn("failed to roll writer", e);
                }
            });
        }
    };
    WAL = new AsyncFSWAL(UTIL.getTestFileSystem(), rootDir, "log", "oldlog", conf, Arrays.asList(listener), true, null, null, EVENT_LOOP_GROUP, CHANNEL_CLASS);
    WAL.init();
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) MultiVersionConcurrencyControl(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) IOException(java.io.IOException) BeforeClass(org.junit.BeforeClass)
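
The core idea here is handing the roll off to a separate executor so the listener callback returns immediately. A minimal standalone sketch of that hand-off, with hypothetical names and again the upstream Guava ThreadFactoryBuilder:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class AsyncCallbackSketch {
    // One daemon thread serializes roll requests and keeps the caller non-blocking.
    private static final ExecutorService ROLL_EXEC = Executors.newSingleThreadExecutor(
        new ThreadFactoryBuilder().setNameFormat("wal-roll-%d").setDaemon(true).build());

    // Called from the listener; hands the slow, failure-prone work off.
    static void onRollRequested(Runnable rollWriter) {
        ROLL_EXEC.execute(() -> {
            try {
                rollWriter.run();
            } catch (RuntimeException e) {
                System.err.println("failed to roll writer: " + e);
            }
        });
    }

    public static void main(String[] args) throws InterruptedException {
        onRollRequested(() -> System.out.println("rolled"));
        Thread.sleep(200); // let the daemon thread run before the JVM exits
    }
}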

Example 8 with ThreadFactoryBuilder

Use of org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.

From the class ModifyRegionUtils, method getRegionOpenAndInitThreadPool.

/*
   * Used by createRegions() to get a thread pool executor sized by the
   * "hbase.hregion.open.and.init.threads.max" property.
   */
static ThreadPoolExecutor getRegionOpenAndInitThreadPool(final Configuration conf, final String threadNamePrefix, int regionNumber) {
    int maxThreads = Math.min(regionNumber, conf.getInt("hbase.hregion.open.and.init.threads.max", 16));
    ThreadPoolExecutor regionOpenAndInitThreadPool = Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, new ThreadFactoryBuilder().setNameFormat(threadNamePrefix + "-pool-%d").setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
    return regionOpenAndInitThreadPool;
}
Also used : ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor)
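
Threads.getBoundedCachedThreadPool is an HBase helper; a rough plain-JDK equivalent of what is being asked for here (a sketch, not the actual HBase implementation) is a ThreadPoolExecutor with core == max and core-thread timeout enabled, so the pool is bounded but shrinks when idle:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class BoundedCachedPoolSketch {
    // At most maxThreads threads; idle core threads time out after 30s, so a
    // quiet pool shrinks back to zero instead of pinning threads forever.
    static ThreadPoolExecutor boundedCachedPool(int maxThreads, String prefix) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(maxThreads, maxThreads,
            30L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
            new ThreadFactoryBuilder().setNameFormat(prefix + "-pool-%d")
                .setDaemon(true).build());
        pool.allowCoreThreadTimeOut(true);
        return pool;
    }
}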

Example 9 with ThreadFactoryBuilder

Use of org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.

From the class TestRegionServerReportForDuty, method testReportForDutyWithRSRpcRetry.

/**
 * Tests region server reportForDuty with RS RPC retry.
 */
@Test
public void testReportForDutyWithRSRpcRetry() throws Exception {
    ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder().setNameFormat("RSDelayedStart-pool-%d").setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
    // Start a master and wait for it to become the active/primary master.
    // Use a random unique port
    cluster.getConfiguration().setInt(HConstants.MASTER_PORT, HBaseTestingUtil.randomFreePort());
    // Override the default RS RPC retry interval of 100ms to 300ms
    cluster.getConfiguration().setLong("hbase.regionserver.rpc.retry.interval", 300);
    cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
    cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, 1);
    master = cluster.addMaster();
    rs = cluster.addRegionServer();
    LOG.debug("Starting master: " + master.getMaster().getServerName());
    master.start();
    // Delay the RS start so that the meta assignment fails on the first attempt and goes into the retry block
    scheduledThreadPoolExecutor.schedule(new Runnable() {

        @Override
        public void run() {
            rs.start();
        }
    }, 1000, TimeUnit.MILLISECONDS);
    waitForClusterOnline(master);
}
Also used : ScheduledThreadPoolExecutor(java.util.concurrent.ScheduledThreadPoolExecutor) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder) Test(org.junit.Test)
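
The delayed-start trick is reusable outside this test: a one-thread daemon scheduler fires an action after a fixed delay so one component starts after another. A minimal sketch with hypothetical names:

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class DelayedStartSketch {
    public static void main(String[] args) throws Exception {
        ScheduledThreadPoolExecutor scheduler = new ScheduledThreadPoolExecutor(1,
            new ThreadFactoryBuilder().setNameFormat("delayed-start-%d").setDaemon(true).build());
        // Start the dependent component one second after the coordinator.
        scheduler.schedule(() -> System.out.println("starting region server"),
            1000, TimeUnit.MILLISECONDS);
        Thread.sleep(1500); // give the daemon thread time to fire before the JVM exits
        scheduler.shutdown();
    }
}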

Example 10 with ThreadFactoryBuilder

Use of org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder in project hbase by apache.

From the class FSUtils, method getRegionLocalityMappingFromFS.

/**
 * Scans the root path of the file system to get either the mapping between
 * the region name and its best-locality region server, or the degree of
 * locality of each region on each of the servers holding at least one block
 * of that region. The output map parameter is optional.
 *
 * @param conf
 *          the configuration to use
 * @param desiredTable
 *          the table you wish to scan locality for
 * @param threadPoolSize
 *          the thread pool size to use
 * @param regionDegreeLocalityMapping
 *          the map into which to put the locality degree mapping or null,
 *          must be a thread-safe implementation
 * @throws IOException
 *           in case of file system errors or interrupts
 */
private static void getRegionLocalityMappingFromFS(final Configuration conf, final String desiredTable, int threadPoolSize, final Map<String, Map<String, Float>> regionDegreeLocalityMapping) throws IOException {
    final FileSystem fs = FileSystem.get(conf);
    final Path rootPath = CommonFSUtils.getRootDir(conf);
    final long startTime = EnvironmentEdgeManager.currentTime();
    final Path queryPath;
    // The table files are in ${hbase.rootdir}/data/<namespace>/<table>/*
    if (null == desiredTable) {
        queryPath = new Path(new Path(rootPath, HConstants.BASE_NAMESPACE_DIR).toString() + "/*/*/*/");
    } else {
        queryPath = new Path(CommonFSUtils.getTableDir(rootPath, TableName.valueOf(desiredTable)).toString() + "/*/");
    }
    // reject all paths that are not appropriate
    PathFilter pathFilter = new PathFilter() {

        @Override
        public boolean accept(Path path) {
            // the last path component should be a region name, though globbing may pick up noise
            if (null == path) {
                return false;
            }
            // no parent?
            Path parent = path.getParent();
            if (null == parent) {
                return false;
            }
            String regionName = path.getName();
            if (null == regionName) {
                return false;
            }
            if (!regionName.toLowerCase(Locale.ROOT).matches("[0-9a-f]+")) {
                return false;
            }
            return true;
        }
    };
    FileStatus[] statusList = fs.globStatus(queryPath, pathFilter);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Query Path: {} ; # list of files: {}", queryPath, Arrays.toString(statusList));
    }
    if (null == statusList) {
        return;
    }
    // lower the number of threads in case we have very few expected regions
    threadPoolSize = Math.min(threadPoolSize, statusList.length);
    // run in multiple threads
    final ExecutorService tpe = Executors.newFixedThreadPool(threadPoolSize, new ThreadFactoryBuilder().setNameFormat("FSRegionQuery-pool-%d").setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
    try {
        // ignore all file status items that are not of interest
        for (FileStatus regionStatus : statusList) {
            if (null == regionStatus || !regionStatus.isDirectory()) {
                continue;
            }
            final Path regionPath = regionStatus.getPath();
            if (null != regionPath) {
                tpe.execute(new FSRegionScanner(fs, regionPath, null, regionDegreeLocalityMapping));
            }
        }
    } finally {
        tpe.shutdown();
        final long threadWakeFrequency = (long) conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, HConstants.DEFAULT_THREAD_WAKE_FREQUENCY);
        try {
            // wait until the pool terminates, either naturally or due to exceptions in the worker threads
            while (!tpe.awaitTermination(threadWakeFrequency, TimeUnit.MILLISECONDS)) {
                // print a rough progress estimate, so as not to introduce an AtomicInteger
                LOG.info("Locality checking is underway: { Scanned Regions : " + ((ThreadPoolExecutor) tpe).getCompletedTaskCount() + "/" + ((ThreadPoolExecutor) tpe).getTaskCount() + " }");
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
    }
    long overhead = EnvironmentEdgeManager.currentTime() - startTime;
    LOG.info("Scan DFS for locality info takes {}ms", overhead);
}
Also used : Path(org.apache.hadoop.fs.Path) InterruptedIOException(java.io.InterruptedIOException) PathFilter(org.apache.hadoop.fs.PathFilter) FileStatus(org.apache.hadoop.fs.FileStatus) FileSystem(org.apache.hadoop.fs.FileSystem) HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ExecutorService(java.util.concurrent.ExecutorService) ThreadFactoryBuilder(org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder)
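
The shutdown idiom at the end is worth isolating: call shutdown(), then poll awaitTermination in a loop and report progress from the pool's own task counters instead of adding an AtomicInteger. A minimal standalone sketch (names are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class ProgressAwaitSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4,
            new ThreadFactoryBuilder().setNameFormat("scan-pool-%d").setDaemon(true).build());
        for (int i = 0; i < 100; i++) {
            pool.execute(() -> {
                try {
                    Thread.sleep(50); // stand-in for scanning one region directory
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                }
            });
        }
        pool.shutdown(); // stop accepting tasks; queued work still runs
        while (!pool.awaitTermination(1, TimeUnit.SECONDS)) {
            ThreadPoolExecutor tpe = (ThreadPoolExecutor) pool;
            // Rough progress from the executor's own counters, no extra state needed.
            System.out.println("scanned " + tpe.getCompletedTaskCount() + "/" + tpe.getTaskCount());
        }
    }
}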

Aggregations

ThreadFactoryBuilder (org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder): 25
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 9
ExecutorService (java.util.concurrent.ExecutorService): 7
IOException (java.io.IOException): 6
Configuration (org.apache.hadoop.conf.Configuration): 6
Future (java.util.concurrent.Future): 5
TableName (org.apache.hadoop.hbase.TableName): 5
BeforeClass (org.junit.BeforeClass): 5
ExecutionException (java.util.concurrent.ExecutionException): 4
Executors (java.util.concurrent.Executors): 4
TimeUnit (java.util.concurrent.TimeUnit): 4
InterruptedIOException (java.io.InterruptedIOException): 3
Arrays (java.util.Arrays): 3
Path (org.apache.hadoop.fs.Path): 3
Test (org.junit.Test): 3
ArrayList (java.util.ArrayList): 2
NavigableMap (java.util.NavigableMap): 2
Random (java.util.Random): 2
Set (java.util.Set): 2
SortedSet (java.util.SortedSet): 2