
Example 16 with ChoreService

use of org.apache.hadoop.hbase.ChoreService in project hbase by apache.

the class TestHeapMemoryManager method testWhenClusterIsReadHeavy.

@Test
public void testWhenClusterIsReadHeavy() throws Exception {
    BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_LOWER_LIMIT_KEY, 0.7f);
    conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.75f);
    conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.10f);
    conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f);
    conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.05f);
    conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
    conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
    RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub(conf);
    MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
    // Empty memstore but nearly filled block cache
    blockCache.setTestBlockSize((long) (maxHeapSize * 0.4 * 0.8));
    regionServerAccounting.setTestMemstoreSize(0);
    // Let the system start with default values for memstore heap and block cache size.
    HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher, new RegionServerStub(conf), regionServerAccounting);
    long oldMemstoreHeapSize = memStoreFlusher.memstoreSize;
    long oldBlockCacheSize = blockCache.maxSize;
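    // maxTuneSize below is half the gap between the memstore's lower-limit mark
    // (70% of its size, per MEMSTORE_SIZE_LOWER_LIMIT_KEY = 0.7 above) and the full
    // memstore size; the resulting step fraction is capped at DEFAULT_MAX_STEP_VALUE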
    long oldMemstoreLowerMarkSize = 7 * oldMemstoreHeapSize / 10;
    long maxTuneSize = oldMemstoreHeapSize - (oldMemstoreLowerMarkSize + oldMemstoreHeapSize) / 2;
    float maxStepValue = (maxTuneSize * 1.0f) / oldMemstoreHeapSize;
    maxStepValue = Math.min(maxStepValue, DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE);
    final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
    heapMemoryManager.start(choreService);
    blockCache.evictBlock(null);
    blockCache.evictBlock(null);
    blockCache.evictBlock(null);
    // Allow the tuner to run once and make the necessary adjustments (memstore down, block cache up)
    waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
    assertHeapSpaceDelta(-maxStepValue, oldMemstoreHeapSize, memStoreFlusher.memstoreSize);
    assertHeapSpaceDelta(maxStepValue, oldBlockCacheSize, blockCache.maxSize);
    oldMemstoreHeapSize = memStoreFlusher.memstoreSize;
    oldBlockCacheSize = blockCache.maxSize;
    oldMemstoreLowerMarkSize = 7 * oldMemstoreHeapSize / 10;
    maxTuneSize = oldMemstoreHeapSize - (oldMemstoreLowerMarkSize + oldMemstoreHeapSize) / 2;
    maxStepValue = (maxTuneSize * 1.0f) / oldMemstoreHeapSize;
    maxStepValue = Math.min(maxStepValue, DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE);
    // Do some more evictions before the next run of HeapMemoryTuner
    blockCache.evictBlock(null);
    // Allow the tuner to run once and make the necessary adjustments (memstore down, block cache up)
    waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
    assertHeapSpaceDelta(-maxStepValue, oldMemstoreHeapSize, memStoreFlusher.memstoreSize);
    assertHeapSpaceDelta(maxStepValue, oldBlockCacheSize, blockCache.maxSize);
}
Also used : ChoreService(org.apache.hadoop.hbase.ChoreService) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Test(org.junit.Test)
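
The test above leans on the ChoreService lifecycle: heapMemoryManager.start(choreService) registers the tuner as a ScheduledChore, and the service runs it on its own thread pool at the configured period (1000 ms via HBASE_RS_HEAP_MEMORY_TUNER_PERIOD). For reference, here is a minimal sketch of that lifecycle using only the public ChoreService, ScheduledChore, and Stoppable APIs; the chore name, period, and printed message are illustrative, not taken from the project.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreServiceLifecycleSketch {
    public static void main(String[] args) throws InterruptedException {
        // Minimal Stoppable; the chore consults it to decide whether to keep running.
        Stoppable stopper = new Stoppable() {
            private volatile boolean stopped = false;
            @Override
            public void stop(String why) {
                stopped = true;
            }
            @Override
            public boolean isStopped() {
                return stopped;
            }
        };
        // The constructor argument names the threads of the service's pool.
        ChoreService choreService = new ChoreService("EXAMPLE_SERVER_NAME");
        // chore() is invoked every 1000 ms once the chore is scheduled.
        ScheduledChore chore = new ScheduledChore("exampleChore", stopper, 1000) {
            @Override
            protected void chore() {
                System.out.println("chore ran");
            }
        };
        choreService.scheduleChore(chore);
        Thread.sleep(3000);
        // Stop the chore, cancel its schedule, and release the service's threads.
        stopper.stop("done");
        chore.cancel();
        choreService.shutdown();
    }
}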

Example 17 with ChoreService

use of org.apache.hadoop.hbase.ChoreService in project hbase by apache.

the class TestZooKeeperTableArchiveClient method runCleaner.

/**
   * Runs the given cleaner on a fresh ChoreService and blocks until it has checked all the files.
   * @param cleaner the HFile cleaner chore to schedule
   * @param finished latch counted down once the cleaner has checked all the files
   * @param stop used to stop the cleaner once the latch releases
   */
private void runCleaner(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop) throws InterruptedException {
    final ChoreService choreService = new ChoreService("CLEANER_SERVER_NAME");
    // run the cleaner
    choreService.scheduleChore(cleaner);
    // wait for the cleaner to check all the files
    finished.await();
    // stop the cleaner
    stop.stop("");
}
Also used : ChoreService(org.apache.hadoop.hbase.ChoreService)
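
One detail worth noting: runCleaner starts a fresh ChoreService per call but never shuts it down, which is tolerable in a short-lived test JVM but would leak the pool's threads elsewhere. A variant that releases the service when done might look like the following sketch (the method name is hypothetical; this is not the project's code):

private void runCleanerAndShutdown(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop) throws InterruptedException {
    final ChoreService choreService = new ChoreService("CLEANER_SERVER_NAME");
    try {
        // run the cleaner
        choreService.scheduleChore(cleaner);
        // wait for the cleaner to check all the files
        finished.await();
        // stop the cleaner
        stop.stop("");
    } finally {
        // release the chore threads regardless of how we exit
        choreService.shutdown();
    }
}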

Example 18 with ChoreService

use of org.apache.hadoop.hbase.ChoreService in project hbase by apache.

the class TestZooKeeperTableArchiveClient method testMultipleTables.

/**
   * Test archiving/cleaning across multiple tables, where some are retained, and others aren't
   * @throws Exception on failure
   */
@Test(timeout = 300000)
public void testMultipleTables() throws Exception {
    createArchiveDirectory();
    String otherTable = "otherTable";
    FileSystem fs = UTIL.getTestFileSystem();
    Path archiveDir = getArchiveDir();
    Path tableDir = getTableDir(STRING_TABLE_NAME);
    Path otherTableDir = getTableDir(otherTable);
    // register cleanup for the created directories
    toCleanup.add(archiveDir);
    toCleanup.add(tableDir);
    toCleanup.add(otherTableDir);
    Configuration conf = UTIL.getConfiguration();
    // setup the delegate
    Stoppable stop = new StoppableImplementation();
    final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
    HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
    List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
    final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
    // create the region
    HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
    HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
    List<Region> regions = new ArrayList<>();
    regions.add(region);
    when(rss.getOnlineRegions()).thenReturn(regions);
    final CompactedHFilesDischarger compactionCleaner = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(region, TEST_FAM);
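    // calling chore() directly runs the discharger once, synchronously, on the test
    // thread instead of scheduling it periodically on a ChoreService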
    compactionCleaner.chore();
    // create another table that we don't archive
    hcd = new HColumnDescriptor(TEST_FAM);
    HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd);
    regions = new ArrayList<>();
    regions.add(otherRegion);
    when(rss.getOnlineRegions()).thenReturn(regions);
    final CompactedHFilesDischarger compactionCleaner1 = new CompactedHFilesDischarger(100, stop, rss, false);
    loadFlushAndCompact(otherRegion, TEST_FAM);
    compactionCleaner1.chore();
    // get the current hfiles in the archive directory; they should have been archived
    List<Path> files = getAllFiles(fs, archiveDir);
    if (files == null) {
        FSUtils.logFileSystemState(fs, archiveDir, LOG);
        throw new RuntimeException("Didn't archive any files!");
    }
    // make sure we have files from both tables
    int initialCountForPrimary = 0;
    int initialCountForOtherTable = 0;
    for (Path file : files) {
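        // the archive layout places each file at <table>/<region>/<family>/<hfile>
        // under the archive directory, so three getParent() hops up from the file
        // yield the table directory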
        String tableName = file.getParent().getParent().getParent().getName();
        // check to which table this file belongs
        if (tableName.equals(otherTable))
            initialCountForOtherTable++;
        else if (tableName.equals(STRING_TABLE_NAME))
            initialCountForPrimary++;
    }
    assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
    assertTrue("Didn't archive files for:" + otherTable, initialCountForOtherTable > 0);
    // run the cleaners, checking for each of the directories + files (both should be deleted and
    // need to be checked) in 'otherTable' and the files (which should be retained) in the 'table'
    CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size() + 3);
    // run the cleaner
    choreService.scheduleChore(cleaner);
    // wait for the cleaner to check all the files
    finished.await();
    // stop the cleaner
    stop.stop("");
    // now that we know the cleaner ran, check all the files again to make sure they are still there
    List<Path> archivedFiles = getAllFiles(fs, archiveDir);
    int archivedForPrimary = 0;
    for (Path file : archivedFiles) {
        String tableName = file.getParent().getParent().getParent().getName();
        // ensure we don't have files from the non-archived table
        assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
        if (tableName.equals(STRING_TABLE_NAME))
            archivedForPrimary++;
    }
    assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary, archivedForPrimary);
    // but we still have the archive directory
    assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
}
Also used : Path(org.apache.hadoop.fs.Path) BaseHFileCleanerDelegate(org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate) Configuration(org.apache.hadoop.conf.Configuration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) StoppableImplementation(org.apache.hadoop.hbase.util.StoppableImplementation) ArrayList(java.util.ArrayList) Stoppable(org.apache.hadoop.hbase.Stoppable) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) CountDownLatch(java.util.concurrent.CountDownLatch) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) ChoreService(org.apache.hadoop.hbase.ChoreService) CompactedHFilesDischarger(org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger) FileSystem(org.apache.hadoop.fs.FileSystem) Region(org.apache.hadoop.hbase.regionserver.Region) Test(org.junit.Test)
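
The helper setupCleanerWatching is not shown on this page. A plausible shape for it, sketched under the assumption that it wraps the archiving delegate in a Mockito spy and releases the latch once getDeletableFiles has been consulted the expected number of times; the counter logic, matcher, and imports (java.util.concurrent.atomic.AtomicInteger, org.mockito.Mockito) are assumptions, not the project's exact code:

private CountDownLatch setupCleanerWatching(LongTermArchivingHFileCleaner delegate,
        List<BaseHFileCleanerDelegate> cleaners, final int expected) {
    final CountDownLatch finished = new CountDownLatch(1);
    final AtomicInteger calls = new AtomicInteger();
    BaseHFileCleanerDelegate spy = Mockito.spy(delegate);
    Mockito.doAnswer(invocation -> {
        // let the real delegate decide which files are deletable
        Object deletable = invocation.callRealMethod();
        // once the cleaner has consulted the delegate often enough, release the latch
        if (calls.incrementAndGet() >= expected) {
            finished.countDown();
        }
        return deletable;
    }).when(spy).getDeletableFiles(Mockito.anyList());
    // hand the instrumented delegate back to the cleaner chore
    cleaners.set(0, spy);
    return finished;
}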

Aggregations

ChoreService (org.apache.hadoop.hbase.ChoreService): 18 uses
Test (org.junit.Test): 16 uses
Configuration (org.apache.hadoop.conf.Configuration): 15 uses
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 14 uses
ArrayList (java.util.ArrayList): 2 uses
Stoppable (org.apache.hadoop.hbase.Stoppable): 2 uses
StoppableImplementation (org.apache.hadoop.hbase.util.StoppableImplementation): 2 uses
CountDownLatch (java.util.concurrent.CountDownLatch): 1 use
FileSystem (org.apache.hadoop.fs.FileSystem): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 1 use
ScheduledChore (org.apache.hadoop.hbase.ScheduledChore): 1 use
TableName (org.apache.hadoop.hbase.TableName): 1 use
Connection (org.apache.hadoop.hbase.client.Connection): 1 use
Table (org.apache.hadoop.hbase.client.Table): 1 use
BaseHFileCleanerDelegate (org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate): 1 use
HFileCleaner (org.apache.hadoop.hbase.master.cleaner.HFileCleaner): 1 use
CompactedHFilesDischarger (org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger): 1 use
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 1 use
Region (org.apache.hadoop.hbase.regionserver.Region): 1 use