Use of org.apache.hadoop.hbase.ChoreService in project hbase by apache.
The class TestHeapMemoryManager, method testWhenClusterIsReadHeavy.
@Test
public void testWhenClusterIsReadHeavy() throws Exception {
BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
Configuration conf = HBaseConfiguration.create();
conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_LOWER_LIMIT_KEY, 0.7f);
conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.75f);
conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.10f);
conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f);
conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.05f);
conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub(conf);
MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
// Empty memstore but nearly filled block cache
blockCache.setTestBlockSize((long) (maxHeapSize * 0.4 * 0.8));
regionServerAccounting.setTestMemstoreSize(0);
// Let the system start with default values for memstore heap and block cache size.
HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher, new RegionServerStub(conf), new RegionServerAccountingStub(conf));
long oldMemstoreHeapSize = memStoreFlusher.memstoreSize;
long oldBlockCacheSize = blockCache.maxSize;
long oldMemstoreLowerMarkSize = 7 * oldMemstoreHeapSize / 10;
long maxTuneSize = oldMemstoreHeapSize - (oldMemstoreLowerMarkSize + oldMemstoreHeapSize) / 2;
float maxStepValue = (maxTuneSize * 1.0f) / oldMemstoreHeapSize;
maxStepValue = maxStepValue > DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE ? DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE : maxStepValue;
final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
heapMemoryManager.start(choreService);
blockCache.evictBlock(null);
blockCache.evictBlock(null);
blockCache.evictBlock(null);
// Allow the tuner to run once and make the necessary memory adjustments
waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
assertHeapSpaceDelta(-maxStepValue, oldMemstoreHeapSize, memStoreFlusher.memstoreSize);
assertHeapSpaceDelta(maxStepValue, oldBlockCacheSize, blockCache.maxSize);
oldMemstoreHeapSize = memStoreFlusher.memstoreSize;
oldBlockCacheSize = blockCache.maxSize;
oldMemstoreLowerMarkSize = 7 * oldMemstoreHeapSize / 10;
maxTuneSize = oldMemstoreHeapSize - (oldMemstoreLowerMarkSize + oldMemstoreHeapSize) / 2;
maxStepValue = (maxTuneSize * 1.0f) / oldMemstoreHeapSize;
maxStepValue = maxStepValue > DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE ? DefaultHeapMemoryTuner.DEFAULT_MAX_STEP_VALUE : maxStepValue;
// Do some more evictions before the next run of HeapMemoryTuner
blockCache.evictBlock(null);
// Allow the tuner to run once and make the necessary memory adjustments
waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
assertHeapSpaceDelta(-maxStepValue, oldMemstoreHeapSize, memStoreFlusher.memstoreSize);
assertHeapSpaceDelta(maxStepValue, oldBlockCacheSize, blockCache.maxSize);
}
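The test above never calls the tuner directly; it hands the HeapMemoryManager to a ChoreService via heapMemoryManager.start(choreService) and waits for the scheduled chore to fire. A minimal, self-contained sketch of that same ChoreService pattern, with an illustrative chore of its own (the class name, chore name, and thread-pool prefix below are hypothetical, not taken from the test), could look like this:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreServiceSketch {
  public static void main(String[] args) throws Exception {
    // A Stoppable lets the chore's owner signal shutdown; here it is a trivial flag.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService choreService = new ChoreService("EXAMPLE_SERVER_NAME");
    // The period is interpreted in milliseconds by default, matching the 1000 ms tuner period set above.
    ScheduledChore chore = new ScheduledChore("example-chore", stopper, 1000) {
      @Override
      protected void chore() {
        // Periodic work goes here; HeapMemoryManager's internal chore performs one tuning step.
        System.out.println("chore ran");
      }
    };
    choreService.scheduleChore(chore);
    Thread.sleep(3000);         // let the chore fire a few times
    stopper.stop("done");       // mark the owner stopped so the chore stops doing work
    choreService.shutdown();    // stop the executor backing all scheduled chores
  }
}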
Use of org.apache.hadoop.hbase.ChoreService in project hbase by apache.
The class TestZooKeeperTableArchiveClient, method runCleaner.
/**
 * Run the given cleaner chore and block until it has checked every file.
 * @param cleaner the HFileCleaner chore to schedule
 * @param finished latch counted down once the cleaner has checked all the files
 * @param stop handle used to stop the cleaner after the latch is released
 */
private void runCleaner(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop) throws InterruptedException {
final ChoreService choreService = new ChoreService("CLEANER_SERVER_NAME");
// run the cleaner
choreService.scheduleChore(cleaner);
// wait for the cleaner to check all the files
finished.await();
// stop the cleaner
stop.stop("");
}
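runCleaner blocks on a CountDownLatch that some other part of the test counts down, one count per check the cleaner performs (in these tests the latch comes from setupCleanerWatching, which wraps the cleaner delegates). A plain-Java sketch of that hand-off, with every name below made up for illustration:

import java.util.concurrent.CountDownLatch;

public class LatchHandOffSketch {
  public static void main(String[] args) throws InterruptedException {
    final int expectedChecks = 3;                         // stand-in for files.size() + 3 in the test below
    final CountDownLatch finished = new CountDownLatch(expectedChecks);
    Thread cleanerStandIn = new Thread(() -> {
      for (int i = 0; i < expectedChecks; i++) {
        // ... check one file, as the watched cleaner delegate would ...
        finished.countDown();                             // one count per completed check
      }
    });
    cleanerStandIn.start();
    finished.await();                                     // the same blocking call runCleaner makes
    System.out.println("all checks observed, safe to stop the cleaner");
  }
}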
Use of org.apache.hadoop.hbase.ChoreService in project hbase by apache.
The class TestZooKeeperTableArchiveClient, method testMultipleTables.
/**
* Test archiving/cleaning across multiple tables, where some are retained, and others aren't
* @throws Exception on failure
*/
@Test(timeout = 300000)
public void testMultipleTables() throws Exception {
createArchiveDirectory();
String otherTable = "otherTable";
FileSystem fs = UTIL.getTestFileSystem();
Path archiveDir = getArchiveDir();
Path tableDir = getTableDir(STRING_TABLE_NAME);
Path otherTableDir = getTableDir(otherTable);
// register cleanup for the created directories
toCleanup.add(archiveDir);
toCleanup.add(tableDir);
toCleanup.add(otherTableDir);
Configuration conf = UTIL.getConfiguration();
// setup the delegate
Stoppable stop = new StoppableImplementation();
final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
// create the region
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);
List<Region> regions = new ArrayList<>();
regions.add(region);
when(rss.getOnlineRegions()).thenReturn(regions);
final CompactedHFilesDischarger compactionCleaner = new CompactedHFilesDischarger(100, stop, rss, false);
loadFlushAndCompact(region, TEST_FAM);
compactionCleaner.chore();
// create another table that we don't archive
hcd = new HColumnDescriptor(TEST_FAM);
HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd);
regions = new ArrayList<>();
regions.add(otherRegion);
when(rss.getOnlineRegions()).thenReturn(regions);
final CompactedHFilesDischarger compactionCleaner1 = new CompactedHFilesDischarger(100, stop, rss, false);
loadFlushAndCompact(otherRegion, TEST_FAM);
compactionCleaner1.chore();
// get the current hfiles in the archive directory
// Should be archived
List<Path> files = getAllFiles(fs, archiveDir);
if (files == null) {
FSUtils.logFileSystemState(fs, archiveDir, LOG);
throw new RuntimeException("Didn't load archive any files!");
}
// make sure we have files from both tables
int initialCountForPrimary = 0;
int initialCountForOtherTable = 0;
for (Path file : files) {
String tableName = file.getParent().getParent().getParent().getName();
// check to which table this file belongs
if (tableName.equals(otherTable))
initialCountForOtherTable++;
else if (tableName.equals(STRING_TABLE_NAME))
initialCountForPrimary++;
}
assertTrue("Didn't archive files for:" + STRING_TABLE_NAME, initialCountForPrimary > 0);
assertTrue("Didn't archive files for:" + otherTable, initialCountForOtherTable > 0);
// run the cleaners, checking each of the directories and files in 'otherTable' (which should be
// deleted and therefore need to be checked) and the files in 'table' (which should be retained)
CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size() + 3);
// run the cleaner
choreService.scheduleChore(cleaner);
// wait for the cleaner to check all the files
finished.await();
// stop the cleaner
stop.stop("");
// we know the cleaner ran, so now check all the files again to make sure they are still there
List<Path> archivedFiles = getAllFiles(fs, archiveDir);
int archivedForPrimary = 0;
for (Path file : archivedFiles) {
String tableName = file.getParent().getParent().getParent().getName();
// ensure we don't have files from the non-archived table
assertFalse("Have a file from the non-archived table: " + file, tableName.equals(otherTable));
if (tableName.equals(STRING_TABLE_NAME))
archivedForPrimary++;
}
assertEquals("Not all archived files for the primary table were retained.", initialCountForPrimary, archivedForPrimary);
// but we still have the archive directory
assertTrue("Archive directory was deleted via archiver", fs.exists(archiveDir));
}
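The per-table counting above recovers the table name by walking three levels up from each archived HFile, which assumes the archive is laid out as <archiveDir>/<table>/<region>/<family>/<hfile>, the layout this test's helpers produce; other HBase layouts (for example, namespaced data directories under the archive) would need a different walk. A small sketch of that path arithmetic, using a made-up path:

import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  public static void main(String[] args) {
    // Hypothetical archived HFile path following <archiveDir>/<table>/<region>/<family>/<hfile>.
    Path file = new Path("/hbase/.archive/testTable/region1/fam/hfile1");
    String family = file.getParent().getName();                             // "fam"
    String region = file.getParent().getParent().getName();                 // "region1"
    String table = file.getParent().getParent().getParent().getName();      // "testTable"
    System.out.println(table + "/" + region + "/" + family);
  }
}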