Use of org.apache.hadoop.hbase.io.hfile.LruAdaptiveBlockCache.EvictionThread in project hbase by apache.
In class TestLruAdaptiveBlockCache, method testCacheEvictionThreadSafe:
@Test
public void testCacheEvictionThreadSafe() throws Exception {
  long maxSize = 100000;
  int numBlocks = 9;
  int testRuns = 10;
  final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
  assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
  final Configuration conf = HBaseConfiguration.create();
  final LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize);
  EvictionThread evictionThread = cache.getEvictionThread();
  assertNotNull(evictionThread);
  // Make sure the eviction thread has entered its run method
  while (!evictionThread.isEnteringRun()) {
    Thread.sleep(1000);
  }
  final String hfileName = "hfile";
  int threads = 10;
  final int blocksPerThread = 5 * numBlocks;
  for (int run = 0; run != testRuns; ++run) {
    final AtomicInteger blockCount = new AtomicInteger(0);
    ExecutorService service = Executors.newFixedThreadPool(threads);
    for (int i = 0; i != threads; ++i) {
      service.execute(() -> {
        // Keep caching blocks until this thread has added its share and an
        // eviction is under way, then evict everything for this hfile.
        for (int blockIndex = 0; blockIndex < blocksPerThread
            || (!cache.isEvictionInProgress()); ++blockIndex) {
          CachedItem block =
            new CachedItem(hfileName, (int) blockSize, blockCount.getAndIncrement());
          boolean inMemory = Math.random() > 0.5;
          cache.cacheBlock(block.cacheKey, block, inMemory);
        }
        cache.evictBlocksByHfileName(hfileName);
      });
    }
    service.shutdown();
    // The test may fail here if the evict thread frees the blocks too fast
    service.awaitTermination(10, TimeUnit.MINUTES);
    Waiter.waitFor(conf, 10000, 100, new ExplainingPredicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return cache.getBlockCount() == 0;
      }

      @Override
      public String explainFailure() throws Exception {
        return "Cache block count failed to return to 0";
      }
    });
    assertEquals(0, cache.getBlockCount());
    assertEquals(cache.getOverhead(), cache.getCurrentSize());
  }
}
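All of the examples on this page call the test helper calculateBlockSizeDefault, which the aggregation does not reproduce. A minimal sketch of its intent, assuming it only needs to satisfy the blockSize * numBlocks <= maxSize assertion above (the real helper in the HBase test class also subtracts ClassSize-based map and per-block overhead; the 1% haircut below is a stand-in for that):

private static long calculateBlockSizeDefaultSketch(long maxSize, int numBlocks) {
  long roughBlockSize = maxSize / numBlocks; // even split across the blocks
  // Shave off 1% as a placeholder for the cache's fixed and per-block overhead,
  // so numBlocks blocks of this size still fit under maxSize.
  return (long) Math.floor(roughBlockSize * 0.99f);
}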
Use of org.apache.hadoop.hbase.io.hfile.LruAdaptiveBlockCache.EvictionThread in project hbase by apache.
In class TestLruAdaptiveBlockCache, method testBackgroundEvictionThread:
@Test
public void testBackgroundEvictionThread() throws Exception {
  long maxSize = 100000;
  int numBlocks = 9;
  long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
  assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
  LruAdaptiveBlockCache cache = new LruAdaptiveBlockCache(maxSize, blockSize);
  EvictionThread evictionThread = cache.getEvictionThread();
  assertNotNull(evictionThread);
  CachedItem[] blocks = generateFixedBlocks(numBlocks + 1, blockSize, "block");
  // Make sure eviction thread has entered run method
  while (!evictionThread.isEnteringRun()) {
    Thread.sleep(1);
  }
  // Add all the blocks
  for (CachedItem block : blocks) {
    cache.cacheBlock(block.cacheKey, block);
  }
  // Wait until at least one eviction has run
  int n = 0;
  while (cache.getStats().getEvictionCount() == 0) {
    Thread.sleep(200);
    assertTrue("Eviction never happened.", n++ < 20);
  }
  // Let the cache stabilize.
  // On some systems, the cache will run multiple evictions before it attains
  // steady-state. For instance, after populating the cache with 10 blocks,
  // the first eviction evicts a single block and then a second eviction
  // evicts another. I think this is due to the delta between minSize and
  // acceptableSize, combined with variance between object overhead on
  // different environments.
  n = 0;
  for (long prevCnt = 0 /* < number of blocks added */, curCnt = cache.getBlockCount();
      prevCnt != curCnt; prevCnt = curCnt, curCnt = cache.getBlockCount()) {
    Thread.sleep(200);
    assertTrue("Cache never stabilized.", n++ < 20);
  }
  long evictionCount = cache.getStats().getEvictionCount();
  assertTrue(evictionCount >= 1);
  System.out.println("Background Evictions run: " + evictionCount);
}
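The generateFixedBlocks helper is likewise omitted by the aggregation. A hypothetical sketch, assuming only the CachedItem(String, int, int) constructor already used in the other examples: producing numBlocks equally sized items with distinct cache keys means that caching one more block than fits is guaranteed to overflow the cache and wake the eviction thread.

private CachedItem[] generateFixedBlocks(int numBlocks, long blockSize, String pfx) {
  CachedItem[] blocks = new CachedItem[numBlocks];
  for (int i = 0; i < numBlocks; i++) {
    // A distinct hfile name per item gives each block its own BlockCacheKey.
    blocks[i] = new CachedItem(pfx + i, (int) blockSize, i);
  }
  return blocks;
}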
Use of org.apache.hadoop.hbase.io.hfile.LruAdaptiveBlockCache.EvictionThread in project hbase by apache.
In class TestLruAdaptiveBlockCache, method testSkipCacheDataBlocksInteral:
public void testSkipCacheDataBlocksInteral(int heavyEvictionCountLimit) throws Exception {
  long maxSize = 100000000;
  int numBlocks = 100000;
  final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
  assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
  final LruAdaptiveBlockCache cache =
    new LruAdaptiveBlockCache(maxSize, blockSize, true,
      (int) Math.ceil(1.2 * maxSize / blockSize),
      LruAdaptiveBlockCache.DEFAULT_LOAD_FACTOR,
      LruAdaptiveBlockCache.DEFAULT_CONCURRENCY_LEVEL,
      0.5f, // min
      0.99f, // acceptable
      0.33f, // single
      0.33f, // multi
      0.34f, // memory
      1.2f, // limit
      false, maxSize, heavyEvictionCountLimit, 200, 0.01f);
  EvictionThread evictionThread = cache.getEvictionThread();
  assertNotNull(evictionThread);
  while (!evictionThread.isEnteringRun()) {
    Thread.sleep(1);
  }
  final String hfileName = "hfile";
  for (int blockIndex = 0; blockIndex <= numBlocks * 3000; ++blockIndex) {
    CachedItem block = new CachedItem(hfileName, (int) blockSize, blockIndex);
    cache.cacheBlock(block.cacheKey, block, false);
    if (cache.getCacheDataBlockPercent() < 70) {
      // enough for the test
      break;
    }
  }
  evictionThread.evict();
  Thread.sleep(100);
  if (heavyEvictionCountLimit == 0) {
    // Check that the offset (its last two digits) of every cached block is
    // below the percent threshold; that means some blocks were never put
    // into the BlockCache at all.
    assertTrue(cache.getCacheDataBlockPercent() < 90);
    for (BlockCacheKey key : cache.getMapForTests().keySet()) {
      assertTrue(!(key.getOffset() % 100 > 90));
    }
  } else {
    // Check that auto-scaling did not kick in (all blocks went into the BlockCache)
    assertTrue(cache.getCacheDataBlockPercent() == 100);
    int counter = 0;
    for (BlockCacheKey key : cache.getMapForTests().keySet()) {
      if (key.getOffset() % 100 > 90) {
        counter++;
      }
    }
    assertTrue(counter > 1000);
  }
  evictionThread.shutdown();
}
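Note that this method carries no @Test annotation: it is a parameterized helper, and the aggregation omits its callers. Hypothetical drivers for the two branches could look like the following (the method names and the non-zero limit value here are illustrative, not taken from the HBase source):

@Test
public void testSkipCacheDataBlocks() throws Exception {
  // Limit 0: the adaptive skip logic engages immediately, so the cache
  // should end up skipping some data blocks (percent < 90).
  testSkipCacheDataBlocksInteral(0);
}

@Test
public void testNotSkipCacheDataBlocks() throws Exception {
  // A high limit keeps auto-scaling dormant for this short run, so every
  // block should land in the BlockCache (percent == 100).
  testSkipCacheDataBlocksInteral(100);
}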