Usage of org.apache.hadoop.hbase.io.hfile.LruBlockCache.EvictionThread in project hbase by apache:
method testCacheEvictionThreadSafe of class TestLruBlockCache.
@Test
public void testCacheEvictionThreadSafe() throws Exception {
  long maxSize = 100000;
  int numBlocks = 9;
  int testRuns = 10;
  final long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
  assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
  final LruBlockCache cache = new LruBlockCache(maxSize, blockSize);
  EvictionThread evictionThread = cache.getEvictionThread();
  assertTrue(evictionThread != null);
  // Make sure the eviction thread has entered its run() loop before we start
  // caching, so evictions can actually race with the workers below.
  while (!evictionThread.isEnteringRun()) {
    Thread.sleep(1);
  }
  final String hfileName = "hfile";
  int threads = 10;
  final int blocksPerThread = 5 * numBlocks;
  for (int run = 0; run != testRuns; ++run) {
    final AtomicInteger blockCount = new AtomicInteger(0);
    ExecutorService service = Executors.newFixedThreadPool(threads);
    for (int i = 0; i != threads; ++i) {
      service.execute(() -> {
        // Keep caching past the quota until an eviction is observed in
        // progress, so each worker is guaranteed to overlap with eviction.
        for (int blockIndex = 0;
            blockIndex < blocksPerThread || (!cache.isEvictionInProgress()); ++blockIndex) {
          CachedItem block =
              new CachedItem(hfileName, (int) blockSize, blockCount.getAndIncrement());
          boolean inMemory = Math.random() > 0.5;
          cache.cacheBlock(block.cacheKey, block, inMemory, false);
        }
        cache.evictBlocksByHfileName(hfileName);
      });
    }
    service.shutdown();
    // The test may fail here if the evict thread frees the blocks too fast.
    // Fail loudly on timeout: asserting while workers are still running would
    // produce a misleading block-count/size failure instead.
    assertTrue("Worker threads did not terminate in time.",
        service.awaitTermination(10, TimeUnit.MINUTES));
    assertEquals(0, cache.getBlockCount());
    assertEquals(cache.getOverhead(), cache.getCurrentSize());
  }
}
Usage of org.apache.hadoop.hbase.io.hfile.LruBlockCache.EvictionThread in project hbase by apache:
method testBackgroundEvictionThread of class TestLruBlockCache.
@Test
public void testBackgroundEvictionThread() throws Exception {
  long maxSize = 100000;
  int numBlocks = 9;
  long blockSize = calculateBlockSizeDefault(maxSize, numBlocks);
  assertTrue("calculateBlockSize appears broken.", blockSize * numBlocks <= maxSize);
  LruBlockCache cache = new LruBlockCache(maxSize, blockSize);
  EvictionThread evictionThread = cache.getEvictionThread();
  assertTrue(evictionThread != null);
  CachedItem[] blocks = generateFixedBlocks(numBlocks + 1, blockSize, "block");
  // Wait for the eviction thread to enter its run() method before caching.
  while (!evictionThread.isEnteringRun()) {
    Thread.sleep(1);
  }
  // Cache one block more than fits, forcing the background thread to evict.
  for (CachedItem block : blocks) {
    cache.cacheBlock(block.cacheKey, block);
  }
  // Poll until the background thread has performed at least one eviction,
  // bailing out after ~4 seconds if nothing happens.
  int attempts = 0;
  while (cache.getStats().getEvictionCount() == 0) {
    Thread.sleep(200);
    assertTrue("Eviction never happened.", attempts++ < 20);
  }
  // Let the cache reach steady state. Some environments need several eviction
  // passes (one block each) before the block count settles — an effect of the
  // delta between minSize and acceptableSize combined with per-environment
  // variance in object overhead.
  attempts = 0;
  long previousCount = 0; // strictly less than the number of blocks added
  long currentCount = cache.getBlockCount();
  while (previousCount != currentCount) {
    Thread.sleep(200);
    assertTrue("Cache never stabilized.", attempts++ < 20);
    previousCount = currentCount;
    currentCount = cache.getBlockCount();
  }
  long evictionCount = cache.getStats().getEvictionCount();
  assertTrue(evictionCount >= 1);
  System.out.println("Background Evictions run: " + evictionCount);
}
Aggregations