Use of org.apache.hadoop.hbase.io.ByteBuffAllocator in the Apache HBase project.
Example: the testReaderWithCombinedBlockCache method of the class TestHFile. The test verifies that, under a CombinedBlockCache, data blocks served from the off-heap BucketCache are shared-memory blocks, non-data blocks served from the on-heap LruBlockCache are not, and every pooled buffer is returned to the allocator once the reader is closed.
/**
 * Test case for HBASE-22127 in CombinedBlockCache
 */
@Test
public void testReaderWithCombinedBlockCache() throws Exception {
  int bufCount = 1024, blockSize = 64 * 1024;
  ByteBuffAllocator alloc = initAllocator(true, bufCount, blockSize, 0);
  fillByteBuffAllocator(alloc, bufCount);
  Path storeFilePath = writeStoreFile();
  // Open the file reader with CombinedBlockCache
  BlockCache combined = initCombinedBlockCache("LRU");
  conf.setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, true);
  CacheConfig cacheConfig = new CacheConfig(conf, null, combined, alloc);
  HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConfig, true, conf);
  long offset = 0;
  while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
    BlockCacheKey key = new BlockCacheKey(storeFilePath.getName(), offset);
    // onDiskBlockSize = -1: the on-disk size is unknown, let the reader determine it.
    HFileBlock block = reader.readBlock(offset, -1, true, true, false, true, null, null);
    offset += block.getOnDiskSizeWithHeader();
    // Read the cached block.
    Cacheable cachedBlock = combined.getBlock(key, false, false, true);
    try {
      Assert.assertNotNull(cachedBlock);
      Assert.assertTrue(cachedBlock instanceof HFileBlock);
      HFileBlock hfb = (HFileBlock) cachedBlock;
      // Data blocks are cached in the BucketCache, so they should be off-heap (shared) blocks.
      if (hfb.getBlockType().isData()) {
        Assert.assertTrue(hfb.isSharedMem());
      } else {
        // Non-data blocks are cached in the LruBlockCache, so they must be on-heap blocks.
        Assert.assertFalse(hfb.isSharedMem());
      }
    } finally {
      cachedBlock.release();
    }
    // Return the block's ByteBuffers back to the allocator.
    block.release();
  }
  reader.close();
  combined.shutdown();
  // After evict-on-close, every pooled buffer should be back in the allocator.
  Assert.assertEquals(bufCount, alloc.getFreeBufferCount());
  alloc.clean();
}
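
The initAllocator and fillByteBuffAllocator calls above are helper methods defined elsewhere in TestHFile and not shown here. For orientation, below is a minimal standalone sketch of creating and exercising a pooled ByteBuffAllocator directly. It assumes the ByteBuffAllocator.create(Configuration, boolean) factory and the MAX_BUFFER_COUNT_KEY / BUFFER_SIZE_KEY / MIN_ALLOCATE_SIZE_KEY configuration constants as they exist in HBase 2.3+; verify the names against the HBase version in use, and note that the class here is illustrative, not part of the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.ByteBuffAllocator;
import org.apache.hadoop.hbase.nio.ByteBuff;

public class ByteBuffAllocatorSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Pool 1024 off-heap 64 KB buffers and serve any request of at least one
    // byte from the pool (roughly what initAllocator(true, 1024, 64 * 1024, 0)
    // is presumed to set up in the test above).
    conf.setInt(ByteBuffAllocator.MAX_BUFFER_COUNT_KEY, 1024);
    conf.setInt(ByteBuffAllocator.BUFFER_SIZE_KEY, 64 * 1024);
    conf.setInt(ByteBuffAllocator.MIN_ALLOCATE_SIZE_KEY, 1);
    ByteBuffAllocator alloc = ByteBuffAllocator.create(conf, true); // true = enable the buffer reservoir

    // A 128 KB request spans two pooled 64 KB buffers behind one ByteBuff.
    ByteBuff buf = alloc.allocate(128 * 1024);
    try {
      buf.put((byte) 1); // relative write into the first pooled buffer
    } finally {
      buf.release(); // reference counting returns the buffers to the pool
    }
    System.out.println("Free buffers after release: " + alloc.getFreeBufferCount());
  }
}

The same release discipline is what makes the final getFreeBufferCount() assertion in the test hold: block.release() in the loop and the evict-on-close pass enabled by EVICT_BLOCKS_ON_CLOSE_KEY together hand every pooled buffer back to the allocator.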