use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
the class TestBlockEvictionFromClient method testBlockEvictionAfterHBASE13082WithCompactionAndFlush.
@Test
public void testBlockEvictionAfterHBASE13082WithCompactionAndFlush()
    throws IOException, InterruptedException {
  // do flush and scan in parallel
  Table table = null;
  try {
    latch = new CountDownLatch(1);
    compactionLatch = new CountDownLatch(1);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create a table with a block size of 1024
    table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024,
      CustomInnerRegionObserverWrapper.class.getName());
    // get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    HStore store = region.getStores().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    BlockCache cache = cacheConf.getBlockCache().get();
    // insert data; 2 rows are added
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
    // Should create one HFile with 2 blocks
    region.flush(true);
    // read the data and expect same blocks, one new hit, no misses
    int refCount = 0;
    // Check how this miss is happening
    // insert a second column, read the row: no new blocks, 3 new hits
    byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    byte[] data2 = Bytes.add(data, data);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    // flush, one new block
    System.out.println("Flushing cache");
    region.flush(true);
    Iterator<CachedBlock> iterator = cache.iterator();
    iterateBlockCache(cache, iterator);
    // Create three sets of scans
    ScanThread[] scanThreads = initiateScan(table, false);
    Thread.sleep(100);
    iterator = cache.iterator();
    boolean usedBlocksFound = false;
    while (iterator.hasNext()) {
      CachedBlock next = iterator.next();
      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
      if (cache instanceof BucketCache) {
        refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
      } else if (cache instanceof CombinedBlockCache) {
        refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
      } else {
        continue;
      }
      if (refCount != 0) {
        // In-use blocks should be referenced by every scan thread
        assertEquals(NO_OF_THREADS, refCount);
        usedBlocksFound = true;
      }
    }
    // Make a put and do a flush
    QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    data2 = Bytes.add(data, data);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    // flush, one new block
    System.out.println("Flushing cache");
    region.flush(true);
    assertTrue("Blocks with non-zero ref count should be found", usedBlocksFound);
    usedBlocksFound = false;
    System.out.println("Compacting");
    assertEquals(3, store.getStorefilesCount());
    store.triggerMajorCompaction();
    region.compact(true);
    // wait 10 seconds max
    waitForStoreFileCount(store, 1, 10000);
    assertEquals(1, store.getStorefilesCount());
    // Even after the compaction is done, some blocks cannot be evicted
    // because the scans are still referencing them
    iterator = cache.iterator();
    while (iterator.hasNext()) {
      CachedBlock next = iterator.next();
      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
      if (cache instanceof BucketCache) {
        refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
      } else if (cache instanceof CombinedBlockCache) {
        refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
      } else {
        continue;
      }
      if (refCount != 0) {
        // Blocks are still referenced by all scan threads, as they are not yet released
        assertEquals(NO_OF_THREADS, refCount);
        usedBlocksFound = true;
      }
    }
    assertTrue("Blocks with non-zero ref count should be found", usedBlocksFound);
    // Should not throw an exception
    compactionLatch.countDown();
    latch.countDown();
    for (ScanThread thread : scanThreads) {
      thread.join();
    }
    // by this time all blocks should have been evicted
    iterator = cache.iterator();
    // Since a flush and a compaction happened after the scans started,
    // ensure that all the original blocks of the compacted file
    // are also removed.
    iterateBlockCache(cache, iterator);
    Result r = table.get(new Get(ROW));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
    // The gets should work on new blocks
    iterator = cache.iterator();
    iterateBlockCache(cache, iterator);
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
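The two scan loops above repeat the same ref-count lookup. A small helper along these lines (hypothetical, not part of the test as posted) could consolidate it; it only relies on the getRpcRefCount methods the test itself already calls:

private static int getRpcRefCount(BlockCache cache, CachedBlock block) {
  BlockCacheKey cacheKey = new BlockCacheKey(block.getFilename(), block.getOffset());
  if (cache instanceof BucketCache) {
    return ((BucketCache) cache).getRpcRefCount(cacheKey);
  } else if (cache instanceof CombinedBlockCache) {
    return ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
  }
  // Other cache implementations do not expose RPC reference counts; treat as unreferenced.
  return 0;
}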
use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
the class TestClientSideRegionScanner method testConfiguredBlockCache.
@Test
public void testConfiguredBlockCache() throws IOException {
  Configuration copyConf = new Configuration(conf);
  // tiny 1 MB fixed cache size
  long blockCacheFixedSize = 1024 * 1024L;
  copyConf.setLong(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, blockCacheFixedSize);
  ClientSideRegionScanner clientSideRegionScanner =
    new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null);
  BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache();
  assertNotNull(blockCache);
  assertTrue(blockCache instanceof IndexOnlyLruBlockCache);
  assertTrue(blockCacheFixedSize == blockCache.getMaxSize());
}
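For context, a minimal usage sketch of the scanner constructed above (hypothetical driver code, reusing the test's constructor arguments; the cell counting is purely illustrative):

ClientSideRegionScanner scanner =
  new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, new Scan(), null);
try {
  int cells = 0;
  // Iterate like any client scanner; next() returns null when the region is exhausted.
  for (Result result = scanner.next(); result != null; result = scanner.next()) {
    cells += result.rawCells().length;
  }
  System.out.println("cells read: " + cells);
} finally {
  scanner.close();
}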
use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
the class TestClientSideRegionScanner method testNoBlockCache.
@Test
public void testNoBlockCache() throws IOException {
  Configuration copyConf = new Configuration(conf);
  copyConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
  ClientSideRegionScanner clientSideRegionScanner =
    new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null);
  BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache();
  assertNull(blockCache);
}
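HFILE_BLOCK_CACHE_SIZE_KEY ("hfile.block.cache.size") is a fraction of the JVM heap, so setting it to 0.0f disables the on-heap cache entirely and getBlockCache() returns null. A rough sketch of the sizing arithmetic (the 0.4 fraction is an assumption based on the default in recent HBase versions):

// Hypothetical illustration of how the heap fraction translates into a cache size.
long maxHeap = Runtime.getRuntime().maxMemory();
float cacheFraction = 0.4f; // assumed default for hfile.block.cache.size
long expectedCacheSize = (long) (maxHeap * cacheFraction);
System.out.println("expected on-heap cache size: " + expectedCacheSize + " bytes");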
use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
the class TestCacheOnWriteInSchema method readStoreFile.
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache().get();
  HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
  sf.initReader();
  HFile.Reader reader = sf.getReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(conf, false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block =
        reader.readBlock(offset, -1, false, true, false, true, null, DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      final BlockType blockType = block.getBlockType();
      if (shouldBeCached != isCached
          && (cowType.blockType1.equals(blockType) || cowType.blockType2.equals(blockType))) {
        throw new AssertionError("shouldBeCached: " + shouldBeCached + "\n"
          + "isCached: " + isCached + "\n"
          + "Test description: " + testDescription + "\n"
          + "block: " + block + "\n"
          + "blockCacheKey: " + blockCacheKey);
      }
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
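Cache-on-write, which this test drives through the store's CacheConfig, is normally switched on per column family in the table schema. A hedged sketch using the HBase 2.x ColumnFamilyDescriptorBuilder API (the family name "f" is illustrative):

ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
  .newBuilder(Bytes.toBytes("f"))
  .setCacheDataOnWrite(true)     // cache data blocks as they are written
  .setCacheIndexesOnWrite(true)  // cache index blocks as they are written
  .setCacheBloomsOnWrite(true)   // cache bloom blocks as they are written
  .build();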
use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
the class TestClearRegionBlockCache method testClearBlockCache.
@Test
public void testClearBlockCache() throws Exception {
  BlockCache blockCache1 = rs1.getBlockCache().get();
  BlockCache blockCache2 = rs2.getBlockCache().get();
  long initialBlockCount1 = blockCache1.getBlockCount();
  long initialBlockCount2 = blockCache2.getBlockCount();
  // scanning will cause blocks to be added to the BlockCache
  scanAllRegionsForRS(rs1);
  assertEquals(blockCache1.getBlockCount() - initialBlockCount1,
    HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
  clearRegionBlockCache(rs1);
  scanAllRegionsForRS(rs2);
  assertEquals(blockCache2.getBlockCount() - initialBlockCount2,
    HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
  clearRegionBlockCache(rs2);
  assertEquals("" + blockCache1.getBlockCount(), initialBlockCount1, blockCache1.getBlockCount());
  assertEquals("" + blockCache2.getBlockCount(), initialBlockCount2, blockCache2.getBlockCount());
}
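The eviction that clearRegionBlockCache exercises here is also reachable from client code through the Admin API in HBase 2.x. A minimal sketch, assuming an open Connection named connection:

try (Admin admin = connection.getAdmin()) {
  // Evicts the cached blocks for all regions of the table and reports what was evicted.
  CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME);
  System.out.println("evicted blocks: " + stats.getEvictedBlocks());
}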