Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
The class TestAvoidCellReferencesIntoShippedBlocks, method evictBlock.
/**
 * For {@link BucketCache}, we only evict a block if it is not referenced by any RPC.
 */
private void evictBlock(BlockCache blockCache, BlockCacheKey blockCacheKey) {
  assertTrue(blockCache instanceof CombinedBlockCache);
  BlockCache[] blockCaches = blockCache.getBlockCaches();
  for (BlockCache currentBlockCache : blockCaches) {
    if (currentBlockCache instanceof BucketCache) {
      ((BucketCache) currentBlockCache).evictBlockIfNoRpcReferenced(blockCacheKey);
    } else {
      currentBlockCache.evictBlock(blockCacheKey);
    }
  }
}
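A minimal usage sketch, not part of the original test: walk the cache with its CachedBlock iterator and hand each block's key to the helper above. The method name evictAllBlocks and the loop are illustrative assumptions; only the BlockCache, CachedBlock, and BlockCacheKey calls are the ones already used in the snippets on this page, so imports follow the surrounding test class.

// Hypothetical caller (name and loop are illustrative assumptions):
// walk every cached block and ask the eviction helper above to drop it.
// BucketCache entries are only removed once no RPC still references them.
private void evictAllBlocks(BlockCache blockCache) {
  Iterator<CachedBlock> blocks = blockCache.iterator();
  while (blocks.hasNext()) {
    CachedBlock block = blocks.next();
    evictBlock(blockCache, new BlockCacheKey(block.getFilename(), block.getOffset()));
  }
}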
Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
The class TestAvoidCellReferencesIntoShippedBlocks, method testHBase16372InCompactionWritePath.
@Test
public void testHBase16372InCompactionWritePath() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  // Create a table with block size as 1024
  final Table table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CompactorRegionObserver.class.getName());
  try {
    // get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region = (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    HStore store = region.getStores().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    final BlockCache cache = cacheConf.getBlockCache().get();
    // insert data. 6 rows are added (ROW through ROW5)
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW2);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW2);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    put = new Put(ROW3);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW3);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW4);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    put = new Put(ROW4);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    put = new Put(ROW5);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW5);
    put.addColumn(FAMILY, QUALIFIER1, data);
    table.put(put);
    // data was in memstore so don't expect any changes
    region.flush(true);
    // Load cache
    Scan s = new Scan();
    s.setMaxResultSize(1000);
    int count;
    try (ResultScanner scanner = table.getScanner(s)) {
      count = Iterables.size(scanner);
    }
    assertEquals("Count all the rows ", 6, count);
    // all the cache is loaded
    // trigger a major compaction
    ScannerThread scannerThread = new ScannerThread(table, cache);
    scannerThread.start();
    region.compact(true);
    s = new Scan();
    s.setMaxResultSize(1000);
    try (ResultScanner scanner = table.getScanner(s)) {
      count = Iterables.size(scanner);
    }
    assertEquals("Count all the rows ", 6, count);
  } finally {
    table.close();
  }
}
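The ScannerThread started above is defined elsewhere in TestAvoidCellReferencesIntoShippedBlocks and is not shown on this page. A minimal sketch of what such a thread might look like, assuming it only needs to keep a scan open while the major compaction runs; the class name, constructor, and loop below are illustrative assumptions, not the project's actual implementation.

// Illustrative sketch only: scan the table concurrently so that data blocks stay
// RPC-referenced while region.compact(true) rewrites the store files.
private static final class ScannerThreadSketch extends Thread {
  private final Table table;
  // held only to mirror the (table, cache) constructor shape used above
  private final BlockCache cache;

  ScannerThreadSketch(Table table, BlockCache cache) {
    this.table = table;
    this.cache = cache;
  }

  @Override
  public void run() {
    Scan s = new Scan();
    s.setCaching(1);
    try (ResultScanner scanner = table.getScanner(s)) {
      // drain the scanner slowly; each next() keeps cached blocks referenced
      while (scanner.next() != null) {
        Thread.sleep(10);
      }
    } catch (IOException | InterruptedException e) {
      throw new RuntimeException(e);
    }
  }
}

In the real test the thread presumably also inspects or evicts blocks through the cache reference it is given; the sketch only mirrors the constructor signature seen in the snippet.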
Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
The class TestBlockEvictionFromClient, method testScanWithCompactionInternals.
private void testScanWithCompactionInternals(String tableNameStr, boolean reversed) throws IOException, InterruptedException {
  Table table = null;
  try {
    latch = new CountDownLatch(1);
    compactionLatch = new CountDownLatch(1);
    TableName tableName = TableName.valueOf(tableNameStr);
    // Create a table with block size as 1024
    table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName());
    // get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    HStore store = region.getStores().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    BlockCache cache = cacheConf.getBlockCache().get();
    // insert data. 2 Rows are added
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
    // Should create one Hfile with 2 blocks
    region.flush(true);
    // read the data and expect same blocks, one new hit, no misses
    int refCount = 0;
    // Check how this miss is happening
    // insert a second column, read the row, no new blocks, 3 new hits
    byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    byte[] data2 = Bytes.add(data, data);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    // flush, one new block
    System.out.println("Flushing cache");
    region.flush(true);
    Iterator<CachedBlock> iterator = cache.iterator();
    iterateBlockCache(cache, iterator);
    // Create three sets of scan
    ScanThread[] scanThreads = initiateScan(table, reversed);
    Thread.sleep(100);
    iterator = cache.iterator();
    boolean usedBlocksFound = false;
    while (iterator.hasNext()) {
      CachedBlock next = iterator.next();
      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
      if (cache instanceof BucketCache) {
        refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
      } else if (cache instanceof CombinedBlockCache) {
        refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
      } else {
        continue;
      }
      if (refCount != 0) {
        // Blocks will be with count 3
        assertEquals(NO_OF_THREADS, refCount);
        usedBlocksFound = true;
      }
    }
    assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
    usedBlocksFound = false;
    System.out.println("Compacting");
    assertEquals(2, store.getStorefilesCount());
    store.triggerMajorCompaction();
    region.compact(true);
    // wait 10 seconds max
    waitForStoreFileCount(store, 1, 10000);
    assertEquals(1, store.getStorefilesCount());
    // Even after compaction is done we will have some blocks that cannot
    // be evicted this is because the scan is still referencing them
    iterator = cache.iterator();
    while (iterator.hasNext()) {
      CachedBlock next = iterator.next();
      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
      if (cache instanceof BucketCache) {
        refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
      } else if (cache instanceof CombinedBlockCache) {
        refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
      } else {
        continue;
      }
      if (refCount != 0) {
        // Blocks will be with count 3 as they are not yet cleared
        assertEquals(NO_OF_THREADS, refCount);
        usedBlocksFound = true;
      }
    }
    assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
    // Should not throw exception
    compactionLatch.countDown();
    latch.countDown();
    for (ScanThread thread : scanThreads) {
      thread.join();
    }
    // by this time all blocks should have been evicted
    iterator = cache.iterator();
    iterateBlockCache(cache, iterator);
    Result r = table.get(new Get(ROW));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
    // The gets would be working on new blocks
    iterator = cache.iterator();
    iterateBlockCache(cache, iterator);
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
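The two loops above repeat the same cache-type dispatch when reading the RPC reference count of a block. A small refactoring sketch, not part of the original test, that pulls that lookup into one place; the method name rpcRefCount is an assumption, while the BucketCache and CombinedBlockCache calls are exactly the ones used in the loops.

// Hypothetical helper (name is an assumption): return the RPC reference count for a
// cached block, or 0 for cache implementations that do not track RPC references.
private static int rpcRefCount(BlockCache cache, BlockCacheKey cacheKey) {
  if (cache instanceof BucketCache) {
    return ((BucketCache) cache).getRpcRefCount(cacheKey);
  } else if (cache instanceof CombinedBlockCache) {
    return ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
  }
  return 0;
}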
Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
The class TestBlockEvictionFromClient, method testBlockEvictionWithParallelScans.
@Test
public void testBlockEvictionWithParallelScans() throws Exception {
  Table table = null;
  try {
    latch = new CountDownLatch(1);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create a table with block size as 1024
    table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName());
    // get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    HStore store = region.getStores().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    BlockCache cache = cacheConf.getBlockCache().get();
    // insert data. 2 Rows are added
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
    // data was in memstore so don't expect any changes
    // flush the data
    // Should create one Hfile with 2 blocks
    region.flush(true);
    // Load cache
    // Create three sets of scan
    ScanThread[] scanThreads = initiateScan(table, false);
    Thread.sleep(100);
    checkForBlockEviction(cache, false, false);
    for (ScanThread thread : scanThreads) {
      thread.join();
    }
    // CustomInnerRegionObserver.sleepTime.set(0);
    Iterator<CachedBlock> iterator = cache.iterator();
    iterateBlockCache(cache, iterator);
    // read the data and expect same blocks, one new hit, no misses
    assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
    iterator = cache.iterator();
    iterateBlockCache(cache, iterator);
    // Check how this miss is happening
    // insert a second column, read the row, no new blocks, 3 new hits
    byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    byte[] data2 = Bytes.add(data, data);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    Result r = table.get(new Get(ROW));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
    iterator = cache.iterator();
    iterateBlockCache(cache, iterator);
    // flush, one new block
    System.out.println("Flushing cache");
    region.flush(true);
    iterator = cache.iterator();
    iterateBlockCache(cache, iterator);
    // compact, net minus two blocks, two hits, no misses
    System.out.println("Compacting");
    assertEquals(2, store.getStorefilesCount());
    store.triggerMajorCompaction();
    region.compact(true);
    // wait 10 seconds max
    waitForStoreFileCount(store, 1, 10000);
    assertEquals(1, store.getStorefilesCount());
    iterator = cache.iterator();
    iterateBlockCache(cache, iterator);
    // read the row, this should be a cache miss because we don't cache data
    // blocks on compaction
    r = table.get(new Get(ROW));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
    iterator = cache.iterator();
    iterateBlockCache(cache, iterator);
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
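Both compaction tests above wait on waitForStoreFileCount(store, 1, 10000) before asserting the file count. That helper is defined elsewhere in the test class; below is a minimal polling sketch of what it might do, assuming it simply retries until the store reports the expected number of store files or the timeout elapses. The sketch's name and sleep interval are assumptions.

// Illustrative polling sketch of a waitForStoreFileCount-style helper:
// retry until the store reports the expected file count or the timeout passes.
private static void waitForStoreFileCountSketch(HStore store, int expected, long timeoutMillis)
    throws InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMillis;
  while (store.getStorefilesCount() != expected && System.currentTimeMillis() < deadline) {
    Thread.sleep(100);
  }
  assertEquals(expected, store.getStorefilesCount());
}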
Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
The class TestBlockEvictionFromClient, method testGetsWithMultiColumnsAndExplicitTracker.
@Test
// TODO : check how block index works here
public void testGetsWithMultiColumnsAndExplicitTracker() throws IOException, InterruptedException {
  Table table = null;
  try {
    latch = new CountDownLatch(1);
    // Check if get() returns blocks on its close() itself
    getLatch = new CountDownLatch(1);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create KV that will give you two blocks
    // Create a table with block size as 1024
    table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName());
    // get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    BlockCache cache = setCacheProperties(region);
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    for (int i = 1; i < 10; i++) {
      put = new Put(ROW);
      put.addColumn(FAMILY, Bytes.toBytes("testQualifier" + i), data2);
      table.put(put);
      if (i % 2 == 0) {
        region.flush(true);
      }
    }
    byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    region.flush(true);
    // flush the data
    System.out.println("Flushing cache");
    // Should create one Hfile with 2 blocks
    CustomInnerRegionObserver.waitForGets.set(true);
    // Create three sets of gets
    GetThread[] getThreads = initiateGet(table, true, false);
    Thread.sleep(200);
    Iterator<CachedBlock> iterator = cache.iterator();
    boolean usedBlocksFound = false;
    int refCount = 0;
    int noOfBlocksWithRef = 0;
    while (iterator.hasNext()) {
      CachedBlock next = iterator.next();
      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
      if (cache instanceof BucketCache) {
        refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
      } else if (cache instanceof CombinedBlockCache) {
        refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
      } else {
        continue;
      }
      if (refCount != 0) {
        // Blocks will be with count 3
        System.out.println("The refCount is " + refCount);
        assertEquals(NO_OF_THREADS, refCount);
        usedBlocksFound = true;
        noOfBlocksWithRef++;
      }
    }
    assertTrue(usedBlocksFound);
    // the number of blocks referred
    assertEquals(10, noOfBlocksWithRef);
    CustomInnerRegionObserver.getCdl().get().countDown();
    for (GetThread thread : getThreads) {
      thread.join();
    }
    // Verify whether the gets have returned the blocks that it had
    CustomInnerRegionObserver.waitForGets.set(true);
    // giving some time for the block to be decremented
    checkForBlockEviction(cache, true, false);
    getLatch.countDown();
System.out.println("Gets should have returned the bloks");
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
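setCacheProperties(region) near the top of this test is another helper defined elsewhere in the class. Judging from how the other tests on this page configure caching, it most likely enables cache-on-write and evict-on-close on each store and returns the block cache; the sketch below encodes that assumption, and its name and loop are illustrative only.

// Assumed behaviour of a setCacheProperties-style helper: turn on cache-on-write and
// evict-on-close for every store in the region and return the shared block cache.
private BlockCache setCachePropertiesSketch(HRegion region) {
  BlockCache cache = null;
  for (HStore store : region.getStores()) {
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    cache = cacheConf.getBlockCache().get();
  }
  return cache;
}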