Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
From the class TestBlockEvictionFromClient, method testParallelGetsAndScans:
@Test
public void testParallelGetsAndScans() throws IOException, InterruptedException {
  Table table = null;
  try {
    latch = new CountDownLatch(2);
    // Check if get() returns blocks on its close() itself
    getLatch = new CountDownLatch(1);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create KVs that will give you two blocks
    // Create a table with block size as 1024
    table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024,
      CustomInnerRegionObserver.class.getName());
    // Get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    HStore store = region.getStores().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    BlockCache cache = cacheConf.getBlockCache().get();
    insertData(table);
    // Flush the data
    System.out.println("Flushing cache");
    // Should create one HFile with 2 blocks
    region.flush(true);
    // Create three sets of scans
    CustomInnerRegionObserver.waitForGets.set(true);
    ScanThread[] scanThreads = initiateScan(table, false);
    // Create three sets of gets
    GetThread[] getThreads = initiateGet(table, false, false);
    checkForBlockEviction(cache, false, false);
    CustomInnerRegionObserver.waitForGets.set(false);
    checkForBlockEviction(cache, false, false);
    for (GetThread thread : getThreads) {
      thread.join();
    }
    // Verify whether the gets have returned the blocks that they held
    CustomInnerRegionObserver.waitForGets.set(true);
    // Give some time for the block ref counts to be decremented
    checkForBlockEviction(cache, true, false);
    getLatch.countDown();
    for (ScanThread thread : scanThreads) {
      thread.join();
    }
    System.out.println("Scans should have returned the blocks");
    // Check with either true or false
    CustomInnerRegionObserver.waitForGets.set(false);
    // The scan should also have released the blocks by now
    checkForBlockEviction(cache, true, true);
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
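Note that checkForBlockEviction, insertData, initiateGet and initiateScan are private helpers of TestBlockEvictionFromClient and are not reproduced on this page. A minimal sketch of the kind of assertion checkForBlockEviction builds on is shown below; the helper name and exact behavior are our own simplification, but the ref-count accessors are exactly the ones used verbatim in the snippets further down.

// Sketch (hypothetical helper): assert that no cached block is still
// pinned by an in-flight RPC once all gets and scans have completed.
private static void assertNoRpcPinnedBlocks(BlockCache cache) {
  Iterator<CachedBlock> it = cache.iterator();
  while (it.hasNext()) {
    CachedBlock block = it.next();
    BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset());
    int refCount = 0;
    if (cache instanceof BucketCache) {
      refCount = ((BucketCache) cache).getRpcRefCount(key);
    } else if (cache instanceof CombinedBlockCache) {
      refCount = ((CombinedBlockCache) cache).getRpcRefCount(key);
    }
    assertEquals("block is still pinned by an RPC", 0, refCount);
  }
}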
Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
From the class TestBlockEvictionFromClient, method testGetWithMultipleColumnFamilies:
@Test
public void testGetWithMultipleColumnFamilies() throws IOException, InterruptedException {
  Table table = null;
  try {
    latch = new CountDownLatch(1);
    // Check if get() returns blocks on its close() itself
    getLatch = new CountDownLatch(1);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create KVs that will give you two blocks
    // Create a table with block size as 1024 and ten column families
    byte[][] fams = new byte[10][];
    fams[0] = FAMILY;
    for (int i = 1; i < 10; i++) {
      fams[i] = Bytes.toBytes("testFamily" + i);
    }
    table = TEST_UTIL.createTable(tableName, fams, 1, 1024,
      CustomInnerRegionObserver.class.getName());
    // Get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    BlockCache cache = setCacheProperties(region);
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    for (int i = 1; i < 10; i++) {
      put = new Put(ROW);
      put.addColumn(Bytes.toBytes("testFamily" + i), Bytes.toBytes("testQualifier" + i), data2);
      table.put(put);
      if (i % 2 == 0) {
        region.flush(true);
      }
    }
    region.flush(true);
    byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    region.flush(true);
    // All of the data has been flushed; several HFiles now exist for this region
    System.out.println("Flushing cache");
    CustomInnerRegionObserver.waitForGets.set(true);
    // Create three sets of gets
    GetThread[] getThreads = initiateGet(table, true, true);
    Thread.sleep(200);
    Iterator<CachedBlock> iterator = cache.iterator();
    boolean usedBlocksFound = false;
    int refCount = 0;
    int noOfBlocksWithRef = 0;
    while (iterator.hasNext()) {
      CachedBlock next = iterator.next();
      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
      if (cache instanceof BucketCache) {
        refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
      } else if (cache instanceof CombinedBlockCache) {
        refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
      } else {
        continue;
      }
      if (refCount != 0) {
        // Each referenced block should carry a ref count of NO_OF_THREADS
        System.out.println("The refCount is " + refCount);
        assertEquals(NO_OF_THREADS, refCount);
        usedBlocksFound = true;
        noOfBlocksWithRef++;
      }
    }
    assertTrue(usedBlocksFound);
    // The number of blocks referred
    assertEquals(3, noOfBlocksWithRef);
    CustomInnerRegionObserver.getCdl().get().countDown();
    for (GetThread thread : getThreads) {
      thread.join();
    }
    // Verify whether the gets have returned the blocks that they held
    CustomInnerRegionObserver.waitForGets.set(true);
    // Give some time for the block ref counts to be decremented
    checkForBlockEviction(cache, true, false);
    getLatch.countDown();
    System.out.println("Gets should have returned the blocks");
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
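setCacheProperties, used above in place of the explicit CacheConfig calls, is another helper defined elsewhere in the test class. Note also that the while-loop over the cache reappears verbatim in testScanWithMultipleColumnFamilies below; if you adapt these snippets, it can be factored out. A sketch, with the helper name being our own invention:

// Hypothetical helper: count the cached blocks currently pinned by RPCs,
// asserting that every pinned block carries the expected reference count.
private static int countRpcPinnedBlocks(BlockCache cache, int expectedRefCount) {
  int pinned = 0;
  for (Iterator<CachedBlock> it = cache.iterator(); it.hasNext(); ) {
    CachedBlock next = it.next();
    BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
    int refCount;
    if (cache instanceof BucketCache) {
      refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
    } else if (cache instanceof CombinedBlockCache) {
      refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
    } else {
      continue;
    }
    if (refCount != 0) {
      assertEquals(expectedRefCount, refCount);
      pinned++;
    }
  }
  return pinned;
}

With this helper, the assertion block above reduces to assertEquals(3, countRpcPinnedBlocks(cache, NO_OF_THREADS)), and the scan variant below to the same call with 12 expected pinned blocks.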
Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
From the class RSRpcServices, method clearRegionBlockCache:
@Override
public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller,
    ClearRegionBlockCacheRequest request) throws ServiceException {
  rpcPreCheck("clearRegionBlockCache");
  ClearRegionBlockCacheResponse.Builder builder = ClearRegionBlockCacheResponse.newBuilder();
  CacheEvictionStatsBuilder stats = CacheEvictionStats.builder();
  List<HRegion> regions = getRegions(request.getRegionList(), stats);
  for (HRegion region : regions) {
    try {
      stats = stats.append(this.server.clearRegionBlockCache(region));
    } catch (Exception e) {
      stats.addException(region.getRegionInfo().getRegionName(), e);
    }
  }
  stats.withMaxCacheSize(server.getBlockCache().map(BlockCache::getMaxSize).orElse(0L));
  return builder.setStats(ProtobufUtil.toCacheEvictionStats(stats.build())).build();
}
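This is the server-side handler. From a client, the same eviction is normally triggered through Admin.clearBlockCache(TableName), which returns the aggregated CacheEvictionStats built above. A minimal sketch, assuming an HBase 2.x client on the classpath and a running cluster; "my_table" is a placeholder:

// Ask every region server hosting "my_table" to drop that table's
// blocks from its BlockCache, then report what was evicted.
Configuration conf = HBaseConfiguration.create();
try (Connection connection = ConnectionFactory.createConnection(conf);
    Admin admin = connection.getAdmin()) {
  CacheEvictionStats stats = admin.clearBlockCache(TableName.valueOf("my_table"));
  System.out.println("Evicted " + stats.getEvictedBlocks() + " blocks; max cache size "
      + stats.getMaxCacheSize() + " bytes");
}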
Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
From the class TestBlockEvictionFromClient, method testScanWithMultipleColumnFamilies:
@Test
public void testScanWithMultipleColumnFamilies() throws IOException, InterruptedException {
  Table table = null;
  try {
    latch = new CountDownLatch(1);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create KVs that will give you two blocks
    // Create a table with block size as 1024 and ten column families
    byte[][] fams = new byte[10][];
    fams[0] = FAMILY;
    for (int i = 1; i < 10; i++) {
      fams[i] = Bytes.toBytes("testFamily" + i);
    }
    table = TEST_UTIL.createTable(tableName, fams, 1, 1024,
      CustomInnerRegionObserver.class.getName());
    // Get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    BlockCache cache = setCacheProperties(region);
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    for (int i = 1; i < 10; i++) {
      put = new Put(ROW);
      put.addColumn(Bytes.toBytes("testFamily" + i), Bytes.toBytes("testQualifier" + i), data2);
      table.put(put);
      if (i % 2 == 0) {
        region.flush(true);
      }
    }
    region.flush(true);
    byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    region.flush(true);
    // All of the data has been flushed; several HFiles now exist for this region
    System.out.println("Flushing cache");
    // Create three sets of scans
    ScanThread[] scanThreads = initiateScan(table, true);
    Thread.sleep(200);
    Iterator<CachedBlock> iterator = cache.iterator();
    boolean usedBlocksFound = false;
    int refCount = 0;
    int noOfBlocksWithRef = 0;
    while (iterator.hasNext()) {
      CachedBlock next = iterator.next();
      BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
      if (cache instanceof BucketCache) {
        refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
      } else if (cache instanceof CombinedBlockCache) {
        refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
      } else {
        continue;
      }
      if (refCount != 0) {
        // Each referenced block should carry a ref count of NO_OF_THREADS
        System.out.println("The refCount is " + refCount);
        assertEquals(NO_OF_THREADS, refCount);
        usedBlocksFound = true;
        noOfBlocksWithRef++;
      }
    }
    assertTrue(usedBlocksFound);
    // The number of blocks referred; the scans touch every column family
    assertEquals(12, noOfBlocksWithRef);
    CustomInnerRegionObserver.getCdl().get().countDown();
    for (ScanThread thread : scanThreads) {
      thread.join();
    }
    // Give some time for the block ref counts to be decremented
    checkForBlockEviction(cache, true, false);
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
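The comments above note that the tests "give some time" for ref counts to drop, and each test also relies on a fixed Thread.sleep(200) before inspecting the cache. If flakiness is a concern, the sleep can be replaced by a bounded poll built from the same accessors; a sketch (the helper name and timeout handling are our own):

// Hypothetical: poll until no cached block is pinned by an RPC, or time out.
private static void waitForRpcRefCountsToDrop(BlockCache cache, long timeoutMs)
    throws InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (System.currentTimeMillis() < deadline) {
    boolean pinned = false;
    for (Iterator<CachedBlock> it = cache.iterator(); it.hasNext(); ) {
      CachedBlock next = it.next();
      BlockCacheKey key = new BlockCacheKey(next.getFilename(), next.getOffset());
      int refCount = 0;
      if (cache instanceof BucketCache) {
        refCount = ((BucketCache) cache).getRpcRefCount(key);
      } else if (cache instanceof CombinedBlockCache) {
        refCount = ((CombinedBlockCache) cache).getRpcRefCount(key);
      }
      if (refCount != 0) {
        pinned = true;
        break;
      }
    }
    if (!pinned) {
      return;
    }
    Thread.sleep(20);
  }
  fail("blocks still pinned by RPCs after " + timeoutMs + " ms");
}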
Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.
From the class TestBlockEvictionFromClient, method testGetWithCellsInDifferentFiles:
@Test
public void testGetWithCellsInDifferentFiles() throws IOException, InterruptedException {
  Table table = null;
  try {
    latch = new CountDownLatch(1);
    // Check if get() returns blocks on its close() itself
    getLatch = new CountDownLatch(1);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create KVs that will give you two blocks
    // Create a table with block size as 1024
    table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024,
      CustomInnerRegionObserver.class.getName());
    // Get the block cache and region
    RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
    String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
    HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    HStore store = region.getStores().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    BlockCache cache = cacheConf.getBlockCache().get();
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    put = new Put(ROW1);
    put.addColumn(FAMILY, QUALIFIER, data);
    table.put(put);
    region.flush(true);
    byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER2, data2);
    table.put(put);
    region.flush(true);
    // The data is now flushed into three separate HFiles
    System.out.println("Flushing cache");
    CustomInnerRegionObserver.waitForGets.set(true);
    // Create three sets of gets
    GetThread[] getThreads = initiateGet(table, false, false);
    Thread.sleep(200);
    CustomInnerRegionObserver.getCdl().get().countDown();
    for (GetThread thread : getThreads) {
      thread.join();
    }
    // Verify whether the gets have returned the blocks that they held
    CustomInnerRegionObserver.waitForGets.set(true);
    // Give some time for the block ref counts to be decremented
    checkForBlockEviction(cache, true, false);
    getLatch.countDown();
    System.out.println("Gets should have returned the blocks");
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
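Several fields used throughout these snippets (ROW, ROW1, FAMILY, QUALIFIER, data, data2, NO_OF_THREADS, FAMILIES_1) are defined elsewhere in TestBlockEvictionFromClient. Plausible definitions, consistent with the 1024-byte block size and the NO_OF_THREADS ref counts asserted above, would look like the following; the exact values are assumptions:

// Hypothetical field definitions consistent with the snippets above:
// values are sized so that a few cells overflow a 1024-byte block.
private static final byte[] ROW = Bytes.toBytes("testRow");
private static final byte[] ROW1 = Bytes.toBytes("testRow1");
private static final byte[] FAMILY = Bytes.toBytes("testFamily");
private static final byte[][] FAMILIES_1 = new byte[][] { FAMILY };
private static final byte[] QUALIFIER = Bytes.toBytes("testQualifier");
private static final byte[] data = new byte[1000];          // ~1 KB value (assumption)
private static final byte[] data2 = Bytes.add(data, data);  // ~2 KB value (assumption)
private static final int NO_OF_THREADS = 3;                 // three gets/scans per batch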