Use of org.apache.hadoop.hbase.io.hfile.bucket.BucketCache in project hbase by apache.
From the class TestCacheOnWrite, method testCachingDataBlocksDuringCompactionInternals:
private void testCachingDataBlocksDuringCompactionInternals(boolean useTags,
    boolean cacheBlocksOnCompaction, long cacheBlocksOnCompactionThreshold)
    throws IOException, InterruptedException {
  // Save the current conf values so they can be restored after the test.
  boolean localValue = conf.getBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, false);
  long localCacheCompactedBlocksThreshold =
      conf.getLong(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY,
        CacheConfig.DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD);
  boolean localCacheBloomBlocksValue = conf.getBoolean(
      CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, CacheConfig.DEFAULT_CACHE_BLOOMS_ON_WRITE);
  boolean localCacheIndexBlocksValue = conf.getBoolean(
      CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, CacheConfig.DEFAULT_CACHE_INDEXES_ON_WRITE);
  try {
    // Set the conf if testing caching compacted blocks on write
    conf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, cacheBlocksOnCompaction);
    // Set the size threshold if testing the compaction size threshold
    if (cacheBlocksOnCompactionThreshold > 0) {
      conf.setLong(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY,
        cacheBlocksOnCompactionThreshold);
    }
    // TODO: need to change this test if we add a cache size threshold for
    // compactions, or if we implement some other kind of intelligent logic for
    // deciding what blocks to cache-on-write on compaction.
    final String table = "CompactionCacheOnWrite";
    final String cf = "myCF";
    final byte[] cfBytes = Bytes.toBytes(cf);
    final int maxVersions = 3;
    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(cfBytes)
        .setCompressionType(compress)
        .setBloomFilterType(BLOOM_TYPE)
        .setMaxVersions(maxVersions)
        .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding())
        .build();
    HRegion region = TEST_UTIL.createTestRegion(table, cfd, blockCache);
    int rowIdx = 0;
    long ts = EnvironmentEdgeManager.currentTime();
    // Write five store files of 500 rows each so the compaction below has work to do.
    for (int iFile = 0; iFile < 5; ++iFile) {
      for (int iRow = 0; iRow < 500; ++iRow) {
        String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + iRow;
        Put p = new Put(Bytes.toBytes(rowStr));
        ++rowIdx;
        for (int iCol = 0; iCol < 10; ++iCol) {
          String qualStr = "col" + iCol;
          String valueStr = "value_" + rowStr + "_" + qualStr;
          for (int iTS = 0; iTS < 5; ++iTS) {
            if (useTags) {
              Tag t = new ArrayBackedTag((byte) 1, "visibility");
              Tag[] tags = new Tag[1];
              tags[0] = t;
              KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr),
                  HConstants.LATEST_TIMESTAMP, Bytes.toBytes(valueStr), tags);
              p.add(kv);
            } else {
              KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr),
                  ts++, Bytes.toBytes(valueStr));
              p.add(kv);
            }
          }
        }
        p.setDurability(Durability.ASYNC_WAL);
        region.put(p);
      }
      region.flush(true);
    }
    clearBlockCache(blockCache);
    assertEquals(0, blockCache.getBlockCount());
    region.compact(false);
    LOG.debug("compactStores() returned");
    boolean dataBlockCached = false;
    boolean bloomBlockCached = false;
    boolean indexBlockCached = false;
    for (CachedBlock block : blockCache) {
      if (DATA_BLOCK_TYPES.contains(block.getBlockType())) {
        dataBlockCached = true;
      } else if (BLOOM_BLOCK_TYPES.contains(block.getBlockType())) {
        bloomBlockCached = true;
      } else if (INDEX_BLOCK_TYPES.contains(block.getBlockType())) {
        indexBlockCached = true;
      }
    }
    // Data blocks should be cached in instances where we are caching blocks on write.
    // When testing a BucketCache, we cannot verify the block type because it is not
    // stored in the cache.
    boolean cacheOnCompactAndNonBucketCache =
        cacheBlocksOnCompaction && !(blockCache instanceof BucketCache);
    String assertErrorMessage = "\nTest description: " + testDescription
        + "\ncacheBlocksOnCompaction: " + cacheBlocksOnCompaction + "\n";
    if (cacheOnCompactAndNonBucketCache && cacheBlocksOnCompactionThreshold > 0) {
      if (cacheBlocksOnCompactionThreshold == CACHE_COMPACTION_HIGH_THRESHOLD) {
        assertTrue(assertErrorMessage, dataBlockCached);
        assertTrue(assertErrorMessage, bloomBlockCached);
        assertTrue(assertErrorMessage, indexBlockCached);
      } else {
        assertFalse(assertErrorMessage, dataBlockCached);
        if (localCacheBloomBlocksValue) {
          assertTrue(assertErrorMessage, bloomBlockCached);
        } else {
          assertFalse(assertErrorMessage, bloomBlockCached);
        }
        if (localCacheIndexBlocksValue) {
          assertTrue(assertErrorMessage, indexBlockCached);
        } else {
          assertFalse(assertErrorMessage, indexBlockCached);
        }
      }
    } else {
      assertEquals(assertErrorMessage, cacheOnCompactAndNonBucketCache, dataBlockCached);
      if (cacheOnCompactAndNonBucketCache) {
        assertTrue(assertErrorMessage, bloomBlockCached);
        assertTrue(assertErrorMessage, indexBlockCached);
      }
    }
    region.close();
  } finally {
    // Restore the original conf values.
    conf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, localValue);
    conf.setLong(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY,
      localCacheCompactedBlocksThreshold);
    conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, localCacheBloomBlocksValue);
    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, localCacheIndexBlocksValue);
  }
}
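For orientation, here is a sketch of how a helper like this might be driven from test entry points. This is a hypothetical illustration, not the project's actual test methods: the method names, the use of false for useTags, and the constant values are assumptions; only CACHE_COMPACTION_HIGH_THRESHOLD is referenced by the helper above, and its real value in the project may differ.

import java.io.IOException;
import org.junit.Test;

// Hypothetical drivers, written as if they sat inside TestCacheOnWrite next to the
// private helper above. The threshold constants are illustrative assumptions only.
private static final long CACHE_COMPACTION_HIGH_THRESHOLD = 10L * 1024L * 1024L;
private static final long CACHE_COMPACTION_LOW_THRESHOLD = 1L;

@Test
public void testCachingDataBlocksDuringCompaction() throws IOException, InterruptedException {
  // A threshold <= 0 means no size threshold: caching follows the boolean flag alone.
  testCachingDataBlocksDuringCompactionInternals(false, true, 0);
}

@Test
public void testCachingDataBlocksThresholdDuringCompaction()
    throws IOException, InterruptedException {
  // High threshold: compacted output stays under it, so all block types get cached.
  testCachingDataBlocksDuringCompactionInternals(false, true, CACHE_COMPACTION_HIGH_THRESHOLD);
  // Low threshold: data blocks are skipped; bloom and index caching follow their own flags.
  testCachingDataBlocksDuringCompactionInternals(false, true, CACHE_COMPACTION_LOW_THRESHOLD);
}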
Use of org.apache.hadoop.hbase.io.hfile.bucket.BucketCache in project hbase by apache.
From the class TestCacheConfig, method doBucketCacheConfigTest:
private void doBucketCacheConfigTest() {
  final int bcSize = 100;
  this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
  CacheConfig cc = new CacheConfig(this.conf);
  BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
  basicBlockCacheOps(blockCache, cc, false, false);
  assertTrue(blockCache instanceof CombinedBlockCache);
  // TODO: Assert sizes allocated are right and proportions.
  CombinedBlockCache cbc = (CombinedBlockCache) blockCache;
  BlockCache[] bcs = cbc.getBlockCaches();
  assertTrue(bcs[0] instanceof LruBlockCache);
  LruBlockCache lbc = (LruBlockCache) bcs[0];
  assertEquals(MemorySizeUtil.getOnHeapCacheSize(this.conf), lbc.getMaxSize());
  assertTrue(bcs[1] instanceof BucketCache);
  BucketCache bc = (BucketCache) bcs[1];
  // getMaxSize comes back in bytes but we specified the size in MB
  assertEquals(bcSize, bc.getMaxSize() / (1024 * 1024));
}
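As a companion to the test above, a minimal sketch of wiring up a BucketCache-backed combined cache from scratch. It reuses the BlockCacheFactory and HConstants keys seen in the test; the "offheap" engine choice and the 100 MB size are illustrative assumptions, and an off-heap cache additionally needs enough direct memory (-XX:MaxDirectMemorySize) at JVM startup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;

public class BucketCacheConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Choose an IO engine for the bucket cache; "offheap" keeps blocks in direct memory.
    conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    // A plain number is interpreted as megabytes, matching bcSize in the test above.
    conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 100);
    BlockCache cache = BlockCacheFactory.createBlockCache(conf);
    // With a bucket cache configured, the factory returns a CombinedBlockCache:
    // an on-heap LRU cache (holding index/bloom blocks) layered over the BucketCache.
    System.out.println(cache instanceof CombinedBlockCache); // expected: true
  }
}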