Use of org.apache.hadoop.hbase.io.hfile.BlockType in project hbase by apache.
The example below is from the class TestCacheOnWriteInSchema, method readStoreFile.
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache().get();
  HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
  sf.initReader();
  HFile.Reader reader = sf.getReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(conf, false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block =
        reader.readBlock(offset, -1, false, true, false, true, null, DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      final BlockType blockType = block.getBlockType();
      if (shouldBeCached != isCached
        && (cowType.blockType1.equals(blockType) || cowType.blockType2.equals(blockType))) {
        throw new AssertionError("shouldBeCached: " + shouldBeCached + "\n"
          + "isCached: " + isCached + "\n"
          + "Test description: " + testDescription + "\n"
          + "block: " + block + "\n"
          + "blockCacheKey: " + blockCacheKey);
      }
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
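The cowType field referenced above is a test fixture defined elsewhere in TestCacheOnWriteInSchema: it pairs each cache-on-write mode with the BlockType values that should end up in the block cache. The sketch below is a minimal, hypothetical reconstruction of that shape; the enum name, constant names, and the specific BlockType pairings are assumptions for illustration, not copied from the real test.

import org.apache.hadoop.hbase.io.hfile.BlockType;

// Hypothetical sketch of a cowType-style fixture: each cache-on-write mode
// declares the two BlockType values whose blocks it expects to be cached.
enum CacheOnWriteType {
  // The pairings here are illustrative assumptions, not the actual test values.
  DATA_BLOCKS(BlockType.DATA, BlockType.ENCODED_DATA),
  BLOOM_BLOCKS(BlockType.BLOOM_CHUNK, BlockType.BLOOM_CHUNK),
  INDEX_BLOCKS(BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX);

  final BlockType blockType1;
  final BlockType blockType2;

  CacheOnWriteType(BlockType blockType1, BlockType blockType2) {
    this.blockType1 = blockType1;
    this.blockType2 = blockType2;
  }

  // Mirrors the shouldBeCached(...) call in readStoreFile: a block should be
  // cached iff its type matches one of the two types this mode cares about.
  boolean shouldBeCached(BlockType blockType) {
    return blockType == blockType1 || blockType == blockType2;
  }
}

With a fixture of this shape, the loop in readStoreFile only fails the assertion for block types the current mode actually governs, which is why it checks blockType1/blockType2 before throwing.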