Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.
The readStoreFile method of the class TestCacheOnWriteInSchema:
private void readStoreFile(Path path) throws IOException {
  CacheConfig cacheConf = store.getCacheConfig();
  BlockCache cache = cacheConf.getBlockCache();
  StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL);
  HFile.Reader reader = sf.createReader().getHFileReader();
  try {
    // Open a scanner with (on read) caching disabled
    HFileScanner scanner = reader.getScanner(false, false);
    assertTrue(testDescription, scanner.seekTo());
    // Cribbed from io.hfile.TestCacheOnWrite
    long offset = 0;
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null,
          DataBlockEncoding.NONE);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
      boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      if (shouldBeCached != isCached) {
        throw new AssertionError("shouldBeCached: " + shouldBeCached + "\n"
            + "isCached: " + isCached + "\n"
            + "Test description: " + testDescription + "\n"
            + "block: " + block + "\n"
            + "blockCacheKey: " + blockCacheKey);
      }
      offset += block.getOnDiskSizeWithHeader();
    }
  } finally {
    reader.close();
  }
}
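The cache-on-write check above boils down to looking up each block in the block cache by its (file name, offset) key. Below is a minimal sketch of that lookup in isolation; conf, fileName, and offset are assumed inputs (a test Configuration, the HFile name as returned by reader.getName(), and a block offset), and the getBlock flags mirror the ones used in readStoreFile.

// Sketch only: check whether a single block was cached at write time.
// Assumes `conf` is a Configuration with the block cache enabled, and that
// `fileName`/`offset` identify a block of an HFile written with cache-on-write.
CacheConfig cacheConf = new CacheConfig(conf);
BlockCache cache = cacheConf.getBlockCache();
BlockCacheKey key = new BlockCacheKey(fileName, offset);
// caching=true, repeat=false, updateCacheMetrics=true, as in the test above
boolean cachedOnWrite = cache.getBlock(key, true, false, true) != null;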
Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.
The writeStoreFile method of the class TestCompoundBloomFilter:
private Path writeStoreFile(int t, BloomType bt, List<KeyValue> kvs) throws IOException {
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZES[t]);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
  cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCK_SIZES[t]).build();
  StoreFileWriter w = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(TEST_UTIL.getDataTestDir())
      .withBloomType(bt)
      .withFileContext(meta)
      .build();
  assertTrue(w.hasGeneralBloom());
  assertTrue(w.getGeneralBloomWriter() instanceof CompoundBloomFilterWriter);
  CompoundBloomFilterWriter cbbf = (CompoundBloomFilterWriter) w.getGeneralBloomWriter();
  int keyCount = 0;
  KeyValue prev = null;
  LOG.debug("Total keys/values to insert: " + kvs.size());
  for (KeyValue kv : kvs) {
    w.append(kv);
    // Validate the key count in the Bloom filter.
    boolean newKey = true;
    if (prev != null) {
      newKey = !(bt == BloomType.ROW ? CellUtil.matchingRows(kv, prev)
          : CellUtil.matchingRowColumn(kv, prev));
    }
    if (newKey) {
      ++keyCount;
    }
    assertEquals(keyCount, cbbf.getKeyCount());
    prev = kv;
  }
  w.close();
  return w.getPath();
}
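Note that the cache-on-write switch is flipped on the Configuration before the CacheConfig is constructed, because CacheConfig reads its settings at construction time. A minimal sketch of just that setup, assuming a fresh HBase configuration:

// Sketch only: enable cache-on-write, then build the CacheConfig from it.
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
CacheConfig cacheConf = new CacheConfig(conf);  // picks up the flag set above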
Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.
The testCodecs method of the class DataBlockEncodingTool:
/**
 * Test a data block encoder on the given HFile. Output results to console.
 * @param conf the HBase configuration to use.
 * @param kvLimit the maximum number of KeyValues to analyze.
 * @param hfilePath an HFile path on the file system.
 * @param compressionName the compression algorithm used for comparison.
 * @param doBenchmark whether to run performance benchmarks.
 * @param doVerify whether to verify correctness.
 * @throws IOException if hfilePath cannot be read.
 */
public static void testCodecs(Configuration conf, int kvLimit, String hfilePath,
    String compressionName, boolean doBenchmark, boolean doVerify) throws IOException {
  // create environment
  Path path = new Path(hfilePath);
  CacheConfig cacheConf = new CacheConfig(conf);
  FileSystem fs = FileSystem.get(conf);
  StoreFile hsf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE);
  StoreFileReader reader = hsf.createReader();
  reader.loadFileInfo();
  KeyValueScanner scanner = reader.getStoreFileScanner(true, true, false, 0, 0, false);
  // run the utilities
  DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
  int majorVersion = reader.getHFileVersion();
  comp.useHBaseChecksum = majorVersion > 2
      || (majorVersion == 2
          && reader.getHFileMinorVersion() >= HFileReaderImpl.MINOR_VERSION_WITH_CHECKSUM);
  comp.checkStatistics(scanner, kvLimit);
  if (doVerify) {
    comp.verifyCodecs(scanner, kvLimit);
  }
  if (doBenchmark) {
    comp.benchmarkCodecs();
  }
  comp.displayStatistics();
  // cleanup
  scanner.close();
  reader.close(cacheConf.shouldEvictOnClose());
}
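The close call above is worth noting: rather than hard-coding eviction behaviour, the tool defers to the CacheConfig via shouldEvictOnClose(). A minimal sketch of that open/read/close pattern, assuming fs, path, and conf refer to an existing HFile:

// Sketch only: let the CacheConfig decide whether blocks are evicted on close.
CacheConfig cacheConf = new CacheConfig(conf);
StoreFile hsf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE);
StoreFileReader reader = hsf.createReader();
try {
  // ... read from the store file ...
} finally {
  reader.close(cacheConf.shouldEvictOnClose());
}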
Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.
The _testBlocksScanned method of the class TestBlocksScanned:
private void _testBlocksScanned(HTableDescriptor table) throws Exception {
  Region r = createNewHRegion(table, START_KEY, END_KEY, TEST_UTIL.getConfiguration());
  addContent(r, FAMILY, COL);
  r.flush(true);
  CacheStats stats = new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache().getStats();
  long before = stats.getHitCount() + stats.getMissCount();
  // Do simple test of getting one row only first.
  Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
  scan.addColumn(FAMILY, COL);
  scan.setMaxVersions(1);
  InternalScanner s = r.getScanner(scan);
  List<Cell> results = new ArrayList<>();
  while (s.next(results));
  s.close();
  int expectResultSize = 'z' - 'a';
  assertEquals(expectResultSize, results.size());
  int kvPerBlock = (int) Math.ceil(
      BLOCK_SIZE / (double) KeyValueUtil.ensureKeyValue(results.get(0)).getLength());
  Assert.assertEquals(2, kvPerBlock);
  long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
  long expectIndexBlockRead = expectDataBlockRead;
  assertEquals(expectIndexBlockRead + expectDataBlockRead,
      stats.getHitCount() + stats.getMissCount() - before);
}
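The block-read count in this test is derived from the block cache statistics: every block access is either a hit or a miss, so the delta of hit plus miss counts around the scan equals the number of blocks touched. A minimal sketch of that measurement, where doScan() is a hypothetical placeholder for whatever read path is being measured:

// Sketch only: measure blocks touched by a read via block cache statistics.
// Assumes `conf` is the cluster/test Configuration with the block cache enabled.
CacheStats stats = new CacheConfig(conf).getBlockCache().getStats();
long before = stats.getHitCount() + stats.getMissCount();
doScan();  // hypothetical placeholder for the scan or get being measured
long blocksTouched = stats.getHitCount() + stats.getMissCount() - before;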
Use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.
The testHFileScannerThrowsErrors method of the class TestFSErrorsExposed:
/**
 * Injects errors into the pread calls of an on-disk file, and makes
 * sure those bubble up to the HFile scanner
 */
@Test
public void testHFileScannerThrowsErrors() throws IOException {
  Path hfilePath = new Path(new Path(util.getDataTestDir("internalScannerExposesErrors"),
      "regionname"), "familyname");
  HFileSystem hfs = (HFileSystem) util.getTestFileSystem();
  FaultyFileSystem faultyfs = new FaultyFileSystem(hfs.getBackingFs());
  FileSystem fs = new HFileSystem(faultyfs);
  CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
  HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(util.getConfiguration(), cacheConf, hfs)
      .withOutputDir(hfilePath)
      .withFileContext(meta)
      .build();
  TestStoreFile.writeStoreFile(writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
  StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf,
      BloomType.NONE);
  StoreFileReader reader = sf.createReader();
  HFileScanner scanner = reader.getScanner(false, true);
  FaultyInputStream inStream = faultyfs.inStreams.get(0).get();
  assertNotNull(inStream);
  scanner.seekTo();
  // Do at least one successful read
  assertTrue(scanner.next());
  faultyfs.startFaults();
  try {
    int scanned = 0;
    while (scanner.next()) {
      scanned++;
    }
    fail("Scanner didn't throw after faults injected");
  } catch (IOException ioe) {
    LOG.info("Got expected exception", ioe);
    assertTrue(ioe.getMessage().contains("Fault"));
  }
  // end of test so evictOnClose
  reader.close(true);
}
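Here the reader is closed with reader.close(true), which evicts the file's blocks from the block cache unconditionally rather than consulting the CacheConfig; that is appropriate at the end of a test so cached blocks do not leak into later tests. A minimal sketch of the two close styles seen in these examples, assuming reader and cacheConf as above:

// Sketch only: closing a StoreFileReader.
// Passing true evicts this file's blocks from the block cache unconditionally;
// passing cacheConf.shouldEvictOnClose() defers the decision to the CacheConfig.
reader.close(true);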