
Example 1 with CacheStats

Use of org.apache.hadoop.hbase.io.hfile.CacheStats in project hbase by apache.

From the class TestStoreFile, method testCacheOnWriteEvictOnClose:

@Test
public void testCacheOnWriteEvictOnClose() throws Exception {
    Configuration conf = this.conf;
    // Find a home for our files (regiondir ("7e0102") and familyname).
    Path baseDir = new Path(new Path(testDir, "7e0102"), "twoCOWEOC");
    // Grab the block cache and get the initial hit/miss counts
    BlockCache bc = new CacheConfig(conf).getBlockCache();
    assertNotNull(bc);
    CacheStats cs = bc.getStats();
    long startHit = cs.getHitCount();
    long startMiss = cs.getMissCount();
    long startEvicted = cs.getEvictedCount();
    // Let's write a StoreFile with three blocks, with cache on write off
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
    CacheConfig cacheConf = new CacheConfig(conf);
    Path pathCowOff = new Path(baseDir, "123456789");
    StoreFileWriter writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE);
    LOG.debug(hsf.getPath().toString());
    // Read this file, we should see 3 misses
    StoreFileReader reader = hsf.createReader();
    reader.loadFileInfo();
    StoreFileScanner scanner = getStoreFileScanner(reader, true, true);
    scanner.seek(KeyValue.LOWESTKEY);
    while (scanner.next() != null) ; // drain the scanner; with cache-on-write off, these reads miss the cache
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss + 3, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startMiss += 3;
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());
    // Now write a StoreFile with three blocks, with cache on write on
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    cacheConf = new CacheConfig(conf);
    Path pathCowOn = new Path(baseDir, "123456788");
    writer = writeStoreFile(conf, cacheConf, pathCowOn, 3);
    hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE);
    // Read this file, we should see 3 hits
    reader = hsf.createReader();
    scanner = getStoreFileScanner(reader, true, true);
    scanner.seek(KeyValue.LOWESTKEY);
    while (scanner.next() != null) ; // drain the scanner; with cache-on-write on, these reads hit the cache
    assertEquals(startHit + 3, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startHit += 3;
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());
    // Let's read back the two files to ensure the blocks exactly match
    hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf, BloomType.NONE);
    StoreFileReader readerOne = hsf.createReader();
    readerOne.loadFileInfo();
    StoreFileScanner scannerOne = getStoreFileScanner(readerOne, true, true);
    scannerOne.seek(KeyValue.LOWESTKEY);
    hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf, BloomType.NONE);
    StoreFileReader readerTwo = hsf.createReader();
    readerTwo.loadFileInfo();
    StoreFileScanner scannerTwo = getStoreFileScanner(readerTwo, true, true);
    scannerTwo.seek(KeyValue.LOWESTKEY);
    Cell kv1 = null;
    Cell kv2 = null;
    while ((kv1 = scannerOne.next()) != null) {
        kv2 = scannerTwo.next();
        assertTrue(kv1.equals(kv2));
        KeyValue keyv1 = KeyValueUtil.ensureKeyValue(kv1);
        KeyValue keyv2 = KeyValueUtil.ensureKeyValue(kv2);
        assertTrue(Bytes.compareTo(keyv1.getBuffer(), keyv1.getKeyOffset(), keyv1.getKeyLength(), keyv2.getBuffer(), keyv2.getKeyOffset(), keyv2.getKeyLength()) == 0);
        assertTrue(Bytes.compareTo(kv1.getValueArray(), kv1.getValueOffset(), kv1.getValueLength(), kv2.getValueArray(), kv2.getValueOffset(), kv2.getValueLength()) == 0);
    }
    assertNull(scannerTwo.next());
    assertEquals(startHit + 6, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startHit += 6;
    scannerOne.close();
    readerOne.close(cacheConf.shouldEvictOnClose());
    scannerTwo.close();
    readerTwo.close(cacheConf.shouldEvictOnClose());
    // Let's close the first file with evict on close turned on
    conf.setBoolean("hbase.rs.evictblocksonclose", true);
    cacheConf = new CacheConfig(conf);
    hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf, BloomType.NONE);
    reader = hsf.createReader();
    reader.close(cacheConf.shouldEvictOnClose());
    // We should see 3 new evictions, but the evicted-count stat should not change: evictions
    // caused by HFile invalidation are not counted along with normal evictions.
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    // Let's close the second file with evict on close turned off
    conf.setBoolean("hbase.rs.evictblocksonclose", false);
    cacheConf = new CacheConfig(conf);
    hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf, BloomType.NONE);
    reader = hsf.createReader();
    reader.close(cacheConf.shouldEvictOnClose());
    // We expect no changes
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
}
Also used: Path (org.apache.hadoop.fs.Path), KeyValue (org.apache.hadoop.hbase.KeyValue), Configuration (org.apache.hadoop.conf.Configuration), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), CacheStats (org.apache.hadoop.hbase.io.hfile.CacheStats), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
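
The core pattern in this test can be reduced to a short sketch: snapshot the CacheStats counters, perform a read, and assert on the deltas. The sketch below is illustrative only; conf is an existing Configuration, and readAllBlocks is a hypothetical helper standing in for the StoreFileScanner loop above.

    BlockCache bc = new CacheConfig(conf).getBlockCache();
    CacheStats cs = bc.getStats();
    long hitsBefore = cs.getHitCount();
    long missesBefore = cs.getMissCount();
    long evictedBefore = cs.getEvictedCount();
    // Scan a store file end to end (hypothetical helper; in the test this is a
    // StoreFileScanner seek to KeyValue.LOWESTKEY followed by next() until null).
    readAllBlocks(pathCowOff);
    // With cache-on-write disabled, the first read of a 3-block file should
    // register 3 misses, no new hits, and no evictions.
    assertEquals(hitsBefore, cs.getHitCount());
    assertEquals(missesBefore + 3, cs.getMissCount());
    assertEquals(evictedBefore, cs.getEvictedCount());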

Example 2 with CacheStats

Use of org.apache.hadoop.hbase.io.hfile.CacheStats in project hbase by apache.

From the class TestBlocksScanned, method _testBlocksScanned:

private void _testBlocksScanned(HTableDescriptor table) throws Exception {
    Region r = createNewHRegion(table, START_KEY, END_KEY, TEST_UTIL.getConfiguration());
    addContent(r, FAMILY, COL);
    r.flush(true);
    CacheStats stats = new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache().getStats();
    long before = stats.getHitCount() + stats.getMissCount();
    // Do simple test of getting one row only first.
    Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
    scan.addColumn(FAMILY, COL);
    scan.setMaxVersions(1);
    InternalScanner s = r.getScanner(scan);
    List<Cell> results = new ArrayList<>();
    while (s.next(results)) ; // accumulate every matching cell into results
    s.close();
    int expectResultSize = 'z' - 'a';
    assertEquals(expectResultSize, results.size());
    int kvPerBlock = (int) Math.ceil(BLOCK_SIZE / (double) KeyValueUtil.ensureKeyValue(results.get(0)).getLength());
    Assert.assertEquals(2, kvPerBlock);
    long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
    long expectIndexBlockRead = expectDataBlockRead;
    assertEquals(expectIndexBlockRead + expectDataBlockRead, stats.getHitCount() + stats.getMissCount() - before);
}
Also used: ArrayList (java.util.ArrayList), CacheStats (org.apache.hadoop.hbase.io.hfile.CacheStats), Scan (org.apache.hadoop.hbase.client.Scan), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), Cell (org.apache.hadoop.hbase.Cell)
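
For reference, the block-count arithmetic behind the final assertion works out as below (a worked sketch; kvPerBlock = 2 is the value the test itself asserts):

    int expectResultSize = 'z' - 'a';                 // 25 cells returned by the scan
    int kvPerBlock = 2;                               // asserted above from BLOCK_SIZE / KeyValue length
    long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);  // ceil(25 / 2) = 13
    long expectIndexBlockRead = expectDataBlockRead;  // one index block access per data block read
    // Total cache accesses (hits + misses) expected since the flush: 13 + 13 = 26.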

Aggregations

Cell (org.apache.hadoop.hbase.Cell): 2 usages
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 2 usages
CacheStats (org.apache.hadoop.hbase.io.hfile.CacheStats): 2 usages
ArrayList (java.util.ArrayList): 1 usage
Configuration (org.apache.hadoop.conf.Configuration): 1 usage
Path (org.apache.hadoop.fs.Path): 1 usage
KeyValue (org.apache.hadoop.hbase.KeyValue): 1 usage
Scan (org.apache.hadoop.hbase.client.Scan): 1 usage
BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache): 1 usage
Test (org.junit.Test): 1 usage