
Example 1 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From class TestStoreFile, method testCacheOnWriteEvictOnClose.

@Test
public void testCacheOnWriteEvictOnClose() throws Exception {
    Configuration conf = this.conf;
    // Find a home for our files (regiondir ("7e0102") and familyname).
    Path baseDir = new Path(new Path(testDir, "7e0102"), "twoCOWEOC");
    // Grab the block cache and get the initial hit/miss counts
    BlockCache bc = new CacheConfig(conf).getBlockCache();
    assertNotNull(bc);
    CacheStats cs = bc.getStats();
    long startHit = cs.getHitCount();
    long startMiss = cs.getMissCount();
    long startEvicted = cs.getEvictedCount();
    // Let's write a StoreFile with three blocks, with cache on write off
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
    CacheConfig cacheConf = new CacheConfig(conf);
    Path pathCowOff = new Path(baseDir, "123456789");
    StoreFileWriter writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE);
    LOG.debug(hsf.getPath().toString());
    // Read this file, we should see 3 misses
    StoreFileReader reader = hsf.createReader();
    reader.loadFileInfo();
    StoreFileScanner scanner = getStoreFileScanner(reader, true, true);
    scanner.seek(KeyValue.LOWESTKEY);
    // drain the scanner; each block read from disk counts as a cache miss
    while (scanner.next() != null) {
    }
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss + 3, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startMiss += 3;
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());
    // Now write a StoreFile with three blocks, with cache on write on
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
    cacheConf = new CacheConfig(conf);
    Path pathCowOn = new Path(baseDir, "123456788");
    writer = writeStoreFile(conf, cacheConf, pathCowOn, 3);
    hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE);
    // Read this file, we should see 3 hits
    reader = hsf.createReader();
    scanner = getStoreFileScanner(reader, true, true);
    scanner.seek(KeyValue.LOWESTKEY);
    // drain the scanner again; this time every block should already be cached
    while (scanner.next() != null) {
    }
    assertEquals(startHit + 3, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startHit += 3;
    scanner.close();
    reader.close(cacheConf.shouldEvictOnClose());
    // Let's read back the two files to ensure the blocks exactly match
    hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf, BloomType.NONE);
    StoreFileReader readerOne = hsf.createReader();
    readerOne.loadFileInfo();
    StoreFileScanner scannerOne = getStoreFileScanner(readerOne, true, true);
    scannerOne.seek(KeyValue.LOWESTKEY);
    hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf, BloomType.NONE);
    StoreFileReader readerTwo = hsf.createReader();
    readerTwo.loadFileInfo();
    StoreFileScanner scannerTwo = getStoreFileScanner(readerTwo, true, true);
    scannerTwo.seek(KeyValue.LOWESTKEY);
    Cell kv1 = null;
    Cell kv2 = null;
    while ((kv1 = scannerOne.next()) != null) {
        kv2 = scannerTwo.next();
        assertTrue(kv1.equals(kv2));
        KeyValue keyv1 = KeyValueUtil.ensureKeyValue(kv1);
        KeyValue keyv2 = KeyValueUtil.ensureKeyValue(kv2);
        assertTrue(Bytes.compareTo(keyv1.getBuffer(), keyv1.getKeyOffset(), keyv1.getKeyLength(), keyv2.getBuffer(), keyv2.getKeyOffset(), keyv2.getKeyLength()) == 0);
        assertTrue(Bytes.compareTo(kv1.getValueArray(), kv1.getValueOffset(), kv1.getValueLength(), kv2.getValueArray(), kv2.getValueOffset(), kv2.getValueLength()) == 0);
    }
    assertNull(scannerTwo.next());
    assertEquals(startHit + 6, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    startHit += 6;
    scannerOne.close();
    readerOne.close(cacheConf.shouldEvictOnClose());
    scannerTwo.close();
    readerTwo.close(cacheConf.shouldEvictOnClose());
    // Let's close the first file with evict on close turned on
    conf.setBoolean("hbase.rs.evictblocksonclose", true);
    cacheConf = new CacheConfig(conf);
    hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf, BloomType.NONE);
    reader = hsf.createReader();
    reader.close(cacheConf.shouldEvictOnClose());
    // We should have 3 new evictions, but the evicted-count stat should not change: evictions
    // due to HFile invalidation are not counted along with normal evictions
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
    // Let's close the second file with evict on close turned off
    conf.setBoolean("hbase.rs.evictblocksonclose", false);
    cacheConf = new CacheConfig(conf);
    hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf, BloomType.NONE);
    reader = hsf.createReader();
    reader.close(cacheConf.shouldEvictOnClose());
    // We expect no changes
    assertEquals(startHit, cs.getHitCount());
    assertEquals(startMiss, cs.getMissCount());
    assertEquals(startEvicted, cs.getEvictedCount());
}
Also used: Path (org.apache.hadoop.fs.Path), KeyValue (org.apache.hadoop.hbase.KeyValue), Configuration (org.apache.hadoop.conf.Configuration), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), CacheStats (org.apache.hadoop.hbase.io.hfile.CacheStats), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), Cell (org.apache.hadoop.hbase.Cell), Test (org.junit.Test)
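
The bookkeeping above follows one pattern throughout: snapshot the cache counters, run the read or write, then assert on the deltas rather than on absolute values. A minimal sketch of that pattern, assuming a small helper class of our own (CacheStatsSnapshot is hypothetical; BlockCache and CacheStats are the real HBase types used in the test):

import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheStats;

// Hypothetical helper: capture hit/miss/eviction counters once, then verify deltas.
final class CacheStatsSnapshot {
    private final long hits;
    private final long misses;
    private final long evicted;

    CacheStatsSnapshot(BlockCache bc) {
        CacheStats cs = bc.getStats();
        this.hits = cs.getHitCount();
        this.misses = cs.getMissCount();
        this.evicted = cs.getEvictedCount();
    }

    // True when exactly the expected deltas have occurred since the snapshot.
    boolean matches(BlockCache bc, long dHits, long dMisses, long dEvicted) {
        CacheStats cs = bc.getStats();
        return cs.getHitCount() == hits + dHits
            && cs.getMissCount() == misses + dMisses
            && cs.getEvictedCount() == evicted + dEvicted;
    }
}

With such a helper, the first cold read above reduces to assertTrue(snap.matches(bc, 0, 3, 0)) and the warm read of the cache-on-write file to assertTrue(snap.matches(bc, 3, 0, 0)).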

Example 2 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From class TestFromClientSide, method testCacheOnWriteEvictOnClose.

/**
   * Tests that cache on write works all the way up from the client-side.
   *
   * Performs inserts, flushes, and compactions, verifying changes in the block
   * cache along the way.
   *
   * @throws Exception
   */
@Test
public void testCacheOnWriteEvictOnClose() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[] data = Bytes.toBytes("data");
    Table table = TEST_UTIL.createTable(tableName, FAMILY);
    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
        // get the block cache and region
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache();
        // establish baseline stats
        long startBlockCount = cache.getBlockCount();
        long startBlockHits = cache.getStats().getHitCount();
        long startBlockMiss = cache.getStats().getMissCount();
        // wait until the baseline stats are stable (at least 500 ms with no change)
        for (int i = 0; i < 5; i++) {
            Thread.sleep(100);
            if (startBlockCount != cache.getBlockCount() || startBlockHits != cache.getStats().getHitCount() || startBlockMiss != cache.getStats().getMissCount()) {
                startBlockCount = cache.getBlockCount();
                startBlockHits = cache.getStats().getHitCount();
                startBlockMiss = cache.getStats().getMissCount();
                // stats moved, so restart the stability window from scratch
                i = -1;
            }
        }
        // insert data
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        // data was in memstore so don't expect any changes
        assertEquals(startBlockCount, cache.getBlockCount());
        assertEquals(startBlockHits, cache.getStats().getHitCount());
        assertEquals(startBlockMiss, cache.getStats().getMissCount());
        // flush the data
        System.out.println("Flushing cache");
        region.flush(true);
        // expect one more block in cache, no change in hits/misses
        long expectedBlockCount = startBlockCount + 1;
        long expectedBlockHits = startBlockHits;
        long expectedBlockMiss = startBlockMiss;
        assertEquals(expectedBlockCount, cache.getBlockCount());
        assertEquals(expectedBlockHits, cache.getStats().getHitCount());
        assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
        // read the data and expect same blocks, one new hit, no misses
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        assertEquals(expectedBlockCount, cache.getBlockCount());
        assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
        assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
        // insert a second column, read the row, no new blocks, one new hit
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        byte[] data2 = Bytes.add(data, data);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        Result r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        assertEquals(expectedBlockCount, cache.getBlockCount());
        assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
        assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
        // flush, one new block
        System.out.println("Flushing cache");
        region.flush(true);
        assertEquals(++expectedBlockCount, cache.getBlockCount());
        assertEquals(expectedBlockHits, cache.getStats().getHitCount());
        assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
        // compact, net minus two blocks, two hits, no misses
        System.out.println("Compacting");
        assertEquals(2, store.getStorefilesCount());
        store.triggerMajorCompaction();
        region.compact(true);
        // wait 10 seconds max
        waitForStoreFileCount(store, 1, 10000);
        assertEquals(1, store.getStorefilesCount());
        // evicted two blocks, cached none
        expectedBlockCount -= 2;
        assertEquals(expectedBlockCount, cache.getBlockCount());
        expectedBlockHits += 2;
        assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
        assertEquals(expectedBlockHits, cache.getStats().getHitCount());
        // read the row, this should be a cache miss because we don't cache data
        // blocks on compaction
        r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        // cached one data block
        expectedBlockCount += 1;
        assertEquals(expectedBlockCount, cache.getBlockCount());
        assertEquals(expectedBlockHits, cache.getStats().getHitCount());
        assertEquals(++expectedBlockMiss, cache.getStats().getMissCount());
    }
}
Also used: Store (org.apache.hadoop.hbase.regionserver.Store), MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), TableName (org.apache.hadoop.hbase.TableName), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), Region (org.apache.hadoop.hbase.regionserver.Region), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), Test (org.junit.Test)
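
The test leans on waitForStoreFileCount(store, 1, 10000), which is defined elsewhere in the class and not shown here. A plausible sketch of that helper, assuming only the Store.getStorefilesCount() accessor the test already uses, is a poll-with-timeout loop:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hbase.regionserver.Store;

// Hedged sketch: poll until the store reaches the expected file count or the
// timeout (in milliseconds) elapses, then assert so a hang becomes a clear failure.
private static void waitForStoreFileCount(Store store, int count, long timeoutMs)
        throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (store.getStorefilesCount() != count && System.currentTimeMillis() < deadline) {
        Thread.sleep(100);
    }
    assertEquals(count, store.getStorefilesCount());
}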

Example 3 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From class TestBlockEvictionFromClient, method setCacheProperties.

private BlockCache setCacheProperties(Region region) {
    Iterator<Store> strItr = region.getStores().iterator();
    BlockCache cache = null;
    while (strItr.hasNext()) {
        Store store = strItr.next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        // Use the last one; each store's CacheConfig wraps the same server-wide block cache
        cache = cacheConf.getBlockCache();
    }
    return cache;
}
Also used: BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), Store (org.apache.hadoop.hbase.regionserver.Store), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig)
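
Returning only the last store's cache is safe because, in this version of HBase, every store's CacheConfig wraps the same server-wide BlockCache instance. A hedged usage sketch, where region and the JUnit assertion come from a surrounding test fixture rather than from the original example:

// Hypothetical call site: enable cache-on-write and evict-on-close on every
// store, then verify that a flush actually populates the shared cache.
BlockCache cache = setCacheProperties(region);
long blocksBefore = cache.getBlockCount();
// flush the memstore; with cache-on-write enabled the new HFile blocks are cached
region.flush(true);
assertTrue(cache.getBlockCount() > blocksBefore);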

Example 4 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From class TestClearRegionBlockCache, method testClearBlockCacheFromAdmin.

@Test
public void testClearBlockCacheFromAdmin() throws Exception {
    Admin admin = HTU.getAdmin();
    BlockCache blockCache1 = rs1.getBlockCache().get();
    BlockCache blockCache2 = rs2.getBlockCache().get();
    long initialBlockCount1 = blockCache1.getBlockCount();
    long initialBlockCount2 = blockCache2.getBlockCount();
    // scanning will cause blocks to be added to the BlockCache
    scanAllRegionsForRS(rs1);
    assertEquals(blockCache1.getBlockCount() - initialBlockCount1, HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
    scanAllRegionsForRS(rs2);
    assertEquals(blockCache2.getBlockCount() - initialBlockCount2, HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
    CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME);
    assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
    assertEquals(initialBlockCount1, blockCache1.getBlockCount());
    assertEquals(initialBlockCount2, blockCache2.getBlockCount());
}
Also used: CacheEvictionStats (org.apache.hadoop.hbase.CacheEvictionStats), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), AsyncAdmin (org.apache.hadoop.hbase.client.AsyncAdmin), Admin (org.apache.hadoop.hbase.client.Admin), Test (org.junit.Test)
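
scanAllRegionsForRS(...) is another helper defined elsewhere in the test class. A simplified stand-in that achieves the same effect through the standard client API is a full table scan; note that this version scans the whole table rather than only one region server's regions, and HTU and TABLE_NAME are the fixtures from the example above:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

// Hedged sketch: read every row so each HFile's data blocks are pulled into
// the region servers' block caches.
private static void scanAllRows() throws IOException {
    try (Table table = HTU.getConnection().getTable(TABLE_NAME);
         ResultScanner scanner = table.getScanner(new Scan())) {
        for (Result result : scanner) {
            // no-op: the reads themselves populate the cache
        }
    }
}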

Example 5 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From class TestClearRegionBlockCache, method testClearBlockCacheFromAsyncAdmin.

@Test
public void testClearBlockCacheFromAsyncAdmin() throws Exception {
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(HTU.getConfiguration()).get()) {
        AsyncAdmin admin = conn.getAdmin();
        BlockCache blockCache1 = rs1.getBlockCache().get();
        BlockCache blockCache2 = rs2.getBlockCache().get();
        long initialBlockCount1 = blockCache1.getBlockCount();
        long initialBlockCount2 = blockCache2.getBlockCount();
        // scanning will cause blocks to be added to the BlockCache
        scanAllRegionsForRS(rs1);
        assertEquals(blockCache1.getBlockCount() - initialBlockCount1, HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
        scanAllRegionsForRS(rs2);
        assertEquals(blockCache2.getBlockCount() - initialBlockCount2, HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
        CacheEvictionStats stats = admin.clearBlockCache(TABLE_NAME).get();
        assertEquals(stats.getEvictedBlocks(), HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY) + HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
        assertEquals(initialBlockCount1, blockCache1.getBlockCount());
        assertEquals(initialBlockCount2, blockCache2.getBlockCount());
    }
}
Also used: AsyncAdmin (org.apache.hadoop.hbase.client.AsyncAdmin), CacheEvictionStats (org.apache.hadoop.hbase.CacheEvictionStats), AsyncConnection (org.apache.hadoop.hbase.client.AsyncConnection), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), Test (org.junit.Test)
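
The only real difference from Example 4 is that AsyncAdmin.clearBlockCache returns a CompletableFuture, which the test resolves synchronously with get(). Outside of a test the same call can be composed without blocking; a minimal sketch, with the println standing in for whatever the caller actually does with the stats:

// Hedged sketch: react to the eviction stats asynchronously instead of blocking.
admin.clearBlockCache(TABLE_NAME)
    .thenAccept(stats ->
        System.out.println("Evicted " + stats.getEvictedBlocks() + " blocks"))
    .exceptionally(t -> {
        t.printStackTrace();
        return null;
    });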

Aggregations

BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache): 32 uses
Test (org.junit.Test): 24 uses
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 19 uses
CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache): 17 uses
TableName (org.apache.hadoop.hbase.TableName): 16 uses
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 15 uses
HStore (org.apache.hadoop.hbase.regionserver.HStore): 13 uses
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint): 12 uses
CountDownLatch (java.util.concurrent.CountDownLatch): 11 uses
CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock): 10 uses
BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey): 9 uses
BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache): 8 uses
Configuration (org.apache.hadoop.conf.Configuration): 5 uses
Cell (org.apache.hadoop.hbase.Cell): 4 uses
ArrayList (java.util.ArrayList): 3 uses
CacheStats (org.apache.hadoop.hbase.io.hfile.CacheStats): 3 uses
IndexOnlyLruBlockCache (org.apache.hadoop.hbase.io.hfile.IndexOnlyLruBlockCache): 3 uses
IOException (java.io.IOException): 2 uses
Path (org.apache.hadoop.fs.Path): 2 uses
CacheEvictionStats (org.apache.hadoop.hbase.CacheEvictionStats): 2 uses