
Example 6 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestBlocksScanned, the method _testBlocksScanned:

private void _testBlocksScanned(TableDescriptor td) throws Exception {
    BlockCache blockCache = BlockCacheFactory.createBlockCache(conf);
    RegionInfo regionInfo = RegionInfoBuilder.newBuilder(td.getTableName()).setStartKey(START_KEY).setEndKey(END_KEY).build();
    HRegion r = HBaseTestingUtil.createRegionAndWAL(regionInfo, testDir, conf, td, blockCache);
    addContent(r, FAMILY, COL);
    r.flush(true);
    CacheStats stats = blockCache.getStats();
    long before = stats.getHitCount() + stats.getMissCount();
    // Scan a small key range ("aaa" up to "aaz") using PREAD.
    Scan scan = new Scan().withStartRow(Bytes.toBytes("aaa")).withStopRow(Bytes.toBytes("aaz")).setReadType(Scan.ReadType.PREAD);
    scan.addColumn(FAMILY, COL);
    scan.readVersions(1);
    InternalScanner s = r.getScanner(scan);
    List<Cell> results = new ArrayList<>();
    // Exhaust the scanner; cells accumulate in results across next() calls.
    while (s.next(results)) ;
    s.close();
    // Rows "aaa" through "aay" are expected: 'z' - 'a' = 25 results.
    int expectResultSize = 'z' - 'a';
    assertEquals(expectResultSize, results.size());
    int kvPerBlock = (int) Math.ceil(BLOCK_SIZE / (double) KeyValueUtil.ensureKeyValue(results.get(0)).getLength());
    assertEquals(2, kvPerBlock);
    long expectDataBlockRead = (long) Math.ceil(expectResultSize / (double) kvPerBlock);
    // Each data block read also incurs one index block read.
    long expectIndexBlockRead = expectDataBlockRead;
    assertEquals(expectIndexBlockRead + expectDataBlockRead, stats.getHitCount() + stats.getMissCount() - before);
}
Also used : BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) ArrayList(java.util.ArrayList) CacheStats(org.apache.hadoop.hbase.io.hfile.CacheStats) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell)
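
A side note on the technique: the hit-plus-miss delta used above is a simple way to count how many blocks a piece of read code touches, because every block access is either a cache hit or a cache miss. A minimal sketch of that pattern, assuming the cache was created with BlockCacheFactory.createBlockCache(conf) as in the test; the helper name countBlocksTouched is hypothetical:

private static long countBlocksTouched(BlockCache blockCache, Runnable reads) {
    CacheStats stats = blockCache.getStats();
    // Snapshot hits + misses before doing any reads.
    long before = stats.getHitCount() + stats.getMissCount();
    // Run reads against a region that was created with this block cache.
    reads.run();
    // Every block access is a hit or a miss, so the delta is the number of blocks touched.
    return stats.getHitCount() + stats.getMissCount() - before;
}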

Example 7 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestBlocksRead, the method testBlocksStoredWhenCachingDisabled:

/**
 * Test the number of blocks stored to ensure disabling cache-fill on Scan works.
 */
@Test
public void testBlocksStoredWhenCachingDisabled() throws Exception {
    byte[] TABLE = Bytes.toBytes("testBlocksReadWhenCachingDisabled");
    String FAMILY = "cf1";
    BlockCache blockCache = BlockCacheFactory.createBlockCache(conf);
    this.region = initHRegion(TABLE, testName.getMethodName(), conf, FAMILY, blockCache);
    try {
        putData(FAMILY, "row", "col1", 1);
        putData(FAMILY, "row", "col2", 2);
        region.flush(true);
        // Execute a scan with caching turned off
        // Expected blocks stored: 0
        long blocksStart = blockCache.getBlockCount();
        Scan scan = new Scan();
        scan.setCacheBlocks(false);
        RegionScanner rs = region.getScanner(scan);
        List<Cell> result = new ArrayList<>(2);
        rs.next(result);
        assertEquals(2 * BLOOM_TYPE.length, result.size());
        rs.close();
        long blocksEnd = blockCache.getBlockCount();
        assertEquals(blocksStart, blocksEnd);
        // Execute with caching turned on
        // Expected blocks stored: 2
        blocksStart = blocksEnd;
        scan.setCacheBlocks(true);
        rs = region.getScanner(scan);
        result = new ArrayList<>(2);
        rs.next(result);
        assertEquals(2 * BLOOM_TYPE.length, result.size());
        rs.close();
        blocksEnd = blockCache.getBlockCount();
        assertEquals(2 * BLOOM_TYPE.length, blocksEnd - blocksStart);
    } finally {
        HBaseTestingUtil.closeRegionAndWAL(this.region);
        this.region = null;
    }
}
Also used : BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) ArrayList(java.util.ArrayList) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
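
The same cache-fill switch is available on the ordinary client path, not just on an internal RegionScanner. A minimal sketch, not taken from the test, of a scan that avoids filling the server-side block cache; the connection and table name are placeholders:

try (Table table = connection.getTable(TableName.valueOf("my_table"));
     ResultScanner scanner = table.getScanner(new Scan().setCacheBlocks(false))) {
    for (Result result : scanner) {
        // process each row; its data blocks are not cached on the region server
    }
}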

Example 8 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestClientSideRegionScanner, the method testDefaultBlockCache:

@Test
public void testDefaultBlockCache() throws IOException {
    Configuration copyConf = new Configuration(conf);
    ClientSideRegionScanner clientSideRegionScanner = new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null);
    BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache();
    assertNotNull(blockCache);
    assertTrue(blockCache instanceof IndexOnlyLruBlockCache);
    assertTrue(HConstants.HBASE_CLIENT_SCANNER_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT == blockCache.getMaxSize());
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) IndexOnlyLruBlockCache(org.apache.hadoop.hbase.io.hfile.IndexOnlyLruBlockCache) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) Test(org.junit.Test)
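
For contrast with the fixed-size, index-only cache that ClientSideRegionScanner gets by default, a server-style on-heap cache is normally sized from configuration. A minimal sketch, where 0.4 is an arbitrary example heap fraction:

Configuration conf = HBaseConfiguration.create();
// Reserve a fraction of the JVM heap for the on-heap block cache.
conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.4f);
// With no bucket/off-heap cache configured, this typically yields an LRU block cache.
BlockCache cache = BlockCacheFactory.createBlockCache(conf);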

Example 9 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestFromClientSide5, the method testCacheOnWriteEvictOnClose:

/**
 * Tests that cache on write works all the way up from the client-side.
 *
 * Performs inserts, flushes, and compactions, verifying changes in the block
 * cache along the way.
 */
@Test
public void testCacheOnWriteEvictOnClose() throws Exception {
    final TableName tableName = name.getTableName();
    byte[] data = Bytes.toBytes("data");
    try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) {
        try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
            // get the block cache and region
            String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
            HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
            HStore store = region.getStores().iterator().next();
            CacheConfig cacheConf = store.getCacheConfig();
            cacheConf.setCacheDataOnWrite(true);
            cacheConf.setEvictOnClose(true);
            BlockCache cache = cacheConf.getBlockCache().get();
            // establish baseline stats
            long startBlockCount = cache.getBlockCount();
            long startBlockHits = cache.getStats().getHitCount();
            long startBlockMiss = cache.getStats().getMissCount();
            // wait until the baseline is stable (at least 500 ms)
            for (int i = 0; i < 5; i++) {
                Thread.sleep(100);
                if (startBlockCount != cache.getBlockCount() || startBlockHits != cache.getStats().getHitCount() || startBlockMiss != cache.getStats().getMissCount()) {
                    startBlockCount = cache.getBlockCount();
                    startBlockHits = cache.getStats().getHitCount();
                    startBlockMiss = cache.getStats().getMissCount();
                    // stats changed; restart the stability window
                    i = -1;
                }
            }
            // insert data
            Put put = new Put(ROW);
            put.addColumn(FAMILY, QUALIFIER, data);
            table.put(put);
            assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
            // data was in memstore so don't expect any changes
            assertEquals(startBlockCount, cache.getBlockCount());
            assertEquals(startBlockHits, cache.getStats().getHitCount());
            assertEquals(startBlockMiss, cache.getStats().getMissCount());
            // flush the data
            LOG.debug("Flushing cache");
            region.flush(true);
            // expect two more blocks in cache (DATA and ROOT_INDEX), no change in hits/misses
            long expectedBlockCount = startBlockCount + 2;
            long expectedBlockHits = startBlockHits;
            long expectedBlockMiss = startBlockMiss;
            assertEquals(expectedBlockCount, cache.getBlockCount());
            assertEquals(expectedBlockHits, cache.getStats().getHitCount());
            assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
            // read the data and expect same blocks, one new hit, no misses
            assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
            assertEquals(expectedBlockCount, cache.getBlockCount());
            assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
            assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
            // insert a second column, read the row, no new blocks, one new hit
            byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
            byte[] data2 = Bytes.add(data, data);
            put = new Put(ROW);
            put.addColumn(FAMILY, QUALIFIER2, data2);
            table.put(put);
            Result r = table.get(new Get(ROW));
            assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
            assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
            assertEquals(expectedBlockCount, cache.getBlockCount());
            assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
            assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
            // flush, one new block
            System.out.println("Flushing cache");
            region.flush(true);
            // + 1 for Index Block, +1 for data block
            expectedBlockCount += 2;
            assertEquals(expectedBlockCount, cache.getBlockCount());
            assertEquals(expectedBlockHits, cache.getStats().getHitCount());
            assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
            // compact, net minus two blocks, two hits, no misses
            System.out.println("Compacting");
            assertEquals(2, store.getStorefilesCount());
            store.triggerMajorCompaction();
            region.compact(true);
            store.closeAndArchiveCompactedFiles();
            // wait 10 seconds max
            waitForStoreFileCount(store, 1, 10000);
            assertEquals(1, store.getStorefilesCount());
            // evicted two data blocks and two index blocks and compaction does not cache new blocks
            expectedBlockCount = 0;
            assertEquals(expectedBlockCount, cache.getBlockCount());
            expectedBlockHits += 2;
            assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
            assertEquals(expectedBlockHits, cache.getStats().getHitCount());
            // read the row, this should be a cache miss because we don't cache data
            // blocks on compaction
            r = table.get(new Get(ROW));
            assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
            assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
            // cached one data block
            expectedBlockCount += 1;
            assertEquals(expectedBlockCount, cache.getBlockCount());
            assertEquals(expectedBlockHits, cache.getStats().getHitCount());
            assertEquals(++expectedBlockMiss, cache.getStats().getMissCount());
        }
    }
}
Also used : MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) HStore(org.apache.hadoop.hbase.regionserver.HStore) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
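
The test enables cache-on-write and evict-on-close by mutating the store's CacheConfig, which is only possible from inside the region server process. Outside a test these flags are usually set per column family instead; a minimal sketch, with the table name, family, and admin handle as placeholders:

TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("my_table"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        // cache DATA blocks as flushes and compactions write them
        .setCacheDataOnWrite(true)
        // evict this family's blocks when its store files are closed
        .setEvictBlocksOnClose(true)
        .build())
    .build();
admin.createTable(td);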

Example 10 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestAvoidCellReferencesIntoShippedBlocks, the method testHBASE16372InReadPath:

@Test
public void testHBASE16372InReadPath() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create a table with block size as 1024
    try (Table table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, null)) {
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
        HRegion region = (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
        HStore store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        final BlockCache cache = cacheConf.getBlockCache().get();
        // insert data: 6 rows (ROW through ROW5), two columns each
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW2);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW2);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW3);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW3);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW4);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW4);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW5);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW5);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        // flush the memstore so the data is written out to HFiles
        region.flush(true);
        // Load cache
        Scan s = new Scan();
        s.setMaxResultSize(1000);
        int count;
        try (ResultScanner scanner = table.getScanner(s)) {
            count = Iterables.size(scanner);
        }
        assertEquals("Count all the rows ", 6, count);
        // Scan from cache
        s = new Scan();
        // Start a scan from ROW1, fetching one row per RPC
        s.setCaching(1);
        s.withStartRow(ROW1);
        // allow partial results so the scan can return rows in pieces
        s.setAllowPartialResults(true);
        s.setMaxResultSize(1000);
        try (ScanPerNextResultScanner scanner = new ScanPerNextResultScanner(TEST_UTIL.getAsyncConnection().getTable(tableName), s)) {
            Thread evictorThread = new Thread() {

                @Override
                public void run() {
                    List<BlockCacheKey> cacheList = new ArrayList<>();
                    Iterator<CachedBlock> iterator = cache.iterator();
                    // evict all the blocks
                    while (iterator.hasNext()) {
                        CachedBlock next = iterator.next();
                        BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
                        cacheList.add(cacheKey);
                        /**
                         * Only one block is still referenced by the in-flight RPC; evict the
                         * blocks that have no RPC reference.
                         */
                        evictBlock(cache, cacheKey);
                    }
                    try {
                        Thread.sleep(1);
                    } catch (InterruptedException e1) {
                    }
                    iterator = cache.iterator();
                    int refBlockCount = 0;
                    while (iterator.hasNext()) {
                        iterator.next();
                        refBlockCount++;
                    }
                    assertEquals("One block should be there ", 1, refBlockCount);
                    // Rescan to repopulate the cache with these rows.
                    Scan s1 = new Scan();
                    // This scan starts at ROW3 and stops before ROW5, repopulating the cache.
                    s1.withStartRow(ROW3);
                    s1.withStopRow(ROW5);
                    s1.setCaching(1);
                    try (ResultScanner scanner = table.getScanner(s1)) {
                        int count = Iterables.size(scanner);
                        assertEquals("Count the rows", 2, count);
                        int newBlockRefCount = 0;
                        List<BlockCacheKey> newCacheList = new ArrayList<>();
                        while (true) {
                            newBlockRefCount = 0;
                            newCacheList.clear();
                            iterator = cache.iterator();
                            while (iterator.hasNext()) {
                                CachedBlock next = iterator.next();
                                BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
                                newCacheList.add(cacheKey);
                            }
                            for (BlockCacheKey key : cacheList) {
                                if (newCacheList.contains(key)) {
                                    newBlockRefCount++;
                                }
                            }
                            if (newBlockRefCount == 6) {
                                break;
                            }
                        }
                        latch.countDown();
                    } catch (IOException e) {
                    }
                }
            };
            count = 0;
            while (scanner.next() != null) {
                count++;
                if (count == 2) {
                    evictorThread.start();
                    latch.await();
                }
            }
        }
        assertEquals("Count should give all rows ", 10, count);
    }
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) ArrayList(java.util.ArrayList) IOException(java.io.IOException) BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) HStore(org.apache.hadoop.hbase.regionserver.HStore) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
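
The evictor thread's sweep can be distilled into a small helper. A minimal sketch that calls BlockCache.evictBlock directly instead of the test's evictBlock(cache, cacheKey) helper; blocks still referenced by an in-flight RPC are expected to survive the sweep:

private static int evictAllBlocks(BlockCache cache) {
    int evicted = 0;
    Iterator<CachedBlock> it = cache.iterator();
    while (it.hasNext()) {
        CachedBlock block = it.next();
        // Rebuild the cache key from the block's file name and offset.
        BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset());
        if (cache.evictBlock(key)) {
            evicted++;
        }
    }
    return evicted;
}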

Aggregations

BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache): 32 usages
Test (org.junit.Test): 24 usages
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 19 usages
CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache): 17 usages
TableName (org.apache.hadoop.hbase.TableName): 16 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 15 usages
HStore (org.apache.hadoop.hbase.regionserver.HStore): 13 usages
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint): 12 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 11 usages
CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock): 10 usages
BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey): 9 usages
BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache): 8 usages
Configuration (org.apache.hadoop.conf.Configuration): 5 usages
Cell (org.apache.hadoop.hbase.Cell): 4 usages
ArrayList (java.util.ArrayList): 3 usages
CacheStats (org.apache.hadoop.hbase.io.hfile.CacheStats): 3 usages
IndexOnlyLruBlockCache (org.apache.hadoop.hbase.io.hfile.IndexOnlyLruBlockCache): 3 usages
IOException (java.io.IOException): 2 usages
Path (org.apache.hadoop.fs.Path): 2 usages
CacheEvictionStats (org.apache.hadoop.hbase.CacheEvictionStats): 2 usages