
Example 26 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestBlockEvictionFromClient, method testBlockEvictionAfterHBASE13082WithCompactionAndFlush.

@Test
public void testBlockEvictionAfterHBASE13082WithCompactionAndFlush() throws IOException, InterruptedException {
    // do flush and scan in parallel
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        compactionLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
        HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
        HStore store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache().get();
        // insert data. 2 Rows are added
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        // Should create one Hfile with 2 blocks
        region.flush(true);
        // read the data and expect same blocks, one new hit, no misses
        int refCount = 0;
        // Check how this miss is happening
        // insert a second column, read the row, no new blocks, 3 new hits
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        byte[] data2 = Bytes.add(data, data);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        // flush, one new block
        System.out.println("Flushing cache");
        region.flush(true);
        Iterator<CachedBlock> iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // Create three sets of scan
        ScanThread[] scanThreads = initiateScan(table, false);
        Thread.sleep(100);
        iterator = cache.iterator();
        boolean usedBlocksFound = false;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Blocks will be with count 3
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        // Make a put and do a flush
        QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        data2 = Bytes.add(data, data);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        // flush, one new block
        System.out.println("Flushing cache");
        region.flush(true);
        assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
        usedBlocksFound = false;
        System.out.println("Compacting");
        assertEquals(3, store.getStorefilesCount());
        store.triggerMajorCompaction();
        region.compact(true);
        // wait 10 seconds max
        waitForStoreFileCount(store, 1, 10000);
        assertEquals(1, store.getStorefilesCount());
        // Even after compaction is done we will have some blocks that cannot
        // be evicted this is because the scan is still referencing them
        iterator = cache.iterator();
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Blocks will be with count 3 as they are not yet cleared
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
        // Should not throw exception
        compactionLatch.countDown();
        latch.countDown();
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
        // by this time all blocks should have been evicted
        iterator = cache.iterator();
        // Since a flush and compaction happened after a scan started
        // we need to ensure that all the original blocks of the compacted file
        // is also removed.
        iterateBlockCache(cache, iterator);
        Result r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        // The gets would be working on new blocks
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used: CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock), CountDownLatch (java.util.concurrent.CountDownLatch), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), TableName (org.apache.hadoop.hbase.TableName), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), HStore (org.apache.hadoop.hbase.regionserver.HStore), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), Test (org.junit.Test)
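The RPC ref-count scan over the cache appears twice in the test above. As a readability aid only, a small helper along these lines could factor it out; the name checkRpcRefCounts and its parameters are hypothetical and not part of the HBase source, but every call it makes (cache.iterator(), BlockCacheKey, getRpcRefCount on BucketCache/CombinedBlockCache) already appears in the test:

private static boolean checkRpcRefCounts(BlockCache cache, int expectedRefCount) {
    // Walk every cached block and, for cache implementations that track RPC
    // references, assert that any still-referenced block is pinned by the
    // expected number of scanners. Returns true if such a block was seen.
    boolean usedBlocksFound = false;
    Iterator<CachedBlock> iterator = cache.iterator();
    while (iterator.hasNext()) {
        CachedBlock next = iterator.next();
        BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
        int refCount;
        if (cache instanceof BucketCache) {
            refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
        } else if (cache instanceof CombinedBlockCache) {
            refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
        } else {
            continue;
        }
        if (refCount != 0) {
            assertEquals(expectedRefCount, refCount);
            usedBlocksFound = true;
        }
    }
    return usedBlocksFound;
}

With such a helper, each of the two loops in the test would reduce to usedBlocksFound = checkRpcRefCounts(cache, NO_OF_THREADS).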

Example 27 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestClientSideRegionScanner, method testConfiguredBlockCache.

@Test
public void testConfiguredBlockCache() throws IOException {
    Configuration copyConf = new Configuration(conf);
    // tiny 1MB fixed cache size
    long blockCacheFixedSize = 1024 * 1024L;
    copyConf.setLong(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY, blockCacheFixedSize);
    ClientSideRegionScanner clientSideRegionScanner = new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null);
    BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache();
    assertNotNull(blockCache);
    assertTrue(blockCache instanceof IndexOnlyLruBlockCache);
    assertTrue(blockCacheFixedSize == blockCache.getMaxSize());
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), IndexOnlyLruBlockCache (org.apache.hadoop.hbase.io.hfile.IndexOnlyLruBlockCache), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), Test (org.junit.Test)

Example 28 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestClientSideRegionScanner, method testNoBlockCache.

@Test
public void testNoBlockCache() throws IOException {
    Configuration copyConf = new Configuration(conf);
    copyConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
    ClientSideRegionScanner clientSideRegionScanner = new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, null);
    BlockCache blockCache = clientSideRegionScanner.getRegion().getBlockCache();
    assertNull(blockCache);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), IndexOnlyLruBlockCache (org.apache.hadoop.hbase.io.hfile.IndexOnlyLruBlockCache), Test (org.junit.Test)

Example 29 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestCacheOnWriteInSchema, method readStoreFile.

private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache().get();
    HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
    sf.initReader();
    HFile.Reader reader = sf.getReader().getHFileReader();
    try {
        // Open a scanner with (on read) caching disabled
        HFileScanner scanner = reader.getScanner(conf, false, false);
        assertTrue(testDescription, scanner.seekTo());
        // Cribbed from io.hfile.TestCacheOnWrite
        long offset = 0;
        while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
            // Flags: don't cache the block, use pread, this is not a compaction.
            // Also, pass null for expected block type to avoid checking it.
            HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, DataBlockEncoding.NONE);
            BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
            boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
            boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
            final BlockType blockType = block.getBlockType();
            if (shouldBeCached != isCached && (cowType.blockType1.equals(blockType) || cowType.blockType2.equals(blockType))) {
                throw new AssertionError("shouldBeCached: " + shouldBeCached + "\n" + "isCached: " + isCached + "\n" + "Test description: " + testDescription + "\n" + "block: " + block + "\n" + "blockCacheKey: " + blockCacheKey);
            }
            offset += block.getOnDiskSizeWithHeader();
        }
    } finally {
        reader.close();
    }
}
Also used: HFileBlock (org.apache.hadoop.hbase.io.hfile.HFileBlock), BlockType (org.apache.hadoop.hbase.io.hfile.BlockType), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner), HFile (org.apache.hadoop.hbase.io.hfile.HFile), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey)
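For context, the cache-on-write behaviour that readStoreFile verifies is normally enabled per column family in the table schema (hence the test name TestCacheOnWriteInSchema). A minimal sketch, assuming the standard HBase 2.x ColumnFamilyDescriptorBuilder setters; the family name "cf" is illustrative and not taken from the test:

// Sketch only: switch on cache-on-write for a column family so that data,
// index and bloom blocks are inserted into the block cache as the store
// file is written, which is what readStoreFile() later checks per block type.
ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder
    .newBuilder(Bytes.toBytes("cf"))       // illustrative family name
    .setBloomFilterType(BloomType.ROWCOL)  // matches the BloomType used above
    .setCacheDataOnWrite(true)             // cache DATA blocks as they are written
    .setCacheIndexesOnWrite(true)          // cache leaf index blocks on write
    .setCacheBloomsOnWrite(true)           // cache bloom chunk blocks on write
    .build();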

Example 30 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestClearRegionBlockCache, method testClearBlockCache.

@Test
public void testClearBlockCache() throws Exception {
    BlockCache blockCache1 = rs1.getBlockCache().get();
    BlockCache blockCache2 = rs2.getBlockCache().get();
    long initialBlockCount1 = blockCache1.getBlockCount();
    long initialBlockCount2 = blockCache2.getBlockCount();
    // scan will cause blocks to be added in BlockCache
    scanAllRegionsForRS(rs1);
    assertEquals(blockCache1.getBlockCount() - initialBlockCount1, HTU.getNumHFilesForRS(rs1, TABLE_NAME, FAMILY));
    clearRegionBlockCache(rs1);
    scanAllRegionsForRS(rs2);
    assertEquals(blockCache2.getBlockCount() - initialBlockCount2, HTU.getNumHFilesForRS(rs2, TABLE_NAME, FAMILY));
    clearRegionBlockCache(rs2);
    assertEquals("" + blockCache1.getBlockCount(), initialBlockCount1, blockCache1.getBlockCount());
    assertEquals("" + blockCache2.getBlockCount(), initialBlockCount2, blockCache2.getBlockCount());
}
Also used: BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), Test (org.junit.Test)
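The helpers scanAllRegionsForRS and clearRegionBlockCache are not shown in this excerpt. As a rough, hypothetical sketch of the clearing step only, the cluster Admin API exposes clearBlockCache(TableName), which returns the CacheEvictionStats type listed in the aggregations below; the actual test may clear the cache through the region server instead:

private void clearBlockCacheViaAdmin(TableName tableName) throws IOException {
    // Hypothetical helper, not the test's clearRegionBlockCache: ask the
    // cluster to evict every cached block belonging to the given table.
    try (Admin admin = HTU.getConnection().getAdmin()) {
        CacheEvictionStats stats = admin.clearBlockCache(tableName);
        assertTrue(stats.getEvictedBlocks() >= 0);
    }
}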

Aggregations

BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache): 32 usages
Test (org.junit.Test): 24 usages
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 19 usages
CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache): 17 usages
TableName (org.apache.hadoop.hbase.TableName): 16 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 15 usages
HStore (org.apache.hadoop.hbase.regionserver.HStore): 13 usages
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint): 12 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 11 usages
CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock): 10 usages
BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey): 9 usages
BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache): 8 usages
Configuration (org.apache.hadoop.conf.Configuration): 5 usages
Cell (org.apache.hadoop.hbase.Cell): 4 usages
ArrayList (java.util.ArrayList): 3 usages
CacheStats (org.apache.hadoop.hbase.io.hfile.CacheStats): 3 usages
IndexOnlyLruBlockCache (org.apache.hadoop.hbase.io.hfile.IndexOnlyLruBlockCache): 3 usages
IOException (java.io.IOException): 2 usages
Path (org.apache.hadoop.fs.Path): 2 usages
CacheEvictionStats (org.apache.hadoop.hbase.CacheEvictionStats): 2 usages