
Example 16 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestBlockEvictionFromClient, method testScanWithException.

@Test
public void testScanWithException() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        exceptionLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create a table with a block size of 1024 so the inserted KVs span two blocks
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
        HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
        HStore store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache().get();
        // Insert data; two rows are added
        insertData(table);
        // Flush the data; this should create one HFile with two blocks
        System.out.println("Flushing cache");
        region.flush(true);
        // CustomInnerRegionObserver.sleepTime.set(5000);
        CustomInnerRegionObserver.throwException.set(true);
        ScanThread[] scanThreads = initiateScan(table, false);
        // For the scan case the block ref count would already have been decremented,
        // because the scanner was wrapped before even the postNext hook executed.
        // Give the block some time to be decremented.
        Thread.sleep(100);
        Iterator<CachedBlock> iterator = cache.iterator();
        boolean usedBlocksFound = false;
        int refCount = 0;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Each in-use block should be referenced once per scan thread
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertTrue(usedBlocksFound);
        exceptionLatch.countDown();
        // Count down the latch so the blocked scanners can proceed
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
        iterator = cache.iterator();
        usedBlocksFound = false;
        refCount = 0;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Should not happen once the scanners have finished
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertFalse(usedBlocksFound);
        // The ref count should always be 0: since HBASE-16604 the scanner is always recreated
        assertEquals(0, refCount);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) CountDownLatch(java.util.concurrent.CountDownLatch) BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache) HStore(org.apache.hadoop.hbase.regionserver.HStore) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
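The instanceof chain that resolves the RPC ref count recurs in several of these tests. As a readability aid, here is a minimal helper sketch that factors it out; it is not part of the original test class, and it assumes only the getRpcRefCount accessors already exercised above.

// Hypothetical helper (not in the original test class): look up the RPC ref count
// of a cached block for the cache implementations that track it.
private static int rpcRefCount(BlockCache cache, BlockCacheKey cacheKey) {
    if (cache instanceof BucketCache) {
        return ((BucketCache) cache).getRpcRefCount(cacheKey);
    } else if (cache instanceof CombinedBlockCache) {
        return ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
    }
    // Other BlockCache implementations do not expose RPC reference counts
    return 0;
}

With such a helper, the while loops above reduce to a single call per cached block.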

Example 17 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestBlockEvictionFromClient, method testParallelGetsAndScanWithWrappedRegionScanner.

@Test
public void testParallelGetsAndScanWithWrappedRegionScanner() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(2);
        // Check whether get() returns its blocks as part of its own close()
        getLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create a table with a block size of 1024 so the inserted KVs span two blocks
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
        HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
        HStore store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache().get();
        // Insert data; two rows are added
        insertData(table);
        // Flush the data; this should create one HFile with two blocks
        System.out.println("Flushing cache");
        region.flush(true);
        // CustomInnerRegionObserver.sleepTime.set(5000);
        CustomInnerRegionObserver.waitForGets.set(true);
        // Start three scan threads
        ScanThread[] scanThreads = initiateScan(table, false);
        // Start three get threads
        GetThread[] getThreads = initiateGet(table, false, false);
        // For the scan case the block ref count would already have been decremented,
        // because the scanner was wrapped before even the postNext hook executed.
        // Give the block some time to be decremented.
        Thread.sleep(100);
        CustomInnerRegionObserver.waitForGets.set(false);
        checkForBlockEviction(cache, false, false);
        // Count down the latch so the blocked readers can proceed
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (GetThread thread : getThreads) {
            thread.join();
        }
        getLatch.countDown();
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) HStore(org.apache.hadoop.hbase.regionserver.HStore) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
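CustomInnerRegionObserver and its wrapper belong to TestBlockEvictionFromClient and are not reproduced on this page. Judging only from the call sites above (getCdl().get().countDown(), waitForGets), the gating mechanism is roughly the following sketch; every name in it is assumed, and the coprocessor types come from org.apache.hadoop.hbase.coprocessor.

// Sketch of the latch-gating idea (assumed; not the actual CustomInnerRegionObserver):
// hold each scanner-next RPC open until the test counts the shared latch down, so the
// RPC reference counts on the scanned blocks stay observable for the assertions.
public static class LatchGatingObserver implements RegionCoprocessor, RegionObserver {

    private static final AtomicReference<CountDownLatch> cdl =
            new AtomicReference<>(new CountDownLatch(0));

    public static AtomicReference<CountDownLatch> getCdl() {
        return cdl;
    }

    @Override
    public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(this);
    }

    @Override
    public boolean postScannerNext(ObserverContext<RegionCoprocessorEnvironment> c,
            InternalScanner s, List<Result> results, int limit, boolean hasMore)
            throws IOException {
        try {
            // Block the RPC here; the test releases it via getCdl().get().countDown()
            cdl.get().await();
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
        return hasMore;
    }
}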

Example 18 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestBlockEvictionFromClient, method testMultiGets.

@Test
public void testMultiGets() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(2);
        // Check whether get() returns its blocks as part of its own close()
        getLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create a table with a block size of 1024 so the inserted KVs span two blocks
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
        HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
        HStore store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache().get();
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        region.flush(true);
        // The three flushes above should each have created an HFile
        System.out.println("Flushing cache");
        CustomInnerRegionObserver.waitForGets.set(true);
        // Start the multi-get threads
        MultiGetThread[] getThreads = initiateMultiGet(table);
        Thread.sleep(200);
        int refCount;
        Iterator<CachedBlock> iterator = cache.iterator();
        boolean foundNonZeroBlock = false;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                assertEquals(NO_OF_THREADS, refCount);
                foundNonZeroBlock = true;
            }
        }
        assertTrue("Should have found nonzero ref count block", foundNonZeroBlock);
        CustomInnerRegionObserver.getCdl().get().countDown();
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (MultiGetThread thread : getThreads) {
            thread.join();
        }
        // Verify that the gets have returned the blocks they held;
        // by now every ref count should have dropped back to zero
        CustomInnerRegionObserver.waitForGets.set(true);
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        getLatch.countDown();
        System.out.println("Gets should have returned the bloks");
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) CountDownLatch(java.util.concurrent.CountDownLatch) BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache) HStore(org.apache.hadoop.hbase.regionserver.HStore) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
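iterateBlockCache is another helper of the test class that this page does not show. Based on how it is called above, after all reader threads have joined, a plausible reconstruction is the sketch below, which asserts that no cached block is still referenced by an RPC; treat it as an assumption about the helper's intent, not its exact code.

// Plausible reconstruction (assumed): once all readers have finished, every block
// remaining in the cache should have an RPC ref count of zero.
private void iterateBlockCache(BlockCache cache, Iterator<CachedBlock> iterator) {
    while (iterator.hasNext()) {
        CachedBlock next = iterator.next();
        BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
        if (cache instanceof BucketCache) {
            assertEquals(0, ((BucketCache) cache).getRpcRefCount(cacheKey));
        } else if (cache instanceof CombinedBlockCache) {
            assertEquals(0, ((CombinedBlockCache) cache).getRpcRefCount(cacheKey));
        }
    }
}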

Example 19 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestBlockEvictionFromClient, method setCacheProperties.

private BlockCache setCacheProperties(HRegion region) {
    Iterator<HStore> strItr = region.getStores().iterator();
    BlockCache cache = null;
    while (strItr.hasNext()) {
        HStore store = strItr.next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        // All stores share the same block cache; keep the last reference
        cache = cacheConf.getBlockCache().get();
    }
    return cache;
}
Also used : BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) HStore(org.apache.hadoop.hbase.regionserver.HStore) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig)
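A typical call site, mirroring the setup code in the tests above, would resolve the region and then let the helper flip the cache flags on every store. The snippet below is a hypothetical usage example, not taken from the test class:

// Hypothetical usage: enable cache-on-write and evict-on-close on all stores of the
// first region, then keep the shared BlockCache for later ref-count assertions.
RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
BlockCache cache = setCacheProperties(region);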

Example 20 with BlockCache

Use of org.apache.hadoop.hbase.io.hfile.BlockCache in project hbase by apache.

From the class TestBlockEvictionFromClient, method testBlockRefCountAfterSplits.

@Test
public void testBlockRefCountAfterSplits() throws IOException, InterruptedException {
    Table table = null;
    try {
        final TableName tableName = TableName.valueOf(name.getMethodName());
        TableDescriptor desc = TEST_UTIL.createTableDescriptor(tableName);
        // This test expects the RPC ref count of cached data blocks to be 0 after a split.
        // After the split, two daughter regions are opened and a compaction is scheduled to
        // get rid of references to the parent region's HFiles. Compaction increases the ref
        // count of cached data blocks by 1, which makes the test flaky since compaction can
        // kick in at any time. To avoid this, the table is created with compaction disabled.
        table = TEST_UTIL.createTable(TableDescriptorBuilder.newBuilder(desc).setCompactionEnabled(false).build(), FAMILIES_1, null, BloomType.ROW, 1024, null);
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
        HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
        HStore store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache().get();
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        put = new Put(ROW2);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        put = new Put(ROW3);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        region.flush(true);
        ServerName rs = Iterables.getOnlyElement(TEST_UTIL.getAdmin().getRegionServers());
        int regionCount = TEST_UTIL.getAdmin().getRegions(rs).size();
        LOG.info("About to SPLIT on {} {}, count={}", Bytes.toString(ROW1), region.getRegionInfo(), regionCount);
        TEST_UTIL.getAdmin().split(tableName, ROW1);
        // Wait for splits
        TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getAdmin().getRegions(rs).size() > regionCount);
        region.compact(true);
        List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegionServer(rs).getRegions();
        for (HRegion r : regions) {
            LOG.info("" + r.getCompactionState());
            TEST_UTIL.waitFor(30000, () -> r.getCompactionState().equals(CompactionState.NONE));
        }
        LOG.info("Split finished, is region closed {} {}", region.isClosed(), cache);
        Iterator<CachedBlock> iterator = cache.iterator();
        // Though the split created HalfStoreFileReaders, the first-key and last-key
        // scanners should be closed in order to release those blocks
        iterateBlockCache(cache, iterator);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) ServerName(org.apache.hadoop.hbase.ServerName) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) HStore(org.apache.hadoop.hbase.regionserver.HStore) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)

Aggregations

BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache): 32 uses
Test (org.junit.Test): 24 uses
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 19 uses
CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache): 17 uses
TableName (org.apache.hadoop.hbase.TableName): 16 uses
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 15 uses
HStore (org.apache.hadoop.hbase.regionserver.HStore): 13 uses
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint): 12 uses
CountDownLatch (java.util.concurrent.CountDownLatch): 11 uses
CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock): 10 uses
BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey): 9 uses
BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache): 8 uses
Configuration (org.apache.hadoop.conf.Configuration): 5 uses
Cell (org.apache.hadoop.hbase.Cell): 4 uses
ArrayList (java.util.ArrayList): 3 uses
CacheStats (org.apache.hadoop.hbase.io.hfile.CacheStats): 3 uses
IndexOnlyLruBlockCache (org.apache.hadoop.hbase.io.hfile.IndexOnlyLruBlockCache): 3 uses
IOException (java.io.IOException): 2 uses
Path (org.apache.hadoop.fs.Path): 2 uses
CacheEvictionStats (org.apache.hadoop.hbase.CacheEvictionStats): 2 uses