Search in sources :

Example 6 with CombinedBlockCache

use of org.apache.hadoop.hbase.io.hfile.CombinedBlockCache in project hbase by apache.

In class TestBlockEvictionFromClient, the method checkForBlockEviction:

/**
 * Blocks until all reader threads have completed their get()/next() calls, then walks every
 * block in {@code cache} and verifies its reference count against the coprocessor counters.
 * Finally counts down the shared latch so waiting reader threads can proceed.
 *
 * @param cache          the block cache to inspect (ref counts are only readable for
 *                       {@code BucketCache} and {@code CombinedBlockCache})
 * @param getClosed      whether the gets have already been closed, in which case only the
 *                       scan-held references are expected to remain
 * @param expectOnlyZero whether every block is expected to have a zero ref count
 * @throws InterruptedException if interrupted while sleeping in the wait loop
 */
private void checkForBlockEviction(BlockCache cache, boolean getClosed, boolean expectOnlyZero) throws InterruptedException {
    // NOTE(review): these busy-wait loops have no timeout; a stuck reader thread would hang
    // the test until the surrounding test timeout fires — TODO consider a bounded wait.
    if (CustomInnerRegionObserver.waitForGets.get()) {
        // Wait until every reader thread has finished its get().
        while (CustomInnerRegionObserver.countOfGets.get() < NO_OF_THREADS) {
            Thread.sleep(100);
        }
    } else {
        // Wait until every reader thread has finished its scanner next().
        while (CustomInnerRegionObserver.countOfNext.get() < NO_OF_THREADS) {
            Thread.sleep(100);
        }
    }
    Iterator<CachedBlock> iterator = cache.iterator();
    int refCount = 0;
    while (iterator.hasNext()) {
        CachedBlock next = iterator.next();
        BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
        if (cache instanceof BucketCache) {
            refCount = ((BucketCache) cache).getRefCount(cacheKey);
        } else if (cache instanceof CombinedBlockCache) {
            refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
        } else {
            // Other cache implementations do not expose ref counts; nothing to verify.
            continue;
        }
        System.out.println(" the refcount is " + refCount + " block is " + cacheKey);
        if (CustomInnerRegionObserver.waitForGets.get()) {
            if (expectOnlyZero) {
                // JUnit convention: expected value first, actual second.
                assertEquals(0, refCount);
            }
            if (refCount != 0) {
                if (getClosed) {
                    // If get has closed only the scan's blocks would be available
                    assertEquals(CustomInnerRegionObserver.countOfGets.get(), refCount);
                } else {
                    // Gets still open: their references plus one per reader thread remain.
                    assertEquals(CustomInnerRegionObserver.countOfGets.get() + NO_OF_THREADS, refCount);
                }
            }
        } else {
            if (expectOnlyZero) {
                assertEquals(0, refCount);
            }
            if (refCount != 0) {
                if (getLatch == null) {
                    assertEquals(CustomInnerRegionObserver.countOfNext.get(), refCount);
                } else {
                    // A pending get latch means the gets also still hold references.
                    assertEquals(CustomInnerRegionObserver.countOfNext.get() + NO_OF_THREADS, refCount);
                }
            }
        }
    }
    // Release the reader threads blocked on the shared countdown latch.
    CustomInnerRegionObserver.getCdl().get().countDown();
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache) BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint)

Example 7 with CombinedBlockCache

use of org.apache.hadoop.hbase.io.hfile.CombinedBlockCache in project hbase by apache.

In class TestBlockEvictionFromClient, the method iterateBlockCache:

/**
 * Walks every block currently held in {@code cache} and asserts that none of them is still
 * referenced. Caches that do not track reference counts are skipped silently.
 *
 * @param cache    the block cache whose ref counts are checked
 * @param iterator an iterator over the cached blocks of {@code cache}
 */
private void iterateBlockCache(BlockCache cache, Iterator<CachedBlock> iterator) {
    for (; iterator.hasNext(); ) {
        CachedBlock block = iterator.next();
        BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset());
        final int references;
        if (cache instanceof BucketCache) {
            references = ((BucketCache) cache).getRefCount(key);
        } else if (cache instanceof CombinedBlockCache) {
            references = ((CombinedBlockCache) cache).getRefCount(key);
        } else {
            // No ref-count accounting available for this cache type; move on.
            continue;
        }
        assertEquals(0, references);
    }
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache) BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint)

Example 8 with CombinedBlockCache

use of org.apache.hadoop.hbase.io.hfile.CombinedBlockCache in project hbase by apache.

In class TestBlockEvictionFromClient, the method testGetWithMultipleColumnFamilies:

// Verifies that concurrent gets against a table with ten column families pin cache blocks
// (non-zero ref counts) while the gets are in flight, and that the references are released
// once the gets complete.
@Test
public void testGetWithMultipleColumnFamilies() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        // Check if get() returns blocks on its close() itself
        getLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create KV that will give you two blocks
        // Create a table with block size as 1024
        byte[][] fams = new byte[10][];
        fams[0] = FAMILY;
        for (int i = 1; i < 10; i++) {
            fams[i] = (Bytes.toBytes("testFamily" + i));
        }
        table = TEST_UTIL.createTable(tableName, fams, 1, 1024, CustomInnerRegionObserver.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        BlockCache cache = setCacheProperties(region);
        // Seed two rows in the default family, flushing after each so they land in HFiles.
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        // One cell per extra family, flushing every second family to spread data over files.
        for (int i = 1; i < 10; i++) {
            put = new Put(ROW);
            put.addColumn(Bytes.toBytes("testFamily" + i), Bytes.toBytes("testQualifier" + i), data2);
            table.put(put);
            if (i % 2 == 0) {
                region.flush(true);
            }
        }
        region.flush(true);
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        region.flush(true);
        // flush the data
        System.out.println("Flushing cache");
        // Should create one Hfile with 2 blocks
        CustomInnerRegionObserver.waitForGets.set(true);
        // Create three sets of gets
        GetThread[] getThreads = initiateGet(table, true, true);
        // NOTE(review): fixed sleep gives the get threads time to reach the cache;
        // timing-sensitive — confirm it is long enough on slow hosts.
        Thread.sleep(200);
        Iterator<CachedBlock> iterator = cache.iterator();
        boolean usedBlocksFound = false;
        int refCount = 0;
        int noOfBlocksWithRef = 0;
        // While the gets are blocked, every referenced block should be pinned once per thread.
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                // Ref counts are only observable on BucketCache/CombinedBlockCache.
                continue;
            }
            if (refCount != 0) {
                // Blocks will be with count 3
                System.out.println("The refCount is " + refCount);
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
                noOfBlocksWithRef++;
            }
        }
        assertTrue(usedBlocksFound);
        // the number of blocks referred
        assertEquals(3, noOfBlocksWithRef);
        // Release the get threads and wait for them to finish.
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (GetThread thread : getThreads) {
            thread.join();
        }
        // Verify whether the gets have returned the blocks that it had
        CustomInnerRegionObserver.waitForGets.set(true);
        // giving some time for the block to be decremented
        checkForBlockEviction(cache, true, false);
        getLatch.countDown();
        System.out.println("Gets should have returned the bloks");
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) CountDownLatch(java.util.concurrent.CountDownLatch) BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) TableName(org.apache.hadoop.hbase.TableName) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache) Region(org.apache.hadoop.hbase.regionserver.Region) Test(org.junit.Test)

Example 9 with CombinedBlockCache

use of org.apache.hadoop.hbase.io.hfile.CombinedBlockCache in project hbase by apache.

In class TestBlockEvictionFromClient, the method testScanWithMultipleColumnFamilies:

// Verifies that concurrent scans against a table with ten column families pin cache blocks
// (non-zero ref counts) while the scans are in flight, and that the references are released
// once the scans complete.
@Test
public void testScanWithMultipleColumnFamilies() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        // Check if get() returns blocks on its close() itself
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create KV that will give you two blocks
        // Create a table with block size as 1024
        byte[][] fams = new byte[10][];
        fams[0] = FAMILY;
        for (int i = 1; i < 10; i++) {
            fams[i] = (Bytes.toBytes("testFamily" + i));
        }
        table = TEST_UTIL.createTable(tableName, fams, 1, 1024, CustomInnerRegionObserver.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        BlockCache cache = setCacheProperties(region);
        // Seed two rows in the default family, flushing after each so they land in HFiles.
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        // One cell per extra family, flushing every second family to spread data over files.
        for (int i = 1; i < 10; i++) {
            put = new Put(ROW);
            put.addColumn(Bytes.toBytes("testFamily" + i), Bytes.toBytes("testQualifier" + i), data2);
            table.put(put);
            if (i % 2 == 0) {
                region.flush(true);
            }
        }
        region.flush(true);
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        region.flush(true);
        // flush the data
        System.out.println("Flushing cache");
        // Should create one Hfile with 2 blocks
        // Create three sets of gets
        ScanThread[] scanThreads = initiateScan(table, true);
        // NOTE(review): fixed sleep gives the scan threads time to reach the cache;
        // timing-sensitive — confirm it is long enough on slow hosts.
        Thread.sleep(200);
        Iterator<CachedBlock> iterator = cache.iterator();
        boolean usedBlocksFound = false;
        int refCount = 0;
        int noOfBlocksWithRef = 0;
        // While the scans are blocked, every referenced block should be pinned once per thread.
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                // Ref counts are only observable on BucketCache/CombinedBlockCache.
                continue;
            }
            if (refCount != 0) {
                // Blocks will be with count 3
                System.out.println("The refCount is " + refCount);
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
                noOfBlocksWithRef++;
            }
        }
        assertTrue(usedBlocksFound);
        // the number of blocks referred
        assertEquals(12, noOfBlocksWithRef);
        // Release the scan threads and wait for them to finish.
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
        // giving some time for the block to be decremented
        checkForBlockEviction(cache, true, false);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) CountDownLatch(java.util.concurrent.CountDownLatch) BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) TableName(org.apache.hadoop.hbase.TableName) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache) Region(org.apache.hadoop.hbase.regionserver.Region) Test(org.junit.Test)

Aggregations

MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint)9 BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey)9 CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock)9 CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache)9 BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache)9 CountDownLatch (java.util.concurrent.CountDownLatch)7 TableName (org.apache.hadoop.hbase.TableName)7 BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache)7 Region (org.apache.hadoop.hbase.regionserver.Region)7 Test (org.junit.Test)6 CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig)4 Store (org.apache.hadoop.hbase.regionserver.Store)4