
Example 86 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hbase by Apache.

From the class TestBlockEvictionFromClient, method testParallelGetsAndScanWithWrappedRegionScanner:

@Test
public void testParallelGetsAndScanWithWrappedRegionScanner() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(2);
        // Check if get() returns blocks on its close() itself
        getLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create KV that will give you two blocks
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache();
        // insert data. 2 Rows are added
        insertData(table);
        // flush the data
        System.out.println("Flushing cache");
        // Should create one Hfile with 2 blocks
        region.flush(true);
        // CustomInnerRegionObserver.sleepTime.set(5000);
        // Create three sets of scan
        CustomInnerRegionObserver.waitForGets.set(true);
        ScanThread[] scanThreads = initiateScan(table, false);
        // Create three sets of gets
        GetThread[] getThreads = initiateGet(table, false, false);
        // For the scan case the block ref count would already have been decremented,
        // because the scanner was wrapped, before even the postNext hook executes.
        // Give the decrement some time to happen.
        Thread.sleep(100);
        CustomInnerRegionObserver.waitForGets.set(false);
        checkForBlockEviction(cache, false, false);
        // countdown the latch
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (GetThread thread : getThreads) {
            thread.join();
        }
        getLatch.countDown();
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used: Store(org.apache.hadoop.hbase.regionserver.Store), CountDownLatch(java.util.concurrent.CountDownLatch), TableName(org.apache.hadoop.hbase.TableName), BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache), CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), Region(org.apache.hadoop.hbase.regionserver.Region), CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig), Test(org.junit.Test)
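
The pattern in this test (and in the ones that follow) is a latch used as a gate: the scan and get threads park on a shared CountDownLatch while the test inspects block-cache reference counts, and the test releases them with countDown() (the CustomInnerRegionObserver.getCdl().get().countDown() call above). A minimal, self-contained sketch of that shape, using only the JDK and hypothetical names (LatchGateSketch, gate), might look like this:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

public class LatchGateSketch {
    // Hypothetical stand-in for the test's shared latch: workers wait on it
    // before proceeding, and the controlling thread opens it exactly once.
    private static final AtomicReference<CountDownLatch> gate =
            new AtomicReference<>(new CountDownLatch(1));

    public static void main(String[] args) throws InterruptedException {
        Thread[] workers = new Thread[3];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                try {
                    // Each worker blocks here, still holding its "resources",
                    // until the gate is opened.
                    gate.get().await();
                    System.out.println(Thread.currentThread().getName() + " released");
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
            workers[i].start();
        }

        // The controlling thread performs its checks while the workers are parked...
        Thread.sleep(100);

        // ...then opens the gate; every waiting worker proceeds.
        gate.get().countDown();
        for (Thread w : workers) {
            w.join();
        }
    }
}

In the HBase tests above, the checks performed while the threads are parked are the refCount assertions against the block cache; countDown() then lets the scanners and gets finish so the references can drop.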

Example 87 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hbase by Apache.

From the class TestBlockEvictionFromClient, method testBlockEvictionWithParallelScans:

@Test
public void testBlockEvictionWithParallelScans() throws Exception {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache();
        // insert data. 2 Rows are added
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        // data was in memstore so don't expect any changes
        // flush the data
        System.out.println("Flushing cache in problematic area");
        // Should create one Hfile with 2 blocks
        region.flush(true);
        // Load cache
        // Create three sets of scan
        ScanThread[] scanThreads = initiateScan(table, false);
        Thread.sleep(100);
        checkForBlockEviction(cache, false, false);
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
        // CustomInnerRegionObserver.sleepTime.set(0);
        Iterator<CachedBlock> iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // read the data and expect same blocks, one new hit, no misses
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // Check how this miss is happening
        // insert a second column, read the row, no new blocks, 3 new hits
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        byte[] data2 = Bytes.add(data, data);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        Result r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // flush, one new block
        System.out.println("Flushing cache");
        region.flush(true);
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // compact, net minus two blocks, two hits, no misses
        System.out.println("Compacting");
        assertEquals(2, store.getStorefilesCount());
        store.triggerMajorCompaction();
        region.compact(true);
        // wait 10 seconds max
        waitForStoreFileCount(store, 1, 10000);
        assertEquals(1, store.getStorefilesCount());
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // read the row, this should be a cache miss because we don't cache data
        // blocks on compaction
        r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used: CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock), Store(org.apache.hadoop.hbase.regionserver.Store), CountDownLatch(java.util.concurrent.CountDownLatch), TableName(org.apache.hadoop.hbase.TableName), BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache), CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), Region(org.apache.hadoop.hbase.regionserver.Region), CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig), Test(org.junit.Test)

Example 88 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hbase by Apache.

From the class TestBlockEvictionFromClient, method testScanWithCompactionInternals:

private void testScanWithCompactionInternals(String tableNameStr, boolean reversed) throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        compactionLatch = new CountDownLatch(1);
        TableName tableName = TableName.valueOf(tableNameStr);
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache();
        // insert data. 2 Rows are added
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        // Should create one Hfile with 2 blocks
        region.flush(true);
        // read the data and expect same blocks, one new hit, no misses
        int refCount = 0;
        // Check how this miss is happening
        // insert a second column, read the row, no new blocks, 3 new hits
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        byte[] data2 = Bytes.add(data, data);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        // flush, one new block
        System.out.println("Flushing cache");
        region.flush(true);
        Iterator<CachedBlock> iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // Create three sets of scan
        ScanThread[] scanThreads = initiateScan(table, reversed);
        Thread.sleep(100);
        iterator = cache.iterator();
        boolean usedBlocksFound = false;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Blocks will be with count 3
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
        usedBlocksFound = false;
        System.out.println("Compacting");
        assertEquals(2, store.getStorefilesCount());
        store.triggerMajorCompaction();
        region.compact(true);
        // wait 10 seconds max
        waitForStoreFileCount(store, 1, 10000);
        assertEquals(1, store.getStorefilesCount());
        // Even after compaction is done we will have some blocks that cannot
        // be evicted this is because the scan is still referencing them
        iterator = cache.iterator();
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Blocks will be with count 3 as they are not yet cleared
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
        // Should not throw exception
        compactionLatch.countDown();
        latch.countDown();
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
        // by this time all blocks should have been evicted
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        Result r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        // The gets would be working on new blocks
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used: CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock), Store(org.apache.hadoop.hbase.regionserver.Store), CountDownLatch(java.util.concurrent.CountDownLatch), BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), TableName(org.apache.hadoop.hbase.TableName), BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache), CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), Region(org.apache.hadoop.hbase.regionserver.Region), CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig)
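
waitForStoreFileCount(store, 1, 10000) in the test above is a helper on the test class that waits, with a 10-second cap, for the store file count to reach the expected value after compaction. A rough, hypothetical sketch of that kind of bounded polling wait (not the HBase implementation) could be:

import java.util.function.IntSupplier;

public class WaitForCountSketch {
    // Hypothetical helper: poll a counter until it reaches the expected value
    // or the timeout elapses, failing if it never does.
    static void waitForCount(IntSupplier current, int expected, long timeoutMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (current.getAsInt() != expected && System.currentTimeMillis() < deadline) {
            Thread.sleep(100);
        }
        if (current.getAsInt() != expected) {
            throw new AssertionError("expected " + expected + " but saw " + current.getAsInt());
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // Usage example: wait up to 10 seconds for the supplier to report 1.
        waitForCount(() -> 1, 1, 10000);
    }
}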

Example 89 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hbase by Apache.

From the class TestBlockEvictionFromClient, method testScanWithException:

@Test
public void testScanWithException() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        exceptionLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create KV that will give you two blocks
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache();
        // insert data. 2 Rows are added
        insertData(table);
        // flush the data
        System.out.println("Flushing cache");
        // Should create one Hfile with 2 blocks
        region.flush(true);
        // CustomInnerRegionObserver.sleepTime.set(5000);
        CustomInnerRegionObserver.throwException.set(true);
        ScanThread[] scanThreads = initiateScan(table, false);
        // For the scan case the block ref count would already have been decremented,
        // because the scanner was wrapped, before even the postNext hook executes.
        // Give the decrement some time to happen.
        Thread.sleep(100);
        Iterator<CachedBlock> iterator = cache.iterator();
        boolean usedBlocksFound = false;
        int refCount = 0;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Blocks will be with count 3
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertTrue(usedBlocksFound);
        exceptionLatch.countDown();
        // countdown the latch
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
        iterator = cache.iterator();
        usedBlocksFound = false;
        refCount = 0;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Blocks will be with count 3
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertFalse(usedBlocksFound);
        // You should always see a ref count of 0, since after HBASE-16604 we always recreate the scanner
        assertEquals(0, refCount);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used: CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock), Store(org.apache.hadoop.hbase.regionserver.Store), CountDownLatch(java.util.concurrent.CountDownLatch), BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), TableName(org.apache.hadoop.hbase.TableName), BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache), CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), Region(org.apache.hadoop.hbase.regionserver.Region), CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig), Test(org.junit.Test)

Example 90 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hbase by Apache.

From the class TestBlockEvictionFromClient, method testGetsWithMultiColumnsAndExplicitTracker:

@Test
// TODO : check how block index works here
public void testGetsWithMultiColumnsAndExplicitTracker() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        // Check if get() returns blocks on its close() itself
        getLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create KV that will give you two blocks
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        BlockCache cache = setCacheProperties(region);
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        for (int i = 1; i < 10; i++) {
            put = new Put(ROW);
            put.addColumn(FAMILY, Bytes.toBytes("testQualifier" + i), data2);
            table.put(put);
            if (i % 2 == 0) {
                region.flush(true);
            }
        }
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        region.flush(true);
        // flush the data
        System.out.println("Flushing cache");
        // Should create one Hfile with 2 blocks
        CustomInnerRegionObserver.waitForGets.set(true);
        // Create three sets of gets
        GetThread[] getThreads = initiateGet(table, true, false);
        Thread.sleep(200);
        Iterator<CachedBlock> iterator = cache.iterator();
        boolean usedBlocksFound = false;
        int refCount = 0;
        int noOfBlocksWithRef = 0;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Blocks will be with count 3
                System.out.println("The refCount is " + refCount);
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
                noOfBlocksWithRef++;
            }
        }
        assertTrue(usedBlocksFound);
        // the number of blocks referred
        assertEquals(10, noOfBlocksWithRef);
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (GetThread thread : getThreads) {
            thread.join();
        }
        // Verify whether the gets have returned the blocks that it had
        CustomInnerRegionObserver.waitForGets.set(true);
        // giving some time for the block to be decremented
        checkForBlockEviction(cache, true, false);
        getLatch.countDown();
        System.out.println("Gets should have returned the bloks");
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used: CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock), CountDownLatch(java.util.concurrent.CountDownLatch), BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), TableName(org.apache.hadoop.hbase.TableName), BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache), CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), Region(org.apache.hadoop.hbase.regionserver.Region), Test(org.junit.Test)
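
The examples above mostly use CountDownLatch as a gate that the test opens. For completeness, the other common shape of the class, not specific to this test class, is a completion signal: each worker counts down exactly once when it finishes, and a coordinating thread awaits until the count reaches zero. A generic sketch with hypothetical names:

import java.util.concurrent.CountDownLatch;

public class CompletionLatchSketch {
    public static void main(String[] args) throws InterruptedException {
        final int workerCount = 3;
        // Completion latch: each worker counts down exactly once when it is done.
        final CountDownLatch done = new CountDownLatch(workerCount);

        for (int i = 0; i < workerCount; i++) {
            final int id = i;
            new Thread(() -> {
                try {
                    // Simulated work.
                    Thread.sleep(50L * (id + 1));
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    done.countDown();
                }
            }).start();
        }

        // The coordinating thread blocks until all workers have counted down.
        done.await();
        System.out.println("all " + workerCount + " workers finished");
    }
}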

Aggregations

CountDownLatch (java.util.concurrent.CountDownLatch): 5355
Test (org.junit.Test): 2594
IOException (java.io.IOException): 631
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 550
AtomicReference (java.util.concurrent.atomic.AtomicReference): 501
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 475
ArrayList (java.util.ArrayList): 471
QuickTest (com.hazelcast.test.annotation.QuickTest): 375
ParallelTest (com.hazelcast.test.annotation.ParallelTest): 355
ExecutorService (java.util.concurrent.ExecutorService): 322
Test (org.testng.annotations.Test): 310
HazelcastInstance (com.hazelcast.core.HazelcastInstance): 251
List (java.util.List): 212
HashMap (java.util.HashMap): 207
HttpServletResponse (javax.servlet.http.HttpServletResponse): 207
ExecutionException (java.util.concurrent.ExecutionException): 203
HttpServletRequest (javax.servlet.http.HttpServletRequest): 189
Ignite (org.apache.ignite.Ignite): 188
ServletException (javax.servlet.ServletException): 183
TimeoutException (java.util.concurrent.TimeoutException): 168