
Example 1 with CachedBlock

Use of org.apache.hadoop.hbase.io.hfile.CachedBlock in project hbase by Apache.

From the class BucketCache, method iterator().

@Override
public Iterator<CachedBlock> iterator() {
    // Don't bother with ramcache since stuff is in here only a little while.
    final Iterator<Map.Entry<BlockCacheKey, BucketEntry>> i = this.backingMap.entrySet().iterator();
    return new Iterator<CachedBlock>() {

        private final long now = System.nanoTime();

        @Override
        public boolean hasNext() {
            return i.hasNext();
        }

        @Override
        public CachedBlock next() {
            final Map.Entry<BlockCacheKey, BucketEntry> e = i.next();
            return new CachedBlock() {

                @Override
                public String toString() {
                    return BlockCacheUtil.toString(this, now);
                }

                @Override
                public BlockPriority getBlockPriority() {
                    return e.getValue().getPriority();
                }

                @Override
                public BlockType getBlockType() {
                    // Not held by BucketEntry.  Could add it if wanted on BucketEntry creation.
                    return null;
                }

                @Override
                public long getOffset() {
                    return e.getKey().getOffset();
                }

                @Override
                public long getSize() {
                    return e.getValue().getLength();
                }

                @Override
                public long getCachedTime() {
                    return e.getValue().getCachedTime();
                }

                @Override
                public String getFilename() {
                    return e.getKey().getHfileName();
                }

                @Override
                public int compareTo(CachedBlock other) {
                    int diff = this.getFilename().compareTo(other.getFilename());
                    if (diff != 0)
                        return diff;
                    diff = Long.compare(this.getOffset(), other.getOffset());
                    if (diff != 0)
                        return diff;
                    if (other.getCachedTime() < 0 || this.getCachedTime() < 0) {
                        throw new IllegalStateException("" + this.getCachedTime() + ", " + other.getCachedTime());
                    }
                    return Long.compare(other.getCachedTime(), this.getCachedTime());
                }

                @Override
                public int hashCode() {
                    return e.getKey().hashCode();
                }

                @Override
                public boolean equals(Object obj) {
                    if (obj instanceof CachedBlock) {
                        CachedBlock cb = (CachedBlock) obj;
                        return compareTo(cb) == 0;
                    } else {
                        return false;
                    }
                }
            };
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) Iterator(java.util.Iterator) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey)
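
The iterator above exposes a read-only CachedBlock view over the BucketCache's backing map. As a minimal sketch of how that view can be consumed (CacheSummary is a hypothetical helper, not an HBase class), the snippet below sums the cached bytes per HFile using only cache.iterator(), getFilename() and getSize(), all of which appear in the example.

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CachedBlock;

public final class CacheSummary {

    private CacheSummary() {
    }

    // Sums cached bytes per HFile name. getFilename() comes from the BlockCacheKey
    // and getSize() from the BucketEntry length, as shown in Example 1.
    public static Map<String, Long> bytesPerFile(BlockCache cache) {
        Map<String, Long> sizes = new HashMap<>();
        Iterator<CachedBlock> it = cache.iterator();
        while (it.hasNext()) {
            CachedBlock block = it.next();
            sizes.merge(block.getFilename(), block.getSize(), Long::sum);
        }
        return sizes;
    }
}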

Example 2 with CachedBlock

Use of org.apache.hadoop.hbase.io.hfile.CachedBlock in project hbase by Apache.

From the class TestAvoidCellReferencesIntoShippedBlocks, method testHBASE16372InReadPath().

@Test
public void testHBASE16372InReadPath() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create a table with block size as 1024
    try (Table table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, null)) {
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
        HRegion region = (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
        HStore store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        final BlockCache cache = cacheConf.getBlockCache().get();
        // insert data: 6 rows (ROW through ROW5) are added, each with two columns
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW2);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW2);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW3);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW3);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW4);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW4);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW5);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW5);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        // the data was only in the memstore, so the block cache is not expected to change
        region.flush(true);
        // Load cache
        Scan s = new Scan();
        s.setMaxResultSize(1000);
        int count;
        try (ResultScanner scanner = table.getScanner(s)) {
            count = Iterables.size(scanner);
        }
        assertEquals("Count all the rows ", 6, count);
        // Scan from cache
        s = new Scan();
        // Start a scan from ROW1
        s.setCaching(1);
        s.withStartRow(ROW1);
        // set partial as true so that the scan can send partial columns also
        s.setAllowPartialResults(true);
        s.setMaxResultSize(1000);
        try (ScanPerNextResultScanner scanner = new ScanPerNextResultScanner(TEST_UTIL.getAsyncConnection().getTable(tableName), s)) {
            Thread evictorThread = new Thread() {

                @Override
                public void run() {
                    List<BlockCacheKey> cacheList = new ArrayList<>();
                    Iterator<CachedBlock> iterator = cache.iterator();
                    // evict all the blocks
                    while (iterator.hasNext()) {
                        CachedBlock next = iterator.next();
                        BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
                        cacheList.add(cacheKey);
                        /**
                         * There is only one block referenced by an RPC; here we evict the blocks
                         * that have no RPC reference.
                         */
                        evictBlock(cache, cacheKey);
                    }
                    try {
                        Thread.sleep(1);
                    } catch (InterruptedException e1) {
                        // ignore; the short sleep is only a scheduling hint
                    }
                    iterator = cache.iterator();
                    int refBlockCount = 0;
                    while (iterator.hasNext()) {
                        iterator.next();
                        refBlockCount++;
                    }
                    assertEquals("One block should be there ", 1, refBlockCount);
                    // Rescan to repopulate the cache with this row's blocks.
                    Scan s1 = new Scan();
                    // This scan starts at ROW3 and stops before ROW5, repopulating the cache
                    // with blocks for those rows.
                    s1.withStartRow(ROW3);
                    s1.withStopRow(ROW5);
                    s1.setCaching(1);
                    try (ResultScanner scanner = table.getScanner(s1)) {
                        int count = Iterables.size(scanner);
                        assertEquals("Count the rows", 2, count);
                        int newBlockRefCount = 0;
                        List<BlockCacheKey> newCacheList = new ArrayList<>();
                        while (true) {
                            newBlockRefCount = 0;
                            newCacheList.clear();
                            iterator = cache.iterator();
                            while (iterator.hasNext()) {
                                CachedBlock next = iterator.next();
                                BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
                                newCacheList.add(cacheKey);
                            }
                            for (BlockCacheKey key : cacheList) {
                                if (newCacheList.contains(key)) {
                                    newBlockRefCount++;
                                }
                            }
                            if (newBlockRefCount == 6) {
                                break;
                            }
                        }
                        latch.countDown();
                    } catch (IOException e) {
                    }
                }
            };
            count = 0;
            while (scanner.next() != null) {
                count++;
                if (count == 2) {
                    evictorThread.start();
                    latch.await();
                }
            }
        }
        assertEquals("Count should give all rows ", 10, count);
    }
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) ArrayList(java.util.ArrayList) IOException(java.io.IOException) BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) HStore(org.apache.hadoop.hbase.regionserver.HStore) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
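
The evictor thread above walks the cache, rebuilds each BlockCacheKey from the block's file name and offset, and evicts it. A condensed sketch of that pattern is below; EvictAllBlocks is a hypothetical helper, and it calls BlockCache#evictBlock directly rather than the test's evictBlock(cache, cacheKey) wrapper, which is assumed to behave the same for blocks with no RPC reference.

import java.util.Iterator;

import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CachedBlock;

public final class EvictAllBlocks {

    private EvictAllBlocks() {
    }

    // Attempts to evict every block the cache currently reports and returns how
    // many were actually removed. Blocks still pinned by an in-flight RPC are
    // expected to stay resident, which is what the refBlockCount assertion checks.
    public static int evictAll(BlockCache cache) {
        int evicted = 0;
        Iterator<CachedBlock> it = cache.iterator();
        while (it.hasNext()) {
            CachedBlock block = it.next();
            BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset());
            if (cache.evictBlock(key)) {
                evicted++;
            }
        }
        return evicted;
    }
}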

Example 3 with CachedBlock

Use of org.apache.hadoop.hbase.io.hfile.CachedBlock in project hbase by Apache.

From the class TestBlockEvictionFromClient, method testScanWithCompactionInternals().

private void testScanWithCompactionInternals(String tableNameStr, boolean reversed) throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        compactionLatch = new CountDownLatch(1);
        TableName tableName = TableName.valueOf(tableNameStr);
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
        HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
        HStore store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache().get();
        // insert data. 2 Rows are added
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        // Should create one Hfile with 2 blocks
        region.flush(true);
        // read the data and expect same blocks, one new hit, no misses
        int refCount = 0;
        // Check how this miss is happening
        // insert a second column, read the row, no new blocks, 3 new hits
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        byte[] data2 = Bytes.add(data, data);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        // flush, one new block
        System.out.println("Flushing cache");
        region.flush(true);
        Iterator<CachedBlock> iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // Create three sets of scan
        ScanThread[] scanThreads = initiateScan(table, reversed);
        Thread.sleep(100);
        iterator = cache.iterator();
        boolean usedBlocksFound = false;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Referenced blocks should have a ref count equal to NO_OF_THREADS
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
        usedBlocksFound = false;
        System.out.println("Compacting");
        assertEquals(2, store.getStorefilesCount());
        store.triggerMajorCompaction();
        region.compact(true);
        // wait 10 seconds max
        waitForStoreFileCount(store, 1, 10000);
        assertEquals(1, store.getStorefilesCount());
        // Even after compaction completes, some blocks cannot be evicted because
        // the scans are still referencing them
        iterator = cache.iterator();
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Blocks still have a ref count equal to NO_OF_THREADS because they have not been released yet
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
        // Should not throw exception
        compactionLatch.countDown();
        latch.countDown();
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
        // by this time all blocks should have been evicted
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        Result r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        // The gets would be working on new blocks
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) CountDownLatch(java.util.concurrent.CountDownLatch) BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache) HStore(org.apache.hadoop.hbase.regionserver.HStore) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig)
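
Both ref-count loops in this test share one shape: rebuild the BlockCacheKey, then ask BucketCache or CombinedBlockCache for the block's RPC reference count. The sketch below factors that shape into a standalone helper (RpcReferencedBlocks is hypothetical; only getRpcRefCount, as used above, is assumed).

import java.util.Iterator;

import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CachedBlock;
import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;

public final class RpcReferencedBlocks {

    private RpcReferencedBlocks() {
    }

    // Counts blocks whose RPC reference count is non-zero, i.e. blocks currently
    // pinned by scanners or gets. Other cache implementations are skipped because
    // they do not expose an RPC reference count.
    public static int count(BlockCache cache) {
        int pinned = 0;
        Iterator<CachedBlock> it = cache.iterator();
        while (it.hasNext()) {
            CachedBlock block = it.next();
            BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset());
            int refCount;
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRpcRefCount(key);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRpcRefCount(key);
            } else {
                continue;
            }
            if (refCount != 0) {
                pinned++;
            }
        }
        return pinned;
    }
}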

Example 4 with CachedBlock

Use of org.apache.hadoop.hbase.io.hfile.CachedBlock in project hbase by Apache.

From the class TestBlockEvictionFromClient, method testBlockEvictionWithParallelScans().

@Test
public void testBlockEvictionWithParallelScans() throws Exception {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
        HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
        HStore store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache().get();
        // insert data. 2 Rows are added
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        // data was in memstore so don't expect any changes
        // flush the data
        // Should create one Hfile with 2 blocks
        region.flush(true);
        // Load cache
        // Create three sets of scan
        ScanThread[] scanThreads = initiateScan(table, false);
        Thread.sleep(100);
        checkForBlockEviction(cache, false, false);
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
        // CustomInnerRegionObserver.sleepTime.set(0);
        Iterator<CachedBlock> iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // read the data and expect same blocks, one new hit, no misses
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // Check how this miss is happening
        // insert a second column, read the row, no new blocks, 3 new hits
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        byte[] data2 = Bytes.add(data, data);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        Result r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // flush, one new block
        System.out.println("Flushing cache");
        region.flush(true);
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // compact, net minus two blocks, two hits, no misses
        System.out.println("Compacting");
        assertEquals(2, store.getStorefilesCount());
        store.triggerMajorCompaction();
        region.compact(true);
        // wait 10 seconds max
        waitForStoreFileCount(store, 1, 10000);
        assertEquals(1, store.getStorefilesCount());
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // read the row, this should be a cache miss because we don't cache data
        // blocks on compaction
        r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) CountDownLatch(java.util.concurrent.CountDownLatch) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) HStore(org.apache.hadoop.hbase.regionserver.HStore) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
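
The test waits up to ten seconds for compaction to leave a single store file via waitForStoreFileCount(store, 1, 10000), whose body is not shown here. A plausible polling loop with the same intent might look like the sketch below (StoreFileWaiter is hypothetical; only HStore#getStorefilesCount, used elsewhere in the example, is assumed).

import org.apache.hadoop.hbase.regionserver.HStore;

public final class StoreFileWaiter {

    private StoreFileWaiter() {
    }

    // Polls the store until it reports the expected file count or the timeout
    // expires, returning whether the expected count was reached.
    public static boolean waitForStoreFileCount(HStore store, int expected, long timeoutMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (store.getStorefilesCount() != expected && System.currentTimeMillis() < deadline) {
            Thread.sleep(100);
        }
        return store.getStorefilesCount() == expected;
    }
}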

Example 5 with CachedBlock

Use of org.apache.hadoop.hbase.io.hfile.CachedBlock in project hbase by Apache.

From the class TestBlockEvictionFromClient, method testGetsWithMultiColumnsAndExplicitTracker().

@Test
// TODO : check how block index works here
public void testGetsWithMultiColumnsAndExplicitTracker() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        // Check if get() returns blocks on its close() itself
        getLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create KV that will give you two blocks
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
        HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
        BlockCache cache = setCacheProperties(region);
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        for (int i = 1; i < 10; i++) {
            put = new Put(ROW);
            put.addColumn(FAMILY, Bytes.toBytes("testQualifier" + i), data2);
            table.put(put);
            if (i % 2 == 0) {
                region.flush(true);
            }
        }
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        region.flush(true);
        // flush the data
        System.out.println("Flushing cache");
        // Should create one Hfile with 2 blocks
        CustomInnerRegionObserver.waitForGets.set(true);
        // Create three sets of gets
        GetThread[] getThreads = initiateGet(table, true, false);
        Thread.sleep(200);
        Iterator<CachedBlock> iterator = cache.iterator();
        boolean usedBlocksFound = false;
        int refCount = 0;
        int noOfBlocksWithRef = 0;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Referenced blocks should have a ref count equal to NO_OF_THREADS
                System.out.println("The refCount is " + refCount);
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
                noOfBlocksWithRef++;
            }
        }
        assertTrue(usedBlocksFound);
        // the number of blocks referred
        assertEquals(10, noOfBlocksWithRef);
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (GetThread thread : getThreads) {
            thread.join();
        }
        // Verify that the gets have returned the blocks they held
        CustomInnerRegionObserver.waitForGets.set(true);
        // giving some time for the block to be decremented
        checkForBlockEviction(cache, true, false);
        getLatch.countDown();
        System.out.println("Gets should have returned the bloks");
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) CountDownLatch(java.util.concurrent.CountDownLatch) BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache) Test(org.junit.Test)
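
Example 1's compareTo orders cached blocks by file name, then offset. The hypothetical helper below (CacheSnapshot is not an HBase class) takes a snapshot of the cache in that same order using only the CachedBlock accessors shown above; it can be handy when a test needs a stable ordering of what is cached.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CachedBlock;

public final class CacheSnapshot {

    private CacheSnapshot() {
    }

    // Copies the cache's CachedBlock view into a list sorted by HFile name and
    // offset, mirroring the first two comparison keys of Example 1's compareTo().
    public static List<CachedBlock> sortedSnapshot(BlockCache cache) {
        List<CachedBlock> blocks = new ArrayList<>();
        Iterator<CachedBlock> it = cache.iterator();
        while (it.hasNext()) {
            blocks.add(it.next());
        }
        blocks.sort(Comparator.comparing(CachedBlock::getFilename)
            .thenComparingLong(CachedBlock::getOffset));
        return blocks;
    }
}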

Aggregations

CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock): 13 usages
CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache): 12 usages
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint): 11 usages
BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey): 11 usages
TableName (org.apache.hadoop.hbase.TableName): 10 usages
BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache): 10 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 10 usages
BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache): 9 usages
Test (org.junit.Test): 9 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 8 usages
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 7 usages
HStore (org.apache.hadoop.hbase.regionserver.HStore): 7 usages
IOException (java.io.IOException): 1 usage
ArrayList (java.util.ArrayList): 1 usage
Iterator (java.util.Iterator): 1 usage
Map (java.util.Map): 1 usage
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1 usage
ConcurrentMap (java.util.concurrent.ConcurrentMap): 1 usage
ServerName (org.apache.hadoop.hbase.ServerName): 1 usage