
Example 1 with BucketCache

Use of org.apache.hadoop.hbase.io.hfile.bucket.BucketCache in project hbase by apache.

From class CacheConfig, method getBucketCache:

private static BlockCache getBucketCache(Configuration c) {
    // Check for L2. The ioengine name must be non-null.
    String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null);
    if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0)
        return null;
    int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
    final long bucketCacheSize = MemorySizeUtil.getBucketCacheSize(c);
    if (bucketCacheSize <= 0) {
        throw new IllegalStateException("bucketCacheSize <= 0; Check " + BUCKET_CACHE_SIZE_KEY + " setting and/or server java heap size");
    }
    if (c.get("hbase.bucketcache.percentage.in.combinedcache") != null) {
        LOG.warn("Configuration 'hbase.bucketcache.percentage.in.combinedcache' is no longer " + "respected. See comments in http://hbase.apache.org/book.html#_changes_of_note");
    }
    int writerThreads = c.getInt(BUCKET_CACHE_WRITER_THREADS_KEY, DEFAULT_BUCKET_CACHE_WRITER_THREADS);
    int writerQueueLen = c.getInt(BUCKET_CACHE_WRITER_QUEUE_KEY, DEFAULT_BUCKET_CACHE_WRITER_QUEUE);
    String persistentPath = c.get(BUCKET_CACHE_PERSISTENT_PATH_KEY);
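    // When set, this path is used to persist the bucket cache's index/metadata so a
    // file-backed cache can be reused across restarts.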
    String[] configuredBucketSizes = c.getStrings(BUCKET_CACHE_BUCKETS_KEY);
    int[] bucketSizes = null;
    if (configuredBucketSizes != null) {
        bucketSizes = new int[configuredBucketSizes.length];
        for (int i = 0; i < configuredBucketSizes.length; i++) {
            bucketSizes[i] = Integer.parseInt(configuredBucketSizes[i].trim());
        }
    }
    BucketCache bucketCache = null;
    try {
        int ioErrorsTolerationDuration = c.getInt("hbase.bucketcache.ioengine.errors.tolerated.duration", BucketCache.DEFAULT_ERROR_TOLERATION_DURATION);
        // The bucket cache logs its stats on creation, inside its constructor.
        bucketCache = new BucketCache(bucketCacheIOEngineName, bucketCacheSize, blockSize, bucketSizes, writerThreads, writerQueueLen, persistentPath, ioErrorsTolerationDuration);
    } catch (IOException ioex) {
        LOG.error("Can't instantiate bucket cache", ioex);
        throw new RuntimeException(ioex);
    }
    return bucketCache;
}
Also used: BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), IOException(java.io.IOException)
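
A minimal configuration sketch (not from the HBase source): the standard keys behind the BUCKET_CACHE_* constants read above. With these set, the getBucketCache method above returns a non-null off-heap BucketCache; the key names and semantics below reflect HBase 2.x and should be checked against your version.

Configuration conf = HBaseConfiguration.create();
// A non-empty ioengine name ("offheap", "file:/some/path", ...) enables the L2 cache.
conf.set("hbase.bucketcache.ioengine", "offheap");
// Values >= 1 are read as megabytes, values < 1 as a fraction of the maximum heap
// (see MemorySizeUtil.getBucketCacheSize, used above).
conf.setFloat("hbase.bucketcache.size", 4096f);
// Optional: explicit bucket sizes, the key behind BUCKET_CACHE_BUCKETS_KEY above.
conf.set("hbase.bucketcache.bucket.sizes", "8192,16384,65536");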

Example 2 with BucketCache

Use of org.apache.hadoop.hbase.io.hfile.bucket.BucketCache in project hbase by apache.

From class TestCacheOnWrite, method getBlockCaches:

private static List<BlockCache> getBlockCaches() throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    List<BlockCache> blockcaches = new ArrayList<>();
    // default
    blockcaches.add(BlockCacheFactory.createBlockCache(conf));
    // set LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME to 2.0f due to HBASE-16287
    TEST_UTIL.getConfiguration().setFloat(LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, 2.0f);
    // memory
    BlockCache lru = new LruBlockCache(128 * 1024 * 1024, 64 * 1024, TEST_UTIL.getConfiguration());
    blockcaches.add(lru);
    // bucket cache
    FileSystem.get(conf).mkdirs(TEST_UTIL.getDataTestDir());
    int[] bucketSizes = { INDEX_BLOCK_SIZE, DATA_BLOCK_SIZE, BLOOM_BLOCK_SIZE, 64 * 1024, 128 * 1024 };
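    // Constructor arguments below: ioengine "offheap", 128 MB capacity, 64 KB block size,
    // the bucket sizes above, 5 writer threads, a writer queue length of 6400, and no
    // persistence path.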
    BlockCache bucketcache = new BucketCache("offheap", 128 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null);
    blockcaches.add(bucketcache);
    return blockcaches;
}
Also used: Configuration(org.apache.hadoop.conf.Configuration), ArrayList(java.util.ArrayList), BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache)

Example 3 with BucketCache

Use of org.apache.hadoop.hbase.io.hfile.bucket.BucketCache in project hbase by apache.

From class TestHFileReaderImpl, method testSeekBefore:

@Test
public void testSeekBefore() throws Exception {
    Path p = makeNewFile();
    FileSystem fs = TEST_UTIL.getTestFileSystem();
    Configuration conf = TEST_UTIL.getConfiguration();
    int[] bucketSizes = { 512, 2048, 4096, 64 * 1024, 128 * 1024 };
    BucketCache bucketcache = new BucketCache("offheap", 128 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null);
    HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf, bucketcache), true, conf);
    // warm cache
    HFileScanner scanner = reader.getScanner(conf, true, true);
    scanner.seekTo(toKV("i"));
    assertEquals("i", toRowStr(scanner.getCell()));
    scanner.close();
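    // Writes into the BucketCache happen asynchronously via its writer threads, so wait
    // until the block actually appears in the cache before re-reading it below.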
    while (bucketcache.getBlockCount() <= 0) {
        Thread.sleep(10);
    }
    // reopen again.
    scanner = reader.getScanner(conf, true, true);
    scanner.seekTo(toKV("i"));
    assertEquals("i", toRowStr(scanner.getCell()));
    scanner.seekBefore(toKV("i"));
    assertEquals("g", toRowStr(scanner.getCell()));
    scanner.close();
    for (CachedBlock cachedBlock : Lists.newArrayList(bucketcache)) {
        BlockCacheKey cacheKey = new BlockCacheKey(cachedBlock.getFilename(), cachedBlock.getOffset());
        int refCount = bucketcache.getRpcRefCount(cacheKey);
        assertEquals(0, refCount);
    }
    // case 2
    scanner = reader.getScanner(conf, true, true);
    scanner.seekTo(toKV("i"));
    assertEquals("i", toRowStr(scanner.getCell()));
    scanner.seekBefore(toKV("c"));
    scanner.close();
    for (CachedBlock cachedBlock : Lists.newArrayList(bucketcache)) {
        BlockCacheKey cacheKey = new BlockCacheKey(cachedBlock.getFilename(), cachedBlock.getOffset());
        int refCount = bucketcache.getRpcRefCount(cacheKey);
        assertEquals(0, refCount);
    }
    reader.close();
    // clear bucketcache
    for (CachedBlock cachedBlock : Lists.newArrayList(bucketcache)) {
        BlockCacheKey cacheKey = new BlockCacheKey(cachedBlock.getFilename(), cachedBlock.getOffset());
        bucketcache.evictBlock(cacheKey);
    }
    bucketcache.shutdown();
    deleteTestDir(fs);
}
Also used: Path(org.apache.hadoop.fs.Path), Configuration(org.apache.hadoop.conf.Configuration), FileSystem(org.apache.hadoop.fs.FileSystem), BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), Test(org.junit.Test)

Example 4 with BucketCache

Use of org.apache.hadoop.hbase.io.hfile.bucket.BucketCache in project hbase by apache.

From class TestAvoidCellReferencesIntoShippedBlocks, method evictBlock:

/**
 * For {@link BucketCache}, we only evict a block if it is not referenced by any RPC.
 */
private void evictBlock(BlockCache blockCache, BlockCacheKey blockCacheKey) {
    assertTrue(blockCache instanceof CombinedBlockCache);
    BlockCache[] blockCaches = blockCache.getBlockCaches();
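    // A CombinedBlockCache pairs an on-heap L1 cache with a BucketCache L2; only the
    // BucketCache needs the "no RPC reference" check before eviction.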
    for (BlockCache currentBlockCache : blockCaches) {
        if (currentBlockCache instanceof BucketCache) {
            ((BucketCache) currentBlockCache).evictBlockIfNoRpcReferenced(blockCacheKey);
        } else {
            currentBlockCache.evictBlock(blockCacheKey);
        }
    }
}
Also used: CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache), BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache)

Example 5 with BucketCache

Use of org.apache.hadoop.hbase.io.hfile.bucket.BucketCache in project hbase by apache.

From class TestBlockEvictionFromClient, method testScanWithCompactionInternals:

private void testScanWithCompactionInternals(String tableNameStr, boolean reversed) throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        compactionLatch = new CountDownLatch(1);
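        // These latches are used by the CustomInnerRegionObserverWrapper installed on the
        // table below to hold the scans and the compaction open, keeping blocks RPC-referenced.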
        TableName tableName = TableName.valueOf(tableNameStr);
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegion().getEncodedName();
        HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
        HStore store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache().get();
        // Insert data; two rows are added
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        // Should create one Hfile with 2 blocks
        region.flush(true);
        // read the data and expect same blocks, one new hit, no misses
        int refCount = 0;
        // Check how this miss is happening
        // insert a second column, read the row, no new blocks, 3 new hits
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        byte[] data2 = Bytes.add(data, data);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        // flush, one new block
        System.out.println("Flushing cache");
        region.flush(true);
        Iterator<CachedBlock> iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // Create three sets of scan
        ScanThread[] scanThreads = initiateScan(table, reversed);
        Thread.sleep(100);
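        // Each scan RPC pins the blocks it is currently reading, so in-use blocks should
        // report an RPC reference count equal to NO_OF_THREADS in the loop below.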
        iterator = cache.iterator();
        boolean usedBlocksFound = false;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Blocks will be with count 3
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
        usedBlocksFound = false;
        System.out.println("Compacting");
        assertEquals(2, store.getStorefilesCount());
        store.triggerMajorCompaction();
        region.compact(true);
        // wait 10 seconds max
        waitForStoreFileCount(store, 1, 10000);
        assertEquals(1, store.getStorefilesCount());
        // Even after compaction is done we will have some blocks that cannot
        // be evicted this is because the scan is still referencing them
        iterator = cache.iterator();
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRpcRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRpcRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Blocks will be with count 3 as they are not yet cleared
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
        // Should not throw exception
        compactionLatch.countDown();
        latch.countDown();
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
        // by this time all blocks should have been evicted
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        Result r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        // The gets would be working on new blocks
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used: CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock), CountDownLatch(java.util.concurrent.CountDownLatch), BlockCacheKey(org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), TableName(org.apache.hadoop.hbase.TableName), HRegion(org.apache.hadoop.hbase.regionserver.HRegion), CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache), BucketCache(org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), HStore(org.apache.hadoop.hbase.regionserver.HStore), CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig)

Aggregations

BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache) 17
CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) 10
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) 9
BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey) 9
CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock) 9
BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache) 8
HRegion (org.apache.hadoop.hbase.regionserver.HRegion) 8
CountDownLatch (java.util.concurrent.CountDownLatch) 7
TableName (org.apache.hadoop.hbase.TableName) 7
Test (org.junit.Test) 7
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig) 4
HStore (org.apache.hadoop.hbase.regionserver.HStore) 4
IOException (java.io.IOException) 2
Configuration (org.apache.hadoop.conf.Configuration) 2
ArrayList (java.util.ArrayList) 1
FileSystem (org.apache.hadoop.fs.FileSystem) 1
Path (org.apache.hadoop.fs.Path) 1
ArrayBackedTag (org.apache.hadoop.hbase.ArrayBackedTag) 1
KeyValue (org.apache.hadoop.hbase.KeyValue) 1
Tag (org.apache.hadoop.hbase.Tag) 1