
Example 6 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache.

From the class TestBlockEvictionFromClient, method testGetsWithMultiColumnsAndExplicitTracker.

@Test
// TODO : check how block index works here
public void testGetsWithMultiColumnsAndExplicitTracker() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        // Check that get() returns its blocks to the cache on close()
        getLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create KVs that will span two blocks
        // Create a table with a block size of 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        BlockCache cache = setCacheProperties(region);
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        for (int i = 1; i < 10; i++) {
            put = new Put(ROW);
            put.addColumn(FAMILY, Bytes.toBytes("testQualifier" + i), data2);
            table.put(put);
            if (i % 2 == 0) {
                region.flush(true);
            }
        }
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        region.flush(true);
        // flush the data
        System.out.println("Flushing cache");
        // Should create one HFile with 2 blocks
        CustomInnerRegionObserver.waitForGets.set(true);
        // Create three sets of gets
        GetThread[] getThreads = initiateGet(table, true, false);
        Thread.sleep(200);
        Iterator<CachedBlock> iterator = cache.iterator();
        boolean usedBlocksFound = false;
        int refCount = 0;
        int noOfBlocksWithRef = 0;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Each in-use block should have a ref count of NO_OF_THREADS (3 here)
                System.out.println("The refCount is " + refCount);
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
                noOfBlocksWithRef++;
            }
        }
        assertTrue(usedBlocksFound);
        // the number of blocks referenced
        assertEquals(10, noOfBlocksWithRef);
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (GetThread thread : getThreads) {
            thread.join();
        }
        // Verify that the gets have returned the blocks they held
        CustomInnerRegionObserver.waitForGets.set(true);
        // give the ref counts some time to be decremented
        checkForBlockEviction(cache, true, false);
        getLatch.countDown();
        System.out.println("Gets should have returned the bloks");
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used: CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock), CountDownLatch (java.util.concurrent.CountDownLatch), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), TableName (org.apache.hadoop.hbase.TableName), CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), Region (org.apache.hadoop.hbase.regionserver.Region), Test (org.junit.Test)
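
The iterate-and-check-refcount pattern above is the core of this test. A minimal sketch of it as a standalone helper, using only the calls shown in the example (the class and method names of the helper itself are illustrative, not part of HBase):

import java.util.Iterator;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CachedBlock;
import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;

class RefCountProbe {
    /** Counts the cached blocks whose ref count is non-zero. */
    static int countBlocksWithRefs(BlockCache cache) {
        int blocksWithRef = 0;
        Iterator<CachedBlock> it = cache.iterator();
        while (it.hasNext()) {
            CachedBlock block = it.next();
            // Rebuild the cache key from the block's file name and offset.
            BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset());
            int refCount;
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(key);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(key);
            } else {
                continue;  // other cache implementations expose no ref counts here
            }
            if (refCount != 0) {
                blocksWithRef++;
            }
        }
        return blocksWithRef;
    }
}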

Example 7 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache.

From the class TestBucketCache, method testMemoryLeak.

@Test
public void testMemoryLeak() throws Exception {
    final BlockCacheKey cacheKey = new BlockCacheKey("dummy", 1L);
    cacheAndWaitUntilFlushedToBucket(cache, cacheKey, new CacheTestUtils.ByteArrayCacheable(new byte[10]));
    long lockId = cache.backingMap.get(cacheKey).offset();
    ReentrantReadWriteLock lock = cache.offsetLock.getLock(lockId);
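    // Hold the write lock so that the evictBlock call below blocks on this offset.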
    lock.writeLock().lock();
    Thread evictThread = new Thread("evict-block") {

        @Override
        public void run() {
            cache.evictBlock(cacheKey);
        }
    };
    evictThread.start();
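    // Wait until the evict thread is parked on the offset lock, then remove the entry out-of-band.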
    cache.offsetLock.waitForWaiters(lockId, 1);
    cache.blockEvicted(cacheKey, cache.backingMap.remove(cacheKey), true);
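    // Re-cache the same key; when the blocked evictBlock resumes, it must not remove this new entry.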
    cacheAndWaitUntilFlushedToBucket(cache, cacheKey, new CacheTestUtils.ByteArrayCacheable(new byte[10]));
    lock.writeLock().unlock();
    evictThread.join();
    assertEquals(1L, cache.getBlockCount());
    assertTrue(cache.getCurrentSize() > 0L);
    assertTrue("We should have a block!", cache.iterator().hasNext());
}
Also used: CacheTestUtils (org.apache.hadoop.hbase.io.hfile.CacheTestUtils), ReentrantReadWriteLock (java.util.concurrent.locks.ReentrantReadWriteLock), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey), Test (org.junit.Test)
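
The test holds the per-offset write lock so that evictBlock blocks, evicts the entry out-of-band, re-caches, and only then releases the lock; the assertions confirm the late evictor did not throw away the fresh entry. A self-contained sketch of the same blocking pattern using only java.util.concurrent (the class name and printed message are illustrative):

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockRaceSketch {
    public static void main(String[] args) throws InterruptedException {
        ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        lock.writeLock().lock();
        Thread evictor = new Thread(() -> {
            // Blocks here until the main thread releases the write lock.
            lock.writeLock().lock();
            try {
                System.out.println("evictor ran after the lock was released");
            } finally {
                lock.writeLock().unlock();
            }
        }, "evict-block");
        evictor.start();
        // Mirror offsetLock.waitForWaiters(lockId, 1): spin until the evictor is queued.
        while (!lock.hasQueuedThread(evictor)) {
            Thread.sleep(1);
        }
        // This is the point where the test mutates the cache out-of-band, then unlocks.
        lock.writeLock().unlock();
        evictor.join();
    }
}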

Example 8 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache.

From the class TestBucketWriterThread, method setUp.

/**
   * Set up variables and get the BucketCache and WriterThread into a state where tests
   * can manually control the running of the WriterThread and the BucketCache is empty.
   * @throws Exception
   */
@Before
public void setUp() throws Exception {
    // Arbitrary capacity.
    final int capacity = 16;
    // Run with one writer thread only, which means there will be one writer queue only too.
    // We depend on this below.
    final int writerThreadsCount = 1;
    this.bc = new MockBucketCache("heap", capacity, 1, new int[] { 1 }, writerThreadsCount, capacity, null, 100);
    assertEquals(writerThreadsCount, bc.writerThreads.length);
    assertEquals(writerThreadsCount, bc.writerQueues.size());
    // Get reference to our single WriterThread instance.
    this.wt = bc.writerThreads[0];
    this.q = bc.writerQueues.get(0);
    wt.disableWriter();
    this.plainKey = new BlockCacheKey("f", 0);
    this.plainCacheable = Mockito.mock(Cacheable.class);
    assertThat(bc.ramCache.isEmpty(), is(true));
    assertTrue(q.isEmpty());
}
Also used: Cacheable (org.apache.hadoop.hbase.io.hfile.Cacheable), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey), Before (org.junit.Before)
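
setUp builds the key with new BlockCacheKey("f", 0): a key is an (hfile name, offset) pair, which is what lets it index BucketCache.backingMap in the previous example. A minimal sketch of that contract, assuming only the two-argument constructor used throughout these examples (the map and values here are illustrative):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;

public class KeyContractSketch {
    public static void main(String[] args) {
        Map<BlockCacheKey, String> cache = new HashMap<>();
        cache.put(new BlockCacheKey("f", 0), "block-at-offset-0");
        // A second key built from the same (name, offset) pair locates the same entry.
        System.out.println(cache.get(new BlockCacheKey("f", 0)));     // block-at-offset-0
        System.out.println(cache.get(new BlockCacheKey("f", 1024)));  // null: different offset
    }
}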

Example 9 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache.

From the class TestCacheOnWriteInSchema, method readStoreFile.

private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache();
    StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL);
    HFile.Reader reader = sf.createReader().getHFileReader();
    try {
        // Open a scanner with (on read) caching disabled
        HFileScanner scanner = reader.getScanner(false, false);
        assertTrue(testDescription, scanner.seekTo());
        // Cribbed from io.hfile.TestCacheOnWrite
        long offset = 0;
        while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
            // Flags: don't cache the block, use pread, this is not a compaction.
            // Also, pass null for expected block type to avoid checking it.
            HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, DataBlockEncoding.NONE);
            BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
            boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
            boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
            if (shouldBeCached != isCached) {
                throw new AssertionError("shouldBeCached: " + shouldBeCached + "\n" + "isCached: " + isCached + "\n" + "Test description: " + testDescription + "\n" + "block: " + block + "\n" + "blockCacheKey: " + blockCacheKey);
            }
            offset += block.getOnDiskSizeWithHeader();
        }
    } finally {
        reader.close();
    }
}
Also used: HFileBlock (org.apache.hadoop.hbase.io.hfile.HFileBlock), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner), HFile (org.apache.hadoop.hbase.io.hfile.HFile), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey)
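
The while loop above is the standard way to visit every block in an HFile: start at offset 0 and advance by each block's on-disk size until the load-on-open section begins. A sketch of that walk factored into a reusable probe, using only the reader and cache calls shown above (the wrapper class and method name are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;

class CacheProbeSketch {
    static void reportCachedBlocks(HFile.Reader reader, BlockCache cache) throws IOException {
        long offset = 0;
        // Data blocks end where the load-on-open section of the trailer begins.
        while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
            // Same flags as above: no caching on read, pread, not a compaction,
            // no expected block type.
            HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, DataBlockEncoding.NONE);
            BlockCacheKey key = new BlockCacheKey(reader.getName(), offset);
            boolean cached = cache.getBlock(key, true, false, true) != null;
            System.out.println(block.getBlockType() + " @ " + offset + " cached=" + cached);
            offset += block.getOnDiskSizeWithHeader();
        }
    }
}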

Example 10 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache.

From the class BucketCache, method evictBlocksByHfileName.

/**
   * Evicts all blocks for a specific HFile.
   * <p>
   * This is used for evict-on-close to remove all blocks of a specific HFile.
   *
   * @return the number of blocks evicted
   */
@Override
public int evictBlocksByHfileName(String hfileName) {
    Set<BlockCacheKey> keySet = blocksByHFile.subSet(new BlockCacheKey(hfileName, Long.MIN_VALUE), true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
    int numEvicted = 0;
    for (BlockCacheKey key : keySet) {
        if (evictBlock(key)) {
            ++numEvicted;
        }
    }
    return numEvicted;
}
Also used: BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey)
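
The subSet call works because blocksByHFile is sorted first by HFile name and then by offset, so two sentinel keys at Long.MIN_VALUE and Long.MAX_VALUE bound exactly the blocks of one file. A self-contained sketch of that idiom with a simplified stand-in key (SimpleKey is illustrative, not the HBase class):

import java.util.Comparator;
import java.util.NavigableSet;
import java.util.concurrent.ConcurrentSkipListSet;

public class RangeEvictSketch {
    // Simplified stand-in for BlockCacheKey: ordered by file name, then offset.
    record SimpleKey(String hfileName, long offset) {}

    public static void main(String[] args) {
        ConcurrentSkipListSet<SimpleKey> blocksByHFile = new ConcurrentSkipListSet<>(
            Comparator.comparing(SimpleKey::hfileName).thenComparingLong(SimpleKey::offset));
        blocksByHFile.add(new SimpleKey("a", 0));
        blocksByHFile.add(new SimpleKey("a", 4096));
        blocksByHFile.add(new SimpleKey("b", 0));
        // Inclusive sentinel bounds select every block belonging to file "a".
        NavigableSet<SimpleKey> toEvict = blocksByHFile.subSet(
            new SimpleKey("a", Long.MIN_VALUE), true,
            new SimpleKey("a", Long.MAX_VALUE), true);
        System.out.println(toEvict.size());  // prints 2
    }
}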

Aggregations

BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey): 16 usages
CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock): 11 usages
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint): 10 usages
BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache): 9 usages
CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache): 9 usages
BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache): 9 usages
TableName (org.apache.hadoop.hbase.TableName): 8 usages
Region (org.apache.hadoop.hbase.regionserver.Region): 8 usages
Test (org.junit.Test): 8 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 7 usages
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 6 usages
Store (org.apache.hadoop.hbase.regionserver.Store): 5 usages
IOException (java.io.IOException): 2 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2 usages
File (java.io.File): 1 usage
FileInputStream (java.io.FileInputStream): 1 usage
ObjectInputStream (java.io.ObjectInputStream): 1 usage
ArrayList (java.util.ArrayList): 1 usage
Iterator (java.util.Iterator): 1 usage
Map (java.util.Map): 1 usage