Example 11 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache, from the class BucketCache, method retrieveFromFile:

@SuppressWarnings("unchecked")
private void retrieveFromFile(int[] bucketSizes) throws IOException, BucketAllocatorException, ClassNotFoundException {
    File persistenceFile = new File(persistencePath);
    if (!persistenceFile.exists()) {
        return;
    }
    assert !cacheEnabled;
    FileInputStream fis = null;
    ObjectInputStream ois = null;
    try {
        if (!ioEngine.isPersistent())
            throw new IOException("Attempt to restore non-persistent cache mappings!");
        fis = new FileInputStream(persistencePath);
        ois = new ObjectInputStream(fis);
        long capacitySize = ois.readLong();
        if (capacitySize != cacheCapacity)
            throw new IOException("Mismatched cache capacity:" + StringUtils.byteDesc(capacitySize) + ", expected: " + StringUtils.byteDesc(cacheCapacity));
        String ioclass = ois.readUTF();
        String mapclass = ois.readUTF();
        if (!ioEngine.getClass().getName().equals(ioclass))
            throw new IOException("Class name for IO engine mismatch: " + ioclass + ", expected:" + ioEngine.getClass().getName());
        if (!backingMap.getClass().getName().equals(mapclass))
            throw new IOException("Class name for cache map mismatch: " + mapclass + ", expected:" + backingMap.getClass().getName());
        UniqueIndexMap<Integer> deserMap = (UniqueIndexMap<Integer>) ois.readObject();
        ConcurrentHashMap<BlockCacheKey, BucketEntry> backingMapFromFile = (ConcurrentHashMap<BlockCacheKey, BucketEntry>) ois.readObject();
        BucketAllocator allocator = new BucketAllocator(cacheCapacity, bucketSizes, backingMapFromFile, realCacheSize);
        bucketAllocator = allocator;
        deserialiserMap = deserMap;
        backingMap = backingMapFromFile;
    } finally {
        if (ois != null)
            ois.close();
        if (fis != null)
            fis.close();
        if (!persistenceFile.delete()) {
            throw new IOException("Failed deleting persistence file " + persistenceFile.getAbsolutePath());
        }
    }
}
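
The restore logic above implies a matching write path. Below is a minimal write-side sketch, assuming the same fields (persistencePath, cacheCapacity, ioEngine, deserialiserMap, backingMap) are in scope; the method name is hypothetical and this is not the project's actual persistence code, only an illustration of the on-disk layout that retrieveFromFile expects:

// Hypothetical writer: emits exactly the layout retrieveFromFile reads back,
// in the same order. Requires java.io.FileOutputStream and java.io.ObjectOutputStream.
private void persistToFileSketch() throws IOException {
    try (FileOutputStream fos = new FileOutputStream(persistencePath);
         ObjectOutputStream oos = new ObjectOutputStream(fos)) {
        // Capacity is validated first on restore
        oos.writeLong(cacheCapacity);
        // Class names are checked against the running IO engine and backing map
        oos.writeUTF(ioEngine.getClass().getName());
        oos.writeUTF(backingMap.getClass().getName());
        // Read back as UniqueIndexMap<Integer> and
        // ConcurrentHashMap<BlockCacheKey, BucketEntry>, respectively
        oos.writeObject(deserialiserMap);
        oos.writeObject(backingMap);
    }
}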
Also used: IOException (java.io.IOException), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey), FileInputStream (java.io.FileInputStream), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), File (java.io.File), ObjectInputStream (java.io.ObjectInputStream)

Example 12 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache, from the class TestBlockEvictionFromClient, method testBlockEvictionAfterHBASE13082WithCompactionAndFlush:

@Test
public void testBlockEvictionAfterHBASE13082WithCompactionAndFlush() throws IOException, InterruptedException {
    // do flush and scan in parallel
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        compactionLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache();
        // insert data. 2 Rows are added
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        // Should create one HFile with 2 blocks
        region.flush(true);
        // refCount tracks block reference counts in the loops below
        int refCount = 0;
        // insert a second column, read the row, no new blocks, 3 new hits
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        byte[] data2 = Bytes.add(data, data);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        // flush, one new block
        System.out.println("Flushing cache");
        region.flush(true);
        Iterator<CachedBlock> iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // Start three scan threads
        ScanThread[] scanThreads = initiateScan(table, false);
        Thread.sleep(100);
        iterator = cache.iterator();
        boolean usedBlocksFound = false;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Blocks still in use by the scans have a ref count of NO_OF_THREADS (3)
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        // Make a put and do a flush
        QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        data2 = Bytes.add(data, data);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        // flush, one new block
        System.out.println("Flushing cache");
        region.flush(true);
        assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
        usedBlocksFound = false;
        System.out.println("Compacting");
        assertEquals(3, store.getStorefilesCount());
        store.triggerMajorCompaction();
        region.compact(true);
        // wait 10 seconds max
        waitForStoreFileCount(store, 1, 10000);
        assertEquals(1, store.getStorefilesCount());
        // Even after compaction is done, we will have some blocks that cannot
        // be evicted, because the scans are still referencing them
        iterator = cache.iterator();
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Blocks still have a ref count of NO_OF_THREADS (3), as the scans have not yet released them
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
        // Should not throw exception
        compactionLatch.countDown();
        latch.countDown();
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
        // by this time all blocks should have been evicted
        iterator = cache.iterator();
        // Since a flush and compaction happened after the scans started,
        // we need to ensure that all the original blocks of the compacted file
        // are also removed.
        iterateBlockCache(cache, iterator);
        Result r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        // The gets would be working on new blocks
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
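
The instanceof chain over BucketCache and CombinedBlockCache above recurs throughout these tests. A small helper could centralize it; the sketch below is hypothetical (refCountOf is not part of the test class), relying only on the getRefCount methods the tests already call:

private static int refCountOf(BlockCache cache, BlockCacheKey cacheKey) {
    // Only these two cache implementations expose a per-block reference count
    if (cache instanceof BucketCache) {
        return ((BucketCache) cache).getRefCount(cacheKey);
    } else if (cache instanceof CombinedBlockCache) {
        return ((CombinedBlockCache) cache).getRefCount(cacheKey);
    }
    // Other implementations: treat the block as unreferenced
    return 0;
}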
Also used: CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock), Store (org.apache.hadoop.hbase.regionserver.Store), CountDownLatch (java.util.concurrent.CountDownLatch), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), TableName (org.apache.hadoop.hbase.TableName), CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), Region (org.apache.hadoop.hbase.regionserver.Region), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), Test (org.junit.Test)

Example 13 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache, from the class TestBlockEvictionFromClient, method checkForBlockEviction:

private void checkForBlockEviction(BlockCache cache, boolean getClosed, boolean expectOnlyZero) throws InterruptedException {
    int counter = NO_OF_THREADS;
    if (CustomInnerRegionObserver.waitForGets.get()) {
        // Because only one row is selected, it has only 2 blocks
        counter = counter - 1;
        while (CustomInnerRegionObserver.countOfGets.get() < NO_OF_THREADS) {
            Thread.sleep(100);
        }
    } else {
        while (CustomInnerRegionObserver.countOfNext.get() < NO_OF_THREADS) {
            Thread.sleep(100);
        }
    }
    Iterator<CachedBlock> iterator = cache.iterator();
    int refCount = 0;
    while (iterator.hasNext()) {
        CachedBlock next = iterator.next();
        BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
        if (cache instanceof BucketCache) {
            refCount = ((BucketCache) cache).getRefCount(cacheKey);
        } else if (cache instanceof CombinedBlockCache) {
            refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
        } else {
            continue;
        }
        System.out.println(" the refcount is " + refCount + " block is " + cacheKey);
        if (CustomInnerRegionObserver.waitForGets.get()) {
            if (expectOnlyZero) {
                assertTrue(refCount == 0);
            }
            if (refCount != 0) {
                // all 3
                if (getClosed) {
                    // If the gets have closed, only the scans' blocks remain referenced
                    assertEquals(refCount, CustomInnerRegionObserver.countOfGets.get());
                } else {
                    assertEquals(refCount, CustomInnerRegionObserver.countOfGets.get() + (NO_OF_THREADS));
                }
            }
        } else {
            // scans additionally pin only 2 blocks
            if (expectOnlyZero) {
                assertTrue(refCount == 0);
            }
            if (refCount != 0) {
                if (getLatch == null) {
                    assertEquals(refCount, CustomInnerRegionObserver.countOfNext.get());
                } else {
                    assertEquals(refCount, CustomInnerRegionObserver.countOfNext.get() + (NO_OF_THREADS));
                }
            }
        }
    }
    CustomInnerRegionObserver.getCdl().get().countDown();
}
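
For orientation, a typical call sequence for this helper, assembled from example 15 below (initiateGet, GetThread, and the latch accessors are helpers of this test class; the exact arguments vary per test):

// Start the reader threads; they block inside the coprocessor hook
GetThread[] getThreads = initiateGet(table, true, true);
// Release the blocked gets, then wait for them to finish
CustomInnerRegionObserver.getCdl().get().countDown();
for (GetThread thread : getThreads) {
    thread.join();
}
// Verify that the blocks the gets pinned are released again
checkForBlockEviction(cache, true, false);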
Also used: CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock), CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint)

Example 14 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache, from the class TestBlockEvictionFromClient, method iterateBlockCache:

private void iterateBlockCache(BlockCache cache, Iterator<CachedBlock> iterator) {
    int refCount;
    while (iterator.hasNext()) {
        CachedBlock next = iterator.next();
        BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
        if (cache instanceof BucketCache) {
            refCount = ((BucketCache) cache).getRefCount(cacheKey);
        } else if (cache instanceof CombinedBlockCache) {
            refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
        } else {
            continue;
        }
        assertEquals(0, refCount);
    }
}
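
As examples 12 and 15 show, the helper is handed a fresh iterator over the cache and asserts that no block is still pinned:

Iterator<CachedBlock> iterator = cache.iterator();
// Passes only once every block's ref count has dropped back to zero
iterateBlockCache(cache, iterator);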
Also used: CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock), CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint)

Example 15 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache, from the class TestBlockEvictionFromClient, method testGetWithMultipleColumnFamilies:

@Test
public void testGetWithMultipleColumnFamilies() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        // Check if get() returns blocks on its close() itself
        getLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create KV that will give you two blocks
        // Create a table with block size as 1024
        byte[][] fams = new byte[10][];
        fams[0] = FAMILY;
        for (int i = 1; i < 10; i++) {
            fams[i] = (Bytes.toBytes("testFamily" + i));
        }
        table = TEST_UTIL.createTable(tableName, fams, 1, 1024, CustomInnerRegionObserver.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        BlockCache cache = setCacheProperties(region);
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        for (int i = 1; i < 10; i++) {
            put = new Put(ROW);
            put.addColumn(Bytes.toBytes("testFamily" + i), Bytes.toBytes("testQualifier" + i), data2);
            table.put(put);
            if (i % 2 == 0) {
                region.flush(true);
            }
        }
        region.flush(true);
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        region.flush(true);
        // flush the data
        System.out.println("Flushing cache");
        // Should create one HFile with 2 blocks
        CustomInnerRegionObserver.waitForGets.set(true);
        // Start three get threads
        GetThread[] getThreads = initiateGet(table, true, true);
        Thread.sleep(200);
        Iterator<CachedBlock> iterator = cache.iterator();
        boolean usedBlocksFound = false;
        int refCount = 0;
        int noOfBlocksWithRef = 0;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Blocks in use by the gets have a ref count of NO_OF_THREADS (3)
                System.out.println("The refCount is " + refCount);
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
                noOfBlocksWithRef++;
            }
        }
        assertTrue(usedBlocksFound);
        // the number of blocks still referenced by the gets
        assertEquals(3, noOfBlocksWithRef);
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (GetThread thread : getThreads) {
            thread.join();
        }
        // Verify that the gets have released the blocks they were holding
        CustomInnerRegionObserver.waitForGets.set(true);
        // giving some time for the block to be decremented
        checkForBlockEviction(cache, true, false);
        getLatch.countDown();
        System.out.println("Gets should have returned the bloks");
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
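
setCacheProperties is called above but not shown on this page. A plausible sketch, inferred from the per-store cache configuration in example 12; the body is an assumption, not necessarily the test class's actual helper:

private BlockCache setCacheProperties(Region region) {
    BlockCache cache = null;
    for (Store store : region.getStores()) {
        CacheConfig cacheConf = store.getCacheConfig();
        // Cache blocks as they are written and evict them when readers close
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        cache = cacheConf.getBlockCache();
    }
    return cache;
}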
Also used: CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock), CountDownLatch (java.util.concurrent.CountDownLatch), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), TableName (org.apache.hadoop.hbase.TableName), CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), Region (org.apache.hadoop.hbase.regionserver.Region), Test (org.junit.Test)

Aggregations

BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey): 16
CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock): 11
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint): 10
BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache): 9
CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache): 9
BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache): 9
TableName (org.apache.hadoop.hbase.TableName): 8
Region (org.apache.hadoop.hbase.regionserver.Region): 8
Test (org.junit.Test): 8
CountDownLatch (java.util.concurrent.CountDownLatch): 7
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 6
Store (org.apache.hadoop.hbase.regionserver.Store): 5
IOException (java.io.IOException): 2
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2
File (java.io.File): 1
FileInputStream (java.io.FileInputStream): 1
ObjectInputStream (java.io.ObjectInputStream): 1
ArrayList (java.util.ArrayList): 1
Iterator (java.util.Iterator): 1
Map (java.util.Map): 1