Example 41 with CacheConfig

use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

the class TestBlockEvictionFromClient method testGetWithCellsInDifferentFiles.

@Test
public void testGetWithCellsInDifferentFiles() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        // Check that get() returns its blocks to the cache on close()
        getLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create KVs that will span two blocks
        // Create a table with a block size of 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache();
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        region.flush(true);
        // flush the data
        System.out.println("Flushing cache");
        // Should create one HFile with 2 blocks
        CustomInnerRegionObserver.waitForGets.set(true);
        // Create three sets of gets
        GetThread[] getThreads = initiateGet(table, false, false);
        Thread.sleep(200);
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (GetThread thread : getThreads) {
            thread.join();
        }
        // Verify that the gets have returned the blocks they held
        CustomInnerRegionObserver.waitForGets.set(true);
        // giving some time for the block to be decremented
        checkForBlockEviction(cache, true, false);
        getLatch.countDown();
        System.out.println("Gets should have returned the bloks");
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used : Store(org.apache.hadoop.hbase.regionserver.Store) CountDownLatch(java.util.concurrent.CountDownLatch) TableName(org.apache.hadoop.hbase.TableName) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) Region(org.apache.hadoop.hbase.regionserver.Region) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
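
The eviction checks above (checkForBlockEviction and the latch handshake) boil down to walking the block cache and counting the blocks that belong to the test's HFiles. A minimal sketch of such a walk, assuming a cache obtained via cacheConf.getBlockCache() as in the test; countCachedDataBlocks and hfileName are hypothetical names, not part of the test class:

// Requires org.apache.hadoop.hbase.io.hfile.BlockCache and CachedBlock.
// BlockCache is Iterable<CachedBlock>; each entry reports its file name and block type.
private static int countCachedDataBlocks(BlockCache cache, String hfileName) {
    int count = 0;
    for (CachedBlock block : cache) {
        if (hfileName.equals(block.getFilename()) && block.getBlockType().isData()) {
            count++;
        }
    }
    return count;
}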

Example 42 with CacheConfig

use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

the class TestBlockEvictionFromClient method testParallelGetsAndScans.

@Test
public void testParallelGetsAndScans() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(2);
        // Check that get() returns its blocks to the cache on close()
        getLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create KVs that will span two blocks
        // Create a table with a block size of 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache();
        insertData(table);
        // flush the data
        System.out.println("Flushing cache");
        // Should create one HFile with 2 blocks
        region.flush(true);
        // Create three sets of scan
        CustomInnerRegionObserver.waitForGets.set(true);
        ScanThread[] scanThreads = initiateScan(table, false);
        // Create three sets of gets
        GetThread[] getThreads = initiateGet(table, false, false);
        checkForBlockEviction(cache, false, false);
        CustomInnerRegionObserver.waitForGets.set(false);
        checkForBlockEviction(cache, false, false);
        for (GetThread thread : getThreads) {
            thread.join();
        }
        // Verify that the gets have returned the blocks they held
        CustomInnerRegionObserver.waitForGets.set(true);
        // giving some time for the block to be decremented
        checkForBlockEviction(cache, true, false);
        getLatch.countDown();
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
        System.out.println("Scans should have returned the bloks");
        // Check with either true or false
        CustomInnerRegionObserver.waitForGets.set(false);
        // The scan should also have released the blocks by now
        checkForBlockEviction(cache, true, true);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used : Store(org.apache.hadoop.hbase.regionserver.Store) CountDownLatch(java.util.concurrent.CountDownLatch) TableName(org.apache.hadoop.hbase.TableName) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) Region(org.apache.hadoop.hbase.regionserver.Region) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
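
initiateGet and initiateScan are helpers of this test class that are not shown here. A simplified sketch of the pattern they presumably follow; the helper name, thread count, and error handling are all assumptions of this sketch:

// Requires org.apache.hadoop.hbase.client.Table and Get, and java.io.IOException.
// Issue the same Get from several threads at once so that multiple scanners
// hold references to the same cached blocks, then wait for all of them.
private static void runParallelGets(final Table table, final byte[] row, int nThreads)
        throws InterruptedException {
    Thread[] threads = new Thread[nThreads];
    for (int i = 0; i < nThreads; i++) {
        threads[i] = new Thread(() -> {
            try {
                table.get(new Get(row));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        threads[i].start();
    }
    for (Thread t : threads) {
        t.join();
    }
}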

Example 43 with CacheConfig

use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

the class TestBlockEvictionFromClient method testBlockRefCountAfterSplits.

@Test
public void testBlockRefCountAfterSplits() throws IOException, InterruptedException {
    Table table = null;
    try {
        final TableName tableName = TableName.valueOf(name.getMethodName());
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024);
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache();
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        put = new Put(ROW2);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        put = new Put(ROW3);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        region.flush(true);
        TEST_UTIL.getAdmin().split(tableName, ROW1);
        List<HRegionInfo> tableRegions = TEST_UTIL.getAdmin().getTableRegions(tableName);
        // Wait for splits
        while (tableRegions.size() != 2) {
            tableRegions = TEST_UTIL.getAdmin().getTableRegions(tableName);
            Thread.sleep(100);
        }
        region.compact(true);
        Iterator<CachedBlock> iterator = cache.iterator();
        // Though the split created HalfStoreFileReaders, the first-key and last-key scanners
        // must be closed in order to return those blocks
        iterateBlockCache(cache, iterator);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used : CachedBlock(org.apache.hadoop.hbase.io.hfile.CachedBlock) Store(org.apache.hadoop.hbase.regionserver.Store) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) Region(org.apache.hadoop.hbase.regionserver.Region) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)
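
The polling loop above waits for the split with no upper bound, so a stalled split hangs the test. A bounded variant of the same loop, assuming the test's TEST_UTIL and tableName fields; the 30-second deadline is an arbitrary choice for this sketch:

// Requires java.util.List, org.apache.hadoop.hbase.HRegionInfo, and org.junit.Assert.fail.
long deadline = System.currentTimeMillis() + 30_000;
List<HRegionInfo> tableRegions = TEST_UTIL.getAdmin().getTableRegions(tableName);
while (tableRegions.size() != 2) {
    if (System.currentTimeMillis() > deadline) {
        fail("Split of " + tableName + " did not complete within 30s");
    }
    Thread.sleep(100);
    tableRegions = TEST_UTIL.getAdmin().getTableRegions(tableName);
}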

Example 44 with CacheConfig

use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

the class TestHBaseFsckEncryption method extractHFileKey.

private byte[] extractHFileKey(Path path) throws Exception {
    HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path, new CacheConfig(conf), conf);
    try {
        reader.loadFileInfo();
        Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
        assertNotNull("Reader has a null crypto context", cryptoContext);
        Key key = cryptoContext.getKey();
        assertNotNull("Crypto context has no key", key);
        return key.getEncoded();
    } finally {
        reader.close();
    }
}
Also used : Encryption(org.apache.hadoop.hbase.io.crypto.Encryption) HFile(org.apache.hadoop.hbase.io.hfile.HFile) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Key(java.security.Key)
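
A hypothetical caller of extractHFileKey, checking every store file under one column-family directory against the key the test wrote with; familyDir and expectedKey are assumed names, not fields of TestHBaseFsckEncryption:

// Requires org.apache.hadoop.fs.FileSystem, FileStatus, Path, and org.junit.Assert.assertArrayEquals.
FileSystem fs = TEST_UTIL.getTestFileSystem();
for (FileStatus status : fs.listStatus(familyDir)) {
    byte[] extracted = extractHFileKey(status.getPath());
    assertArrayEquals("Wrong key for " + status.getPath(), expectedKey, extracted);
}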

Example 45 with CacheConfig

use of org.apache.hadoop.hbase.io.hfile.CacheConfig in project hbase by apache.

the class SplitTableRegionProcedure method splitStoreFiles.

/**
   * Split the parent region's store files, creating reference files for the two daughters
   * @param env MasterProcedureEnv
   * @throws IOException
   */
private Pair<Integer, Integer> splitStoreFiles(final MasterProcedureEnv env, final HRegionFileSystem regionFs) throws IOException {
    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
    final Configuration conf = env.getMasterConfiguration();
    // The following code sets up a thread pool executor with as many slots as
    // there are files to split. It then fires up everything, waits for
    // completion, and finally checks for any exceptions.
    //
    // Note: splitStoreFiles creates daughter region dirs under the parent splits dir.
    // Nothing to unroll on failure -- re-running createSplitsDir will
    // clean this up.
    int nbFiles = 0;
    for (String family : regionFs.getFamilies()) {
        Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
        if (storeFiles != null) {
            nbFiles += storeFiles.size();
        }
    }
    if (nbFiles == 0) {
        // no files need to be split.
        return new Pair<>(0, 0);
    }
    // Default max #threads to use is the smaller of table's configured number of blocking store
    // files or the available number of logical cores.
    int defMaxThreads = Math.min(conf.getInt(HStore.BLOCKING_STOREFILES_KEY, HStore.DEFAULT_BLOCKING_STOREFILE_COUNT), Runtime.getRuntime().availableProcessors());
    // Max #threads is the smaller of the number of storefiles or the default max determined above.
    int maxThreads = Math.min(conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX, defMaxThreads), nbFiles);
    LOG.info("Preparing to split " + nbFiles + " storefiles for region " + parentHRI + " using " + maxThreads + " threads");
    ThreadPoolExecutor threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads, Threads.getNamedThreadFactory("StoreFileSplitter-%1$d"));
    List<Future<Pair<Path, Path>>> futures = new ArrayList<>(nbFiles);
    // Split each store file.
    final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
    for (String family : regionFs.getFamilies()) {
        final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
        final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
        if (storeFiles != null && storeFiles.size() > 0) {
            final CacheConfig cacheConf = new CacheConfig(conf, hcd);
            for (StoreFileInfo storeFileInfo : storeFiles) {
                StoreFileSplitter sfs = new StoreFileSplitter(regionFs, family.getBytes(), new StoreFile(mfs.getFileSystem(), storeFileInfo, conf, cacheConf, hcd.getBloomFilterType()));
                futures.add(threadPool.submit(sfs));
            }
        }
    }
    // Shutdown the pool
    threadPool.shutdown();
    // Wait for all the tasks to finish
    long fileSplitTimeout = conf.getLong("hbase.master.fileSplitTimeout", 30000);
    try {
        boolean stillRunning = !threadPool.awaitTermination(fileSplitTimeout, TimeUnit.MILLISECONDS);
        if (stillRunning) {
            threadPool.shutdownNow();
            // wait for the thread to shutdown completely.
            while (!threadPool.isTerminated()) {
                Thread.sleep(50);
            }
            throw new IOException("Took too long to split the" + " files and create the references, aborting split");
        }
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
    int daughterA = 0;
    int daughterB = 0;
    // Look for any exception
    for (Future<Pair<Path, Path>> future : futures) {
        try {
            Pair<Path, Path> p = future.get();
            daughterA += p.getFirst() != null ? 1 : 0;
            daughterB += p.getSecond() != null ? 1 : 0;
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        } catch (ExecutionException e) {
            throw new IOException(e);
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Split storefiles for region " + parentHRI + " Daughter A: " + daughterA + " storefiles, Daughter B: " + daughterB + " storefiles.");
    }
    return new Pair<>(daughterA, daughterB);
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) InterruptedIOException(java.io.InterruptedIOException) Configuration(org.apache.hadoop.conf.Configuration) ArrayList(java.util.ArrayList) StoreFile(org.apache.hadoop.hbase.regionserver.StoreFile) ExecutionException(java.util.concurrent.ExecutionException) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Pair(org.apache.hadoop.hbase.util.Pair) Path(org.apache.hadoop.fs.Path) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) InterruptedIOException(java.io.InterruptedIOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Future(java.util.concurrent.Future) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) StoreFileInfo(org.apache.hadoop.hbase.regionserver.StoreFileInfo)
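
A worked instance of the thread-count arithmetic above, with sample numbers; 16 cores, a blocking-store-file limit of 10, and 3 files to split are assumptions of this sketch, not HBase defaults:

// defMaxThreads = min(blocking store files, logical cores)             = min(10, 16) = 10
// maxThreads    = min(defMaxThreads or configured max, files to split) = min(10, 3)  = 3
int cores = 16;
int blockingStoreFiles = 10;
int nbFiles = 3;
int defMaxThreads = Math.min(blockingStoreFiles, cores);  // 10
int maxThreads = Math.min(defMaxThreads, nbFiles);        // 3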

Aggregations

CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig) 63
Path (org.apache.hadoop.fs.Path) 28
Test (org.junit.Test) 26
Configuration (org.apache.hadoop.conf.Configuration) 21
HFile (org.apache.hadoop.hbase.io.hfile.HFile) 21
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext) 21
FileSystem (org.apache.hadoop.fs.FileSystem) 20
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) 20
BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache) 15
KeyValue (org.apache.hadoop.hbase.KeyValue) 14
TableName (org.apache.hadoop.hbase.TableName) 14
Region (org.apache.hadoop.hbase.regionserver.Region) 13
Store (org.apache.hadoop.hbase.regionserver.Store) 13
Cell (org.apache.hadoop.hbase.Cell) 10
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration) 10
CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) 10
IOException (java.io.IOException) 9
CountDownLatch (java.util.concurrent.CountDownLatch) 8
FileStatus (org.apache.hadoop.fs.FileStatus) 8
HFileScanner (org.apache.hadoop.hbase.io.hfile.HFileScanner) 8