Search in sources :

Example 1 with Store

use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.

The following example shows the chore method of the class CompactedHFilesDischarger.

@Override
public void chore() {
    // when the test case is not spinning up a cluster
    if (regionServerServices == null)
        return;
    List<Region> onlineRegions = regionServerServices.getOnlineRegions();
    if (onlineRegions != null) {
        for (Region region : onlineRegions) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Started the compacted hfiles cleaner for the region " + region.getRegionInfo());
            }
            for (Store store : region.getStores()) {
                try {
                    if (useExecutor && regionServerServices != null) {
                        CompactedHFilesDischargeHandler handler = new CompactedHFilesDischargeHandler((Server) regionServerServices, EventType.RS_COMPACTED_FILES_DISCHARGER, (HStore) store);
                        regionServerServices.getExecutorService().submit(handler);
                    } else {
                        // call synchronously if the RegionServerServices are not
                        // available
                        store.closeAndArchiveCompactedFiles();
                    }
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Completed archiving the compacted files for the region " + region.getRegionInfo() + " under the store " + store.getColumnFamilyName());
                    }
                } catch (Exception e) {
                    LOG.error("Exception while trying to close and archive the compacted store " + "files of the store  " + store.getColumnFamilyName() + " in the" + " region " + region.getRegionInfo(), e);
                }
            }
            if (LOG.isTraceEnabled()) {
                LOG.trace("Completed the compacted hfiles cleaner for the region " + region.getRegionInfo());
            }
        }
    }
}
Also used : Region(org.apache.hadoop.hbase.regionserver.Region) Store(org.apache.hadoop.hbase.regionserver.Store)

Example 2 with Store

use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.

The following example shows the addRegion method of the class SnapshotManifest.

/**
   * Creates a 'manifest' for the specified region, by reading directly from the HRegion object.
   * This is used by the "online snapshot" when the table is enabled.
   */
public void addRegion(final HRegion region) throws IOException {
    // 0. Get the ManifestBuilder/RegionVisitor
    RegionVisitor visitor = createRegionVisitor(desc);
    // 1. dump region meta info into the snapshot directory
    LOG.debug("Storing '" + region + "' region-info for snapshot.");
    Object regionData = visitor.regionOpen(region.getRegionInfo());
    monitor.rethrowException();
    // 2. iterate through all the stores in the region
    LOG.debug("Creating references for hfiles");
    for (Store store : region.getStores()) {
        // 2.1. build the snapshot reference for the store
        Object familyData = visitor.familyOpen(regionData, store.getFamily().getName());
        monitor.rethrowException();
        List<StoreFile> storeFiles = new ArrayList<>(store.getStorefiles());
        if (LOG.isDebugEnabled()) {
            LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
        }
        // 2.2. iterate through all the store's files and create "references".
        for (int i = 0, sz = storeFiles.size(); i < sz; i++) {
            StoreFile storeFile = storeFiles.get(i);
            monitor.rethrowException();
            // create "reference" to this store file.
            LOG.debug("Adding reference for file (" + (i + 1) + "/" + sz + "): " + storeFile.getPath());
            visitor.storeFile(regionData, familyData, storeFile.getFileInfo());
        }
        visitor.familyClose(regionData, familyData);
    }
    visitor.regionClose(regionData);
}
Also used : ArrayList(java.util.ArrayList) StoreFile(org.apache.hadoop.hbase.regionserver.StoreFile) Store(org.apache.hadoop.hbase.regionserver.Store)

Example 3 with Store

use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.

The following example shows the getStoreArchivePath method of the class HFileArchiveTestingUtil.

public static Path getStoreArchivePath(HBaseTestingUtility util, String tableName, byte[] storeName) throws IOException {
    byte[] table = Bytes.toBytes(tableName);
    // get the RS and region serving our table
    List<HRegion> servingRegions = util.getHBaseCluster().getRegions(table);
    HRegion region = servingRegions.get(0);
    // check that we actually have some store files that were archived
    Store store = region.getStore(storeName);
    return HFileArchiveTestingUtil.getStoreArchivePath(util.getConfiguration(), region, store);
}
Also used : HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Store(org.apache.hadoop.hbase.regionserver.Store)

Example 4 with Store

use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.

The following example shows the testCacheOnWriteEvictOnClose method of the class TestFromClientSide.

/**
   * Tests that cache on write works all the way up from the client-side.
   *
   * Performs inserts, flushes, and compactions, verifying changes in the block
   * cache along the way.
   *
   * @throws Exception
   */
@Test
public void testCacheOnWriteEvictOnClose() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[] data = Bytes.toBytes("data");
    Table table = TEST_UTIL.createTable(tableName, FAMILY);
    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
        // get the block cache and region
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache();
        // establish baseline stats
        long startBlockCount = cache.getBlockCount();
        long startBlockHits = cache.getStats().getHitCount();
        long startBlockMiss = cache.getStats().getMissCount();
        // wait till baseline is stable, (minimal 500 ms)
        for (int i = 0; i < 5; i++) {
            Thread.sleep(100);
            if (startBlockCount != cache.getBlockCount() || startBlockHits != cache.getStats().getHitCount() || startBlockMiss != cache.getStats().getMissCount()) {
                startBlockCount = cache.getBlockCount();
                startBlockHits = cache.getStats().getHitCount();
                startBlockMiss = cache.getStats().getMissCount();
                i = -1;
            }
        }
        // insert data
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        // data was in memstore so don't expect any changes
        assertEquals(startBlockCount, cache.getBlockCount());
        assertEquals(startBlockHits, cache.getStats().getHitCount());
        assertEquals(startBlockMiss, cache.getStats().getMissCount());
        // flush the data
        System.out.println("Flushing cache");
        region.flush(true);
        // expect one more block in cache, no change in hits/misses
        long expectedBlockCount = startBlockCount + 1;
        long expectedBlockHits = startBlockHits;
        long expectedBlockMiss = startBlockMiss;
        assertEquals(expectedBlockCount, cache.getBlockCount());
        assertEquals(expectedBlockHits, cache.getStats().getHitCount());
        assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
        // read the data and expect same blocks, one new hit, no misses
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        assertEquals(expectedBlockCount, cache.getBlockCount());
        assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
        assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
        // insert a second column, read the row, no new blocks, one new hit
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        byte[] data2 = Bytes.add(data, data);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        Result r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        assertEquals(expectedBlockCount, cache.getBlockCount());
        assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
        assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
        // flush, one new block
        System.out.println("Flushing cache");
        region.flush(true);
        assertEquals(++expectedBlockCount, cache.getBlockCount());
        assertEquals(expectedBlockHits, cache.getStats().getHitCount());
        assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
        // compact, net minus two blocks, two hits, no misses
        System.out.println("Compacting");
        assertEquals(2, store.getStorefilesCount());
        store.triggerMajorCompaction();
        region.compact(true);
        // wait 10 seconds max
        waitForStoreFileCount(store, 1, 10000);
        assertEquals(1, store.getStorefilesCount());
        // evicted two blocks, cached none
        expectedBlockCount -= 2;
        assertEquals(expectedBlockCount, cache.getBlockCount());
        expectedBlockHits += 2;
        assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
        assertEquals(expectedBlockHits, cache.getStats().getHitCount());
        // read the row, this should be a cache miss because we don't cache data
        // blocks on compaction
        r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        // cached one data block
        expectedBlockCount += 1;
        assertEquals(expectedBlockCount, cache.getBlockCount());
        assertEquals(expectedBlockHits, cache.getStats().getHitCount());
        assertEquals(++expectedBlockMiss, cache.getStats().getMissCount());
    }
}
Also used : Store(org.apache.hadoop.hbase.regionserver.Store) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) TableName(org.apache.hadoop.hbase.TableName) BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) Region(org.apache.hadoop.hbase.regionserver.Region) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig) Test(org.junit.Test)

Example 5 with Store

use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.

The following example shows the setCacheProperties method of the class TestBlockEvictionFromClient.

private BlockCache setCacheProperties(Region region) {
    Iterator<Store> strItr = region.getStores().iterator();
    BlockCache cache = null;
    while (strItr.hasNext()) {
        Store store = strItr.next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        // Use the last one
        cache = cacheConf.getBlockCache();
    }
    return cache;
}
Also used : BlockCache(org.apache.hadoop.hbase.io.hfile.BlockCache) CombinedBlockCache(org.apache.hadoop.hbase.io.hfile.CombinedBlockCache) Store(org.apache.hadoop.hbase.regionserver.Store) CacheConfig(org.apache.hadoop.hbase.io.hfile.CacheConfig)

Aggregations

Store (org.apache.hadoop.hbase.regionserver.Store)21 Region (org.apache.hadoop.hbase.regionserver.Region)7 Test (org.junit.Test)5 ArrayList (java.util.ArrayList)4 Configuration (org.apache.hadoop.conf.Configuration)4 HRegion (org.apache.hadoop.hbase.regionserver.HRegion)4 InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner)4 List (java.util.List)3 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)3 RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment)3 Test (org.junit.jupiter.api.Test)3 IOException (java.io.IOException)2 RegionTooBusyException (org.apache.hadoop.hbase.RegionTooBusyException)2 TableName (org.apache.hadoop.hbase.TableName)2 Table (org.apache.hadoop.hbase.client.Table)2 BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache)2 CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig)2 StoreScanner (uk.gov.gchq.gaffer.hbasestore.coprocessor.scanner.StoreScanner)2 HashMap (java.util.HashMap)1 Map (java.util.Map)1