
Example 16 with Store

Use of org.apache.hadoop.hbase.regionserver.Store in project Gaffer by gchq.

From class GafferCoprocessorTest, method shouldDelegatePreCompactWithRequestToStoreScanner:

@Test
public void shouldDelegatePreCompactWithRequestToStoreScanner() throws IOException {
    // Given
    final ObserverContext<RegionCoprocessorEnvironment> e = mock(ObserverContext.class);
    final Store store = mock(Store.class);
    final InternalScanner scanner = mock(InternalScanner.class);
    final CompactionRequest request = mock(CompactionRequest.class);
    // When
    final StoreScanner storeScanner = (StoreScanner) coprocessor.preCompact(e, store, scanner, ScanType.COMPACT_DROP_DELETES, request);
    // Then
    assertNotNull(storeScanner);
}
Also used: RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner), Store (org.apache.hadoop.hbase.regionserver.Store), CompactionRequest (org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest), StoreScanner (uk.gov.gchq.gaffer.hbasestore.coprocessor.scanner.StoreScanner), Test (org.junit.jupiter.api.Test)
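
The test verifies delegation purely through types: every collaborator is a Mockito mock, and the only assertion is that preCompact handed back Gaffer's StoreScanner wrapper. A minimal, self-contained sketch of that delegation-test pattern follows; Scanner, WrappingScanner and Wrapper are hypothetical stand-ins, not Gaffer or HBase types, and assertInstanceOf needs JUnit 5.8 or later.

import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.mockito.Mockito.mock;

import org.junit.jupiter.api.Test;

class DelegationPatternTest {

    // Hypothetical stand-ins for InternalScanner and StoreScanner above.
    interface Scanner {
    }

    static class WrappingScanner implements Scanner {
        final Scanner delegate;

        WrappingScanner(final Scanner delegate) {
            this.delegate = delegate;
        }
    }

    // Hypothetical stand-in for GafferCoprocessor: wraps whatever scanner it is handed.
    static class Wrapper {
        Scanner preCompact(final Scanner scanner) {
            return new WrappingScanner(scanner);
        }
    }

    @Test
    void shouldWrapTheScannerItIsGiven() {
        // Given: the collaborator is a mock, as in the test above
        final Scanner scanner = mock(Scanner.class);
        // When
        final Scanner result = new Wrapper().preCompact(scanner);
        // Then: the returned type proves the wrapping/delegation happened
        assertInstanceOf(WrappingScanner.class, result);
    }
}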

Example 17 with Store

Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.

From class StoreHotnessProtector, method start:

public void start(Map<byte[], List<Cell>> familyMaps) throws RegionTooBusyException {
    if (!isEnable()) {
        return;
    }
    String tooBusyStore = null;
    boolean aboveParallelThreadLimit = false;
    boolean aboveParallelPrePutLimit = false;
    for (Map.Entry<byte[], List<Cell>> e : familyMaps.entrySet()) {
        Store store = this.region.getStore(e.getKey());
        if (store == null || e.getValue() == null) {
            continue;
        }
        if (e.getValue().size() > this.parallelPutToStoreThreadLimitCheckMinColumnCount) {
            // Increment the prepare-put counter first: preparePutToStoreMap may be cleared
            // when the configuration changes, so computeIfAbsent re-creates it on demand.
            int preparePutCount = preparePutToStoreMap.computeIfAbsent(e.getKey(), key -> new AtomicInteger()).incrementAndGet();
            boolean storeAboveThread = store.getCurrentParallelPutCount() > this.parallelPutToStoreThreadLimit;
            boolean storeAbovePrePut = preparePutCount > this.parallelPreparePutToStoreThreadLimit;
            if (storeAboveThread || storeAbovePrePut) {
                tooBusyStore = (tooBusyStore == null ? store.getColumnFamilyName() : tooBusyStore + "," + store.getColumnFamilyName());
            }
            aboveParallelThreadLimit |= storeAboveThread;
            aboveParallelPrePutLimit |= storeAbovePrePut;
            if (LOG.isTraceEnabled()) {
                LOG.trace(store.getColumnFamilyName() + ": preparePutCount=" + preparePutCount + "; currentParallelPutCount=" + store.getCurrentParallelPutCount());
            }
        }
    }
    if (aboveParallelThreadLimit || aboveParallelPrePutLimit) {
        String msg = "StoreTooBusy," + this.region.getRegionInfo().getRegionNameAsString() + ":"
            + tooBusyStore + " Above "
            + (aboveParallelThreadLimit
                ? "parallelPutToStoreThreadLimit(" + this.parallelPutToStoreThreadLimit + ")"
                : "")
            + (aboveParallelThreadLimit && aboveParallelPrePutLimit ? " or " : "")
            + (aboveParallelPrePutLimit
                ? "parallelPreparePutToStoreThreadLimit(" + this.parallelPreparePutToStoreThreadLimit + ")"
                : "");
        LOG.trace(msg);
        throw new RegionTooBusyException(msg);
    }
}
Also used: Logger (org.slf4j.Logger), RegionTooBusyException (org.apache.hadoop.hbase.RegionTooBusyException), LoggerFactory (org.slf4j.LoggerFactory), ConcurrentMap (java.util.concurrent.ConcurrentMap), List (java.util.List), ConcurrentSkipListMap (java.util.concurrent.ConcurrentSkipListMap), Store (org.apache.hadoop.hbase.regionserver.Store), InterfaceAudience (org.apache.yetus.audience.InterfaceAudience), Region (org.apache.hadoop.hbase.regionserver.Region), ClassSize (org.apache.hadoop.hbase.util.ClassSize), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Map (java.util.Map), Configuration (org.apache.hadoop.conf.Configuration), Cell (org.apache.hadoop.hbase.Cell), Bytes (org.apache.hadoop.hbase.util.Bytes)
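
The core idiom in start() is a lock-free per-key counter: computeIfAbsent lazily installs one AtomicInteger per column family, and incrementAndGet both records the in-flight put and returns the value to compare against the limit. Below is a minimal sketch of that idiom in isolation; PerKeyLimiter and its method names are hypothetical, not part of the HBase API, and unlike this sketch the protector signals rejection by throwing RegionTooBusyException.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class PerKeyLimiter {

    private final ConcurrentMap<String, AtomicInteger> inFlight = new ConcurrentHashMap<>();
    private final int limit;

    public PerKeyLimiter(final int limit) {
        this.limit = limit;
    }

    // Optimistically count ourselves in, then back out if we crossed the limit,
    // so a rejected caller leaves the counter unchanged.
    public boolean tryAcquire(final String key) {
        final AtomicInteger counter = inFlight.computeIfAbsent(key, k -> new AtomicInteger());
        if (counter.incrementAndGet() > limit) {
            counter.decrementAndGet();
            return false;
        }
        return true;
    }

    // Callers that acquired successfully must release once their work completes.
    public void release(final String key) {
        final AtomicInteger counter = inFlight.get(key);
        if (counter != null) {
            counter.decrementAndGet();
        }
    }
}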

Example 18 with Store

Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.

From class HFileArchiveTestingUtil, method getStoreArchivePath:

public static Path getStoreArchivePath(HBaseTestingUtil util, String tableName, byte[] storeName) throws IOException {
    byte[] table = Bytes.toBytes(tableName);
    // get the RS and region serving our table
    List<HRegion> servingRegions = util.getHBaseCluster().getRegions(table);
    HRegion region = servingRegions.get(0);
    // look up the store for the given family; its archive path is what we return
    Store store = region.getStore(storeName);
    return HFileArchiveTestingUtil.getStoreArchivePath(util.getConfiguration(), region, store);
}
Also used: HRegion (org.apache.hadoop.hbase.regionserver.HRegion), Store (org.apache.hadoop.hbase.regionserver.Store)
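
For orientation, the store archive path resolved here conventionally sits under the cluster root directory at archive/data/<namespace>/<table>/<encoded region>/<family>. The sketch below assembles that layout by hand purely to illustrate the convention; real code should keep delegating to HFileArchiveTestingUtil/HFileArchiveUtil, which own the actual layout.

import org.apache.hadoop.fs.Path;

// Illustrative only: all five inputs are assumed to be known to the caller.
public final class ArchivePathSketch {

    static Path storeArchivePath(final Path rootDir, final String namespace, final String table,
        final String encodedRegionName, final String family) {
        return new Path(rootDir, String.format("archive/data/%s/%s/%s/%s",
            namespace, table, encodedRegionName, family));
    }
}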

Example 19 with Store

Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.

From class TestLowLatencySpaceQuotas, method testSnapshotSizes:

@Test
public void testSnapshotSizes() throws Exception {
    TableName tn = helper.createTableWithRegions(1);
    // Set a quota
    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
    admin.setQuota(settings);
    // Write some data and flush it to disk.
    final long sizePerBatch = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
    helper.writeData(tn, sizePerBatch);
    admin.flush(tn);
    final String snapshot1 = "snapshot1";
    admin.snapshot(snapshot1, tn, SnapshotType.SKIPFLUSH);
    // Compute the size of the file for the Region we'll send to archive
    Region region = Iterables.getOnlyElement(TEST_UTIL.getHBaseCluster().getRegions(tn));
    List<? extends Store> stores = region.getStores();
    long summer = 0;
    for (Store store : stores) {
        summer += store.getStorefilesSize();
    }
    final long storeFileSize = summer;
    // Wait for the table to show the usage
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {

        @Override
        boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
            return snapshot.getUsage() == storeFileSize;
        }
    });
    // Spoof a "full" computation of snapshot size. Normally the chore handles this, but we want
    // to test in the absence of this chore.
    FileArchiverNotifier notifier = TEST_UTIL.getHBaseCluster().getMaster().getSnapshotQuotaObserverChore().getNotifierForTable(tn);
    notifier.computeAndStoreSnapshotSizes(Collections.singletonList(snapshot1));
    // Force a major compaction to create a new file and push the old file to the archive
    TEST_UTIL.compact(tn, true);
    // After moving the old file to archive/, the space of this table should double
    // We have a new file created by the majc referenced by the table and the snapshot still
    // referencing the old file.
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {

        @Override
        boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
            return snapshot.getUsage() >= 2 * storeFileSize;
        }
    });
    try (Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
        Result r = quotaTable.get(QuotaTableUtil.makeGetForSnapshotSize(tn, snapshot1));
        assertTrue("Expected a non-null, non-empty Result", r != null && !r.isEmpty());
        assertTrue(r.advance());
        assertEquals("The snapshot's size should be the same as the origin store file", storeFileSize, QuotaTableUtil.parseSnapshotSize(r.current()));
        r = quotaTable.get(QuotaTableUtil.createGetNamespaceSnapshotSize(tn.getNamespaceAsString()));
        assertTrue("Expected a non-null, non-empty Result", r != null && !r.isEmpty());
        assertTrue(r.advance());
        assertEquals("The snapshot's size should be the same as the origin store file", storeFileSize, QuotaTableUtil.parseSnapshotSize(r.current()));
    }
}
Also used: Table (org.apache.hadoop.hbase.client.Table), Store (org.apache.hadoop.hbase.regionserver.Store), Result (org.apache.hadoop.hbase.client.Result), SpaceQuotaSnapshotPredicate (org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests.SpaceQuotaSnapshotPredicate), TableName (org.apache.hadoop.hbase.TableName), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), Region (org.apache.hadoop.hbase.regionserver.Region), Test (org.junit.Test)
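
The summing loop over the stores can equivalently be written as a single stream reduction, using only the getStores() and getStorefilesSize() calls already shown above; a minimal sketch:

// One pass over the region's stores; equivalent to the summing loop above.
final long storeFileSize = region.getStores().stream()
    .mapToLong(Store::getStorefilesSize)
    .sum();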

Example 20 with Store

Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.

From class AbstractTestLogRolling, method testCompactionRecordDoesntBlockRolling:

/**
 * Tests that logs are deleted when some region has a compaction
 * record in WAL and no other records. See HBASE-8597.
 */
@Test
public void testCompactionRecordDoesntBlockRolling() throws Exception {
    Table table = null;
    // When the hbase:meta table can be opened, the region servers are running
    Table t = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    try {
        table = createTestTable(getName());
        server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
        HRegion region = server.getRegions(table.getName()).get(0);
        final WAL log = server.getWAL(region.getRegionInfo());
        Store s = region.getStore(HConstants.CATALOG_FAMILY);
        // Put some stuff into table, to make sure we have some files to compact.
        for (int i = 1; i <= 2; ++i) {
            doPut(table, i);
            admin.flush(table.getName());
        }
        // don't flush yet, or compaction might trigger before we roll WAL
        doPut(table, 3);
        assertEquals("Should have no WAL after initial writes", 0, AbstractFSWALProvider.getNumRolledLogFiles(log));
        assertEquals(2, s.getStorefilesCount());
        // Roll the log and compact table, to have compaction record in the 2nd WAL.
        log.rollWriter();
        assertEquals("Should have WAL; one table is not flushed", 1, AbstractFSWALProvider.getNumRolledLogFiles(log));
        admin.flush(table.getName());
        region.compact(false);
        // Wait for the compaction, in case the flush triggered it before us.
        Assert.assertNotNull(s);
        for (int waitTime = 3000; s.getStorefilesCount() > 1 && waitTime > 0; waitTime -= 200) {
            Threads.sleepWithoutInterrupt(200);
        }
        assertEquals("Compaction didn't happen", 1, s.getStorefilesCount());
        // Write some value to the table so the WAL cannot be deleted until table is flushed.
        // Now 2nd WAL will have both compaction and put record for table.
        doPut(table, 0);
        // 1st WAL deleted, 2nd not deleted yet.
        log.rollWriter();
        assertEquals("Should have WAL; one table is not flushed", 1, AbstractFSWALProvider.getNumRolledLogFiles(log));
        // Flush table to make latest WAL obsolete; write another record, and roll again.
        admin.flush(table.getName());
        doPut(table, 1);
        // Now 2nd WAL is deleted and 3rd is added.
        log.rollWriter();
        assertEquals("Should have 1 WALs at the end", 1, AbstractFSWALProvider.getNumRolledLogFiles(log));
    } finally {
        if (t != null)
            t.close();
        if (table != null)
            table.close();
    }
}
Also used: HRegion (org.apache.hadoop.hbase.regionserver.HRegion), Table (org.apache.hadoop.hbase.client.Table), WAL (org.apache.hadoop.hbase.wal.WAL), Store (org.apache.hadoop.hbase.regionserver.Store), Test (org.junit.Test)
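
The hand-rolled sleep loop that waits for the compaction could also use the waitFor helper Example 19 relies on; a sketch, assuming the same (timeout, interval, predicate) overload and a lambda-friendly predicate type in this HBase version:

// Wait up to 3 s, polling every 200 ms, until compaction leaves a single store file.
TEST_UTIL.waitFor(3000, 200, () -> s.getStorefilesCount() <= 1);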

Aggregations

Store (org.apache.hadoop.hbase.regionserver.Store): 21
Region (org.apache.hadoop.hbase.regionserver.Region): 7
Test (org.junit.Test): 5
ArrayList (java.util.ArrayList): 4
Configuration (org.apache.hadoop.conf.Configuration): 4
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 4
InternalScanner (org.apache.hadoop.hbase.regionserver.InternalScanner): 4
List (java.util.List): 3
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 3
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 3
Test (org.junit.jupiter.api.Test): 3
IOException (java.io.IOException): 2
RegionTooBusyException (org.apache.hadoop.hbase.RegionTooBusyException): 2
TableName (org.apache.hadoop.hbase.TableName): 2
Table (org.apache.hadoop.hbase.client.Table): 2
BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache): 2
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 2
StoreScanner (uk.gov.gchq.gaffer.hbasestore.coprocessor.scanner.StoreScanner): 2
HashMap (java.util.HashMap): 1
Map (java.util.Map): 1