Use of org.apache.hadoop.hbase.regionserver.Store in project Gaffer by gchq.
From class GafferCoprocessorTest, method shouldDelegatePreCompactWithRequestToStoreScanner.
@Test
public void shouldDelegatePreCompactWithRequestToStoreScanner() throws IOException {
    // Given
    final ObserverContext<RegionCoprocessorEnvironment> e = mock(ObserverContext.class);
    final Store store = mock(Store.class);
    final InternalScanner scanner = mock(InternalScanner.class);
    final CompactionRequest request = mock(CompactionRequest.class);

    // When
    final StoreScanner storeScanner = (StoreScanner) coprocessor.preCompact(e, store, scanner,
            ScanType.COMPACT_DROP_DELETES, request);

    // Then
    assertNotNull(storeScanner);
}
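The test only asserts on the type of the returned scanner, so the coprocessor must be replacing the default compaction scanner with a wrapper. Below is a minimal sketch of that kind of HBase 1.x RegionObserver hook, not Gaffer's actual implementation; WrappingObserver and DelegatingScanner are hypothetical names:

    // A minimal sketch, not Gaffer's actual code: an HBase 1.x observer that
    // replaces the compaction scanner with a wrapper, which is the delegation
    // the test above asserts via the cast.
    public class WrappingObserver extends BaseRegionObserver {

        /** Hypothetical pass-through wrapper; a real one would filter cells. */
        private static final class DelegatingScanner implements InternalScanner {
            private final InternalScanner delegate;

            DelegatingScanner(final InternalScanner delegate) {
                this.delegate = delegate;
            }

            @Override
            public boolean next(final List<Cell> results) throws IOException {
                return delegate.next(results);
            }

            @Override
            public boolean next(final List<Cell> results, final ScannerContext context)
                    throws IOException {
                return delegate.next(results, context);
            }

            @Override
            public void close() throws IOException {
                delegate.close();
            }
        }

        @Override
        public InternalScanner preCompact(final ObserverContext<RegionCoprocessorEnvironment> e,
                final Store store, final InternalScanner scanner, final ScanType scanType,
                final CompactionRequest request) throws IOException {
            // Returning a wrapper lets project-specific filtering run during compaction.
            return new DelegatingScanner(scanner);
        }
    }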
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.
From class StoreHotnessProtector, method start.
public void start(Map<byte[], List<Cell>> familyMaps) throws RegionTooBusyException {
    if (!isEnable()) {
        return;
    }
    String tooBusyStore = null;
    boolean aboveParallelThreadLimit = false;
    boolean aboveParallelPrePutLimit = false;
    for (Map.Entry<byte[], List<Cell>> e : familyMaps.entrySet()) {
        Store store = this.region.getStore(e.getKey());
        if (store == null || e.getValue() == null) {
            continue;
        }
        if (e.getValue().size() > this.parallelPutToStoreThreadLimitCheckMinColumnCount) {
            // we need to try to add #preparePutCount at first because preparePutToStoreMap will be
            // cleared when changing the configuration.
            int preparePutCount = preparePutToStoreMap
                    .computeIfAbsent(e.getKey(), key -> new AtomicInteger())
                    .incrementAndGet();
            boolean storeAboveThread =
                    store.getCurrentParallelPutCount() > this.parallelPutToStoreThreadLimit;
            boolean storeAbovePrePut =
                    preparePutCount > this.parallelPreparePutToStoreThreadLimit;
            if (storeAboveThread || storeAbovePrePut) {
                tooBusyStore = (tooBusyStore == null
                        ? store.getColumnFamilyName()
                        : tooBusyStore + "," + store.getColumnFamilyName());
            }
            aboveParallelThreadLimit |= storeAboveThread;
            aboveParallelPrePutLimit |= storeAbovePrePut;
            if (LOG.isTraceEnabled()) {
                LOG.trace(store.getColumnFamilyName() + ": preparePutCount=" + preparePutCount
                        + "; currentParallelPutCount=" + store.getCurrentParallelPutCount());
            }
        }
    }
    if (aboveParallelThreadLimit || aboveParallelPrePutLimit) {
        String msg = "StoreTooBusy," + this.region.getRegionInfo().getRegionNameAsString() + ":"
                + tooBusyStore + " Above "
                + (aboveParallelThreadLimit
                        ? "parallelPutToStoreThreadLimit(" + this.parallelPutToStoreThreadLimit + ")"
                        : "")
                + (aboveParallelThreadLimit && aboveParallelPrePutLimit ? " or " : "")
                + (aboveParallelPrePutLimit
                        ? "parallelPreparePutToStoreThreadLimit("
                                + this.parallelPreparePutToStoreThreadLimit + ")"
                        : "");
        LOG.trace(msg);
        throw new RegionTooBusyException(msg);
    }
}
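start() only takes the reservations: it bumps the per-family prepare-put counters and throws when a store exceeds either limit, so callers must pair it with finish() to release those counters. A minimal sketch of the caller pattern (the batch-mutation body is illustrative, not copied from HBase):

    StoreHotnessProtector protector = new StoreHotnessProtector(region, conf);
    try {
        // Throws RegionTooBusyException when a column family is too hot.
        protector.start(familyMaps);
        // ... apply the batch of puts to the memstore (illustrative) ...
    } finally {
        // Release the prepare-put counters taken in start().
        protector.finish(familyMaps);
    }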
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.
From class HFileArchiveTestingUtil, method getStoreArchivePath.
public static Path getStoreArchivePath(HBaseTestingUtil util, String tableName, byte[] storeName)
        throws IOException {
    byte[] table = Bytes.toBytes(tableName);
    // get the RS and region serving our table
    List<HRegion> servingRegions = util.getHBaseCluster().getRegions(table);
    HRegion region = servingRegions.get(0);
    // get the store for the given column family
    Store store = region.getStore(storeName);
    return HFileArchiveTestingUtil.getStoreArchivePath(util.getConfiguration(), region, store);
}
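A hypothetical call site, resolving where archived HFiles for one column family end up (TEST_UTIL, the table name, and the family name are illustrative):

    // Illustrative: archive directory for family "f1" of table "testTable".
    Path archiveDir = HFileArchiveTestingUtil.getStoreArchivePath(
            TEST_UTIL, "testTable", Bytes.toBytes("f1"));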
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.
From class TestLowLatencySpaceQuotas, method testSnapshotSizes.
@Test
public void testSnapshotSizes() throws Exception {
    TableName tn = helper.createTableWithRegions(1);
    // Set a quota
    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
            tn, SpaceQuotaHelperForTests.ONE_GIGABYTE, SpaceViolationPolicy.NO_INSERTS);
    admin.setQuota(settings);
    // Write some data and flush it to disk.
    final long sizePerBatch = 2L * SpaceQuotaHelperForTests.ONE_MEGABYTE;
    helper.writeData(tn, sizePerBatch);
    admin.flush(tn);
    final String snapshot1 = "snapshot1";
    admin.snapshot(snapshot1, tn, SnapshotType.SKIPFLUSH);
    // Compute the size of the file for the Region we'll send to archive
    Region region = Iterables.getOnlyElement(TEST_UTIL.getHBaseCluster().getRegions(tn));
    List<? extends Store> stores = region.getStores();
    long summer = 0;
    for (Store store : stores) {
        summer += store.getStorefilesSize();
    }
    final long storeFileSize = summer;
    // Wait for the table to show the usage
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
        @Override
        boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
            return snapshot.getUsage() == storeFileSize;
        }
    });
    // Spoof a "full" computation of snapshot size. Normally the chore handles this, but we want
    // to test in the absence of this chore.
    FileArchiverNotifier notifier = TEST_UTIL.getHBaseCluster().getMaster()
            .getSnapshotQuotaObserverChore().getNotifierForTable(tn);
    notifier.computeAndStoreSnapshotSizes(Collections.singletonList(snapshot1));
    // Force a major compaction to create a new file and push the old file to the archive
    TEST_UTIL.compact(tn, true);
    // After moving the old file to archive/, the space used by this table should double:
    // the table now references the new file created by the major compaction, while the
    // snapshot still references the old file.
    TEST_UTIL.waitFor(30 * 1000, 500, new SpaceQuotaSnapshotPredicate(conn, tn) {
        @Override
        boolean evaluate(SpaceQuotaSnapshot snapshot) throws Exception {
            return snapshot.getUsage() >= 2 * storeFileSize;
        }
    });
    try (Table quotaTable = conn.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
        Result r = quotaTable.get(QuotaTableUtil.makeGetForSnapshotSize(tn, snapshot1));
        assertTrue("Expected a non-null, non-empty Result", r != null && !r.isEmpty());
        assertTrue(r.advance());
        assertEquals("The snapshot's size should be the same as the original store file",
                storeFileSize, QuotaTableUtil.parseSnapshotSize(r.current()));
        r = quotaTable.get(QuotaTableUtil.createGetNamespaceSnapshotSize(tn.getNamespaceAsString()));
        assertTrue("Expected a non-null, non-empty Result", r != null && !r.isEmpty());
        assertTrue(r.advance());
        assertEquals("The snapshot's size should be the same as the original store file",
                storeFileSize, QuotaTableUtil.parseSnapshotSize(r.current()));
    }
}
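As an aside, the manual summing loop in the test can be written as a stream with identical behavior, since Store.getStorefilesSize() returns a long:

    // Equivalent to the 'summer' loop above.
    final long storeFileSize = region.getStores().stream()
            .mapToLong(Store::getStorefilesSize)
            .sum();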
Use of org.apache.hadoop.hbase.regionserver.Store in project hbase by apache.
From class AbstractTestLogRolling, method testCompactionRecordDoesntBlockRolling.
/**
 * Tests that logs are deleted when some region has a compaction
 * record in WAL and no other records. See HBASE-8597.
 */
@Test
public void testCompactionRecordDoesntBlockRolling() throws Exception {
    Table table = null;
    // When the hbase:meta table can be opened, the region servers are running
    Table t = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    try {
        table = createTestTable(getName());
        server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
        HRegion region = server.getRegions(table.getName()).get(0);
        final WAL log = server.getWAL(region.getRegionInfo());
        Store s = region.getStore(HConstants.CATALOG_FAMILY);
        // Put some stuff into table, to make sure we have some files to compact.
        for (int i = 1; i <= 2; ++i) {
            doPut(table, i);
            admin.flush(table.getName());
        }
        // don't flush yet, or compaction might trigger before we roll WAL
        doPut(table, 3);
        assertEquals("Should have no WAL after initial writes", 0,
                AbstractFSWALProvider.getNumRolledLogFiles(log));
        assertEquals(2, s.getStorefilesCount());
        // Roll the log and compact table, to have compaction record in the 2nd WAL.
        log.rollWriter();
        assertEquals("Should have WAL; one table is not flushed", 1,
                AbstractFSWALProvider.getNumRolledLogFiles(log));
        admin.flush(table.getName());
        region.compact(false);
        // Wait for compaction in case the flush triggered it before us.
        Assert.assertNotNull(s);
        for (int waitTime = 3000; s.getStorefilesCount() > 1 && waitTime > 0; waitTime -= 200) {
            Threads.sleepWithoutInterrupt(200);
        }
        assertEquals("Compaction didn't happen", 1, s.getStorefilesCount());
        // Write some value to the table so the WAL cannot be deleted until table is flushed.
        // Now the 2nd WAL will have both compaction and put records for the table.
        doPut(table, 0);
        // 1st WAL deleted, 2nd not deleted yet.
        log.rollWriter();
        assertEquals("Should have WAL; one table is not flushed", 1,
                AbstractFSWALProvider.getNumRolledLogFiles(log));
        // Flush table to make latest WAL obsolete; write another record, and roll again.
        admin.flush(table.getName());
        doPut(table, 1);
        // Now the 2nd WAL is deleted and a 3rd is added.
        log.rollWriter();
        assertEquals("Should have 1 WAL at the end", 1,
                AbstractFSWALProvider.getNumRolledLogFiles(log));
    } finally {
        if (t != null) {
            t.close();
        }
        if (table != null) {
            table.close();
        }
    }
}
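The manual sleep loop above polls until at most one store file remains; the same wait can be expressed with the test utility's predicate helper, sketched here (the original keeps the explicit loop):

    // Poll every 200 ms, up to 3 s, until compaction leaves a single store file.
    TEST_UTIL.waitFor(3000, 200, () -> s.getStorefilesCount() <= 1);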