Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
In the class HBaseTestBase, the method forceRegionFlush:
/**
 * Force and block on a flush to occur on all regions of table {@code tableName}.
 * @param tableName The table whose regions should be flushed.
 */
public void forceRegionFlush(byte[] tableName) throws IOException {
  MiniHBaseCluster hbaseCluster = getHBaseCluster();
  if (hbaseCluster != null) {
    TableName qualifiedTableName = TableName.valueOf(tableName);
    for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) {
      List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(qualifiedTableName);
      List<Runnable> flushers = new ArrayList<>();
      for (HRegion region : serverRegions) {
        flushers.add(createFlushRegion(region));
      }
      parallelRun(flushers);
      LOG.info("RegionServer {}: Flushed {} regions for table {}",
               t.getRegionServer().getServerName().toString(), serverRegions.size(),
               Bytes.toStringBinary(tableName));
    }
  }
}
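The helper createFlushRegion is defined elsewhere in HBaseTestBase and not shown on this page. A minimal sketch of what it might look like, assuming an HBase 1.x-era API where HRegion.flush(true) forces a memstore flush (0.98-era versions expose flushcache() instead), with errors rethrown so parallelRun can surface them:

protected Runnable createFlushRegion(final HRegion region) {
  return new Runnable() {
    @Override
    public void run() {
      try {
        // force a flush even if the memstore is below the flush threshold;
        // on 0.98-era HBase this call would be region.flushcache() instead
        region.flush(true);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  };
}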
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
In the class HBaseTestBase, the method forceRegionCompact:
/**
 * Force and block on a compaction on all regions of table {@code tableName}.
 * @param tableName The table whose regions should be compacted.
 * @param majorCompact Whether a major compaction should be requested.
 */
public void forceRegionCompact(byte[] tableName, boolean majorCompact) throws IOException {
  MiniHBaseCluster hbaseCluster = getHBaseCluster();
  if (hbaseCluster != null) {
    TableName qualifiedTableName = TableName.valueOf(tableName);
    for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) {
      List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(qualifiedTableName);
      List<Runnable> compacters = new ArrayList<>();
      for (HRegion region : serverRegions) {
        compacters.add(createCompactRegion(region, majorCompact));
      }
      parallelRun(compacters);
      LOG.info("RegionServer {}: Compacted {} regions for table {}",
               t.getRegionServer().getServerName().toString(), serverRegions.size(),
               Bytes.toStringBinary(tableName));
    }
  }
}
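The companion helpers createCompactRegion and parallelRun are likewise defined elsewhere. A sketch of plausible implementations, assuming HRegion.compact(boolean) requests a (major) compaction in the HBase version in use; parallelRun is plain JDK concurrency (java.util.concurrent) that runs all tasks and blocks until every one has finished:

protected Runnable createCompactRegion(final HRegion region, final boolean majorCompact) {
  return new Runnable() {
    @Override
    public void run() {
      try {
        // request a compaction; majorCompact selects a major compaction
        region.compact(majorCompact);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  };
}

private void parallelRun(List<Runnable> tasks) {
  if (tasks.isEmpty()) {
    return;
  }
  ExecutorService executor = Executors.newFixedThreadPool(tasks.size());
  try {
    List<Future<?>> futures = new ArrayList<>();
    for (Runnable task : tasks) {
      futures.add(executor.submit(task));
    }
    // block until all flushes/compactions have completed
    for (Future<?> future : futures) {
      try {
        future.get();
      } catch (InterruptedException | ExecutionException e) {
        throw new RuntimeException(e);
      }
    }
  } finally {
    executor.shutdownNow();
  }
}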
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
In the class IncrementSummingScannerTest, the method testWithBatchLimit:
@Test
public void testWithBatchLimit() throws Exception {
  TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "testWithBatchLimit");
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c2");
  HRegion region = createRegion(tableId, familyBytes);
  try {
    region.initialize();
    long now = System.currentTimeMillis();
    // put a non-increment column
    Put p = new Put(Bytes.toBytes("r4"));
    p.add(familyBytes, Bytes.toBytes("c1"), Bytes.toBytes("value1"));
    region.put(p);
    // now put some increment deltas in a column
    p = new Put(Bytes.toBytes("r4"));
    for (int i = 0; i < 3; i++) {
      p.add(familyBytes, columnBytes, now - i, Bytes.toBytes(1L));
    }
    p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
    region.put(p);
    // put some more non-increment columns
    p = new Put(Bytes.toBytes("r4"));
    p.add(familyBytes, Bytes.toBytes("c3"), Bytes.toBytes("value3"));
    region.put(p);
    p = new Put(Bytes.toBytes("r4"));
    p.add(familyBytes, Bytes.toBytes("c4"), Bytes.toBytes("value4"));
    region.put(p);
    p = new Put(Bytes.toBytes("r4"));
    p.add(familyBytes, Bytes.toBytes("c5"), Bytes.toBytes("value5"));
    region.put(p);
    // this put will appear as a "total" sum prior to all the delta puts
    p = new Put(Bytes.toBytes("r4"));
    p.add(familyBytes, columnBytes, now - 5, Bytes.toBytes(5L));
    region.put(p);
    Scan scan = new Scan(Bytes.toBytes("r4"));
    scan.setMaxVersions();
    RegionScanner scanner = new IncrementSummingScanner(region, 3, region.getScanner(scan), ScanType.USER_SCAN);
    List<Cell> results = Lists.newArrayList();
    scanner.next(results);
    assertEquals(3, results.size());
    Cell cell = results.get(0);
    assertNotNull(cell);
    assertEquals("value1", Bytes.toString(cell.getValue()));
    cell = results.get(1);
    assertNotNull(cell);
    // the three deltas of 1 are summed with the earlier total of 5
    assertEquals(8L, Bytes.toLong(cell.getValue()));
    cell = results.get(2);
    assertNotNull(cell);
    assertEquals("value3", Bytes.toString(cell.getValue()));
  } finally {
    region.close();
  }
}
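The batch limit of 3 passed to IncrementSummingScanner caps how many cells a single next() call returns, which is why only c1, the merged increment, and c3 come back above. A sketch of draining the rest of the row (c4 and c5 in this test), relying on the InternalScanner contract that next() returns true while more cells remain; this is an illustration, not part of the original test:

List<Cell> batch = Lists.newArrayList();
boolean hasMore;
do {
  batch.clear();
  // each call returns at most 3 cells, per the scanner's batch limit
  hasMore = scanner.next(batch);
} while (hasMore);
scanner.close();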
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
In the class MessageTableRegionObserver, the method postFlush:
@Override
public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
  // Record whether the region is empty after a flush
  HRegion region = e.getEnvironment().getRegion();
  // After a flush, if the memstore size is zero and there are no store files
  // for any stores in the region, then the region must be empty
  long numStoreFiles = numStoreFilesForRegion(e);
  long memstoreSize = region.getMemstoreSize().get();
  LOG.debug(String.format("Region %s: memstore size = %s, num store files = %s",
                          region.getRegionInfo().getRegionNameAsString(), memstoreSize, numStoreFiles));
  if (memstoreSize == 0 && numStoreFiles == 0) {
    if (compactionState != null) {
      compactionState.persistRegionEmpty(System.currentTimeMillis());
    }
  }
}
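The helper numStoreFilesForRegion is not shown above. One plausible implementation sums store file counts over the region's stores; the iteration below is an assumption, since getStores() returns a Map of stores in 0.98-era HBase and a List in later 1.x releases:

private long numStoreFilesForRegion(ObserverContext<RegionCoprocessorEnvironment> ctx) {
  long numStoreFiles = 0;
  // sum the HFile count across every column family's store
  for (Store store : ctx.getEnvironment().getRegion().getStores()) {
    numStoreFiles += store.getStorefilesCount();
  }
  return numStoreFiles;
}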
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
In the class HBase10CDHTest, the method forEachRegion:
@Override
public <T> Map<byte[], T> forEachRegion(byte[] tableName, Function<HRegion, T> function) {
  MiniHBaseCluster hbaseCluster = getHBaseCluster();
  Map<byte[], T> results = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  // make sure consumer config cache is updated
  for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) {
    List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(TableName.valueOf(tableName));
    for (HRegion region : serverRegions) {
      results.put(region.getRegionName(), function.apply(region));
    }
  }
  return results;
}
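A hypothetical usage example (the table name "myTable" is made up): collect every region's memstore size, keyed by region name, using the same getMemstoreSize() call seen in postFlush above:

Map<byte[], Long> memstoreSizes = forEachRegion(
  Bytes.toBytes("myTable"),
  new Function<HRegion, Long>() {
    @Override
    public Long apply(HRegion region) {
      return region.getMemstoreSize().get();
    }
  });
for (Map.Entry<byte[], Long> entry : memstoreSizes.entrySet()) {
  LOG.info("Region {}: memstore size = {}", Bytes.toStringBinary(entry.getKey()), entry.getValue());
}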