Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
The class MessageTableRegionObserver, method postFlush.
@Override
public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
  // Record whether the region is empty after a flush
  HRegion region = e.getEnvironment().getRegion();
  // After a flush, if the memstore size is zero and there are no store files for any stores in the region
  // then the region must be empty
  long numStoreFiles = numStoreFilesForRegion(e);
  long memstoreSize = region.getMemstoreSize().get();
  LOG.debug(String.format("Region %s: memstore size = %s, num store files = %s",
                          region.getRegionInfo().getRegionNameAsString(), memstoreSize, numStoreFiles));
  if (memstoreSize == 0 && numStoreFiles == 0) {
    if (compactionState != null) {
      compactionState.persistRegionEmpty(System.currentTimeMillis());
    }
  }
}
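The helper numStoreFilesForRegion is referenced above but not shown. A minimal sketch, assuming an HBase 1.1-style API where the region exposes its Store objects and each Store reports its store file count; the actual CDAP helper may differ:

private long numStoreFilesForRegion(ObserverContext<RegionCoprocessorEnvironment> ctx) {
  // Sketch only: assumes Region#getStores() returns the region's stores (HBase 1.1+)
  long numStoreFiles = 0;
  for (Store store : ctx.getEnvironment().getRegion().getStores()) {
    numStoreFiles += store.getStorefilesCount();
  }
  return numStoreFiles;
}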
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
The class HBase10CDH550Test, method forEachRegion.
@Override
public <T> Map<byte[], T> forEachRegion(byte[] tableName, Function<HRegion, T> function) {
  MiniHBaseCluster hbaseCluster = getHBaseCluster();
  Map<byte[], T> results = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  // apply the function to every online region of the table, across all region servers
  for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) {
    List<HRegion> serverRegions = t.getRegionServer().getOnlineRegions(TableName.valueOf(tableName));
    for (HRegion region : serverRegions) {
      results.put(region.getRegionName(), function.apply(region));
    }
  }
  return results;
}
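Hypothetical usage of forEachRegion, collecting each online region's memstore size for a table; the table name is illustrative, and the anonymous Function matches the Guava-style signature above:

Map<byte[], Long> memstoreSizes = forEachRegion(
  Bytes.toBytes("myTable"),
  new Function<HRegion, Long>() {
    @Override
    public Long apply(HRegion region) {
      // same accessor the postFlush hooks above use
      return region.getMemstoreSize().get();
    }
  });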
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
The class IncrementSummingScannerTest, method createRegion.
static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId,
                            HColumnDescriptor cfd) throws Exception {
  HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
  HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
  cfd.setMaxVersions(Integer.MAX_VALUE);
  cfd.setKeepDeletedCells(true);
  htd.addFamily(cfd);
  htd.addCoprocessor(IncrementHandler.class.getName());
  HTableDescriptor desc = htd.build();
  String tableName = desc.getNameAsString();
  Path tablePath = new Path("/tmp/" + tableName);
  Path hlogPath = new Path("/tmp/hlog-" + tableName);
  FileSystem fs = FileSystem.get(hConf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(hConf, null, hlogPath.toString());
  WAL hLog = walFactory.getWAL(new byte[] { 1 });
  HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, hConf, desc,
                     new LocalRegionServerServices(hConf, ServerName.valueOf(
                       InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
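Because createRegion writes the table and WAL directories under /tmp, a teardown should remove them. A minimal sketch under that assumption; the helper name is hypothetical and the actual test cleanup may differ:

static void deleteRegionDirs(Configuration hConf, String tableName) throws IOException {
  // Hypothetical teardown matching the paths created in createRegion above
  FileSystem fs = FileSystem.get(hConf);
  fs.delete(new Path("/tmp/" + tableName), true);      // table dir
  fs.delete(new Path("/tmp/hlog-" + tableName), true); // WAL dir
}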
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
The class IncrementSummingScannerTest, method testIncrementScanning.
@Test
public void testIncrementScanning() throws Exception {
  TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "TestIncrementSummingScanner");
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableId, familyBytes);
  try {
    region.initialize();
    // test handling of a single increment value alone
    Put p = new Put(Bytes.toBytes("r1"));
    p.add(familyBytes, columnBytes, Bytes.toBytes(3L));
    p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
    region.put(p);
    Scan scan = new Scan();
    RegionScanner scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
    List<Cell> results = Lists.newArrayList();
    scanner.next(results);
    assertEquals(1, results.size());
    Cell cell = results.get(0);
    assertNotNull(cell);
    assertEquals(3L, Bytes.toLong(cell.getValue()));
    // test handling of a single total sum
    p = new Put(Bytes.toBytes("r2"));
    p.add(familyBytes, columnBytes, Bytes.toBytes(5L));
    region.put(p);
    scan = new Scan(Bytes.toBytes("r2"));
    scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
    results = Lists.newArrayList();
    scanner.next(results);
    assertEquals(1, results.size());
    cell = results.get(0);
    assertNotNull(cell);
    assertEquals(5L, Bytes.toLong(cell.getValue()));
    // test handling of multiple increment values
    long now = System.currentTimeMillis();
    p = new Put(Bytes.toBytes("r3"));
    for (int i = 0; i < 5; i++) {
      p.add(familyBytes, columnBytes, now - i, Bytes.toBytes((long) (i + 1)));
    }
    p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
    region.put(p);
    scan = new Scan(Bytes.toBytes("r3"));
    scan.setMaxVersions();
    scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
    results = Lists.newArrayList();
    scanner.next(results);
    assertEquals(1, results.size());
    cell = results.get(0);
    assertNotNull(cell);
    assertEquals(15L, Bytes.toLong(cell.getValue()));
    // test handling of multiple increment values followed by a total sum, then other increments
    now = System.currentTimeMillis();
    p = new Put(Bytes.toBytes("r4"));
    for (int i = 0; i < 3; i++) {
      p.add(familyBytes, columnBytes, now - i, Bytes.toBytes(1L));
    }
    p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
    region.put(p);
    // this put will appear as a "total" sum prior to all the delta puts
    p = new Put(Bytes.toBytes("r4"));
    p.add(familyBytes, columnBytes, now - 5, Bytes.toBytes(5L));
    region.put(p);
    scan = new Scan(Bytes.toBytes("r4"));
    scan.setMaxVersions();
    scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
    results = Lists.newArrayList();
    scanner.next(results);
    assertEquals(1, results.size());
    cell = results.get(0);
    assertNotNull(cell);
    assertEquals(8L, Bytes.toLong(cell.getValue()));
    // test handling of an increment column followed by a non-increment column
    p = new Put(Bytes.toBytes("r4"));
    p.add(familyBytes, Bytes.toBytes("c2"), Bytes.toBytes("value"));
    region.put(p);
    scan = new Scan(Bytes.toBytes("r4"));
    scan.setMaxVersions();
    scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
    results = Lists.newArrayList();
    scanner.next(results);
    assertEquals(2, results.size());
    cell = results.get(0);
    assertNotNull(cell);
    assertEquals(8L, Bytes.toLong(cell.getValue()));
    cell = results.get(1);
    assertNotNull(cell);
    assertEquals("value", Bytes.toString(cell.getValue()));
    // test handling of an increment column followed by a delete
    now = System.currentTimeMillis();
    Delete d = new Delete(Bytes.toBytes("r5"));
    d.deleteColumn(familyBytes, columnBytes, now - 3);
    region.delete(d);
    p = new Put(Bytes.toBytes("r5"));
    for (int i = 2; i >= 0; i--) {
      p.add(familyBytes, columnBytes, now - i, Bytes.toBytes(1L));
    }
    p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
    region.put(p);
    scan = new Scan(Bytes.toBytes("r5"));
    scan.setMaxVersions();
    scan.setRaw(true);
    scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.COMPACT_RETAIN_DELETES);
    results = Lists.newArrayList();
    scanner.next(results);
    // a user scan would skip the delete marker; the raw scan with COMPACT_RETAIN_DELETES returns it
    assertEquals(2, results.size());
    cell = results.get(0);
    assertNotNull(cell);
    assertEquals(3L, Bytes.toLong(cell.getValue(), IncrementHandlerState.DELTA_MAGIC_PREFIX.length, 8));
    // next cell should be the delete
    cell = results.get(1);
    assertTrue(CellUtil.isDelete(cell));
  } finally {
    region.close();
  }
}
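The final assertion reads the summed value past IncrementHandlerState.DELTA_MAGIC_PREFIX because, under COMPACT_RETAIN_DELETES, the result is still delta-encoded. The same decoding pattern in isolation (a sketch of the pattern used above, not a separate API):

// Delta-encoded cells carry DELTA_MAGIC_PREFIX followed by an 8-byte long value
byte[] value = cell.getValue();
long delta = Bytes.toLong(value, IncrementHandlerState.DELTA_MAGIC_PREFIX.length, Bytes.SIZEOF_LONG);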
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
The class HBaseQueueRegionObserver, method postFlush.
@Override
public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
  // Record whether the region is empty after a flush
  HRegion region = e.getEnvironment().getRegion();
  // After a flush, if the memstore size is zero and there are no store files for any stores in the region
  // then the region must be empty
  long numStoreFiles = numStoreFilesForRegion(e);
  long memstoreSize = region.getMemstoreSize().get();
  LOG.debug(String.format("Region %s: memstore size = %s, num store files = %s",
                          region.getRegionInfo().getRegionNameAsString(), memstoreSize, numStoreFiles));
  if (memstoreSize == 0 && numStoreFiles == 0) {
    if (compactionState != null) {
      compactionState.persistRegionEmpty(System.currentTimeMillis());
    }
  }
}
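For completeness, a hypothetical sketch of attaching the observer to a table descriptor, mirroring the addCoprocessor call in createRegion above; in practice CDAP wires this up through its own table utilities, and the table name here is illustrative:

HTableDescriptor queueTableDesc = new HTableDescriptor(TableName.valueOf("queueTable"));
// registers the coprocessor the same way createRegion registers IncrementHandler
queueTableDesc.addCoprocessor(HBaseQueueRegionObserver.class.getName());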