Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata: class IncrementSummingScannerTest, method testFlushAndCompact.
@Test
public void testFlushAndCompact() throws Exception {
  TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "TestFlushAndCompact");
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableId, familyBytes);
  try {
    region.initialize();
    // load an initial set of increments
    long ts = System.currentTimeMillis();
    byte[] row1 = Bytes.toBytes("row1");
    for (int i = 0; i < 50; i++) {
      Put p = new Put(row1);
      p.add(familyBytes, columnBytes, ts, Bytes.toBytes(1L));
      p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
      ts++;
      region.put(p);
    }
    byte[] row2 = Bytes.toBytes("row2");
    ts = System.currentTimeMillis();
    // start with a full put
    Put row2P = new Put(row2);
    row2P.add(familyBytes, columnBytes, ts++, Bytes.toBytes(10L));
    region.put(row2P);
    for (int i = 0; i < 10; i++) {
      Put p = new Put(row2);
      p.add(familyBytes, columnBytes, ts++, Bytes.toBytes(1L));
      p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
      region.put(p);
    }
    // force a region flush
    region.flushcache(true, false);
    region.waitForFlushesAndCompactions();
    Result r1 = region.get(new Get(row1));
    assertNotNull(r1);
    assertFalse(r1.isEmpty());
    // row1 should have a full put aggregating all 50 increments
    Cell r1Cell = r1.getColumnLatestCell(familyBytes, columnBytes);
    assertNotNull(r1Cell);
    assertEquals(50L, Bytes.toLong(r1Cell.getValue()));
    Result r2 = region.get(new Get(row2));
    assertNotNull(r2);
    assertFalse(r2.isEmpty());
    // row2 should have a full put aggregating the prior put + 10 increments
    Cell r2Cell = r2.getColumnLatestCell(familyBytes, columnBytes);
    assertNotNull(r2Cell);
    assertEquals(20L, Bytes.toLong(r2Cell.getValue()));
    // add 30 more increments to row2
    for (int i = 0; i < 30; i++) {
      Put p = new Put(row2);
      p.add(familyBytes, columnBytes, ts++, Bytes.toBytes(1L));
      p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
      region.put(p);
    }
    // row2 should now have a full put aggregating the prior 20 value + 30 increments
    r2 = region.get(new Get(row2));
    assertNotNull(r2);
    assertFalse(r2.isEmpty());
    r2Cell = r2.getColumnLatestCell(familyBytes, columnBytes);
    assertNotNull(r2Cell);
    assertEquals(50L, Bytes.toLong(r2Cell.getValue()));
    // force another region flush
    region.flushcache(true, false);
    region.waitForFlushesAndCompactions();
    // add 100 more increments to row2
    for (int i = 0; i < 100; i++) {
      Put p = new Put(row2);
      p.add(familyBytes, columnBytes, ts++, Bytes.toBytes(1L));
      p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
      region.put(p);
    }
    // row2 should now have a full put aggregating the prior 50 value + 100 increments
    r2 = region.get(new Get(row2));
    assertNotNull(r2);
    assertFalse(r2.isEmpty());
    r2Cell = r2.getColumnLatestCell(familyBytes, columnBytes);
    assertNotNull(r2Cell);
    assertEquals(150L, Bytes.toLong(r2Cell.getValue()));
  } finally {
    region.close();
  }
}
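Each burst of increments above repeats the same delta-write pattern: a cell holding an 8-byte 1L, written with the HBaseTable.DELTA_WRITE attribute set so the summing scanner treats it as a delta rather than a full value. A minimal sketch of a helper that folds this pattern into one call; the name addIncrements is hypothetical (not part of the project), and the sketch assumes the HBase 1.x client API and imports already used by the test class:

// Hypothetical helper: write 'count' delta cells of 1L to 'row', one per timestamp.
// After a flush, the region should expose a single full put whose value grows by
// 'count' (e.g. 10 + 10 = 20, then 20 + 30 = 50, then 50 + 100 = 150 above).
private long addIncrements(HRegion region, byte[] row, byte[] family, byte[] qualifier,
                           long ts, int count) throws IOException {
  for (int i = 0; i < count; i++) {
    Put p = new Put(row);
    p.add(family, qualifier, ts++, Bytes.toBytes(1L));
    p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
    region.put(p);
  }
  // return the next unused timestamp so callers can chain further writes
  return ts;
}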
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata: class IncrementSummingScannerTest, method testWithBatchLimit.
@Test
public void testWithBatchLimit() throws Exception {
  TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "testWithBatchLimit");
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c2");
  HRegion region = createRegion(tableId, familyBytes);
  try {
    region.initialize();
    long now = System.currentTimeMillis();
    // put a non-increment column
    Put p = new Put(Bytes.toBytes("r4"));
    p.add(familyBytes, Bytes.toBytes("c1"), Bytes.toBytes("value1"));
    region.put(p);
    // now put some increment deltas in a column
    p = new Put(Bytes.toBytes("r4"));
    for (int i = 0; i < 3; i++) {
      p.add(familyBytes, columnBytes, now - i, Bytes.toBytes(1L));
    }
    p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
    region.put(p);
    // put some non-increment columns
    p = new Put(Bytes.toBytes("r4"));
    p.add(familyBytes, Bytes.toBytes("c3"), Bytes.toBytes("value3"));
    region.put(p);
    p = new Put(Bytes.toBytes("r4"));
    p.add(familyBytes, Bytes.toBytes("c4"), Bytes.toBytes("value4"));
    region.put(p);
    p = new Put(Bytes.toBytes("r4"));
    p.add(familyBytes, Bytes.toBytes("c5"), Bytes.toBytes("value5"));
    region.put(p);
    // this put will appear as a "total" sum prior to all the delta puts
    p = new Put(Bytes.toBytes("r4"));
    p.add(familyBytes, columnBytes, now - 5, Bytes.toBytes(5L));
    region.put(p);
    Scan scan = new Scan(Bytes.toBytes("r4"));
    scan.setMaxVersions();
    RegionScanner scanner = new IncrementSummingScanner(region, 3, region.getScanner(scan), ScanType.USER_SCAN);
    List<Cell> results = Lists.newArrayList();
    scanner.next(results);
    assertEquals(3, results.size());
    Cell cell = results.get(0);
    assertNotNull(cell);
    assertEquals("value1", Bytes.toString(cell.getValue()));
    cell = results.get(1);
    assertNotNull(cell);
    assertEquals(8L, Bytes.toLong(cell.getValue()));
    cell = results.get(2);
    assertNotNull(cell);
    assertEquals("value3", Bytes.toString(cell.getValue()));
  } finally {
    region.close();
  }
}
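The single next call above returns only the first batch of three cells: c1, the c2 sum (three deltas of 1 plus the earlier full put of 5, hence 8), and c3. A hedged sketch of how a caller might drain the remaining columns c4 and c5; this continuation is ours, not the test's, and it relies on RegionScanner.next(List&lt;Cell&gt;) appending to the list and returning true while more cells remain:

// Hypothetical continuation: keep fetching batches of at most 3 cells
// (the limit passed to IncrementSummingScanner) until the row is exhausted.
List<Cell> remaining = Lists.newArrayList();
while (scanner.next(remaining)) {
  // cells accumulate in 'remaining'; the loop ends when next() reports no more
}
scanner.close();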
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata: class IncrementSummingScannerTest, method testMultiColumnFlushAndCompact.
@Test
public void testMultiColumnFlushAndCompact() throws Exception {
  TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "testMultiColumnFlushAndCompact");
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  byte[] columnBytes2 = Bytes.toBytes("c2");
  HRegion region = createRegion(tableId, familyBytes);
  try {
    region.initialize();
    long now = 1;
    byte[] row1 = Bytes.toBytes("row1");
    byte[] row2 = Bytes.toBytes("row2");
    // initial put to row1,c2
    Put row1P = new Put(row1);
    row1P.add(familyBytes, columnBytes2, now - 1, Bytes.toBytes(5L));
    region.put(row1P);
    // initial put to row2,c
    Put row2P = new Put(row2);
    row2P.add(familyBytes, columnBytes, now - 1, Bytes.toBytes(10L));
    region.put(row2P);
    // generate some increments
    long ts = now;
    for (int i = 0; i < 50; i++) {
      region.put(generateIncrementPut(familyBytes, columnBytes, row1, ts));
      region.put(generateIncrementPut(familyBytes, columnBytes, row2, ts));
      region.put(generateIncrementPut(familyBytes, columnBytes2, row1, ts));
      ts++;
    }
    // the first scanner represents a flush scanner
    RegionScanner scanner = new IncrementSummingScanner(region, -1, region.getScanner(new Scan().setMaxVersions()),
                                                        ScanType.COMPACT_RETAIN_DELETES, now + 15, -1);
    // the second scanner is a user scan, which makes the asserts easier
    scanner = new IncrementSummingScanner(region, -1, scanner, ScanType.USER_SCAN);
    List<Cell> results = Lists.newArrayList();
    assertTrue(scanner.next(results, ScannerContext.newBuilder().setBatchLimit(10).build()));
    assertEquals(2, results.size());
    Cell cell = results.get(0);
    assertNotNull(cell);
    assertEquals("row1", Bytes.toString(cell.getRow()));
    assertEquals("c", Bytes.toString(cell.getQualifier()));
    assertEquals(50, Bytes.toLong(cell.getValue()));
    cell = results.get(1);
    assertNotNull(cell);
    assertEquals("row1", Bytes.toString(cell.getRow()));
    assertEquals("c2", Bytes.toString(cell.getQualifier()));
    assertEquals(55, Bytes.toLong(cell.getValue()));
    results.clear();
    assertFalse(scanner.next(results, ScannerContext.newBuilder().setBatchLimit(10).build()));
    assertEquals(1, results.size());
    cell = results.get(0);
    assertNotNull(cell);
    assertEquals("row2", Bytes.toString(cell.getRow()));
    assertEquals(60, Bytes.toLong(cell.getValue()));
  } finally {
    region.close();
  }
}
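The generateIncrementPut helper is not shown in this excerpt. Based on the delta-write pattern spelled out in testFlushAndCompact above, it presumably looks like the following sketch; the body is an assumption, not the project's verbatim code:

// Assumed shape of the test's generateIncrementPut helper: one delta cell
// of 1L at the given timestamp, flagged as a delta write so the summing
// scanner will aggregate it with the row's other deltas.
private Put generateIncrementPut(byte[] familyBytes, byte[] columnBytes, byte[] row, long ts) {
  Put p = new Put(row);
  p.add(familyBytes, columnBytes, ts, Bytes.toBytes(1L));
  p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
  return p;
}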
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata: class HBase12CDH570Test, method forEachRegion.
@Override
public <T> Map<byte[], T> forEachRegion(byte[] tableName, Function<HRegion, T> function) {
  MiniHBaseCluster hbaseCluster = getHBaseCluster();
  Map<byte[], T> results = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  // make sure consumer config cache is updated
  for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) {
    List<Region> serverRegions = t.getRegionServer().getOnlineRegions(TableName.valueOf(tableName));
    for (Region region : serverRegions) {
      results.put(region.getRegionInfo().getRegionName(), function.apply((HRegion) region));
    }
  }
  return results;
}
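A caller can use forEachRegion to collect a per-region value across every region server in the mini cluster. A minimal usage sketch, assuming Guava's Function (as the signature suggests) and a hypothetical table name:

// Hypothetical usage: gather the memstore size of each online region of a
// table, keyed by region name (the TreeMap inside forEachRegion orders the
// keys with Bytes.BYTES_COMPARATOR).
Map<byte[], Long> memstoreSizes = forEachRegion(Bytes.toBytes("myTable"),
  new Function<HRegion, Long>() {
    @Override
    public Long apply(HRegion region) {
      return region.getMemstoreSize();
    }
  });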