Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project cdap by caskdata.
From the class IncrementSummingScannerTest, method testMultiColumnFlushAndCompact.
@Test
public void testMultiColumnFlushAndCompact() throws Exception {
  TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "testMultiColumnFlushAndCompact");
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  byte[] columnBytes2 = Bytes.toBytes("c2");
  HRegion region = createRegion(tableId, familyBytes);
  try {
    region.initialize();
    long now = 1;
    byte[] row1 = Bytes.toBytes("row1");
    byte[] row2 = Bytes.toBytes("row2");
    // Initial put to row1,c2
    Put row1P = new Put(row1);
    row1P.add(familyBytes, columnBytes2, now - 1, Bytes.toBytes(5L));
    region.put(row1P);
    // Initial put to row2,c
    Put row2P = new Put(row2);
    row2P.add(familyBytes, columnBytes, now - 1, Bytes.toBytes(10L));
    region.put(row2P);
    // Generate some increments
    long ts = now;
    for (int i = 0; i < 50; i++) {
      region.put(generateIncrementPut(familyBytes, columnBytes, row1, ts));
      region.put(generateIncrementPut(familyBytes, columnBytes, row2, ts));
      region.put(generateIncrementPut(familyBytes, columnBytes2, row1, ts));
      ts++;
    }
    // The first scanner represents the flush scanner
    RegionScanner scanner =
      new IncrementSummingScanner(region, -1, region.getScanner(new Scan().setMaxVersions()),
                                  ScanType.COMPACT_RETAIN_DELETES, now + 15, -1);
    // The second scanner is a user scan, which makes the asserts easier
    scanner = new IncrementSummingScanner(region, -1, scanner, ScanType.USER_SCAN);
    List<Cell> results = Lists.newArrayList();
    assertTrue(scanner.next(results, ScannerContext.newBuilder().setBatchLimit(10).build()));
    assertEquals(2, results.size());
    Cell cell = results.get(0);
    assertNotNull(cell);
    assertEquals("row1", Bytes.toString(cell.getRow()));
    assertEquals("c", Bytes.toString(cell.getQualifier()));
    assertEquals(50, Bytes.toLong(cell.getValue()));
    cell = results.get(1);
    assertNotNull(cell);
    assertEquals("row1", Bytes.toString(cell.getRow()));
    assertEquals("c2", Bytes.toString(cell.getQualifier()));
    assertEquals(55, Bytes.toLong(cell.getValue()));
    results.clear();
    assertFalse(scanner.next(results, ScannerContext.newBuilder().setBatchLimit(10).build()));
    assertEquals(1, results.size());
    cell = results.get(0);
    assertNotNull(cell);
    assertEquals("row2", Bytes.toString(cell.getRow()));
    assertEquals(60, Bytes.toLong(cell.getValue()));
  } finally {
    region.close();
  }
}
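The helper generateIncrementPut is referenced above but not included in this excerpt. A minimal sketch of what it likely does, assuming each call writes a delta of 1 that is flagged as an increment so IncrementSummingScanner sums it instead of treating it as a full value; the attribute key and value used to flag the delta are assumptions, not taken from the original test:

// Hypothetical sketch of the generateIncrementPut helper referenced above.
private Put generateIncrementPut(byte[] familyBytes, byte[] columnBytes, byte[] row, long ts) {
  Put p = new Put(row);
  // Each call contributes a delta of 1 at the given timestamp.
  p.add(familyBytes, columnBytes, ts, Bytes.toBytes(1L));
  // Mark the cell as a delta write so IncrementSummingScanner merges it rather than
  // overwriting; the attribute name and value here are assumptions.
  p.setAttribute("d", new byte[] { 1 });
  return p;
}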
Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project cdap by caskdata.
From the class IncrementSummingScannerTest, method verifyCounts.
private void verifyCounts(HRegion region, Scan scan, long[] counts, int batch, long[] upperVisBound) throws Exception {
  // The idea is to chain IncrementSummingScanners: the first few respect the upperVisBound and may produce
  // multiple cells for a single value, which is what happens during a flush or compaction. The last one mimics
  // a user scan over the flushed or compacted data: it should merge all delta increments appropriately.
  RegionScanner scanner = region.getScanner(scan);
  for (int i = 0; i < upperVisBound.length; i++) {
    scanner = new IncrementSummingScanner(region, batch, scanner, ScanType.COMPACT_RETAIN_DELETES, upperVisBound[i], -1);
  }
  scanner = new IncrementSummingScanner(region, batch, scanner, ScanType.USER_SCAN);
  // Initialize to false in case the loop executes zero times
  boolean hasMore = counts.length > 0;
  for (long count : counts) {
    List<Cell> results = Lists.newArrayList();
    hasMore = scanner.next(results);
    assertEquals(1, results.size());
    Cell cell = results.get(0);
    assertNotNull(cell);
    assertEquals(count, Bytes.toLong(cell.getValue()));
  }
  assertFalse(hasMore);
}
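For orientation, a call to verifyCounts could look like the following; the scan setup, expected totals, and visibility bound are illustrative values, not taken from the original test:

// Illustrative usage only: expect one merged value per row after chaining a single
// compaction-style pass (bounded at ts = 100) and the user scan inside verifyCounts.
Scan scan = new Scan().setMaxVersions();
scan.addColumn(Bytes.toBytes("f"), Bytes.toBytes("c"));
verifyCounts(region, scan, new long[] { 50, 60 }, -1, new long[] { 100 });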