Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
The class IncrementSummingScannerTest, method testIncrementScanningWithBatchAndUVB.
@Test
public void testIncrementScanningWithBatchAndUVB() throws Exception {
  TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(),
                                 "TestIncrementSummingScannerWithUpperVisibilityBound");
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableId, familyBytes);
  try {
    region.initialize();
    long start = 0;
    long now = start;
    long counter1 = 0;
    // adding 5 delta increments
    for (int i = 0; i < 5; i++) {
      Put p = new Put(Bytes.toBytes("r1"), now++);
      p.add(familyBytes, columnBytes, Bytes.toBytes(1L));
      p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
      region.put(p);
      counter1++;
    }
    // Also: we want different combinations of batch limit & uvbs
    for (int i = 0; i < 7; i++) {
      for (int k = 0; k < 4; k++) {
        long[] uvbs = new long[k];
        for (int l = 0; l < uvbs.length; l++) {
          uvbs[l] = start + (k + 1) * (l + 1);
        }
        verifyCounts(region, new Scan().setMaxVersions(), new long[] { counter1 }, i > 0 ? i : -1, uvbs);
      }
    }
    // Now test same with two groups of increments
    int counter2 = 0;
    for (int i = 0; i < 5; i++) {
      Put p = new Put(Bytes.toBytes("r2"), now + i);
      p.add(familyBytes, columnBytes, Bytes.toBytes(2L));
      p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
      region.put(p);
      counter2 += 2;
    }
    for (int i = 0; i < 12; i++) {
      for (int k = 0; k < 4; k++) {
        long[] uvbs = new long[k];
        for (int l = 0; l < uvbs.length; l++) {
          uvbs[l] = start + (k + 1) * (l + 1);
        }
        verifyCounts(region, new Scan().setMaxVersions(), new long[] { counter1, counter2 }, i > 0 ? i : -1, uvbs);
      }
    }
  } finally {
    region.close();
  }
}
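The verifyCounts helper is not shown on this page. A minimal sketch of what such a check could look like, assuming it wraps the region scanner in an IncrementSummingScanner with the given batch limit and compares each row's summed value against the expected count; the actual CDAP helper also threads the upper visibility bounds (uvbs) through, which is omitted here:

// Hypothetical, simplified version of the verifyCounts helper used above.
// It ignores the uvbs argument and only exercises the batch limit.
private static void verifyCounts(HRegion region, Scan scan, long[] expectedCounts, int batch,
                                 long[] uvbs) throws Exception {
  RegionScanner scanner =
    new IncrementSummingScanner(region, batch, region.getScanner(scan), ScanType.USER_SCAN);
  try {
    for (long expected : expectedCounts) {
      List<Cell> results = Lists.newArrayList();
      scanner.next(results);
      // each row should collapse to a single cell holding the summed increment value
      assertEquals(1, results.size());
      assertEquals(expected, Bytes.toLong(results.get(0).getValue()));
    }
  } finally {
    scanner.close();
  }
}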
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
The class IncrementSummingScannerTest, method testMultiColumnFlushAndCompact.
@Test
public void testMultiColumnFlushAndCompact() throws Exception {
  TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "testMultiColumnFlushAndCompact");
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  byte[] columnBytes2 = Bytes.toBytes("c2");
  HRegion region = createRegion(tableId, familyBytes);
  try {
    region.initialize();
    long now = 1;
    byte[] row1 = Bytes.toBytes("row1");
    byte[] row2 = Bytes.toBytes("row2");
    // Initial put to row1,c2
    Put row1P = new Put(row1);
    row1P.add(familyBytes, columnBytes2, now - 1, Bytes.toBytes(5L));
    region.put(row1P);
    // Initial put to row2,c
    Put row2P = new Put(row2);
    row2P.add(familyBytes, columnBytes, now - 1, Bytes.toBytes(10L));
    region.put(row2P);
    // Generate some increments
    long ts = now;
    for (int i = 0; i < 50; i++) {
      region.put(generateIncrementPut(familyBytes, columnBytes, row1, ts));
      region.put(generateIncrementPut(familyBytes, columnBytes, row2, ts));
      region.put(generateIncrementPut(familyBytes, columnBytes2, row1, ts));
      ts++;
    }
    // First scanner represents flush scanner
    RegionScanner scanner =
      new IncrementSummingScanner(region, -1, region.getScanner(new Scan().setMaxVersions()),
                                  ScanType.COMPACT_RETAIN_DELETES, now + 15, -1);
    // Second scanner is a user scan, this is to help in easy asserts
    scanner = new IncrementSummingScanner(region, -1, scanner, ScanType.USER_SCAN);
    List<Cell> results = Lists.newArrayList();
    assertTrue(scanner.next(results, 10));
    assertEquals(2, results.size());
    Cell cell = results.get(0);
    assertNotNull(cell);
    assertEquals("row1", Bytes.toString(cell.getRow()));
    assertEquals("c", Bytes.toString(cell.getQualifier()));
    assertEquals(50, Bytes.toLong(cell.getValue()));
    cell = results.get(1);
    assertNotNull(cell);
    assertEquals("row1", Bytes.toString(cell.getRow()));
    assertEquals("c2", Bytes.toString(cell.getQualifier()));
    assertEquals(55, Bytes.toLong(cell.getValue()));
    results.clear();
    assertFalse(scanner.next(results, 10));
    assertEquals(1, results.size());
    cell = results.get(0);
    assertNotNull(cell);
    assertEquals("row2", Bytes.toString(cell.getRow()));
    assertEquals(60, Bytes.toLong(cell.getValue()));
  } finally {
    region.close();
  }
}
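The generateIncrementPut helper is also not listed here. Based on the delta-write pattern built inline in the first test above, and on the expected sums (50 after 50 increments), it presumably produces a Put at the given timestamp carrying a 1L delta; a hedged sketch:

// Hypothetical helper, mirroring the delta-write Puts built inline in the first test.
// The 1L delta value is an assumption consistent with the asserted totals.
private static Put generateIncrementPut(byte[] familyBytes, byte[] columnBytes, byte[] row, long ts) {
  Put p = new Put(row, ts);
  p.add(familyBytes, columnBytes, Bytes.toBytes(1L));
  p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
  return p;
}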
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
The class HBaseQueueRegionObserver, method postFlush.
@Override
public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
  // Record whether the region is empty after a flush
  HRegion region = e.getEnvironment().getRegion();
  // After a flush, if the memstore size is zero and there are no store files for any stores in the region,
  // then the region must be empty
  long numStoreFiles = numStoreFilesForRegion(e);
  long memstoreSize = region.getMemstoreSize().get();
  LOG.debug(String.format("Region %s: memstore size = %s, num store files = %s",
                          region.getRegionInfo().getRegionNameAsString(), memstoreSize, numStoreFiles));
  if (memstoreSize == 0 && numStoreFiles == 0) {
    if (compactionState != null) {
      compactionState.persistRegionEmpty(System.currentTimeMillis());
    }
  }
}
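The numStoreFilesForRegion helper is defined elsewhere in this coprocessor. A plausible sketch, assuming it simply sums the store file counts across all stores of the flushed region (method names follow the HBase 1.x Store API and are an assumption here):

// Hypothetical sketch: total number of store files across all column families of the region.
private long numStoreFilesForRegion(ObserverContext<RegionCoprocessorEnvironment> ctx) {
  long numStoreFiles = 0;
  for (Store store : ctx.getEnvironment().getRegion().getStores()) {
    numStoreFiles += store.getStorefilesCount();
  }
  return numStoreFiles;
}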
Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.
The class IncrementSummingScannerTest, method createRegion.
static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId,
                            HColumnDescriptor cfd) throws Exception {
  HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
  HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
  cfd.setMaxVersions(Integer.MAX_VALUE);
  cfd.setKeepDeletedCells(true);
  htd.addFamily(cfd);
  htd.addCoprocessor(IncrementHandler.class.getName());
  HTableDescriptor desc = htd.build();
  String tableName = desc.getNameAsString();
  Path tablePath = new Path("/tmp/" + tableName);
  Path hlogPath = new Path("/tmp/hlog-" + tableName);
  FileSystem fs = FileSystem.get(hConf);
  assertTrue(fs.mkdirs(tablePath));
  WALFactory walFactory = new WALFactory(hConf, null, hlogPath.toString());
  WAL hLog = walFactory.getWAL(new byte[] { 1 });
  HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, hConf, desc,
                     new LocalRegionServerServices(hConf, ServerName.valueOf(
                       InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
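The tests above call a two-argument createRegion(tableId, familyBytes). That overload is not shown on this page; presumably it builds an HColumnDescriptor from the family bytes and delegates to the full method with the test's Hadoop and CDAP configurations. A sketch, where the conf and cConf fields are assumed test-level members:

// Hypothetical convenience overload, as called from the tests above.
// conf (Configuration) and cConf (CConfiguration) are assumed to be fields of the test class.
static HRegion createRegion(TableId tableId, byte[] familyBytes) throws Exception {
  return createRegion(conf, cConf, tableId, new HColumnDescriptor(familyBytes));
}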