
Example 66 with RegionScanner

Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project cdap by caskdata.

From the class IncrementSummingScannerTest, method testMultiColumnFlushAndCompact:

@Test
public void testMultiColumnFlushAndCompact() throws Exception {
    TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "testMultiColumnFlushAndCompact");
    byte[] familyBytes = Bytes.toBytes("f");
    byte[] columnBytes = Bytes.toBytes("c");
    byte[] columnBytes2 = Bytes.toBytes("c2");
    HRegion region = createRegion(tableId, familyBytes);
    try {
        region.initialize();
        long now = 1;
        byte[] row1 = Bytes.toBytes("row1");
        byte[] row2 = Bytes.toBytes("row2");
        // Initial put to row1,c2
        Put row1P = new Put(row1);
        row1P.add(familyBytes, columnBytes2, now - 1, Bytes.toBytes(5L));
        region.put(row1P);
        // Initial put to row2,c
        Put row2P = new Put(row2);
        row2P.add(familyBytes, columnBytes, now - 1, Bytes.toBytes(10L));
        region.put(row2P);
        // Generate some increments
        long ts = now;
        for (int i = 0; i < 50; i++) {
            region.put(generateIncrementPut(familyBytes, columnBytes, row1, ts));
            region.put(generateIncrementPut(familyBytes, columnBytes, row2, ts));
            region.put(generateIncrementPut(familyBytes, columnBytes2, row1, ts));
            ts++;
        }
        // First scanner represents the flush scanner, bounded by an upper visibility limit of now + 15
        RegionScanner scanner = new IncrementSummingScanner(region, -1, region.getScanner(new Scan().setMaxVersions()), ScanType.COMPACT_RETAIN_DELETES, now + 15, -1);
        // Second scanner is a user scan layered on top, which keeps the assertions simple
        scanner = new IncrementSummingScanner(region, -1, scanner, ScanType.USER_SCAN);
        List<Cell> results = Lists.newArrayList();
        assertTrue(scanner.next(results, ScannerContext.newBuilder().setBatchLimit(10).build()));
        assertEquals(2, results.size());
        Cell cell = results.get(0);
        assertNotNull(cell);
        assertEquals("row1", Bytes.toString(cell.getRow()));
        assertEquals("c", Bytes.toString(cell.getQualifier()));
        assertEquals(50, Bytes.toLong(cell.getValue()));
        cell = results.get(1);
        assertNotNull(cell);
        assertEquals("row1", Bytes.toString(cell.getRow()));
        assertEquals("c2", Bytes.toString(cell.getQualifier()));
        assertEquals(55, Bytes.toLong(cell.getValue()));
        results.clear();
        assertFalse(scanner.next(results, ScannerContext.newBuilder().setBatchLimit(10).build()));
        assertEquals(1, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals("row2", Bytes.toString(cell.getRow()));
        assertEquals(60, Bytes.toLong(cell.getValue()));
    } finally {
        region.close();
    }
}
Also used: TableId (co.cask.cdap.data2.util.TableId), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), Put (org.apache.hadoop.hbase.client.Put), HBase10CDH550Test (co.cask.cdap.data.hbase.HBase10CDH550Test), Test (org.junit.Test)
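The increments above are written by a generateIncrementPut helper that is not shown in this snippet. Below is a minimal sketch of what such a helper might look like, assuming each delta put carries a value of 1 and is flagged with a marker attribute; the attribute name "cdap.delta.write" is an illustrative assumption, not the actual cdap constant.

private Put generateIncrementPut(byte[] familyBytes, byte[] columnBytes, byte[] row, long ts) {
    Put p = new Put(row);
    // each increment carries a delta of 1, encoded as a long
    p.add(familyBytes, columnBytes, ts, Bytes.toBytes(1L));
    // hypothetical marker attribute telling IncrementSummingScanner to treat this cell as a delta write
    p.setAttribute("cdap.delta.write", Bytes.toBytes(true));
    return p;
}

With a delta of 1 per put, the 50 increments account for the expected sums in the assertions: 50 for row1/c, 55 (5 + 50) for row1/c2, and 60 (10 + 50) for row2/c.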

Example 67 with RegionScanner

Use of org.apache.hadoop.hbase.regionserver.RegionScanner in project cdap by caskdata.

From the class IncrementSummingScannerTest, method verifyCounts:

private void verifyCounts(HRegion region, Scan scan, long[] counts, int batch, long[] upperVisBound) throws Exception {
    // The idea is to chain IncrementSummingScanners: the first few respect the upperVisBound and may produce
    // multiple cells for a single value, which is what happens during a flush or compaction. The last one
    // mimics a user scan over the flushed or compacted data and should merge all delta increments appropriately.
    RegionScanner scanner = region.getScanner(scan);
    for (int i = 0; i < upperVisBound.length; i++) {
        scanner = new IncrementSummingScanner(region, batch, scanner, ScanType.COMPACT_RETAIN_DELETES, upperVisBound[i], -1);
    }
    scanner = new IncrementSummingScanner(region, batch, scanner, ScanType.USER_SCAN);
    // initialize to false so the final assertFalse still passes if the loop executes zero times
    boolean hasMore = counts.length > 0;
    for (long count : counts) {
        List<Cell> results = Lists.newArrayList();
        hasMore = scanner.next(results);
        assertEquals(1, results.size());
        Cell cell = results.get(0);
        assertNotNull(cell);
        assertEquals(count, Bytes.toLong(cell.getValue()));
    }
    assertFalse(hasMore);
}
Also used: RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), Cell (org.apache.hadoop.hbase.Cell)
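For context, here is a minimal sketch of how a test might drive verifyCounts, chaining one compaction-style pass with an upper visibility bound ahead of the user scan. The helpers createRegion and generateIncrementPut are the ones referenced in the examples above; the table name, timestamps, and bound used here are illustrative assumptions.

HRegion region = createRegion(TableId.from(NamespaceId.DEFAULT.getNamespace(), "verifyCountsDemo"), Bytes.toBytes("f"));
try {
    region.initialize();
    byte[] row = Bytes.toBytes("row");
    // write three delta increments of 1 against a single row and column
    for (long ts = 1; ts <= 3; ts++) {
        region.put(generateIncrementPut(Bytes.toBytes("f"), Bytes.toBytes("c"), row, ts));
    }
    // one compaction-style pass bounded at ts <= 2, then a user scan; the merged total should still be 3
    verifyCounts(region, new Scan().setMaxVersions(), new long[] { 3L }, -1, new long[] { 2L });
} finally {
    region.close();
}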

Aggregations

RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 67
Scan (org.apache.hadoop.hbase.client.Scan): 52
Cell (org.apache.hadoop.hbase.Cell): 45
Test (org.junit.Test): 28
Put (org.apache.hadoop.hbase.client.Put): 27
ArrayList (java.util.ArrayList): 20
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 20
TableId (co.cask.cdap.data2.util.TableId): 17
Region (org.apache.hadoop.hbase.regionserver.Region): 14
List (java.util.List): 11
Delete (org.apache.hadoop.hbase.client.Delete): 11
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 10
KeyValue (org.apache.hadoop.hbase.KeyValue): 8
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 7
Configuration (org.apache.hadoop.conf.Configuration): 6
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 6
FirstKeyOnlyFilter (org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter): 6
ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference): 6
IOException (java.io.IOException): 5
PTable (org.apache.phoenix.schema.PTable): 4