Example 81 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata, from the class IncrementSummingScannerTest, method testIncrementScanningWithBatchAndUVB.

@Test
public void testIncrementScanningWithBatchAndUVB() throws Exception {
    TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "TestIncrementSummingScannerWithUpperVisibilityBound");
    byte[] familyBytes = Bytes.toBytes("f");
    byte[] columnBytes = Bytes.toBytes("c");
    HRegion region = createRegion(tableId, familyBytes);
    try {
        region.initialize();
        long start = 0;
        long now = start;
        long counter1 = 0;
        // adding 5 delta increments
        for (int i = 0; i < 5; i++) {
            Put p = new Put(Bytes.toBytes("r1"), now++);
            p.add(familyBytes, columnBytes, Bytes.toBytes(1L));
            p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
            region.put(p);
            counter1++;
        }
        // verify counts across different combinations of batch limit and upper visibility bounds (uvbs)
        for (int i = 0; i < 7; i++) {
            for (int k = 0; k < 4; k++) {
                long[] uvbs = new long[k];
                for (int l = 0; l < uvbs.length; l++) {
                    uvbs[l] = start + (k + 1) * (l + 1);
                }
                verifyCounts(region, new Scan().setMaxVersions(), new long[] { counter1 }, i > 0 ? i : -1, uvbs);
            }
        }
        // now run the same verification with a second group of increments on row r2
        int counter2 = 0;
        for (int i = 0; i < 5; i++) {
            Put p = new Put(Bytes.toBytes("r2"), now + i);
            p.add(familyBytes, columnBytes, Bytes.toBytes(2L));
            p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
            region.put(p);
            counter2 += 2;
        }
        for (int i = 0; i < 12; i++) {
            for (int k = 0; k < 4; k++) {
                long[] uvbs = new long[k];
                for (int l = 0; l < uvbs.length; l++) {
                    uvbs[l] = start + (k + 1) * (l + 1);
                }
                verifyCounts(region, new Scan().setMaxVersions(), new long[] { counter1, counter2 }, i > 0 ? i : -1, uvbs);
            }
        }
    } finally {
        region.close();
    }
}
Also used: TableId (co.cask.cdap.data2.util.TableId), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), Scan (org.apache.hadoop.hbase.client.Scan), Put (org.apache.hadoop.hbase.client.Put), HBase12CDH570Test (co.cask.cdap.data.hbase.HBase12CDH570Test), Test (org.junit.Test)
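
The verifyCounts helper is not shown in these search results. Below is a minimal sketch of what such a helper could look like, not the actual CDAP implementation: it assumes the helper wraps the region scanner in an IncrementSummingScanner and checks that each row collapses to a single summed cell. The real helper presumably also threads the upper visibility bounds (uvbs) into the scanner through a constructor overload these examples do not show, so the sketch ignores them.

private void verifyCounts(HRegion region, Scan scan, long[] expectedSums, int batchLimit, long[] uvbs) throws Exception {
    // hypothetical sketch, not the real helper; uvbs are ignored here (see note above)
    RegionScanner scanner = new IncrementSummingScanner(region, batchLimit, region.getScanner(scan), ScanType.USER_SCAN);
    try {
        List<Cell> cells = Lists.newArrayList();
        // accumulate cells across batches until the scanner is exhausted
        boolean hasMore;
        do {
            hasMore = scanner.next(cells);
        } while (hasMore);
        // each row holds a single counter column, so summing leaves one cell per row
        assertEquals(expectedSums.length, cells.size());
        for (int i = 0; i < expectedSums.length; i++) {
            assertEquals(expectedSums[i], Bytes.toLong(cells.get(i).getValue()));
        }
    } finally {
        scanner.close();
    }
}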

Example 82 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata, from the class IncrementSummingScannerTest, method testIncrementScanning.

@Test
public void testIncrementScanning() throws Exception {
    TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "TestIncrementSummingScanner");
    byte[] familyBytes = Bytes.toBytes("f");
    byte[] columnBytes = Bytes.toBytes("c");
    HRegion region = createRegion(tableId, familyBytes);
    try {
        region.initialize();
        // test handling of a single increment value alone
        Put p = new Put(Bytes.toBytes("r1"));
        p.add(familyBytes, columnBytes, Bytes.toBytes(3L));
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        Scan scan = new Scan();
        RegionScanner scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        List<Cell> results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(1, results.size());
        Cell cell = results.get(0);
        assertNotNull(cell);
        assertEquals(3L, Bytes.toLong(cell.getValue()));
        // test handling of a single total sum
        p = new Put(Bytes.toBytes("r2"));
        p.add(familyBytes, columnBytes, Bytes.toBytes(5L));
        region.put(p);
        scan = new Scan(Bytes.toBytes("r2"));
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(1, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(5L, Bytes.toLong(cell.getValue()));
        // test handling of multiple increment values
        long now = System.currentTimeMillis();
        p = new Put(Bytes.toBytes("r3"));
        for (int i = 0; i < 5; i++) {
            p.add(familyBytes, columnBytes, now - i, Bytes.toBytes((long) (i + 1)));
        }
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        scan = new Scan(Bytes.toBytes("r3"));
        scan.setMaxVersions();
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(1, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(15L, Bytes.toLong(cell.getValue()));
        // test handling of multiple increment values followed by a total sum, then other increments
        now = System.currentTimeMillis();
        p = new Put(Bytes.toBytes("r4"));
        for (int i = 0; i < 3; i++) {
            p.add(familyBytes, columnBytes, now - i, Bytes.toBytes(1L));
        }
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        // this put will appear as a "total" sum prior to all the delta puts
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, columnBytes, now - 5, Bytes.toBytes(5L));
        region.put(p);
        scan = new Scan(Bytes.toBytes("r4"));
        scan.setMaxVersions();
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(1, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(8L, Bytes.toLong(cell.getValue()));
        // test handling of an increment column followed by a non-increment column
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, Bytes.toBytes("c2"), Bytes.toBytes("value"));
        region.put(p);
        scan = new Scan(Bytes.toBytes("r4"));
        scan.setMaxVersions();
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(2, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(8L, Bytes.toLong(cell.getValue()));
        cell = results.get(1);
        assertNotNull(cell);
        assertEquals("value", Bytes.toString(cell.getValue()));
        // test handling of an increment column followed by a delete
        now = System.currentTimeMillis();
        Delete d = new Delete(Bytes.toBytes("r5"));
        d.deleteColumn(familyBytes, columnBytes, now - 3);
        region.delete(d);
        p = new Put(Bytes.toBytes("r5"));
        for (int i = 2; i >= 0; i--) {
            p.add(familyBytes, columnBytes, now - i, Bytes.toBytes(1L));
        }
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        scan = new Scan(Bytes.toBytes("r5"));
        scan.setMaxVersions();
        scan.setRaw(true);
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.COMPACT_RETAIN_DELETES);
        results = Lists.newArrayList();
        scanner.next(results);
        // this is a raw scan with COMPACT_RETAIN_DELETES, so the delete marker is retained (a plain user scan would not return it)
        assertEquals(2, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(3L, Bytes.toLong(cell.getValue(), IncrementHandlerState.DELTA_MAGIC_PREFIX.length, 8));
        // next cell should be the delete
        cell = results.get(1);
        assertTrue(CellUtil.isDelete(cell));
    } finally {
        region.close();
    }
}
Also used: TableId (co.cask.cdap.data2.util.TableId), Delete (org.apache.hadoop.hbase.client.Delete), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), Put (org.apache.hadoop.hbase.client.Put), HBase10Test (co.cask.cdap.data.hbase.HBase10Test), Test (org.junit.Test)
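
The final assertion above reads the summed value past IncrementHandlerState.DELTA_MAGIC_PREFIX: on a compaction-type scan the partial sum stays delta-encoded, i.e. the magic prefix followed by an 8-byte long. A minimal decoding sketch under that assumed layout (decodeDelta is a hypothetical helper name, not part of the source):

static long decodeDelta(byte[] value) {
    // assumed layout: [DELTA_MAGIC_PREFIX bytes][8-byte long], as the assertion above implies
    int prefixLen = IncrementHandlerState.DELTA_MAGIC_PREFIX.length;
    // sanity check: a delta-encoded value must start with the magic prefix
    if (Bytes.compareTo(value, 0, prefixLen, IncrementHandlerState.DELTA_MAGIC_PREFIX, 0, prefixLen) != 0) {
        throw new IllegalArgumentException("Not a delta-encoded increment value");
    }
    return Bytes.toLong(value, prefixLen, Bytes.SIZEOF_LONG);
}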

Example 83 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata, from the class IncrementSummingScannerTest, method createRegion.

static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId, HColumnDescriptor cfd) throws Exception {
    HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
    HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
    cfd.setMaxVersions(Integer.MAX_VALUE);
    cfd.setKeepDeletedCells(true);
    htd.addFamily(cfd);
    htd.addCoprocessor(IncrementHandler.class.getName());
    HTableDescriptor desc = htd.build();
    String tableName = desc.getNameAsString();
    Path tablePath = new Path("/tmp/" + tableName);
    Path hlogPath = new Path("/tmp/hlog-" + tableName);
    FileSystem fs = FileSystem.get(hConf);
    assertTrue(fs.mkdirs(tablePath));
    WALFactory walFactory = new WALFactory(hConf, null, hlogPath.toString());
    WAL hLog = walFactory.getWAL(new byte[] { 1 });
    HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
    HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
    return new HRegion(regionFS, hLog, hConf, desc, new LocalRegionServerServices(hConf, ServerName.valueOf(InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
Also used: Path (org.apache.hadoop.fs.Path), HTableDescriptorBuilder (co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder), WAL (org.apache.hadoop.hbase.wal.WAL), HBaseTableUtil (co.cask.cdap.data2.util.hbase.HBaseTableUtil), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), HRegionFileSystem (org.apache.hadoop.hbase.regionserver.HRegionFileSystem), FileSystem (org.apache.hadoop.fs.FileSystem), HBaseTableUtilFactory (co.cask.cdap.data2.util.hbase.HBaseTableUtilFactory), WALFactory (org.apache.hadoop.hbase.wal.WALFactory)
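
The tests in the earlier examples call a shorter createRegion(tableId, familyBytes) overload that these results do not show. A hypothetical sketch of such a convenience overload, assuming it supplies default configurations and a single column family before delegating to the method above (the real overload likely takes its configurations from the test base class instead):

static HRegion createRegion(TableId tableId, byte[] family) throws Exception {
    // hypothetical overload: fresh default configs and one column family descriptor
    return createRegion(HBaseConfiguration.create(), CConfiguration.create(), tableId, new HColumnDescriptor(family));
}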

Example 84 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata, from the class IncrementSummingScannerTest, method testIncrementScanningWithBatchAndUVB (the HBase10Test compatibility variant).

The test body is identical, line for line, to Example 81 above; only the compatibility module in the imports differs (HBase10Test instead of HBase12CDH570Test).
Also used: TableId (co.cask.cdap.data2.util.TableId), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), Scan (org.apache.hadoop.hbase.client.Scan), Put (org.apache.hadoop.hbase.client.Put), HBase10Test (co.cask.cdap.data.hbase.HBase10Test), Test (org.junit.Test)

Example 85 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata, from the class HBase11Test, method forEachRegion.

@Override
public <T> Map<byte[], T> forEachRegion(byte[] tableName, Function<HRegion, T> function) {
    MiniHBaseCluster hbaseCluster = getHBaseCluster();
    Map<byte[], T> results = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    // apply the function to every online region of the table, across all region servers
    for (JVMClusterUtil.RegionServerThread t : hbaseCluster.getRegionServerThreads()) {
        List<Region> serverRegions = t.getRegionServer().getOnlineRegions(TableName.valueOf(tableName));
        for (Region region : serverRegions) {
            results.put(region.getRegionInfo().getRegionName(), function.apply((HRegion) region));
        }
    }
    return results;
}
Also used: HRegion (org.apache.hadoop.hbase.regionserver.HRegion), JVMClusterUtil (org.apache.hadoop.hbase.util.JVMClusterUtil), MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster), Region (org.apache.hadoop.hbase.regionserver.Region), TreeMap (java.util.TreeMap)
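
A short usage sketch for forEachRegion, collecting each region's encoded name. The table name "myTable" is illustrative, and Function is assumed to be Guava's com.google.common.base.Function:

// map every online region of the (illustrative) table to its encoded region name,
// keyed by the full region name bytes
Map<byte[], String> encodedNames = forEachRegion(Bytes.toBytes("myTable"), new Function<HRegion, String>() {
    @Override
    public String apply(HRegion region) {
        return region.getRegionInfo().getEncodedName();
    }
});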

Aggregations

HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 148
Test (org.junit.Test): 88
Put (org.apache.hadoop.hbase.client.Put): 56
Path (org.apache.hadoop.fs.Path): 40
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 40
Scan (org.apache.hadoop.hbase.client.Scan): 37
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 36
Cell (org.apache.hadoop.hbase.Cell): 35
TableId (co.cask.cdap.data2.util.TableId): 32
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 28
IOException (java.io.IOException): 26
WAL (org.apache.hadoop.hbase.wal.WAL): 25
FileSystem (org.apache.hadoop.fs.FileSystem): 24
ArrayList (java.util.ArrayList): 22
TableName (org.apache.hadoop.hbase.TableName): 22
Configuration (org.apache.hadoop.conf.Configuration): 21
Result (org.apache.hadoop.hbase.client.Result): 21
Region (org.apache.hadoop.hbase.regionserver.Region): 21
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 19
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 19