Example 51 with HRegion

use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

In class IncrementSummingScannerTest, method testIncrementScanningWithBatchAndUVB.

@Test
public void testIncrementScanningWithBatchAndUVB() throws Exception {
    TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "TestIncrementSummingScannerWithUpperVisibilityBound");
    byte[] familyBytes = Bytes.toBytes("f");
    byte[] columnBytes = Bytes.toBytes("c");
    HRegion region = createRegion(tableId, familyBytes);
    try {
        region.initialize();
        long start = 0;
        long now = start;
        long counter1 = 0;
        // adding 5 delta increments
        for (int i = 0; i < 5; i++) {
            Put p = new Put(Bytes.toBytes("r1"), now++);
            p.add(familyBytes, columnBytes, Bytes.toBytes(1L));
            p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
            region.put(p);
            counter1++;
        }
        // Verify counts under different combinations of batch limit and upper visibility bounds (uvbs)
        for (int i = 0; i < 7; i++) {
            for (int k = 0; k < 4; k++) {
                long[] uvbs = new long[k];
                for (int l = 0; l < uvbs.length; l++) {
                    uvbs[l] = start + (k + 1) * (l + 1);
                }
                verifyCounts(region, new Scan().setMaxVersions(), new long[] { counter1 }, i > 0 ? i : -1, uvbs);
            }
        }
        // Now test same with two groups of increments
        int counter2 = 0;
        for (int i = 0; i < 5; i++) {
            Put p = new Put(Bytes.toBytes("r2"), now + i);
            p.add(familyBytes, columnBytes, Bytes.toBytes(2L));
            p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
            region.put(p);
            counter2 += 2;
        }
        for (int i = 0; i < 12; i++) {
            for (int k = 0; k < 4; k++) {
                long[] uvbs = new long[k];
                for (int l = 0; l < uvbs.length; l++) {
                    uvbs[l] = start + (k + 1) * (l + 1);
                }
                verifyCounts(region, new Scan().setMaxVersions(), new long[] { counter1, counter2 }, i > 0 ? i : -1, uvbs);
            }
        }
    } finally {
        region.close();
    }
}
Also used : TableId(co.cask.cdap.data2.util.TableId) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Scan(org.apache.hadoop.hbase.client.Scan) Put(org.apache.hadoop.hbase.client.Put) HBase10CDHTest(co.cask.cdap.data.hbase.HBase10CDHTest) Test(org.junit.Test)
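
Note: verifyCounts is a private helper of IncrementSummingScannerTest that is not shown on this page. Below is a minimal sketch of what such a helper might look like; the signature mirrors the call sites above, but the body, and the idea of chaining one IncrementSummingScanner per upper visibility bound before a final user scan, are assumptions rather than the project's actual implementation. It also assumes the test class's existing imports (Cell, Lists, Bytes, ScanType, JUnit asserts) plus java.util.LinkedHashMap and java.util.Map.

// Sketch only: chain one compaction-style pass per upper visibility bound, then a user scan,
// and check that each row sums to the expected counter.
private static void verifyCounts(HRegion region, Scan scan, long[] expectedCounts,
                                 int batchLimit, long[] upperVisBounds) throws Exception {
    RegionScanner scanner = region.getScanner(scan);
    for (long uvb : upperVisBounds) {
        scanner = new IncrementSummingScanner(region, batchLimit, scanner,
                                              ScanType.COMPACT_RETAIN_DELETES, uvb, -1);
    }
    scanner = new IncrementSummingScanner(region, batchLimit, scanner, ScanType.USER_SCAN);
    // With a batch limit a row's sum may come back as several partial cells, so
    // accumulate per row (rows arrive in order: r1, then r2) before comparing.
    Map<String, Long> sums = new LinkedHashMap<>();
    List<Cell> results = Lists.newArrayList();
    boolean hasMore;
    do {
        results.clear();
        hasMore = scanner.next(results);
        for (Cell cell : results) {
            String row = Bytes.toString(cell.getRow());
            long prev = sums.containsKey(row) ? sums.get(row) : 0L;
            sums.put(row, prev + Bytes.toLong(cell.getValue()));
        }
    } while (hasMore);
    scanner.close();
    assertEquals(expectedCounts.length, sums.size());
    int i = 0;
    for (long sum : sums.values()) {
        assertEquals(expectedCounts[i++], sum);
    }
}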

Example 52 with HRegion

use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

In class IncrementSummingScannerTest, method testMultiColumnFlushAndCompact.

@Test
public void testMultiColumnFlushAndCompact() throws Exception {
    TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "testMultiColumnFlushAndCompact");
    byte[] familyBytes = Bytes.toBytes("f");
    byte[] columnBytes = Bytes.toBytes("c");
    byte[] columnBytes2 = Bytes.toBytes("c2");
    HRegion region = createRegion(tableId, familyBytes);
    try {
        region.initialize();
        long now = 1;
        byte[] row1 = Bytes.toBytes("row1");
        byte[] row2 = Bytes.toBytes("row2");
        // Initial put to row1,c2
        Put row1P = new Put(row1);
        row1P.add(familyBytes, columnBytes2, now - 1, Bytes.toBytes(5L));
        region.put(row1P);
        // Initial put to row2,c
        Put row2P = new Put(row2);
        row2P.add(familyBytes, columnBytes, now - 1, Bytes.toBytes(10L));
        region.put(row2P);
        // Generate some increments
        long ts = now;
        for (int i = 0; i < 50; i++) {
            region.put(generateIncrementPut(familyBytes, columnBytes, row1, ts));
            region.put(generateIncrementPut(familyBytes, columnBytes, row2, ts));
            region.put(generateIncrementPut(familyBytes, columnBytes2, row1, ts));
            ts++;
        }
        // First scanner represents flush scanner
        RegionScanner scanner = new IncrementSummingScanner(region, -1, region.getScanner(new Scan().setMaxVersions()), ScanType.COMPACT_RETAIN_DELETES, now + 15, -1);
        // Second scanner is a user scan, this is to help in easy asserts
        scanner = new IncrementSummingScanner(region, -1, scanner, ScanType.USER_SCAN);
        List<Cell> results = Lists.newArrayList();
        assertTrue(scanner.next(results, 10));
        assertEquals(2, results.size());
        Cell cell = results.get(0);
        assertNotNull(cell);
        assertEquals("row1", Bytes.toString(cell.getRow()));
        assertEquals("c", Bytes.toString(cell.getQualifier()));
        assertEquals(50, Bytes.toLong(cell.getValue()));
        cell = results.get(1);
        assertNotNull(cell);
        assertEquals("row1", Bytes.toString(cell.getRow()));
        assertEquals("c2", Bytes.toString(cell.getQualifier()));
        assertEquals(55, Bytes.toLong(cell.getValue()));
        results.clear();
        assertFalse(scanner.next(results, 10));
        assertEquals(1, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals("row2", Bytes.toString(cell.getRow()));
        assertEquals(60, Bytes.toLong(cell.getValue()));
    } finally {
        region.close();
    }
}
Also used : TableId(co.cask.cdap.data2.util.TableId) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put) HBase10CDHTest(co.cask.cdap.data.hbase.HBase10CDHTest) Test(org.junit.Test)
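
Note: generateIncrementPut is another helper of the same test class that is not shown here. A plausible sketch follows, assuming it builds a 1L delta Put flagged with HBaseTable.DELTA_WRITE, the same pattern as the explicit puts in Example 51; a 1L delta is also consistent with the sums of 50, 55 and 60 asserted above. The real helper may differ.

// Hypothetical sketch of the generateIncrementPut helper referenced above.
private static Put generateIncrementPut(byte[] familyBytes, byte[] columnBytes, byte[] row, long ts) {
    Put p = new Put(row, ts);
    // each increment carries a delta of 1
    p.add(familyBytes, columnBytes, Bytes.toBytes(1L));
    // mark the Put so the IncrementHandler coprocessor treats it as a delta write
    p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
    return p;
}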

Example 53 with HRegion

use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

In class IncrementSummingScannerTest, method testIncrementScanningWithBatchAndUVB.

@Test
public void testIncrementScanningWithBatchAndUVB() throws Exception {
    TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "TestIncrementSummingScannerWithUpperVisibilityBound");
    byte[] familyBytes = Bytes.toBytes("f");
    byte[] columnBytes = Bytes.toBytes("c");
    HRegion region = createRegion(tableId, familyBytes);
    try {
        region.initialize();
        long start = 0;
        long now = start;
        long counter1 = 0;
        // adding 5 delta increments
        for (int i = 0; i < 5; i++) {
            Put p = new Put(Bytes.toBytes("r1"), now++);
            p.add(familyBytes, columnBytes, Bytes.toBytes(1L));
            p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
            region.put(p);
            counter1++;
        }
        // Verify counts under different combinations of batch limit and upper visibility bounds (uvbs)
        for (int i = 0; i < 7; i++) {
            for (int k = 0; k < 4; k++) {
                long[] uvbs = new long[k];
                for (int l = 0; l < uvbs.length; l++) {
                    uvbs[l] = start + (k + 1) * (l + 1);
                }
                verifyCounts(region, new Scan().setMaxVersions(), new long[] { counter1 }, i > 0 ? i : -1, uvbs);
            }
        }
        // Now test same with two groups of increments
        int counter2 = 0;
        for (int i = 0; i < 5; i++) {
            Put p = new Put(Bytes.toBytes("r2"), now + i);
            p.add(familyBytes, columnBytes, Bytes.toBytes(2L));
            p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
            region.put(p);
            counter2 += 2;
        }
        for (int i = 0; i < 12; i++) {
            for (int k = 0; k < 4; k++) {
                long[] uvbs = new long[k];
                for (int l = 0; l < uvbs.length; l++) {
                    uvbs[l] = start + (k + 1) * (l + 1);
                }
                verifyCounts(region, new Scan().setMaxVersions(), new long[] { counter1, counter2 }, i > 0 ? i : -1, uvbs);
            }
        }
    } finally {
        region.close();
    }
}
Also used : TableId(co.cask.cdap.data2.util.TableId) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Scan(org.apache.hadoop.hbase.client.Scan) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test) HBase98Test(co.cask.cdap.data.hbase.HBase98Test)

Example 54 with HRegion

use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

In class HBaseQueueRegionObserver, method postFlush.

@Override
public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException {
    // Record whether the region is empty after a flush
    HRegion region = e.getEnvironment().getRegion();
    // After a flush, if the memstore size is zero and there are no store files for any stores in the region
    // then the region must be empty
    long numStoreFiles = numStoreFilesForRegion(e);
    long memstoreSize = region.getMemstoreSize().get();
    LOG.debug(String.format("Region %s: memstore size = %s, num store files = %s", region.getRegionInfo().getRegionNameAsString(), memstoreSize, numStoreFiles));
    if (memstoreSize == 0 && numStoreFiles == 0) {
        if (compactionState != null) {
            compactionState.persistRegionEmpty(System.currentTimeMillis());
        }
    }
}
Also used : HRegion(org.apache.hadoop.hbase.regionserver.HRegion)
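
Note: numStoreFilesForRegion is a private helper of HBaseQueueRegionObserver that is not shown here. A minimal sketch is below, assuming the HBase 0.98-era API (where HRegion.getStores() returns a Map of Store instances, matching the getMemstoreSize().get() call above) and simply totalling the store file counts; the project's actual helper may differ.

// Hypothetical sketch: sum the store file counts across every store in the flushed region.
// Assumes org.apache.hadoop.hbase.regionserver.Store is imported.
private long numStoreFilesForRegion(ObserverContext<RegionCoprocessorEnvironment> ctx) {
    long numStoreFiles = 0;
    for (Store store : ctx.getEnvironment().getRegion().getStores().values()) {
        numStoreFiles += store.getStorefilesCount();
    }
    return numStoreFiles;
}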

Example 55 with HRegion

use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

In class IncrementSummingScannerTest, method createRegion.

static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId, HColumnDescriptor cfd) throws Exception {
    HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
    HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
    cfd.setMaxVersions(Integer.MAX_VALUE);
    cfd.setKeepDeletedCells(true);
    htd.addFamily(cfd);
    htd.addCoprocessor(IncrementHandler.class.getName());
    HTableDescriptor desc = htd.build();
    String tableName = desc.getNameAsString();
    Path tablePath = new Path("/tmp/" + tableName);
    Path hlogPath = new Path("/tmp/hlog-" + tableName);
    FileSystem fs = FileSystem.get(hConf);
    assertTrue(fs.mkdirs(tablePath));
    WALFactory walFactory = new WALFactory(hConf, null, hlogPath.toString());
    WAL hLog = walFactory.getWAL(new byte[] { 1 });
    HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
    HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
    return new HRegion(regionFS, hLog, hConf, desc, new LocalRegionServerServices(hConf, ServerName.valueOf(InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
Also used : Path(org.apache.hadoop.fs.Path) HTableDescriptorBuilder(co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder) WAL(org.apache.hadoop.hbase.wal.WAL) HBaseTableUtil(co.cask.cdap.data2.util.hbase.HBaseTableUtil) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) HBaseTableUtilFactory(co.cask.cdap.data2.util.hbase.HBaseTableUtilFactory) WALFactory(org.apache.hadoop.hbase.wal.WALFactory)
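
The tests in Examples 51-53 call a two-argument createRegion(tableId, familyBytes). A sketch of how such an overload could delegate to the static factory above is shown next; the conf and cConf fields and the plain HColumnDescriptor construction are assumptions about the test fixture, not the project's exact code.

// Hypothetical convenience overload; assumes the test class holds Configuration conf
// and CConfiguration cConf fixtures (e.g. initialized in a @BeforeClass method).
private static HRegion createRegion(TableId tableId, byte[] family) throws Exception {
    return createRegion(conf, cConf, tableId, new HColumnDescriptor(family));
}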

Aggregations

HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 148 uses
Test (org.junit.Test): 88 uses
Put (org.apache.hadoop.hbase.client.Put): 56 uses
Path (org.apache.hadoop.fs.Path): 40 uses
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 40 uses
Scan (org.apache.hadoop.hbase.client.Scan): 37 uses
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 36 uses
Cell (org.apache.hadoop.hbase.Cell): 35 uses
TableId (co.cask.cdap.data2.util.TableId): 32 uses
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 28 uses
IOException (java.io.IOException): 26 uses
WAL (org.apache.hadoop.hbase.wal.WAL): 25 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 24 uses
ArrayList (java.util.ArrayList): 22 uses
TableName (org.apache.hadoop.hbase.TableName): 22 uses
Configuration (org.apache.hadoop.conf.Configuration): 21 uses
Result (org.apache.hadoop.hbase.client.Result): 21 uses
Region (org.apache.hadoop.hbase.regionserver.Region): 21 uses
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 19 uses
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 19 uses