Example 71 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

From class HBaseTableTest, method testEnableIncrements.

@Test
public void testEnableIncrements() throws Exception {
    // setup a table with increments disabled and with it enabled
    String disableTableName = "incr-disable";
    String enabledTableName = "incr-enable";
    TableId disabledTableId = hBaseTableUtil.createHTableId(NAMESPACE1, disableTableName);
    TableId enabledTableId = hBaseTableUtil.createHTableId(NAMESPACE1, enabledTableName);
    DatasetProperties propsDisabled = TableProperties.builder().setReadlessIncrementSupport(false).setConflictDetection(ConflictDetection.COLUMN).build();
    HBaseTableAdmin disabledAdmin = getTableAdmin(CONTEXT1, disableTableName, propsDisabled);
    disabledAdmin.create();
    HBaseAdmin admin = TEST_HBASE.getHBaseAdmin();
    DatasetProperties propsEnabled = TableProperties.builder().setReadlessIncrementSupport(true).setConflictDetection(ConflictDetection.COLUMN).build();
    HBaseTableAdmin enabledAdmin = getTableAdmin(CONTEXT1, enabledTableName, propsEnabled);
    enabledAdmin.create();
    try {
        // capture the enabled table's name while the admin is still open;
        // it is needed below, after the admin has been closed
        final byte[] enabledTableNameBytes;
        try {
            HTableDescriptor htd = hBaseTableUtil.getHTableDescriptor(admin, disabledTableId);
            List<String> cps = htd.getCoprocessors();
            assertFalse(cps.contains(IncrementHandler.class.getName()));
            htd = hBaseTableUtil.getHTableDescriptor(admin, enabledTableId);
            cps = htd.getCoprocessors();
            assertTrue(cps.contains(IncrementHandler.class.getName()));
            enabledTableNameBytes = htd.getName();
        } finally {
            admin.close();
        }
        BufferingTable table = getTable(CONTEXT1, enabledTableName, propsEnabled);
        byte[] row = Bytes.toBytes("row1");
        byte[] col = Bytes.toBytes("col1");
        DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
        Transaction tx = txSystemClient.startShort();
        table.startTx(tx);
        table.increment(row, col, 10);
        table.commitTx();
        // verify that value was written as a delta value
        final byte[] expectedValue = Bytes.add(IncrementHandlerState.DELTA_MAGIC_PREFIX, Bytes.toBytes(10L));
        final AtomicBoolean foundValue = new AtomicBoolean();
        TEST_HBASE.forEachRegion(enabledTableNameBytes, new Function<HRegion, Object>() {

            @Override
            public Object apply(HRegion hRegion) {
                Scan scan = hBaseTableUtil.buildScan().build();
                try {
                    RegionScanner scanner = hRegion.getScanner(scan);
                    List<Cell> results = Lists.newArrayList();
                    boolean hasMore;
                    do {
                        hasMore = scanner.next(results);
                        for (Cell cell : results) {
                            if (CellUtil.matchingValue(cell, expectedValue)) {
                                foundValue.set(true);
                            }
                        }
                    } while (hasMore);
                } catch (IOException ioe) {
                    fail("IOException scanning region: " + ioe.getMessage());
                }
                return null;
            }
        });
        assertTrue("Should have seen the expected encoded delta value in the " + enabledTableName + " table region", foundValue.get());
    } finally {
        disabledAdmin.drop();
        enabledAdmin.drop();
    }
}
Also used: TableId(co.cask.cdap.data2.util.TableId) DatasetProperties(co.cask.cdap.api.dataset.DatasetProperties) IOException(java.io.IOException) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) BufferingTable(co.cask.cdap.data2.dataset2.lib.table.BufferingTable) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Transaction(org.apache.tephra.Transaction) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) DetachedTxSystemClient(org.apache.tephra.inmemory.DetachedTxSystemClient) Scan(org.apache.hadoop.hbase.client.Scan) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell) BufferingTableTest(co.cask.cdap.data2.dataset2.lib.table.BufferingTableTest) Test(org.junit.Test)
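The assertion at the end of this test hinges on the readless-increment storage format: each increment is persisted as a delta cell whose value is IncrementHandlerState.DELTA_MAGIC_PREFIX followed by the 8-byte delta, which is exactly what expectedValue reconstructs. A minimal decoding sketch of that format, assuming a placeholder prefix (the real bytes are whatever IncrementHandlerState defines):

import org.apache.hadoop.hbase.util.Bytes;

public final class DeltaValues {

    // hypothetical stand-in for IncrementHandlerState.DELTA_MAGIC_PREFIX
    static final byte[] DELTA_MAGIC_PREFIX = Bytes.toBytes("X");

    // true if the stored value carries the delta-encoding prefix
    static boolean isDelta(byte[] value) {
        return value.length == DELTA_MAGIC_PREFIX.length + Bytes.SIZEOF_LONG
            && Bytes.startsWith(value, DELTA_MAGIC_PREFIX);
    }

    // extracts the long delta that follows the prefix
    static long decodeDelta(byte[] value) {
        return Bytes.toLong(value, DELTA_MAGIC_PREFIX.length, Bytes.SIZEOF_LONG);
    }
}

With a helper like this, the region scan above could check isDelta(CellUtil.cloneValue(cell)) and decodeDelta(...) == 10L rather than comparing raw bytes.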

Example 72 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

From class HBaseQueueTest, method forceEviction.

@Override
protected void forceEviction(QueueName queueName, int numGroups) throws Exception {
    TableId tableId = ((HBaseQueueAdmin) queueAdmin).getDataTableId(queueName);
    byte[] tableName = tableUtil.getHTableDescriptor(hbaseAdmin, tableId).getName();
    // make sure consumer config cache is updated with the latest tx snapshot
    takeTxSnapshot();
    final Class<?> coprocessorClass = tableUtil.getQueueRegionObserverClassForVersion();
    TEST_HBASE.forEachRegion(tableName, new Function<HRegion, Object>() {

        @Override
        public Object apply(HRegion region) {
            try {
                Coprocessor cp = region.getCoprocessorHost().findCoprocessor(coprocessorClass.getName());
                // invoke cp.updateCache() reflectively; a direct cast is impossible because
                // cp is loaded by a different classloader (the one created for the coprocessor's jar)
                LOG.info("forcing update of transaction state cache for HBaseQueueRegionObserver of region: {}", region);
                Method getTxStateCache = cp.getClass().getDeclaredMethod("getTxStateCache");
                getTxStateCache.setAccessible(true);
                Object txStateCache = getTxStateCache.invoke(cp);
                // the one returned is of type DefaultTransactionStateCache.
                // The refreshState method is a private method of its parent, TransactionStateCache
                Method refreshState = txStateCache.getClass().getSuperclass().getDeclaredMethod("refreshState");
                refreshState.setAccessible(true);
                refreshState.invoke(txStateCache);
                LOG.info("forcing update cache for HBaseQueueRegionObserver of region: {}", region);
                Method updateCache = cp.getClass().getDeclaredMethod("updateCache");
                updateCache.setAccessible(true);
                updateCache.invoke(cp);
            } catch (Exception e) {
                throw Throwables.propagate(e);
            }
            return null;
        }
    });
    // force a flush and a major compaction to trigger eviction
    TEST_HBASE.forceRegionFlush(tableName);
    TEST_HBASE.forceRegionCompact(tableName, true);
}
Also used: TableId(co.cask.cdap.data2.util.TableId) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Coprocessor(org.apache.hadoop.hbase.Coprocessor) AfterClass(org.junit.AfterClass) BeforeClass(org.junit.BeforeClass) Method(java.lang.reflect.Method) IOException(java.io.IOException) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException)
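The reflection dance in apply() is the standard workaround when a coprocessor instance lives in a separate classloader: a direct cast fails, so methods are looked up by name and made accessible. A generic sketch of that pattern (the helper name is ours, not part of the source):

import java.lang.reflect.Method;

final class ReflectiveCalls {

    // invoke a no-arg method by name on an object whose class (or superclass)
    // is not visible to our classloader, walking up the hierarchy as needed
    static Object invokeNoArg(Object target, String methodName) throws Exception {
        for (Class<?> c = target.getClass(); c != null; c = c.getSuperclass()) {
            try {
                Method m = c.getDeclaredMethod(methodName);
                m.setAccessible(true);
                return m.invoke(target);
            } catch (NoSuchMethodException e) {
                // not declared at this level; keep walking up
            }
        }
        throw new NoSuchMethodException(methodName);
    }
}

With this helper the body above would shrink to invokeNoArg(invokeNoArg(cp, "getTxStateCache"), "refreshState") followed by invokeNoArg(cp, "updateCache"); note that it also removes the need to know that refreshState is declared on the superclass.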

Example 73 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

From class IncrementSummingScannerTest, method testIncrementScanning.

@Test
public void testIncrementScanning() throws Exception {
    TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "TestIncrementSummingScanner");
    byte[] familyBytes = Bytes.toBytes("f");
    byte[] columnBytes = Bytes.toBytes("c");
    HRegion region = createRegion(tableId, familyBytes);
    try {
        region.initialize();
        // test handling of a single increment value alone
        Put p = new Put(Bytes.toBytes("r1"));
        p.add(familyBytes, columnBytes, Bytes.toBytes(3L));
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        Scan scan = new Scan();
        RegionScanner scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        List<Cell> results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(1, results.size());
        Cell cell = results.get(0);
        assertNotNull(cell);
        assertEquals(3L, Bytes.toLong(cell.getValue()));
        // test handling of a single total sum
        p = new Put(Bytes.toBytes("r2"));
        p.add(familyBytes, columnBytes, Bytes.toBytes(5L));
        region.put(p);
        scan = new Scan(Bytes.toBytes("r2"));
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(1, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(5L, Bytes.toLong(cell.getValue()));
        // test handling of multiple increment values
        long now = System.currentTimeMillis();
        p = new Put(Bytes.toBytes("r3"));
        for (int i = 0; i < 5; i++) {
            p.add(familyBytes, columnBytes, now - i, Bytes.toBytes((long) (i + 1)));
        }
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        scan = new Scan(Bytes.toBytes("r3"));
        scan.setMaxVersions();
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(1, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(15L, Bytes.toLong(cell.getValue()));
        // test handling of multiple increment values followed by a total sum, then other increments
        now = System.currentTimeMillis();
        p = new Put(Bytes.toBytes("r4"));
        for (int i = 0; i < 3; i++) {
            p.add(familyBytes, columnBytes, now - i, Bytes.toBytes(1L));
        }
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        // this put will appear as a "total" sum prior to all the delta puts
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, columnBytes, now - 5, Bytes.toBytes(5L));
        region.put(p);
        scan = new Scan(Bytes.toBytes("r4"));
        scan.setMaxVersions();
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(1, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(8L, Bytes.toLong(cell.getValue()));
        // test handling of an increment column followed by a non-increment column
        p = new Put(Bytes.toBytes("r4"));
        p.add(familyBytes, Bytes.toBytes("c2"), Bytes.toBytes("value"));
        region.put(p);
        scan = new Scan(Bytes.toBytes("r4"));
        scan.setMaxVersions();
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.USER_SCAN);
        results = Lists.newArrayList();
        scanner.next(results);
        assertEquals(2, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(8L, Bytes.toLong(cell.getValue()));
        cell = results.get(1);
        assertNotNull(cell);
        assertEquals("value", Bytes.toString(cell.getValue()));
        // test handling of an increment column followed by a delete
        now = System.currentTimeMillis();
        Delete d = new Delete(Bytes.toBytes("r5"));
        d.deleteColumn(familyBytes, columnBytes, now - 3);
        region.delete(d);
        p = new Put(Bytes.toBytes("r5"));
        for (int i = 2; i >= 0; i--) {
            p.add(familyBytes, columnBytes, now - i, Bytes.toBytes(1L));
        }
        p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
        region.put(p);
        scan = new Scan(Bytes.toBytes("r5"));
        scan.setMaxVersions();
        scan.setRaw(true);
        scanner = new IncrementSummingScanner(region, -1, region.getScanner(scan), ScanType.COMPACT_RETAIN_DELETES);
        results = Lists.newArrayList();
        scanner.next(results);
        // with a raw scan and COMPACT_RETAIN_DELETES, the delete marker is returned as well
        assertEquals(2, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals(3L, Bytes.toLong(cell.getValue(), IncrementHandlerState.DELTA_MAGIC_PREFIX.length, 8));
        // next cell should be the delete
        cell = results.get(1);
        assertTrue(CellUtil.isDelete(cell));
    } finally {
        region.close();
    }
}
Also used: TableId(co.cask.cdap.data2.util.TableId) Delete(org.apache.hadoop.hbase.client.Delete) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put) HBase12CDH570Test(co.cask.cdap.data.hbase.HBase12CDH570Test) Test(org.junit.Test)
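Each case in this test exercises the same merge rule: walking a column's versions from newest to oldest, IncrementSummingScanner adds up delta cells until it meets the first plain "total" cell, which it folds in and then stops. A simplified sketch of that rule, assuming the delta prefix length is known (an illustration, not the coprocessor's actual code):

import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

final class IncrementMerge {

    // hypothetical predicate; the real scanner inspects the delta magic prefix
    static boolean isDelta(byte[] value, int prefixLen) {
        return value.length == prefixLen + Bytes.SIZEOF_LONG;
    }

    // sum newest-first versions of one column: add deltas, then fold in the
    // first plain "total" cell and stop
    static long mergedValue(List<Cell> newestFirst, int prefixLen) {
        long sum = 0;
        for (Cell cell : newestFirst) {
            byte[] value = CellUtil.cloneValue(cell);
            if (isDelta(value, prefixLen)) {
                sum += Bytes.toLong(value, prefixLen, Bytes.SIZEOF_LONG);
            } else {
                sum += Bytes.toLong(value);
                break;
            }
        }
        return sum;
    }
}

For the "r4" row above this yields 1 + 1 + 1 + 5 = 8, matching the asserted value.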

Example 74 with HRegion

Use of org.apache.hadoop.hbase.regionserver.HRegion in project cdap by caskdata.

From class IncrementSummingScannerTest, method createRegion.

static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId, HColumnDescriptor cfd) throws Exception {
    HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
    HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
    cfd.setMaxVersions(Integer.MAX_VALUE);
    cfd.setKeepDeletedCells(true);
    htd.addFamily(cfd);
    htd.addCoprocessor(IncrementHandler.class.getName());
    HTableDescriptor desc = htd.build();
    String tableName = desc.getNameAsString();
    Path tablePath = new Path("/tmp/" + tableName);
    Path hlogPath = new Path("/tmp/hlog-" + tableName);
    FileSystem fs = FileSystem.get(hConf);
    assertTrue(fs.mkdirs(tablePath));
    WALFactory walFactory = new WALFactory(hConf, null, hlogPath.toString());
    WAL hLog = walFactory.getWAL(new byte[] { 1 });
    HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
    HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
    return new HRegion(regionFS, hLog, hConf, desc, new LocalRegionServerServices(hConf, ServerName.valueOf(InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
Also used: Path(org.apache.hadoop.fs.Path) HTableDescriptorBuilder(co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder) WAL(org.apache.hadoop.hbase.wal.WAL) HBaseTableUtil(co.cask.cdap.data2.util.hbase.HBaseTableUtil) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) HRegionFileSystem(org.apache.hadoop.hbase.regionserver.HRegionFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) HBaseTableUtilFactory(co.cask.cdap.data2.util.hbase.HBaseTableUtilFactory) WALFactory(org.apache.hadoop.hbase.wal.WALFactory)
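A hedged sketch of how a test might drive this helper end to end; TableId.from, region.initialize() and the old-style Put.add are taken from the examples above, while the table name and column family are ours:

@Test
public void testCreateRegionUsage() throws Exception {
    Configuration hConf = new Configuration();
    CConfiguration cConf = CConfiguration.create();
    TableId tableId = TableId.from("default", "createRegionDemo");
    HColumnDescriptor cfd = new HColumnDescriptor(Bytes.toBytes("f"));
    HRegion region = createRegion(hConf, cConf, tableId, cfd);
    try {
        // a region built this way must be initialized before serving requests
        region.initialize();
        Put p = new Put(Bytes.toBytes("r1"));
        p.add(Bytes.toBytes("f"), Bytes.toBytes("c"), Bytes.toBytes(1L));
        region.put(p);
    } finally {
        // close() flushes pending edits and releases the region's resources
        region.close();
    }
}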

Aggregations

HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 148
Test (org.junit.Test): 88
Put (org.apache.hadoop.hbase.client.Put): 56
Path (org.apache.hadoop.fs.Path): 40
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 40
Scan (org.apache.hadoop.hbase.client.Scan): 37
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 36
Cell (org.apache.hadoop.hbase.Cell): 35
TableId (co.cask.cdap.data2.util.TableId): 32
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 28
IOException (java.io.IOException): 26
WAL (org.apache.hadoop.hbase.wal.WAL): 25
FileSystem (org.apache.hadoop.fs.FileSystem): 24
ArrayList (java.util.ArrayList): 22
TableName (org.apache.hadoop.hbase.TableName): 22
Configuration (org.apache.hadoop.conf.Configuration): 21
Result (org.apache.hadoop.hbase.client.Result): 21
Region (org.apache.hadoop.hbase.regionserver.Region): 21
MiniHBaseCluster (org.apache.hadoop.hbase.MiniHBaseCluster): 19
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 19