
Example 41 with HBaseTableUtil

Use of io.cdap.cdap.data2.util.hbase.HBaseTableUtil in project cdap by caskdata.

In class HBaseTableTest, method testEnforceTxLifetime.

@Test
public void testEnforceTxLifetime() throws Exception {
    String tableName = "enforce-tx-lifetime";
    DatasetProperties datasetProperties = TableProperties.builder()
        .setReadlessIncrementSupport(true)
        .setConflictDetection(ConflictDetection.COLUMN)
        .build();
    DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, datasetProperties);
    admin.create();
    DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
    DatasetSpecification spec = DatasetSpecification.builder(tableName, HBaseTable.class.getName())
        .properties(datasetProperties.getProperties())
        .build();
    try {
        final HBaseTable table = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(), cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil);
        Transaction tx = txSystemClient.startShort();
        table.startTx(tx);
        table.put(b("row1"), b("col1"), b("val1"));
        table.put(b("inc1"), b("col1"), Bytes.toBytes(10L));
        table.commitTx();
        table.postTxCommit();
        table.close();
        CConfiguration testCConf = CConfiguration.copy(cConf);
        // No mutations on tables using testCConf will succeed.
        testCConf.setInt(TxConstants.Manager.CFG_TX_MAX_LIFETIME, 0);
        try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(), testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
            // A put should fail
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.put(b("row2"), b("col1"), b("val1"));
                }
            });
            // A delete should also fail
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.delete(b("row1"));
                }
            });
            // A column-specific delete should fail as well
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.delete(b("row1"), b("col1"));
                }
            });
            // So should an increment
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.increment(b("inc1"), b("col1"), 10);
                }
            });
            // incrementAndGet gets converted to a put internally
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.incrementAndGet(b("inc1"), b("col1"), 10);
                }
            });
        }
        // Even safe increments should fail (this happens when readless increment is done from a mapreduce job)
        try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, ImmutableMap.of(HBaseTable.SAFE_INCREMENTS, "true"), testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
            // An increment should fail even with safe increments enabled
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.increment(b("inc1"), b("col1"), 10);
                }
            });
            // incrementAndGet gets converted to a put internally
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.incrementAndGet(b("inc1"), b("col1"), 10);
                }
            });
        }
    } finally {
        admin.drop();
        admin.close();
    }
}
Also used: Transaction (org.apache.tephra.Transaction), DatasetProperties (io.cdap.cdap.api.dataset.DatasetProperties), DatasetSpecification (io.cdap.cdap.api.dataset.DatasetSpecification), DatasetAdmin (io.cdap.cdap.api.dataset.DatasetAdmin), DetachedTxSystemClient (org.apache.tephra.inmemory.DetachedTxSystemClient), CConfiguration (io.cdap.cdap.common.conf.CConfiguration), BufferingTableTest (io.cdap.cdap.data2.dataset2.lib.table.BufferingTableTest), Test (org.junit.Test)
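
The test above relies on two helpers that are defined elsewhere in HBaseTableTest and do not appear on this page: b(...), which converts a String to a byte[], and assertTxFail(...), which runs a mutation inside a fresh transaction and expects the commit to be rejected. The following is a minimal sketch of plausible implementations; the exact bodies and the exception type caught are assumptions, not the project's actual code.

// Hypothetical helper sketches; the real definitions live in HBaseTableTest.
private static byte[] b(String s) {
    // shorthand used throughout the tests to turn a String into bytes
    return Bytes.toBytes(s);
}

private void assertTxFail(DetachedTxSystemClient txClient, HBaseTable table, Runnable op) throws Exception {
    Transaction tx = txClient.startShort();
    table.startTx(tx);
    op.run();
    try {
        // commitTx() persists the buffered mutation; with CFG_TX_MAX_LIFETIME set to 0,
        // the write should be rejected as past the transaction's maximum lifetime
        table.commitTx();
        Assert.fail("Expected the mutation to fail due to expired transaction lifetime");
    } catch (Exception e) {
        // expected; discard the buffered changes
        table.rollbackTx();
    }
}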

Example 42 with HBaseTableUtil

Use of io.cdap.cdap.data2.util.hbase.HBaseTableUtil in project cdap by caskdata.

In class HBaseTableTest, method beforeClass.

@BeforeClass
public static void beforeClass() throws Exception {
    cConf = CConfiguration.create();
    hBaseTableUtil = new HBaseTableUtilFactory(cConf, new SimpleNamespaceQueryAdmin()).get();
    // TODO: CDAP-1634 - Explore a way to not have every HBase test class do this.
    ddlExecutor = new HBaseDDLExecutorFactory(cConf, TEST_HBASE.getConfiguration()).get();
    ddlExecutor.createNamespaceIfNotExists(hBaseTableUtil.getHBaseNamespace(NAMESPACE1));
    ddlExecutor.createNamespaceIfNotExists(hBaseTableUtil.getHBaseNamespace(NAMESPACE2));
}
Also used: SimpleNamespaceQueryAdmin (io.cdap.cdap.common.namespace.SimpleNamespaceQueryAdmin), HBaseDDLExecutorFactory (io.cdap.cdap.data2.util.hbase.HBaseDDLExecutorFactory), HBaseTableUtilFactory (io.cdap.cdap.data2.util.hbase.HBaseTableUtilFactory), BeforeClass (org.junit.BeforeClass)
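
The matching teardown is not shown on this page. A plausible @AfterClass counterpart, sketched here as an assumption based on the setup above, would drop the two namespaces and close the executor:

@AfterClass
public static void afterClass() throws Exception {
    // Assumed mirror of beforeClass: individual tests drop their own tables,
    // so only the namespaces created above need to be removed here.
    ddlExecutor.deleteNamespaceIfExists(hBaseTableUtil.getHBaseNamespace(NAMESPACE1));
    ddlExecutor.deleteNamespaceIfExists(hBaseTableUtil.getHBaseNamespace(NAMESPACE2));
    ddlExecutor.close();
}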

Example 43 with HBaseTableUtil

Use of io.cdap.cdap.data2.util.hbase.HBaseTableUtil in project cdap by caskdata.

In class HBaseTableTest, method testTTL.

@Test
public void testTTL() throws Exception {
    // For the purposes of this test it is fine not to configure the TTL when creating the table:
    // we want to see whether it applies on read.
    int ttl = 1;
    String ttlTable = "ttl";
    String noTtlTable = "nottl";
    DatasetProperties props = TableProperties.builder().setTTL(ttl).build();
    getTableAdmin(CONTEXT1, ttlTable, props).create();
    DatasetSpecification ttlTableSpec = DatasetSpecification.builder(ttlTable, HBaseTable.class.getName())
        .properties(props.getProperties())
        .build();
    try (HBaseTable table = new HBaseTable(CONTEXT1, ttlTableSpec, Collections.<String, String>emptyMap(), cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
        DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
        Transaction tx = txSystemClient.startShort();
        table.startTx(tx);
        table.put(b("row1"), b("col1"), b("val1"));
        table.commitTx();
        TimeUnit.MILLISECONDS.sleep(1010);
        tx = txSystemClient.startShort();
        table.startTx(tx);
        table.put(b("row2"), b("col2"), b("val2"));
        table.commitTx();
        // now we should not see the first value, since it has expired, but we should still see the second
        tx = txSystemClient.startShort();
        table.startTx(tx);
        byte[] val = table.get(b("row1"), b("col1"));
        if (val != null) {
            LOG.info("Unexpected value " + Bytes.toStringBinary(val));
        }
        Assert.assertNull(val);
        Assert.assertArrayEquals(b("val2"), table.get(b("row2"), b("col2")));
        // test a table with no TTL
        DatasetProperties props2 = TableProperties.builder().setTTL(Tables.NO_TTL).build();
        getTableAdmin(CONTEXT1, noTtlTable, props2).create();
        DatasetSpecification noTtlTableSpec = DatasetSpecification.builder(noTtlTable, HBaseTable.class.getName())
            .properties(props2.getProperties())
            .build();
        try (HBaseTable table2 = new HBaseTable(CONTEXT1, noTtlTableSpec, Collections.<String, String>emptyMap(), cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
            tx = txSystemClient.startShort();
            table2.startTx(tx);
            table2.put(b("row1"), b("col1"), b("val1"));
            table2.commitTx();
            TimeUnit.SECONDS.sleep(2);
            tx = txSystemClient.startShort();
            table2.startTx(tx);
            table2.put(b("row2"), b("col2"), b("val2"));
            table2.commitTx();
            // with a TTL of -1 (unlimited), both values should still be visible
            tx = txSystemClient.startShort();
            table2.startTx(tx);
            Assert.assertArrayEquals(b("val1"), table2.get(b("row1"), b("col1")));
            Assert.assertArrayEquals(b("val2"), table2.get(b("row2"), b("col2")));
        }
    }
}
Also used: Transaction (org.apache.tephra.Transaction), DatasetProperties (io.cdap.cdap.api.dataset.DatasetProperties), DatasetSpecification (io.cdap.cdap.api.dataset.DatasetSpecification), DetachedTxSystemClient (org.apache.tephra.inmemory.DetachedTxSystemClient), BufferingTableTest (io.cdap.cdap.data2.dataset2.lib.table.BufferingTableTest), Test (org.junit.Test)
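
The TTL set through TableProperties governs cell visibility on read, as the test demonstrates. To double-check what was physically configured on the HBase side, the table descriptor can be inspected with the plain HBase 1.x client. The snippet below is an illustrative sketch only; the physical table name, which CDAP derives from the dataset name, is deliberately left as a placeholder.

// Illustrative sketch: inspect the column families of the underlying HBase table.
// The physical table name is resolved by CDAP and is elided here.
try (Connection connection = ConnectionFactory.createConnection(TEST_HBASE.getConfiguration());
     Admin hBaseAdmin = connection.getAdmin()) {
    HTableDescriptor descriptor = hBaseAdmin.getTableDescriptor(TableName.valueOf("..." /* physical table name */));
    for (HColumnDescriptor family : descriptor.getColumnFamilies()) {
        // HBase stores the column-family TTL in seconds
        LOG.info("family " + family.getNameAsString() + " has TTL " + family.getTimeToLive());
    }
}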

Example 44 with HBaseTableUtil

Use of io.cdap.cdap.data2.util.hbase.HBaseTableUtil in project cdap by caskdata.

In class IncrementSummingScannerTest, method createRegion.

static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId, HColumnDescriptor cfd) throws Exception {
    HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
    HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
    // retain all cell versions and deleted cells, and attach the IncrementHandler
    // coprocessor that IncrementSummingScannerTest exercises
    cfd.setMaxVersions(Integer.MAX_VALUE);
    cfd.setKeepDeletedCells(true);
    htd.addFamily(cfd);
    htd.addCoprocessor(IncrementHandler.class.getName());
    HTableDescriptor desc = htd.build();
    String tableName = desc.getNameAsString();
    // stand up a region on the local file system with its own WAL, without a full HBase cluster
    Path tablePath = new Path("/tmp/" + tableName);
    Path hlogPath = new Path("/tmp/hlog-" + tableName);
    FileSystem fs = FileSystem.get(hConf);
    assertTrue(fs.mkdirs(tablePath));
    WALFactory walFactory = new WALFactory(hConf, null, hlogPath.toString());
    WAL hLog = walFactory.getWAL(new byte[] { 1 });
    HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
    HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
    return new HRegion(regionFS, hLog, hConf, desc, new LocalRegionServerServices(hConf, ServerName.valueOf(InetAddress.getLocalHost().getHostName(), 0, System.currentTimeMillis())));
}
Also used: Path (org.apache.hadoop.fs.Path), HTableDescriptorBuilder (io.cdap.cdap.data2.util.hbase.HTableDescriptorBuilder), WAL (org.apache.hadoop.hbase.wal.WAL), HBaseTableUtil (io.cdap.cdap.data2.util.hbase.HBaseTableUtil), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), HRegionInfo (org.apache.hadoop.hbase.HRegionInfo), HRegion (org.apache.hadoop.hbase.regionserver.HRegion), HRegionFileSystem (org.apache.hadoop.hbase.regionserver.HRegionFileSystem), FileSystem (org.apache.hadoop.fs.FileSystem), HBaseTableUtilFactory (io.cdap.cdap.data2.util.hbase.HBaseTableUtilFactory), WALFactory (org.apache.hadoop.hbase.wal.WALFactory)
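
createRegion returns an HRegion that the constructor does not initialize, so a caller would be expected to call initialize() before use. A hypothetical caller sketch follows; the table id, the column family name "d", and the row/column values are placeholder assumptions.

// Hypothetical usage of createRegion; names and values are placeholders.
HRegion region = createRegion(hConf, cConf, TableId.from("default", "testIncrements"), new HColumnDescriptor("d"));
try {
    region.initialize();
    Put put = new Put(Bytes.toBytes("r1"));
    put.addColumn(Bytes.toBytes("d"), Bytes.toBytes("c"), Bytes.toBytes(1L));
    region.put(put);
    // reads pass through the IncrementHandler coprocessor registered in createRegion
    Result result = region.get(new Get(Bytes.toBytes("r1")));
    assertTrue(!result.isEmpty());
} finally {
    region.close();
}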

Aggregations

HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 28 uses
TableId (io.cdap.cdap.data2.util.TableId): 21 uses
HBaseTableUtil (co.cask.cdap.data2.util.hbase.HBaseTableUtil): 19 uses
HBaseTableUtilFactory (co.cask.cdap.data2.util.hbase.HBaseTableUtilFactory): 18 uses
HBaseTableUtil (io.cdap.cdap.data2.util.hbase.HBaseTableUtil): 16 uses
HBaseTableUtilFactory (io.cdap.cdap.data2.util.hbase.HBaseTableUtilFactory): 16 uses
NamespaceId (io.cdap.cdap.proto.id.NamespaceId): 15 uses
HTableDescriptorBuilder (co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder): 14 uses
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 13 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 12 uses
Path (org.apache.hadoop.fs.Path): 12 uses
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 12 uses
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 12 uses
HRegionFileSystem (org.apache.hadoop.hbase.regionserver.HRegionFileSystem): 12 uses
HTableDescriptorBuilder (io.cdap.cdap.data2.util.hbase.HTableDescriptorBuilder): 11 uses
Test (org.junit.Test): 11 uses
WAL (org.apache.hadoop.hbase.wal.WAL): 10 uses
WALFactory (org.apache.hadoop.hbase.wal.WALFactory): 10 uses
IOException (java.io.IOException): 7 uses
Result (org.apache.hadoop.hbase.client.Result): 6 uses