Example 1 with HBaseTable

Use of co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable in project cdap by caskdata.

From the class HBaseTableTest, method testCachedEncodedTransaction:

@Test
public void testCachedEncodedTransaction() throws Exception {
    String tableName = "testEncodedTxTable";
    DatasetProperties props = DatasetProperties.EMPTY;
    getTableAdmin(CONTEXT1, tableName, props).create();
    DatasetSpecification tableSpec = DatasetSpecification.builder(tableName, HBaseTable.class.getName()).build();
    // use a transaction codec that counts the number of times encode() is called
    final AtomicInteger encodeCount = new AtomicInteger();
    final TransactionCodec codec = new TransactionCodec() {

        @Override
        public byte[] encode(Transaction tx) throws IOException {
            encodeCount.incrementAndGet();
            return super.encode(tx);
        }
    };
    // use a table util that creates an HTable that validates the encoded tx on each get
    final AtomicReference<Transaction> txRef = new AtomicReference<>();
    HBaseTableUtil util = new DelegatingHBaseTableUtil(hBaseTableUtil) {

        @Override
        public HTable createHTable(Configuration conf, TableId tableId) throws IOException {
            HTable htable = super.createHTable(conf, tableId);
            return new MinimalDelegatingHTable(htable) {

                @Override
                public Result get(org.apache.hadoop.hbase.client.Get get) throws IOException {
                    Assert.assertEquals(txRef.get().getTransactionId(), codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
                    return super.get(get);
                }

                @Override
                public Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException {
                    for (org.apache.hadoop.hbase.client.Get get : gets) {
                        Assert.assertEquals(txRef.get().getTransactionId(), codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
                    }
                    return super.get(gets);
                }

                @Override
                public ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan) throws IOException {
                    Assert.assertEquals(txRef.get().getTransactionId(), codec.decode(scan.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
                    return super.getScanner(scan);
                }
            };
        }
    };
    HBaseTable table = new HBaseTable(CONTEXT1, tableSpec, Collections.<String, String>emptyMap(), cConf, TEST_HBASE.getConfiguration(), util, codec);
    DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
    // exercise all operations: only the first read encodes the tx
    Transaction tx = txSystemClient.startShort();
    txRef.set(tx);
    table.startTx(tx);
    table.put(b("row1"), b("col1"), b("val1"));
    Assert.assertEquals(0, encodeCount.get());
    table.get(b("row"));
    Assert.assertEquals(1, encodeCount.get());
    table.get(ImmutableList.of(new Get("a"), new Get("b")));
    Assert.assertEquals(1, encodeCount.get());
    Scanner scanner = table.scan(new Scan(null, null));
    Assert.assertEquals(1, encodeCount.get());
    scanner.close();
    table.increment(b("z"), b("z"), 0L);
    Assert.assertEquals(1, encodeCount.get());
    table.commitTx();
    table.postTxCommit();
    // test that for the next tx, we encode again
    tx = txSystemClient.startShort();
    txRef.set(tx);
    table.startTx(tx);
    table.get(b("row"));
    Assert.assertEquals(2, encodeCount.get());
    table.commitTx();
    // test that we encode again, even if postTxCommit was not called
    tx = txSystemClient.startShort();
    txRef.set(tx);
    table.startTx(tx);
    table.get(b("row"));
    Assert.assertEquals(3, encodeCount.get());
    table.commitTx();
    table.rollbackTx();
    // test that rollback does not encode the tx
    Assert.assertEquals(3, encodeCount.get());
    // test that we encode again if the previous tx rolled back
    tx = txSystemClient.startShort();
    txRef.set(tx);
    table.startTx(tx);
    table.get(b("row"));
    Assert.assertEquals(4, encodeCount.get());
    table.commitTx();
    table.close();
    Assert.assertEquals(4, encodeCount.get());
}
Also used : TableId(co.cask.cdap.data2.util.TableId) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Scanner(co.cask.cdap.api.dataset.table.Scanner) CConfiguration(co.cask.cdap.common.conf.CConfiguration) Configuration(org.apache.hadoop.conf.Configuration) HTable(org.apache.hadoop.hbase.client.HTable) Result(org.apache.hadoop.hbase.client.Result) DetachedTxSystemClient(org.apache.tephra.inmemory.DetachedTxSystemClient) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) DatasetProperties(co.cask.cdap.api.dataset.DatasetProperties) DatasetSpecification(co.cask.cdap.api.dataset.DatasetSpecification) AtomicReference(java.util.concurrent.atomic.AtomicReference) HBaseTableUtil(co.cask.cdap.data2.util.hbase.HBaseTableUtil) Transaction(org.apache.tephra.Transaction) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TransactionCodec(org.apache.tephra.TransactionCodec) Get(co.cask.cdap.api.dataset.table.Get) Scan(co.cask.cdap.api.dataset.table.Scan) BufferingTableTest(co.cask.cdap.data2.dataset2.lib.table.BufferingTableTest) Test(org.junit.Test)
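
The behavior verified above is worth spelling out: HBaseTable encodes the current transaction lazily, on the first operation that needs it, and reuses the cached bytes until startTx is called with a new transaction. A minimal sketch of that encode-once pattern follows; the class and member names are illustrative only, not CDAP's actual implementation.

import java.io.IOException;
import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionCodec;

// Illustrative sketch of the encode-once caching the test asserts on.
final class CachedTxEncoder {
    private final TransactionCodec codec = new TransactionCodec();
    private Transaction currentTx;
    private byte[] encodedTx;

    // called from startTx(): remember the new tx and invalidate the cache
    void startTx(Transaction tx) {
        this.currentTx = tx;
        this.encodedTx = null;
    }

    // called before each get/scan: encode at most once per transaction
    byte[] encoded() throws IOException {
        if (encodedTx == null) {
            encodedTx = codec.encode(currentTx);
        }
        return encodedTx;
    }
}

This matches the counts the test asserts: the put buffers locally and encodes nothing, the first get encodes once, and the subsequent multi-get, scan, and increment within the same transaction reuse the cached bytes.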

Example 2 with HBaseTable

Use of io.cdap.cdap.data2.dataset2.lib.table.hbase.HBaseTable in project cdap by caskdata.

From the class HBaseTableTest, method testCachedEncodedTransaction (the same test as Example 1, written against the newer org.apache.hadoop.hbase.client.Table API):

@Test
public void testCachedEncodedTransaction() throws Exception {
    String tableName = "testEncodedTxTable";
    DatasetProperties props = DatasetProperties.EMPTY;
    getTableAdmin(CONTEXT1, tableName, props).create();
    DatasetSpecification tableSpec = DatasetSpecification.builder(tableName, HBaseTable.class.getName()).build();
    // use a transaction codec that counts the number of times encode() is called
    final AtomicInteger encodeCount = new AtomicInteger();
    final TransactionCodec codec = new TransactionCodec() {

        @Override
        public byte[] encode(Transaction tx) throws IOException {
            encodeCount.incrementAndGet();
            return super.encode(tx);
        }
    };
    // use a table util that creates a Table that validates the encoded tx on each read
    final AtomicReference<Transaction> txRef = new AtomicReference<>();
    HBaseTableUtil util = new DelegatingHBaseTableUtil(hBaseTableUtil) {

        @Override
        public org.apache.hadoop.hbase.client.Table createTable(Configuration conf, TableId tableId) throws IOException {
            org.apache.hadoop.hbase.client.Table table = super.createTable(conf, tableId);
            return new DelegatingTable(table) {

                @Override
                public Result get(org.apache.hadoop.hbase.client.Get get) throws IOException {
                    Assert.assertEquals(txRef.get().getTransactionId(), codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
                    return super.get(get);
                }

                @Override
                public Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException {
                    for (org.apache.hadoop.hbase.client.Get get : gets) {
                        Assert.assertEquals(txRef.get().getTransactionId(), codec.decode(get.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
                    }
                    return super.get(gets);
                }

                @Override
                public ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan) throws IOException {
                    Assert.assertEquals(txRef.get().getTransactionId(), codec.decode(scan.getAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY)).getTransactionId());
                    return super.getScanner(scan);
                }
            };
        }
    };
    HBaseTable table = new HBaseTable(CONTEXT1, tableSpec, Collections.<String, String>emptyMap(), cConf, TEST_HBASE.getConfiguration(), util, codec);
    DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
    // exercise all operations: only the first read encodes the tx
    Transaction tx = txSystemClient.startShort();
    txRef.set(tx);
    table.startTx(tx);
    table.put(b("row1"), b("col1"), b("val1"));
    Assert.assertEquals(0, encodeCount.get());
    table.get(b("row"));
    Assert.assertEquals(1, encodeCount.get());
    table.get(ImmutableList.of(new Get("a"), new Get("b")));
    Assert.assertEquals(1, encodeCount.get());
    Scanner scanner = table.scan(new Scan(null, null));
    Assert.assertEquals(1, encodeCount.get());
    scanner.close();
    table.increment(b("z"), b("z"), 0L);
    Assert.assertEquals(1, encodeCount.get());
    table.commitTx();
    table.postTxCommit();
    // test that for the next tx, we encode again
    tx = txSystemClient.startShort();
    txRef.set(tx);
    table.startTx(tx);
    table.get(b("row"));
    Assert.assertEquals(2, encodeCount.get());
    table.commitTx();
    // test that we encode again, even if postTxCommit was not called
    tx = txSystemClient.startShort();
    txRef.set(tx);
    table.startTx(tx);
    table.get(b("row"));
    Assert.assertEquals(3, encodeCount.get());
    table.commitTx();
    table.rollbackTx();
    // test that rollback does not encode the tx
    Assert.assertEquals(3, encodeCount.get());
    // test that we encode again if the previous tx rolled back
    tx = txSystemClient.startShort();
    txRef.set(tx);
    table.startTx(tx);
    table.get(b("row"));
    Assert.assertEquals(4, encodeCount.get());
    table.commitTx();
    table.close();
    Assert.assertEquals(4, encodeCount.get());
}
Also used : TableId(io.cdap.cdap.data2.util.TableId) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Scanner(io.cdap.cdap.api.dataset.table.Scanner) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Configuration(org.apache.hadoop.conf.Configuration) CConfiguration(io.cdap.cdap.common.conf.CConfiguration) Result(org.apache.hadoop.hbase.client.Result) DelegatingTable(io.cdap.cdap.data2.util.hbase.DelegatingTable) DetachedTxSystemClient(org.apache.tephra.inmemory.DetachedTxSystemClient) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) DatasetProperties(io.cdap.cdap.api.dataset.DatasetProperties) DatasetSpecification(io.cdap.cdap.api.dataset.DatasetSpecification) AtomicReference(java.util.concurrent.atomic.AtomicReference) HBaseTableUtil(io.cdap.cdap.data2.util.hbase.HBaseTableUtil) Transaction(org.apache.tephra.Transaction) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TransactionCodec(org.apache.tephra.TransactionCodec) Get(io.cdap.cdap.api.dataset.table.Get) Scan(io.cdap.cdap.api.dataset.table.Scan) BufferingTableTest(io.cdap.cdap.data2.dataset2.lib.table.BufferingTableTest) Test(org.junit.Test)
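
In both variants, the encoded transaction reaches the region servers as an HBase operation attribute, which is what the delegating overrides above inspect. A hedged sketch of attaching it by hand, which HBaseTable performs internally:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionCodec;
import org.apache.tephra.TxConstants;

// Sketch: tag a Get with the encoded transaction so server-side transaction
// filters can apply snapshot visibility; HBaseTable does this before each read.
static Get txGet(byte[] row, Transaction tx, TransactionCodec codec) throws IOException {
    Get get = new Get(row);
    get.setAttribute(TxConstants.TX_OPERATION_ATTRIBUTE_KEY, codec.encode(tx));
    return get;
}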

Example 3 with HBaseTable

Use of co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable in project cdap by caskdata.

From the class HBaseTableTest, method testEnforceTxLifetime:

@Test
public void testEnforceTxLifetime() throws Exception {
    String tableName = "enforce-tx-lifetime";
    DatasetProperties datasetProperties = TableProperties.builder().setReadlessIncrementSupport(true).setConflictDetection(ConflictDetection.COLUMN).build();
    DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, datasetProperties);
    admin.create();
    DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
    DatasetSpecification spec = DatasetSpecification.builder(tableName, HBaseTable.class.getName()).properties(datasetProperties.getProperties()).build();
    try {
        final HBaseTable table = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(), cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil);
        Transaction tx = txSystemClient.startShort();
        table.startTx(tx);
        table.put(b("row1"), b("col1"), b("val1"));
        table.put(b("inc1"), b("col1"), Bytes.toBytes(10L));
        table.commitTx();
        table.postTxCommit();
        table.close();
        CConfiguration testCConf = CConfiguration.copy(cConf);
        // No mutations on tables using testCConf will succeed.
        testCConf.setInt(TxConstants.Manager.CFG_TX_MAX_LIFETIME, 0);
        try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(), testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
            // A put should fail
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.put(b("row2"), b("col1"), b("val1"));
                }
            });
            // A delete should also fail
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.delete(b("row1"));
                }
            });
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.delete(b("row1"), b("col1"));
                }
            });
            // So should an increment
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.increment(b("inc1"), b("col1"), 10);
                }
            });
            // incrementAndGet gets converted to a put internally
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.incrementAndGet(b("inc1"), b("col1"), 10);
                }
            });
        }
        // Even safe increments should fail (this happens when readless increment is done from a mapreduce job)
        try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, ImmutableMap.of(HBaseTable.SAFE_INCREMENTS, "true"), testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
            // So should an increment
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.increment(b("inc1"), b("col1"), 10);
                }
            });
            // incrementAndGet gets converted to a put internally
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.incrementAndGet(b("inc1"), b("col1"), 10);
                }
            });
        }
    } finally {
        admin.drop();
        admin.close();
    }
}
Also used : Transaction(org.apache.tephra.Transaction) DatasetProperties(co.cask.cdap.api.dataset.DatasetProperties) DatasetSpecification(co.cask.cdap.api.dataset.DatasetSpecification) DatasetAdmin(co.cask.cdap.api.dataset.DatasetAdmin) DetachedTxSystemClient(org.apache.tephra.inmemory.DetachedTxSystemClient) CConfiguration(co.cask.cdap.common.conf.CConfiguration) BufferingTableTest(co.cask.cdap.data2.dataset2.lib.table.BufferingTableTest) Test(org.junit.Test)
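
The assertTxFail helper is defined elsewhere in HBaseTableTest. A plausible reconstruction, assuming it starts a fresh transaction, runs the mutation, and expects the commit to be rejected because CFG_TX_MAX_LIFETIME is 0 (the body and exception handling here are hypothetical):

import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionSystemClient;
import org.junit.Assert;

// Hypothetical reconstruction of assertTxFail; the real helper may differ.
private void assertTxFail(TransactionSystemClient txClient, HBaseTable table, Runnable op)
        throws Exception {
    Transaction tx = txClient.startShort();
    table.startTx(tx);
    op.run();
    try {
        table.commitTx();
        Assert.fail("Expected the mutation to fail the tx max-lifetime check");
    } catch (Exception e) {
        // expected: the server rejects writes whose tx exceeds the max lifetime
    } finally {
        table.rollbackTx();
    }
}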

Example 4 with HBaseTable

Use of co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable in project cdap by caskdata.

From the class HBaseTableTest, method testTTL:

@Test
public void testTTL() throws Exception {
    // for the purposes of this test it is fine not to configure TTL when creating the table:
    // we want to see whether it applies on reading
    int ttl = 1;
    String ttlTable = "ttl";
    String noTtlTable = "nottl";
    DatasetProperties props = TableProperties.builder().setTTL(ttl).build();
    getTableAdmin(CONTEXT1, ttlTable, props).create();
    DatasetSpecification ttlTableSpec = DatasetSpecification.builder(ttlTable, HBaseTable.class.getName()).properties(props.getProperties()).build();
    HBaseTable table = new HBaseTable(CONTEXT1, ttlTableSpec, Collections.<String, String>emptyMap(), cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil);
    DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
    Transaction tx = txSystemClient.startShort();
    table.startTx(tx);
    table.put(b("row1"), b("col1"), b("val1"));
    table.commitTx();
    TimeUnit.MILLISECONDS.sleep(1010);
    tx = txSystemClient.startShort();
    table.startTx(tx);
    table.put(b("row2"), b("col2"), b("val2"));
    table.commitTx();
    // now, we should not see first as it should have expired, but see the last one
    tx = txSystemClient.startShort();
    table.startTx(tx);
    byte[] val = table.get(b("row1"), b("col1"));
    if (val != null) {
        LOG.info("Unexpected value " + Bytes.toStringBinary(val));
    }
    Assert.assertNull(val);
    Assert.assertArrayEquals(b("val2"), table.get(b("row2"), b("col2")));
    // test a table with no TTL
    DatasetProperties props2 = TableProperties.builder().setTTL(Tables.NO_TTL).build();
    getTableAdmin(CONTEXT1, noTtlTable, props2).create();
    DatasetSpecification noTtlTableSpec = DatasetSpecification.builder(noTtlTable, HBaseTable.class.getName()).properties(props2.getProperties()).build();
    HBaseTable table2 = new HBaseTable(CONTEXT1, noTtlTableSpec, Collections.<String, String>emptyMap(), cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil);
    tx = txSystemClient.startShort();
    table2.startTx(tx);
    table2.put(b("row1"), b("col1"), b("val1"));
    table2.commitTx();
    TimeUnit.SECONDS.sleep(2);
    tx = txSystemClient.startShort();
    table2.startTx(tx);
    table2.put(b("row2"), b("col2"), b("val2"));
    table2.commitTx();
    // if ttl is -1 (unlimited), it should see both
    tx = txSystemClient.startShort();
    table2.startTx(tx);
    Assert.assertArrayEquals(b("val1"), table2.get(b("row1"), b("col1")));
    Assert.assertArrayEquals(b("val2"), table2.get(b("row2"), b("col2")));
}
Also used : Transaction(org.apache.tephra.Transaction) DatasetProperties(co.cask.cdap.api.dataset.DatasetProperties) DatasetSpecification(co.cask.cdap.api.dataset.DatasetSpecification) DetachedTxSystemClient(org.apache.tephra.inmemory.DetachedTxSystemClient) BufferingTableTest(co.cask.cdap.data2.dataset2.lib.table.BufferingTableTest) Test(org.junit.Test)
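
Per the test's opening comment, TTL is expected to take effect on the read path: row1, written more than one second before the final read, must no longer be visible. The visibility rule being exercised is roughly the following predicate (an illustrative sketch, not CDAP's internal code):

// Illustrative read-time TTL check: a cell is visible while its age is
// under the ttl; a non-positive ttl (e.g. Tables.NO_TTL) means keep forever.
static boolean visibleUnderTtl(long cellTimestamp, long now, long ttlMillis) {
    return ttlMillis <= 0 || now - cellTimestamp < ttlMillis;
}

With ttl = 1 second, the 1010 ms sleep pushes row1 past its lifetime, so the later read must return null, while the no-TTL table keeps both rows.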

Example 5 with HBaseTable

Use of io.cdap.cdap.data2.dataset2.lib.table.hbase.HBaseTable in project cdap by caskdata.

From the class HBaseTableTest, method testEnforceTxLifetime (identical to Example 3; only the imports differ, io.cdap instead of co.cask):

@Test
public void testEnforceTxLifetime() throws Exception {
    String tableName = "enforce-tx-lifetime";
    DatasetProperties datasetProperties = TableProperties.builder().setReadlessIncrementSupport(true).setConflictDetection(ConflictDetection.COLUMN).build();
    DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, datasetProperties);
    admin.create();
    DetachedTxSystemClient txSystemClient = new DetachedTxSystemClient();
    DatasetSpecification spec = DatasetSpecification.builder(tableName, HBaseTable.class.getName()).properties(datasetProperties.getProperties()).build();
    try {
        final HBaseTable table = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(), cConf, TEST_HBASE.getConfiguration(), hBaseTableUtil);
        Transaction tx = txSystemClient.startShort();
        table.startTx(tx);
        table.put(b("row1"), b("col1"), b("val1"));
        table.put(b("inc1"), b("col1"), Bytes.toBytes(10L));
        table.commitTx();
        table.postTxCommit();
        table.close();
        CConfiguration testCConf = CConfiguration.copy(cConf);
        // No mutations on tables using testCConf will succeed.
        testCConf.setInt(TxConstants.Manager.CFG_TX_MAX_LIFETIME, 0);
        try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, Collections.<String, String>emptyMap(), testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
            // A put should fail
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.put(b("row2"), b("col1"), b("val1"));
                }
            });
            // A delete should also fail
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.delete(b("row1"));
                }
            });
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.delete(b("row1"), b("col1"));
                }
            });
            // So should an increment
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.increment(b("inc1"), b("col1"), 10);
                }
            });
            // incrementAndGet gets converted to a put internally
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.incrementAndGet(b("inc1"), b("col1"), 10);
                }
            });
        }
        // Even safe increments should fail (this happens when readless increment is done from a mapreduce job)
        try (final HBaseTable failTable = new HBaseTable(CONTEXT1, spec, ImmutableMap.of(HBaseTable.SAFE_INCREMENTS, "true"), testCConf, TEST_HBASE.getConfiguration(), hBaseTableUtil)) {
            // So should an increment
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.increment(b("inc1"), b("col1"), 10);
                }
            });
            // incrementAndGet gets converted to a put internally
            assertTxFail(txSystemClient, failTable, new Runnable() {

                @Override
                public void run() {
                    failTable.incrementAndGet(b("inc1"), b("col1"), 10);
                }
            });
        }
    } finally {
        admin.drop();
        admin.close();
    }
}
Also used : Transaction(org.apache.tephra.Transaction) DatasetProperties(io.cdap.cdap.api.dataset.DatasetProperties) DatasetSpecification(io.cdap.cdap.api.dataset.DatasetSpecification) DatasetAdmin(io.cdap.cdap.api.dataset.DatasetAdmin) DetachedTxSystemClient(org.apache.tephra.inmemory.DetachedTxSystemClient) CConfiguration(io.cdap.cdap.common.conf.CConfiguration) BufferingTableTest(io.cdap.cdap.data2.dataset2.lib.table.BufferingTableTest) Test(org.junit.Test)
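
As a usage note, each anonymous Runnable in the two lifetime tests collapses to a one-liner with Java 8 lambdas; the behavior is identical, since failTable is declared final by the try-with-resources statement:

// Lambda form of the same assertions (equivalent to the anonymous classes):
assertTxFail(txSystemClient, failTable, () -> failTable.put(b("row2"), b("col1"), b("val1")));
assertTxFail(txSystemClient, failTable, () -> failTable.delete(b("row1")));
assertTxFail(txSystemClient, failTable, () -> failTable.increment(b("inc1"), b("col1"), 10));
assertTxFail(txSystemClient, failTable, () -> failTable.incrementAndGet(b("inc1"), b("col1"), 10));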

Aggregations

Transaction (org.apache.tephra.Transaction): 6
DetachedTxSystemClient (org.apache.tephra.inmemory.DetachedTxSystemClient): 6
Test (org.junit.Test): 6
DatasetProperties (co.cask.cdap.api.dataset.DatasetProperties): 3
DatasetSpecification (co.cask.cdap.api.dataset.DatasetSpecification): 3
BufferingTableTest (co.cask.cdap.data2.dataset2.lib.table.BufferingTableTest): 3
DatasetProperties (io.cdap.cdap.api.dataset.DatasetProperties): 3
DatasetSpecification (io.cdap.cdap.api.dataset.DatasetSpecification): 3
BufferingTableTest (io.cdap.cdap.data2.dataset2.lib.table.BufferingTableTest): 3
CConfiguration (co.cask.cdap.common.conf.CConfiguration): 2
ImmutableList (com.google.common.collect.ImmutableList): 2
CConfiguration (io.cdap.cdap.common.conf.CConfiguration): 2
List (java.util.List): 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
AtomicReference (java.util.concurrent.atomic.AtomicReference): 2
Configuration (org.apache.hadoop.conf.Configuration): 2
Result (org.apache.hadoop.hbase.client.Result): 2
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 2
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 2
TransactionCodec (org.apache.tephra.TransactionCodec): 2