Search in sources :

Example 86 with Transaction

Usage of org.apache.tephra.Transaction in project cdap by caskdata.

From class BufferingTableTest, method testTxChangePrefix.

// This test lives in BufferingTableTest because it exercises BufferingTable's
// transaction-change-prefix behavior: two tables with the SAME name but in
// DIFFERENT namespaces must produce distinct change-set keys, so that writes to
// one cannot cause spurious write conflicts with the other.
@Test
public void testTxChangePrefix() throws Exception {
    String tableName = "same";
    DatasetAdmin admin1 = getTableAdmin(CONTEXT1, tableName);
    DatasetAdmin admin2 = getTableAdmin(CONTEXT2, tableName);
    admin1.create();
    admin2.create();
    try {
        BufferingTable table1 = getTable(CONTEXT1, tableName);
        BufferingTable table2 = getTable(CONTEXT2, tableName);
        // write some values in table1 within its own transaction
        Transaction tx1 = txClient.startShort();
        table1.startTx(tx1);
        table1.put(R1, a(C1), a(V1));
        Collection<byte[]> tx1Changes = table1.getTxChanges();
        txClient.canCommitOrThrow(tx1, tx1Changes);
        Assert.assertTrue(table1.commitTx());
        txClient.commitOrThrow(tx1);
        table1.postTxCommit();
        // write some values in table2 within a second transaction
        Transaction tx2 = txClient.startShort();
        table2.startTx(tx2);
        table2.put(R1, a(C1), a(V1));
        Collection<byte[]> tx2Changes = table2.getTxChanges();
        txClient.canCommitOrThrow(tx2, tx2Changes);
        Assert.assertTrue(table2.commitTx());
        txClient.commitOrThrow(tx2);
        // BUGFIX: was table1.postTxCommit() — tx2 was committed on table2, so
        // table2's post-commit hook must run here, not table1's.
        table2.postTxCommit();
        String tx1ChangePrefix = new String(table1.getNameAsTxChangePrefix());
        String tx2ChangePrefix = new String(table2.getNameAsTxChangePrefix());
        String tx1Change = new String(((ArrayList<byte[]>) tx1Changes).get(0));
        String tx2Change = new String(((ArrayList<byte[]>) tx2Changes).get(0));
        // same table name, different namespaces => prefixes must differ
        Assert.assertNotEquals(tx1ChangePrefix, tx2ChangePrefix);
        // each prefix embeds its namespace name, and each change key starts
        // with its table's prefix
        Assert.assertTrue(tx1ChangePrefix.contains(NAMESPACE1.getEntityName()));
        Assert.assertTrue(tx2ChangePrefix.contains(NAMESPACE2.getEntityName()));
        Assert.assertTrue(tx1Change.startsWith(tx1ChangePrefix));
        Assert.assertTrue(tx2Change.startsWith(tx2ChangePrefix));
    } finally {
        admin1.drop();
        admin2.drop();
    }
}
Also used : Transaction(org.apache.tephra.Transaction) DatasetAdmin(co.cask.cdap.api.dataset.DatasetAdmin) Test(org.junit.Test)

Example 87 with Transaction

Usage of org.apache.tephra.Transaction in project cdap by caskdata.

From class BufferingTableTest, method testMultiGetIncludesBuffer.

// Verifies that multi-get (get with a list of Gets) observes:
//  1. un-persisted writes sitting in the in-memory buffer of the current tx,
//  2. persisted data with later buffered puts/deletes applied on top, and
//  3. none of the rolled-back changes in a subsequent transaction.
@Test
public void testMultiGetIncludesBuffer() throws Exception {
    DatasetAdmin admin = getTableAdmin(CONTEXT1, MY_TABLE);
    admin.create();
    try {
        // persist some data
        BufferingTable table = getTable(CONTEXT1, MY_TABLE);
        Transaction tx1 = txClient.startShort();
        table.startTx(tx1);
        // writing a couple rows
        // table should look like the following, with everything in the buffer
        //          c1    c2    c3    c4
        // r1       1     2     3     -
        // r2       -     3     2     1
        table.put(R1, a(C1, C2, C3), lb(1, 2, 3));
        table.put(R2, a(C2, C3, C4), lb(3, 2, 1));
        // check that multi-get can see buffered writes (nothing persisted yet)
        List<Row> rows = table.get(Lists.newArrayList(new Get(R1), new Get(R2)));
        Assert.assertEquals(2, rows.size());
        TableAssert.assertRow(rows.get(0), R1, a(C1, C2, C3), lb(1, 2, 3));
        TableAssert.assertRow(rows.get(1), R2, a(C2, C3, C4), lb(3, 2, 1));
        // check multi-get with gets that specify columns, and one get that should return an empty row
        rows = table.get(Lists.newArrayList(new Get(R1, C2, C3), new Get(R2, C2, C3), new Get(R3)));
        Assert.assertEquals(3, rows.size());
        TableAssert.assertRow(rows.get(0), R1, a(C2, C3), lb(2, 3));
        TableAssert.assertRow(rows.get(1), R2, a(C2, C3), lb(3, 2));
        Assert.assertTrue(rows.get(2).isEmpty());
        // persist changes: full commit protocol (canCommit -> commitTx -> commit -> postTxCommit)
        Collection<byte[]> txChanges = table.getTxChanges();
        txClient.canCommitOrThrow(tx1, txChanges);
        Assert.assertTrue(table.commitTx());
        txClient.commitOrThrow(tx1);
        table.postTxCommit();
        // start another transaction
        Transaction tx2 = txClient.startShort();
        table.startTx(tx2);
        // now add another row, delete a row, and change some column values
        // table should look like the following
        //          c1    c2    c3    c4    c5
        // r1       -     -     3     2     -
        // r3       -     -     -     -     1
        table.put(R1, a(C2, C3, C4), lb(4, 3, 2));
        table.delete(R1, a(C1, C2));
        table.delete(R2);
        table.put(R3, C5, L1);
        // verify multi-get sees persisted data with buffer applied on top
        rows = table.get(Lists.newArrayList(new Get(R1), new Get(R2), new Get(R3)));
        Assert.assertEquals(3, rows.size());
        TableAssert.assertRow(rows.get(0), R1, a(C3, C4), lb(3, 2));
        Assert.assertTrue(rows.get(1).isEmpty());
        TableAssert.assertRow(rows.get(2), R3, a(C5), lb(1));
        // pretend there was a write conflict and rollback changes
        Assert.assertTrue(table.rollbackTx());
        txClient.abort(tx2);
        // start another transaction and make sure it can't see what was done before:
        // only the data persisted by tx1 should be visible
        Transaction tx3 = txClient.startShort();
        table.startTx(tx3);
        rows = table.get(Lists.newArrayList(new Get(R1), new Get(R2)));
        Assert.assertEquals(2, rows.size());
        TableAssert.assertRow(rows.get(0), R1, a(C1, C2, C3), lb(1, 2, 3));
        TableAssert.assertRow(rows.get(1), R2, a(C2, C3, C4), lb(3, 2, 1));
    } finally {
        admin.drop();
    }
}
Also used : Transaction(org.apache.tephra.Transaction) Get(co.cask.cdap.api.dataset.table.Get) DatasetAdmin(co.cask.cdap.api.dataset.DatasetAdmin) Row(co.cask.cdap.api.dataset.table.Row) Test(org.junit.Test)

Example 88 with Transaction

Usage of org.apache.tephra.Transaction in project cdap by caskdata.

From class MessageTableTest, method testNonTxAndTxConsumption.

// Exercises MessageTable.fetch in both non-transactional (tx == null) and
// transactional modes, verifying visibility rules against hand-built
// Transaction snapshots (invalid and in-progress write pointers), fetch
// limits, and the effect of rolling back a publisher's transaction.
@Test
public void testNonTxAndTxConsumption() throws Exception {
    try (MessageTable table = getMessageTable();
        MetadataTable metadataTable = getMetadataTable()) {
        metadataTable.createTopic(M1);
        metadataTable.createTopic(M2);
        List<MessageTable.Entry> entryList = new ArrayList<>();
        // maps tx write pointer -> first/last sequence id used for that tx
        Map<Long, Short> startSequenceIds = new HashMap<>();
        Map<Long, Short> endSequenceIds = new HashMap<>();
        long publishTimestamp = populateList(entryList, Arrays.asList(100L, 101L, 102L), startSequenceIds, endSequenceIds);
        table.store(entryList.iterator());
        // non-transactional read (tx == null) sees everything
        try (CloseableIterator<MessageTable.Entry> iterator = table.fetch(M1, 0, Integer.MAX_VALUE, null)) {
            checkPointerCount(iterator, 123, ImmutableSet.of(100L, 101L, 102L), 150);
        }
        // Read with 85 items limit
        try (CloseableIterator<MessageTable.Entry> iterator = table.fetch(M1, 0, 85, null)) {
            checkPointerCount(iterator, 123, ImmutableSet.of(100L, 101L, 102L), 85);
        }
        // Read with all messages visible (read pointer 200, no invalid/in-progress txs)
        Transaction tx = new Transaction(200, 200, new long[0], new long[0], -1);
        try (CloseableIterator<MessageTable.Entry> iterator = table.fetch(M1, 0, Integer.MAX_VALUE, tx)) {
            checkPointerCount(iterator, 123, ImmutableSet.of(100L, 101L, 102L), 150);
        }
        // Read with 101 as invalid transaction: its entries are skipped
        tx = new Transaction(200, 200, new long[] { 101 }, new long[0], -1);
        try (CloseableIterator<MessageTable.Entry> iterator = table.fetch(M1, 0, Integer.MAX_VALUE, tx)) {
            checkPointerCount(iterator, 123, ImmutableSet.of(100L, 102L), 100);
        }
        // Mark 101 as in progress transaction, then we shouldn't read past committed transaction which is 100.
        tx = new Transaction(100, 100, new long[] {}, new long[] { 101 }, -1);
        try (CloseableIterator<MessageTable.Entry> iterator = table.fetch(M1, 0, Integer.MAX_VALUE, tx)) {
            checkPointerCount(iterator, 123, ImmutableSet.of(100L), 50);
        }
        // Same read as above but with limit of 10 elements
        try (CloseableIterator<MessageTable.Entry> iterator = table.fetch(M1, 0, 10, tx)) {
            checkPointerCount(iterator, 123, ImmutableSet.of(100L), 10);
        }
        // Reading non-tx from t2 should provide 150 items
        try (CloseableIterator<MessageTable.Entry> iterator = table.fetch(M2, 0, Integer.MAX_VALUE, null)) {
            checkPointerCount(iterator, 321, ImmutableSet.of(100L, 101L, 102L), 150);
        }
        // Delete txPtr entries for 101, and then try fetching again for that;
        // non-tx reads are unaffected by the rollback
        RollbackDetail rollbackDetail = new TestRollbackDetail(101L, publishTimestamp, startSequenceIds.get(101L), publishTimestamp, endSequenceIds.get(101L));
        table.rollback(M1, rollbackDetail);
        try (CloseableIterator<MessageTable.Entry> iterator = table.fetch(M1, 0, Integer.MAX_VALUE, null)) {
            checkPointerCount(iterator, 123, ImmutableSet.of(100L, 101L, 102L), 150);
        }
        // Delete txPtr entries for 100, and then try fetching transactionally all data;
        // only entries from tx 102 should remain visible
        rollbackDetail = new TestRollbackDetail(100L, publishTimestamp, startSequenceIds.get(100L), publishTimestamp, endSequenceIds.get(100L));
        table.rollback(M1, rollbackDetail);
        tx = new Transaction(200, 200, new long[0], new long[0], -1);
        try (CloseableIterator<MessageTable.Entry> iterator = table.fetch(M1, 0, Integer.MAX_VALUE, tx)) {
            checkPointerCount(iterator, 123, ImmutableSet.of(102L), 50);
        }
        // Use the above tx and read from t2 and it should give all entries
        // (the rollbacks only touched topic M1)
        try (CloseableIterator<MessageTable.Entry> iterator = table.fetch(M2, 0, Integer.MAX_VALUE, tx)) {
            checkPointerCount(iterator, 321, ImmutableSet.of(100L, 101L, 102L), 150);
        }
    }
}
Also used : RollbackDetail(co.cask.cdap.messaging.RollbackDetail) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Transaction(org.apache.tephra.Transaction) Test(org.junit.Test)

Example 89 with Transaction

Usage of org.apache.tephra.Transaction in project cdap by caskdata.

From class DequeueScanObserver, method preScannerOpen.

/**
 * Injects a {@link DequeueFilter} into dequeue scans. When the scan carries
 * both a consumer config and a transaction in its attributes, the dequeue
 * filter is attached (combined with any pre-existing filter via MUST_PASS_ALL);
 * otherwise the scan is passed through untouched.
 */
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan, RegionScanner s) throws IOException {
    ConsumerConfig consumerConfig = DequeueScanAttributes.getConsumerConfig(scan);
    Transaction tx = DequeueScanAttributes.getTx(scan);
    // Only scans tagged with both attributes are dequeue scans.
    if (consumerConfig != null && tx != null) {
        Filter dequeueFilter = new DequeueFilter(consumerConfig, tx);
        Filter existing = scan.getFilter();
        // Preserve any filter the client already set by AND-ing ours onto it.
        scan.setFilter(existing == null
            ? dequeueFilter
            : new FilterList(FilterList.Operator.MUST_PASS_ALL, existing, dequeueFilter));
    }
    return super.preScannerOpen(e, scan, s);
}
Also used : Transaction(org.apache.tephra.Transaction) Filter(org.apache.hadoop.hbase.filter.Filter) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) FilterList(org.apache.hadoop.hbase.filter.FilterList)

Example 90 with Transaction

Usage of org.apache.tephra.Transaction in project cdap by caskdata (second occurrence of this coprocessor, compiled per HBase version).

From class DequeueScanObserver, method preScannerOpen.

/**
 * Attaches a {@link DequeueFilter} to scans that represent queue dequeues.
 * A scan is treated as a dequeue scan only if both the consumer config and
 * the transaction are present in its attributes; otherwise it is delegated
 * unchanged to the superclass.
 */
@Override
public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan, RegionScanner s) throws IOException {
    ConsumerConfig consumerConfig = DequeueScanAttributes.getConsumerConfig(scan);
    Transaction tx = DequeueScanAttributes.getTx(scan);
    if (consumerConfig == null || tx == null) {
        // not a dequeue scan — pass through untouched
        return super.preScannerOpen(e, scan, s);
    }
    Filter dequeueFilter = new DequeueFilter(consumerConfig, tx);
    Filter existing = scan.getFilter();
    if (existing != null) {
        // keep any client-provided filter: both must pass
        Filter combined = new FilterList(FilterList.Operator.MUST_PASS_ALL, existing, dequeueFilter);
        scan.setFilter(combined);
    } else {
        scan.setFilter(dequeueFilter);
    }
    return super.preScannerOpen(e, scan, s);
}
Also used : Transaction(org.apache.tephra.Transaction) Filter(org.apache.hadoop.hbase.filter.Filter) ConsumerConfig(co.cask.cdap.data2.queue.ConsumerConfig) FilterList(org.apache.hadoop.hbase.filter.FilterList)

Aggregations

Transaction (org.apache.tephra.Transaction)99 Test (org.junit.Test)54 TransactionAware (org.apache.tephra.TransactionAware)34 Table (co.cask.cdap.api.dataset.table.Table)29 DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin)27 HBaseTable (co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable)22 Put (co.cask.cdap.api.dataset.table.Put)12 DatasetProperties (co.cask.cdap.api.dataset.DatasetProperties)11 Get (co.cask.cdap.api.dataset.table.Get)10 TransactionSystemClient (org.apache.tephra.TransactionSystemClient)10 Row (co.cask.cdap.api.dataset.table.Row)8 ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig)8 KeyStructValueTableDefinition (co.cask.cdap.explore.service.datasets.KeyStructValueTableDefinition)8 Scan (co.cask.cdap.api.dataset.table.Scan)7 ArrayList (java.util.ArrayList)7 CConfiguration (co.cask.cdap.common.conf.CConfiguration)6 ExploreExecutionResult (co.cask.cdap.explore.client.ExploreExecutionResult)6 DatasetId (co.cask.cdap.proto.id.DatasetId)6 IOException (java.io.IOException)6 BufferingTableTest (co.cask.cdap.data2.dataset2.lib.table.BufferingTableTest)5