
Example 41 with TransactionContext

Use of org.apache.tephra.TransactionContext in project cdap by caskdata.

From class NotificationTest, method useTransactionTest:

@Test
public void useTransactionTest() throws Exception {
    // Performing admin operations to create dataset instance
    // keyValueTable is a system dataset module
    namespaceAdmin.create(new NamespaceMeta.Builder().setName(namespace).build());
    DatasetId myTableInstance = namespace.dataset("myTable");
    dsFramework.addInstance("keyValueTable", myTableInstance, DatasetProperties.EMPTY);
    final CountDownLatch receivedLatch = new CountDownLatch(1);
    Assert.assertTrue(feedManager.createFeed(FEED1_INFO));
    try {
        Cancellable cancellable = notificationService.subscribe(FEED1, new NotificationHandler<String>() {

            private int received = 0;

            @Override
            public Type getNotificationType() {
                return String.class;
            }

            @Override
            public void received(final String notification, NotificationContext notificationContext) {
                notificationContext.execute(new TxRunnable() {

                    @Override
                    public void run(DatasetContext context) throws Exception {
                        KeyValueTable table = context.getDataset("myTable");
                        table.write("foo", String.format("%s-%d", notification, received++));
                        receivedLatch.countDown();
                    }
                }, TxRetryPolicy.maxRetries(5));
            }
        });
        // Short delay to let the subscriber set up the subscription.
        TimeUnit.MILLISECONDS.sleep(500);
        try {
            notificationService.publish(FEED1, "foobar");
            // Waiting for the subscriber to receive that notification
            Assert.assertTrue(receivedLatch.await(5, TimeUnit.SECONDS));
            // Read the KeyValueTable for the value written by the subscriber.
            // We need to poll a few times because the subscriber's transaction
            // may not have committed yet by the time we try to read here.
            final KeyValueTable table = dsFramework.getDataset(myTableInstance, DatasetDefinition.NO_ARGUMENTS, null);
            Assert.assertNotNull(table);
            final TransactionContext txContext = new TransactionContext(txClient, table);
            Tasks.waitFor(true, new Callable<Boolean>() {

                @Override
                public Boolean call() throws Exception {
                    txContext.start();
                    try {
                        return "foobar-0".equals(Bytes.toString(table.read("foo")));
                    } finally {
                        txContext.finish();
                    }
                }
            }, 5, TimeUnit.SECONDS);
        } finally {
            cancellable.cancel();
        }
    } finally {
        dsFramework.deleteInstance(myTableInstance);
        feedManager.deleteFeed(FEED1);
        namespaceAdmin.delete(namespace);
    }
}
Also used : Cancellable(org.apache.twill.common.Cancellable) CountDownLatch(java.util.concurrent.CountDownLatch) NotificationFeedNotFoundException(co.cask.cdap.notifications.feeds.NotificationFeedNotFoundException) DatasetId(co.cask.cdap.proto.id.DatasetId) NotificationContext(co.cask.cdap.notifications.service.NotificationContext) Type(java.lang.reflect.Type) NamespaceMeta(co.cask.cdap.proto.NamespaceMeta) TxRunnable(co.cask.cdap.api.TxRunnable) KeyValueTable(co.cask.cdap.api.dataset.lib.KeyValueTable) TransactionContext(org.apache.tephra.TransactionContext) DatasetContext(co.cask.cdap.api.data.DatasetContext) Test(org.junit.Test)
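
The polling above uses Tephra's short-transaction lifecycle directly. As a minimal sketch (assuming the same txClient and TransactionAware KeyValueTable as in the test), the canonical pattern is start, do the work, finish to commit, and abort on failure; the helper method below is illustrative only, not part of the test:

void verifyRow(TransactionSystemClient txClient, KeyValueTable table) throws TransactionFailureException {
    // Illustrative helper; the table and key are assumptions taken from the test above.
    TransactionContext txContext = new TransactionContext(txClient, table);
    txContext.start();
    try {
        // reads see only data committed before start()
        Assert.assertEquals("foobar-0", Bytes.toString(table.read("foo")));
        txContext.finish();   // commit; throws TransactionFailureException on conflict
    } catch (Exception e) {
        // roll back all participating TransactionAwares, then rethrow as a transaction failure
        txContext.abort(new TransactionFailureException("Transaction failed", e));
    }
}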

Example 42 with TransactionContext

Use of org.apache.tephra.TransactionContext in project cdap by caskdata.

From class UnitTestManager, method getDataset:

@Override
public <T> DataSetManager<T> getDataset(DatasetId datasetInstanceId) throws Exception {
    @SuppressWarnings("unchecked") final T dataSet = (T) datasetFramework.getDataset(datasetInstanceId, new HashMap<String, String>(), null);
    try {
        final TransactionContext txContext;
        // Not every dataset is TransactionAware. FileSets, for example, are not transactional.
        if (dataSet instanceof TransactionAware) {
            TransactionAware txAwareDataset = (TransactionAware) dataSet;
            txContext = new TransactionContext(txSystemClient, Lists.newArrayList(txAwareDataset));
            txContext.start();
        } else {
            txContext = null;
        }
        return new UnitTestDatasetManager<>(dataSet, txContext);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
Also used : TransactionContext(org.apache.tephra.TransactionContext) TransactionAware(org.apache.tephra.TransactionAware) TransactionFailureException(org.apache.tephra.TransactionFailureException) IOException(java.io.IOException)
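
For context, callers of getDataset interact with the returned manager rather than with the TransactionContext directly. A minimal usage sketch, where myTableId is an assumed DatasetId for a KeyValueTable instance:

// Hypothetical caller; myTableId is an assumed DatasetId for a KeyValueTable instance.
DataSetManager<KeyValueTable> manager = getDataset(myTableId);
KeyValueTable table = manager.get();   // the dataset enlisted in the transaction started above
table.write("key", "value");
manager.flush();                       // make the write visible to subsequent readers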

Example 43 with TransactionContext

Use of org.apache.tephra.TransactionContext in project cdap by caskdata.

From class PartitionedFileSetTest, method testRollbackOnTransactionAbort:

@Test
public void testRollbackOnTransactionAbort() throws Exception {
    PartitionedFileSet pfs = dsFrameworkUtil.getInstance(pfsInstance);
    TransactionContext txContext = new TransactionContext(txClient, (TransactionAware) pfs);
    txContext.start();
    PartitionOutput output = pfs.getPartitionOutput(PARTITION_KEY);
    Location outputLocation = output.getLocation().append("file");
    Assert.assertFalse(outputLocation.exists());
    // this will create the file
    outputLocation.getOutputStream().close();
    Assert.assertTrue(outputLocation.exists());
    output.addPartition();
    Assert.assertNotNull(pfs.getPartition(PARTITION_KEY));
    Assert.assertTrue(pfs.getPartition(PARTITION_KEY).getLocation().exists());
    txContext.abort();
    // Because the previous transaction was aborted, neither the partition nor the file exists
    txContext.start();
    Assert.assertNull(pfs.getPartition(PARTITION_KEY));
    Assert.assertFalse(outputLocation.exists());
    txContext.finish();
}
Also used : PartitionOutput(co.cask.cdap.api.dataset.lib.PartitionOutput) TransactionContext(org.apache.tephra.TransactionContext) PartitionedFileSet(co.cask.cdap.api.dataset.lib.PartitionedFileSet) Location(org.apache.twill.filesystem.Location) Test(org.junit.Test)
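
For contrast, a sketch under the same fixture assumptions as the test above: if the transaction is committed with finish() rather than aborted, the partition and its file survive.

// Contrast sketch, reusing pfs, txContext and PARTITION_KEY from the test above.
txContext.start();
PartitionOutput output = pfs.getPartitionOutput(PARTITION_KEY);
Location file = output.getLocation().append("file");
file.getOutputStream().close();   // create the file
output.addPartition();
txContext.finish();   // commit: the partition and its file are now persisted

txContext.start();
Assert.assertNotNull(pfs.getPartition(PARTITION_KEY));
Assert.assertTrue(file.exists());
txContext.finish();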

Example 44 with TransactionContext

Use of org.apache.tephra.TransactionContext in project cdap by caskdata.

From class PartitionedFileSetTest, method testRollbackOnJobFailure:

@Test
public void testRollbackOnJobFailure() throws Exception {
    // Tests the logic of the onFailure() method
    Map<String, String> args = new HashMap<>();
    FileSetArguments.setOutputPath(args, "custom/output/path");
    PartitionedFileSetArguments.setOutputPartitionKey(args, PARTITION_KEY);
    PartitionedFileSet pfs = dsFrameworkUtil.getInstance(pfsInstance, args);
    TransactionContext txContext = new TransactionContext(txClient, (TransactionAware) pfs);
    txContext.start();
    Location outputLocation = pfs.getEmbeddedFileSet().getOutputLocation();
    Assert.assertFalse(outputLocation.exists());
    outputLocation.mkdirs();
    Assert.assertTrue(outputLocation.exists());
    ((PartitionedFileSetDataset) pfs).onFailure();
    txContext.abort();
    // Because the previous transaction was aborted, neither the partition nor its directory exists
    txContext.start();
    Assert.assertNull(pfs.getPartition(PARTITION_KEY));
    Assert.assertFalse(outputLocation.exists());
    txContext.finish();
}
Also used : HashMap(java.util.HashMap) TransactionContext(org.apache.tephra.TransactionContext) PartitionedFileSet(co.cask.cdap.api.dataset.lib.PartitionedFileSet) Location(org.apache.twill.filesystem.Location) Test(org.junit.Test)

Example 45 with TransactionContext

Use of org.apache.tephra.TransactionContext in project cdap by caskdata.

From class PartitionConsumerTest, method testPartitionConsumer:

@Test
public void testPartitionConsumer() throws Exception {
    // Exercises an edge case of partition consumption: partitions are consumed while another
    // in-progress transaction has added a partition but not yet committed it, so that
    // partition is not yet visible to the consumer.
    PartitionedFileSet dataset1 = dsFrameworkUtil.getInstance(pfsInstance);
    PartitionedFileSet dataset2 = dsFrameworkUtil.getInstance(pfsInstance);
    TransactionManager txManager = dsFrameworkUtil.getTxManager();
    InMemoryTxSystemClient txClient = new InMemoryTxSystemClient(txManager);
    // producer simply adds initial partition
    TransactionContext txContext1 = new TransactionContext(txClient, (TransactionAware) dataset1);
    txContext1.start();
    PartitionKey partitionKey1 = generateUniqueKey();
    dataset1.getPartitionOutput(partitionKey1).addPartition();
    txContext1.finish();
    // consumer simply consumes initial partition
    TransactionContext txContext2 = new TransactionContext(txClient, (TransactionAware) dataset2);
    txContext2.start();
    PartitionConsumer partitionConsumer = new ConcurrentPartitionConsumer(dataset2, new InMemoryStatePersistor());
    List<? extends PartitionDetail> partitionIterator = partitionConsumer.consumePartitions().getPartitions();
    Assert.assertEquals(1, partitionIterator.size());
    Assert.assertEquals(partitionKey1, partitionIterator.get(0).getPartitionKey());
    txContext2.finish();
    // producer adds a second partition, but does not yet commit the transaction
    txContext1.start();
    PartitionKey partitionKey2 = generateUniqueKey();
    dataset1.getPartitionOutput(partitionKey2).addPartition();
    // The consumer attempts to consume after the partition was added but before that transaction
    // committed. Because of this, the partition is not visible and will not be consumed.
    txContext2.start();
    Assert.assertTrue(partitionConsumer.consumePartitions().getPartitions().isEmpty());
    txContext2.finish();
    // producer commits the transaction in which the second partition was added
    txContext1.finish();
    // the next time the consumer runs, it processes the second partition
    txContext2.start();
    partitionIterator = partitionConsumer.consumePartitions().getPartitions();
    Assert.assertEquals(1, partitionIterator.size());
    Assert.assertEquals(partitionKey2, partitionIterator.get(0).getPartitionKey());
    txContext2.finish();
}
Also used : ConcurrentPartitionConsumer(co.cask.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer) TransactionManager(org.apache.tephra.TransactionManager) TransactionContext(org.apache.tephra.TransactionContext) PartitionKey(co.cask.cdap.api.dataset.lib.PartitionKey) PartitionedFileSet(co.cask.cdap.api.dataset.lib.PartitionedFileSet) PartitionConsumer(co.cask.cdap.api.dataset.lib.partitioned.PartitionConsumer) InMemoryTxSystemClient(org.apache.tephra.inmemory.InMemoryTxSystemClient) Test(org.junit.Test)
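
In a real worker, each consumption round would run in its own short transaction, roughly as in the sketch below (dataset2, txClient, and partitionConsumer are assumed to be set up as in the test above; the per-partition processing is a placeholder):

// Hypothetical consumption round; dataset2, txClient and partitionConsumer are assumed from the test above.
TransactionContext txContext = new TransactionContext(txClient, (TransactionAware) dataset2);
txContext.start();
for (PartitionDetail partition : partitionConsumer.consumePartitions().getPartitions()) {
    // process partition.getLocation() here (placeholder)
}
txContext.finish();   // commit the consumption round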

Aggregations

TransactionContext (org.apache.tephra.TransactionContext): 47
Test (org.junit.Test): 34
TransactionFailureException (org.apache.tephra.TransactionFailureException): 18
ConsumerConfig (co.cask.cdap.data2.queue.ConsumerConfig): 14
QueueConsumer (co.cask.cdap.data2.queue.QueueConsumer): 9
ConsumerGroupConfig (co.cask.cdap.data2.queue.ConsumerGroupConfig): 8
PartitionedFileSet (co.cask.cdap.api.dataset.lib.PartitionedFileSet): 7
QueueName (co.cask.cdap.common.queue.QueueName): 7
QueueEntry (co.cask.cdap.data2.queue.QueueEntry): 7
QueueProducer (co.cask.cdap.data2.queue.QueueProducer): 7
StreamEvent (co.cask.cdap.api.flow.flowlet.StreamEvent): 6
StreamId (co.cask.cdap.proto.id.StreamId): 5
Location (org.apache.twill.filesystem.Location): 4
PartitionKey (co.cask.cdap.api.dataset.lib.PartitionKey): 3
PartitionOutput (co.cask.cdap.api.dataset.lib.PartitionOutput): 3
Properties (java.util.Properties): 3
Stopwatch (com.google.common.base.Stopwatch): 2
FileOutputStream (java.io.FileOutputStream): 2
OutputStream (java.io.OutputStream): 2
CountDownLatch (java.util.concurrent.CountDownLatch): 2