Example 61 with TransactionAware

Use of org.apache.tephra.TransactionAware in project cdap by caskdata.

In class PartitionedFileSetTest, the method testSimplePartitionConsuming:

@Test
public void testSimplePartitionConsuming() throws Exception {
    final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
    final TransactionAware txAwareDataset = (TransactionAware) dataset;
    final Set<PartitionKey> partitionKeys1 = Sets.newHashSet();
    for (int i = 0; i < 10; i++) {
        partitionKeys1.add(generateUniqueKey());
    }
    final Set<PartitionKey> partitionKeys2 = Sets.newHashSet();
    for (int i = 0; i < 15; i++) {
        partitionKeys2.add(generateUniqueKey());
    }
    final SimplePartitionConsumer partitionConsumer = new SimplePartitionConsumer(dataset);
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            for (PartitionKey partitionKey : partitionKeys1) {
                dataset.getPartitionOutput(partitionKey).addPartition();
            }
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // The initial consumption returns only the partitions corresponding to partitionKeys1, because only
            // those partitions have been added to the dataset at this point
            List<Partition> consumedPartitions = Lists.newArrayList();
            Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions());
            Set<PartitionKey> retrievedKeys = Sets.newHashSet();
            for (Partition consumedPartition : consumedPartitions) {
                retrievedKeys.add(consumedPartition.getPartitionKey());
            }
            Assert.assertEquals(partitionKeys1, retrievedKeys);
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            for (PartitionKey partitionKey : partitionKeys2) {
                dataset.getPartitionOutput(partitionKey).addPartition();
            }
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // using the same PartitionConsumer (which remembers the PartitionConsumerState) to consume additional
            // partitions results in only the newly added partitions (corresponding to partitionKeys2) being returned
            List<Partition> consumedPartitions = Lists.newArrayList();
            Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions());
            Set<PartitionKey> retrievedKeys = Sets.newHashSet();
            for (Partition consumedPartition : consumedPartitions) {
                retrievedKeys.add(consumedPartition.getPartitionKey());
            }
            Assert.assertEquals(partitionKeys2, retrievedKeys);
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // consuming the partitions again, without adding any new partitions, returns an empty list
            Assert.assertTrue(partitionConsumer.consumePartitions().isEmpty());
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // creating a new PartitionConsumer resets the consumption state. Consuming from it then returns
            // all partitions added so far (corresponding to both partitionKeys1 and partitionKeys2)
            List<Partition> consumedPartitions = Lists.newArrayList();
            Iterables.addAll(consumedPartitions, new SimplePartitionConsumer(dataset).consumePartitions());
            Set<PartitionKey> retrievedKeys = Sets.newHashSet();
            for (Partition consumedPartition : consumedPartitions) {
                retrievedKeys.add(consumedPartition.getPartitionKey());
            }
            Set<PartitionKey> allKeys = Sets.newHashSet();
            allKeys.addAll(partitionKeys1);
            allKeys.addAll(partitionKeys2);
            Assert.assertEquals(allKeys, retrievedKeys);
        }
    });
}
Also used : Partition(co.cask.cdap.api.dataset.lib.Partition) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) HashSet(java.util.HashSet) PartitionedFileSet(co.cask.cdap.api.dataset.lib.PartitionedFileSet) TransactionExecutor(org.apache.tephra.TransactionExecutor) PartitionNotFoundException(co.cask.cdap.api.dataset.PartitionNotFoundException) IOException(java.io.IOException) DataSetException(co.cask.cdap.api.dataset.DataSetException) TransactionAware(org.apache.tephra.TransactionAware) PartitionKey(co.cask.cdap.api.dataset.lib.PartitionKey) List(java.util.List) Test(org.junit.Test)
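
The SimplePartitionConsumer used in this test is a helper from the CDAP test sources and is not shown on this page. Below is a minimal sketch of how such a stateful consumer could be written against the public PartitionedFileSet API; consumePartitions(PartitionConsumerState) and PartitionConsumerState.FROM_BEGINNING are part of the CDAP dataset API, but the helper's actual implementation may differ from this sketch.

public class SimplePartitionConsumer {

    private final PartitionedFileSet dataset;

    // FROM_BEGINNING makes the first call return all partitions that already exist
    private PartitionConsumerState state = PartitionConsumerState.FROM_BEGINNING;

    public SimplePartitionConsumer(PartitionedFileSet dataset) {
        this.dataset = dataset;
    }

    public List<PartitionDetail> consumePartitions() {
        PartitionConsumerResult result = dataset.consumePartitions(state);
        // remember where this call left off, so the next call only sees newer partitions
        state = result.getPartitionConsumerState();
        return result.getPartitions();
    }
}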

Example 62 with TransactionAware

Use of org.apache.tephra.TransactionAware in project cdap by caskdata.

In class PartitionedFileSetTest, the method testAddRemoveGetPartition:

@Test
public void testAddRemoveGetPartition() throws Exception {
    final PartitionedFileSet pfs = dsFrameworkUtil.getInstance(pfsInstance);
    final AtomicReference<Location> outputLocationRef = new AtomicReference<>();
    dsFrameworkUtil.newTransactionExecutor((TransactionAware) pfs).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            PartitionOutput output = pfs.getPartitionOutput(PARTITION_KEY);
            Location outputLocation = output.getLocation().append("file");
            outputLocationRef.set(outputLocation);
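            // create an empty file in the partition's output location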
            OutputStream out = outputLocation.getOutputStream();
            out.close();
            output.addPartition();
            Assert.assertTrue(outputLocation.exists());
            Assert.assertNotNull(pfs.getPartition(PARTITION_KEY));
            Assert.assertTrue(pfs.getPartition(PARTITION_KEY).getLocation().exists());
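            // dropping the partition removes its metadata and deletes its files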
            pfs.dropPartition(PARTITION_KEY);
            Assert.assertFalse(outputLocation.exists());
            Assert.assertNull(pfs.getPartition(PARTITION_KEY));
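            // dropping a partition that no longer exists should be a no-op here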
            pfs.dropPartition(PARTITION_KEY);
        }
    });
    // the files of the partition are dropped upon transaction commit
    Assert.assertFalse(outputLocationRef.get().exists());
}
Also used : PartitionOutput(co.cask.cdap.api.dataset.lib.PartitionOutput) TransactionAware(org.apache.tephra.TransactionAware) OutputStream(java.io.OutputStream) FileOutputStream(java.io.FileOutputStream) PartitionedFileSet(co.cask.cdap.api.dataset.lib.PartitionedFileSet) AtomicReference(java.util.concurrent.atomic.AtomicReference) TransactionExecutor(org.apache.tephra.TransactionExecutor) PartitionNotFoundException(co.cask.cdap.api.dataset.PartitionNotFoundException) IOException(java.io.IOException) DataSetException(co.cask.cdap.api.dataset.DataSetException) Location(org.apache.twill.filesystem.Location) Test(org.junit.Test)
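
All of these tests run their dataset operations through a TransactionExecutor obtained from the dsFrameworkUtil test harness. As a rough sketch, wiring such an executor by hand with Tephra's in-memory transaction manager looks like the following; the harness encapsulates approximately this, though its internals may differ.

// stand-alone Tephra wiring, assuming an in-memory transaction manager
// (Configuration here is org.apache.hadoop.conf.Configuration)
TransactionManager txManager = new TransactionManager(new Configuration());
txManager.startAndWait();
TransactionSystemClient txClient = new InMemoryTxSystemClient(txManager);
// the executor starts a transaction, runs the subroutine, and commits, rolling back on failure
TransactionExecutor executor = new DefaultTransactionExecutor(txClient, (TransactionAware) pfs);
executor.execute(new TransactionExecutor.Subroutine() {

    @Override
    public void apply() throws Exception {
        // dataset operations here execute within a single transaction
    }
});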

Example 63 with TransactionAware

Use of org.apache.tephra.TransactionAware in project cdap by caskdata.

In class PartitionConsumerTest, the method testOnFinishWithInvalidPartition:

@Test
public void testOnFinishWithInvalidPartition() throws Exception {
    // tests:
    //     - attempts to abort a Partition that is not IN_PROGRESS
    //     - attempts to commit a Partition that is already committed
    // as the code below shows, these fail with IllegalStateException and IllegalArgumentException, respectively
    final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
    final TransactionAware txAwareDataset = (TransactionAware) dataset;
    ConsumerConfiguration configuration = ConsumerConfiguration.builder().setMaxRetries(3).build();
    final PartitionConsumer partitionConsumer = new ConcurrentPartitionConsumer(dataset, new InMemoryStatePersistor(), configuration);
    final PartitionKey partitionKey = generateUniqueKey();
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            dataset.getPartitionOutput(partitionKey).addPartition();
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            List<PartitionDetail> partitionDetails = partitionConsumer.consumePartitions(1).getPartitions();
            Assert.assertEquals(1, partitionDetails.size());
            // aborting the processing of the partition
            partitionConsumer.onFinish(partitionDetails, false);
            // aborting it a second time fails, because the partition is no longer in the IN_PROGRESS state
            try {
                partitionConsumer.onFinish(partitionDetails, false);
                Assert.fail("Expected not to be able to abort a partition that is not IN_PROGRESS");
            } catch (IllegalStateException expected) {
            }
            // try to process the partition again, this time marking it as complete (by passing in true)
            partitionDetails = partitionConsumer.consumePartitions(1).getPartitions();
            Assert.assertEquals(1, partitionDetails.size());
            partitionConsumer.onFinish(partitionDetails, true);
            // calling onFinish(true) again fails, because the partition is not found to have an IN_PROGRESS state
            try {
                partitionConsumer.onFinish(partitionDetails, true);
                Assert.fail("Expected not to be able to call onFinish on a partition is not IN_PROGRESS");
            } catch (IllegalArgumentException expected) {
            }
        }
    });
}
Also used : ConcurrentPartitionConsumer(co.cask.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer) PartitionedFileSet(co.cask.cdap.api.dataset.lib.PartitionedFileSet) TransactionExecutor(org.apache.tephra.TransactionExecutor) TransactionAware(org.apache.tephra.TransactionAware) ConsumerConfiguration(co.cask.cdap.api.dataset.lib.partitioned.ConsumerConfiguration) PartitionKey(co.cask.cdap.api.dataset.lib.PartitionKey) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) PartitionConsumer(co.cask.cdap.api.dataset.lib.partitioned.PartitionConsumer) Test(org.junit.Test)
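
The InMemoryStatePersistor passed to the ConcurrentPartitionConsumer is another test helper. Assuming the StatePersistor contract is a readState/persistState pair over a byte array (an assumption worth verifying against co.cask.cdap.api.dataset.lib.partitioned.StatePersistor), a minimal in-memory implementation could look like this:

public class InMemoryStatePersistor implements StatePersistor {

    private byte[] state;

    @Override
    public byte[] readState() {
        // null indicates that no consumer state has been persisted yet
        return state;
    }

    @Override
    public void persistState(byte[] state) {
        this.state = state;
    }
}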

Example 64 with TransactionAware

Use of org.apache.tephra.TransactionAware in project cdap by caskdata.

In class PartitionConsumerTest, the method testSimpleConcurrency:

@Test
public void testSimpleConcurrency() throws Exception {
    final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
    final TransactionAware txAwareDataset = (TransactionAware) dataset;
    final Set<PartitionKey> partitionKeys = new HashSet<>();
    for (int i = 0; i < 10; i++) {
        partitionKeys.add(generateUniqueKey());
    }
    // create three ConcurrentPartitionConsumers that share the same persisted state
    InMemoryStatePersistor persistor = new InMemoryStatePersistor();
    ConsumerConfiguration configuration = ConsumerConfiguration.builder().setMaxRetries(3).build();
    final PartitionConsumer partitionConsumer1 = new ConcurrentPartitionConsumer(dataset, persistor, configuration);
    final PartitionConsumer partitionConsumer2 = new ConcurrentPartitionConsumer(dataset, persistor, configuration);
    final PartitionConsumer partitionConsumer3 = new ConcurrentPartitionConsumer(dataset, persistor, configuration);
    // add all ten keys to the partitioned fileset
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            for (final PartitionKey partitionKey : partitionKeys) {
                dataset.getPartitionOutput(partitionKey).addPartition();
            }
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // with limit = 1, the returned list has only one element, even though there are more unconsumed partitions
            List<PartitionDetail> consumedBy1 = partitionConsumer1.consumePartitions(1).getPartitions();
            Assert.assertEquals(1, consumedBy1.size());
            // partitionConsumer2 asks for 10 partitions, but 1 is currently in progress by partitionConsumer1, so it only
            // gets the remaining 9 partitions
            List<PartitionDetail> consumedBy2 = partitionConsumer2.consumePartitions(10).getPartitions();
            Assert.assertEquals(9, consumedBy2.size());
            // partitionConsumer3 tries to consume partitions, but all are marked in-progress by partitionConsumers 1 and 2
            Assert.assertEquals(0, partitionConsumer3.consumePartitions().getPartitions().size());
            // partitionConsumer1 aborts its partition, so it then becomes available for partitionConsumer3
            partitionConsumer1.onFinish(consumedBy1, false);
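            // forget the aborted partition, so that it is not double-counted below; partitionConsumer3 picks it up instead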
            consumedBy1.clear();
            // partitionConsumer3 asks for up to 2 partitions, but only the one that partitionConsumer1 released is available
            List<PartitionDetail> consumedBy3 = partitionConsumer3.consumePartitions(2).getPartitions();
            Assert.assertEquals(1, consumedBy3.size());
            // partitionConsumer3 marks that it successfully processed its partition
            partitionConsumer3.onFinish(consumedBy3, true);
            // test onFinishWithKeys API
            List<PartitionKey> keysConsumedBy2 = Lists.transform(consumedBy2, new Function<PartitionDetail, PartitionKey>() {

                @Override
                public PartitionKey apply(PartitionDetail input) {
                    return input.getPartitionKey();
                }
            });
            partitionConsumer2.onFinishWithKeys(keysConsumedBy2, true);
            // at this point, all partitions are processed, so no additional partitions are available for consumption
            Assert.assertEquals(0, partitionConsumer3.consumePartitions().getPartitions().size());
            List<PartitionDetail> allProcessedPartitions = new ArrayList<>();
            allProcessedPartitions.addAll(consumedBy1);
            allProcessedPartitions.addAll(consumedBy2);
            allProcessedPartitions.addAll(consumedBy3);
            // ordering may be different, since all the partitions were added in the same transaction
            Assert.assertEquals(partitionKeys, toKeys(allProcessedPartitions));
        }
    });
}
Also used : ConcurrentPartitionConsumer(co.cask.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer) PartitionedFileSet(co.cask.cdap.api.dataset.lib.PartitionedFileSet) TransactionExecutor(org.apache.tephra.TransactionExecutor) PartitionDetail(co.cask.cdap.api.dataset.lib.PartitionDetail) Function(com.google.common.base.Function) TransactionAware(org.apache.tephra.TransactionAware) ConsumerConfiguration(co.cask.cdap.api.dataset.lib.partitioned.ConsumerConfiguration) PartitionKey(co.cask.cdap.api.dataset.lib.PartitionKey) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) PartitionConsumer(co.cask.cdap.api.dataset.lib.partitioned.PartitionConsumer) HashSet(java.util.HashSet) Test(org.junit.Test)
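
The toKeys helper called in the final assertion is not shown on this page; it plausibly just collects the keys of the processed partitions into a set, along these lines (a sketch, not the actual test helper):

private static Set<PartitionKey> toKeys(List<? extends Partition> partitions) {
    Set<PartitionKey> keys = new HashSet<>();
    for (Partition partition : partitions) {
        keys.add(partition.getPartitionKey());
    }
    return keys;
}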

Example 65 with TransactionAware

Use of org.apache.tephra.TransactionAware in project cdap by caskdata.

In class TimePartitionedFileSetTest, the method testTimePartitionedInputArguments:

@Test
public void testTimePartitionedInputArguments() throws Exception {
    final long time8 = DATE_FORMAT.parse("10/17/2014 8:42 am").getTime();
    final long time9 = DATE_FORMAT.parse("10/17/2014 9:42 am").getTime();
    final String path8 = "8:42";
    final String path9 = "9:42";
    final PartitionFilter filter9 = PartitionFilter.builder().addRangeCondition("hour", 9, null).build();
    // add a few partitions
    {
        final TimePartitionedFileSet dataset = dsFrameworkUtil.getInstance(TPFS_INSTANCE);
        final TransactionAware txAwareDataset = (TransactionAware) dataset;
        dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                dataset.addPartition(time8, path8);
                dataset.addPartition(time9, path9);
            }
        });
    }
    // test specifying time range for input
    Map<String, String> arguments = Maps.newHashMap();
    TimePartitionedFileSetArguments.setInputStartTime(arguments, time8 - 30 * MINUTE);
    TimePartitionedFileSetArguments.setInputEndTime(arguments, time8 + 30 * MINUTE);
    testInputConfiguration(arguments, path8);
    // add a partition filter; it should have no effect as long as a time range is given
    TimePartitionedFileSetArguments.setInputPartitionFilter(arguments, filter9);
    testInputConfiguration(arguments, path8);
    // test specifying input with a partition filter
    arguments.clear();
    TimePartitionedFileSetArguments.setInputPartitionFilter(arguments, filter9);
    testInputConfiguration(arguments, path9);
    // test specifying only a start time or only an end time for input; either alone must fail
    arguments.clear();
    TimePartitionedFileSetArguments.setInputStartTime(arguments, time8 + 30 * MINUTE);
    testInputConfigurationFailure(arguments, " with only a start time");
    arguments.clear();
    TimePartitionedFileSetArguments.setInputEndTime(arguments, time8 + 30 * MINUTE);
    testInputConfigurationFailure(arguments, " with only an end time");
}
Also used : PartitionFilter(co.cask.cdap.api.dataset.lib.PartitionFilter) TransactionAware(org.apache.tephra.TransactionAware) TimePartitionedFileSet(co.cask.cdap.api.dataset.lib.TimePartitionedFileSet) Test(org.junit.Test)
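
The test relies on DATE_FORMAT and MINUTE constants declared elsewhere in TimePartitionedFileSetTest. Judging from the date strings parsed above, they are presumably along these lines (a hedged guess, not the actual declarations):

// a pattern such as "M/d/yyyy h:mm a" parses strings like "10/17/2014 8:42 am"
private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("M/d/yyyy h:mm a");
private static final long MINUTE = TimeUnit.MINUTES.toMillis(1);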

Aggregations

TransactionAware (org.apache.tephra.TransactionAware) 91
Test (org.junit.Test) 64
TransactionExecutor (org.apache.tephra.TransactionExecutor) 47
Table (co.cask.cdap.api.dataset.table.Table) 37
Transaction (org.apache.tephra.Transaction) 30
IOException (java.io.IOException) 27
DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin) 22
PartitionedFileSet (co.cask.cdap.api.dataset.lib.PartitionedFileSet) 22
HBaseTable (co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable) 21
PartitionKey (co.cask.cdap.api.dataset.lib.PartitionKey) 17
DataSetException (co.cask.cdap.api.dataset.DataSetException) 16
TransactionFailureException (org.apache.tephra.TransactionFailureException) 15
List (java.util.List) 13
ArrayList (java.util.ArrayList) 11
HashSet (java.util.HashSet) 11
DatasetManagementException (co.cask.cdap.api.dataset.DatasetManagementException) 10
PartitionNotFoundException (co.cask.cdap.api.dataset.PartitionNotFoundException) 10
TimePartitionedFileSet (co.cask.cdap.api.dataset.lib.TimePartitionedFileSet) 10
ConcurrentPartitionConsumer (co.cask.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer) 10
PartitionConsumer (co.cask.cdap.api.dataset.lib.partitioned.PartitionConsumer) 10