Example 21 with PartitionedFileSet

Use of io.cdap.cdap.api.dataset.lib.PartitionedFileSet in project cdap by caskdata.

From the class PartitionConsumerTest, method testDroppedPartitions.

@Test
public void testDroppedPartitions() throws Exception {
    // Tests the case of a partition in the partition consumer's working set being dropped
    // from the PartitionedFileSet (see CDAP-6215)
    final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
    final TransactionAware txAwareDataset = (TransactionAware) dataset;
    ConsumerConfiguration configuration = ConsumerConfiguration.builder().setMaxWorkingSetSize(1).setMaxRetries(2).build();
    final PartitionConsumer partitionConsumer = new ConcurrentPartitionConsumer(dataset, new InMemoryStatePersistor(), configuration);
    final PartitionKey partitionKey1 = generateUniqueKey();
    final PartitionKey partitionKey2 = generateUniqueKey();
    // Note: These two partitions are added in separate transactions, so that the first can exist in the working set
    // without the second. Partitions added in the same transaction cannot be split up, because they share the same index.
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            dataset.getPartitionOutput(partitionKey1).addPartition();
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            dataset.getPartitionOutput(partitionKey2).addPartition();
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // consuming and aborting the partition maxRetries + 1 times (the first attempt plus the retries)
            // causes it to be removed from the working set
            List<PartitionDetail> partitionDetails = partitionConsumer.consumePartitions(1).getPartitions();
            Assert.assertEquals(1, partitionDetails.size());
            Assert.assertEquals(partitionKey1, partitionDetails.get(0).getPartitionKey());
            // abort processing of the partition to put it back in the working set
            partitionConsumer.onFinish(partitionDetails, false);
        }
    });
    // dropping partitionKey1 from the dataset makes it no longer available for consuming
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            dataset.dropPartition(partitionKey1);
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // the first call to consume drops the stale partition from the working set and returns nothing,
            // since it was the only partition in the working set
            PartitionConsumerResult result = partitionConsumer.consumePartitions(1);
            Assert.assertEquals(0, result.getPartitions().size());
            Assert.assertEquals(0, result.getFailedPartitions().size());
            // following calls to consumePartitions will repopulate the working set and return additional partition(s)
            result = partitionConsumer.consumePartitions(1);
            Assert.assertEquals(1, result.getPartitions().size());
            Assert.assertEquals(partitionKey2, result.getPartitions().get(0).getPartitionKey());
        }
    });
}
Also used: ConcurrentPartitionConsumer (io.cdap.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer), PartitionConsumerResult (io.cdap.cdap.api.dataset.lib.partitioned.PartitionConsumerResult), PartitionedFileSet (io.cdap.cdap.api.dataset.lib.PartitionedFileSet), TransactionExecutor (org.apache.tephra.TransactionExecutor), TransactionAware (org.apache.tephra.TransactionAware), ConsumerConfiguration (io.cdap.cdap.api.dataset.lib.partitioned.ConsumerConfiguration), PartitionKey (io.cdap.cdap.api.dataset.lib.PartitionKey), ArrayList (java.util.ArrayList), ImmutableList (com.google.common.collect.ImmutableList), List (java.util.List), PartitionConsumer (io.cdap.cdap.api.dataset.lib.partitioned.PartitionConsumer), Test (org.junit.Test)
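
The consume/abort cycle this test exercises is the normal processing loop for a PartitionConsumer. A minimal sketch of that loop, using only the consumePartitions and onFinish calls seen above (process is a hypothetical placeholder for application logic):

List<PartitionDetail> partitions = partitionConsumer.consumePartitions(10).getPartitions();
try {
    for (PartitionDetail partition : partitions) {
        process(partition); // hypothetical application logic
    }
    // success: mark the partitions as processed so they leave the working set
    partitionConsumer.onFinish(partitions, true);
} catch (Exception e) {
    // failure: return the partitions to the working set to be retried,
    // up to the configured maxRetries
    partitionConsumer.onFinish(partitions, false);
}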

Example 22 with PartitionedFileSet

Use of io.cdap.cdap.api.dataset.lib.PartitionedFileSet in project cdap by caskdata.

From the class PartitionConsumerTest, method testCustomOperations.

@Test
public void testCustomOperations() throws Exception {
    final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
    final TransactionAware txAwareDataset = (TransactionAware) dataset;
    ConsumerConfiguration configuration = ConsumerConfiguration.builder().setMaxRetries(3).build();
    final PartitionConsumer partitionConsumer = new CustomConsumer(dataset, new InMemoryStatePersistor(), configuration);
    final int numPartitions = 3;
    final List<PartitionKey> partitionKeys = new ArrayList<>(numPartitions);
    for (int i = 0; i < numPartitions; i++) {
        partitionKeys.add(generateUniqueKey());
    }
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            for (PartitionKey partitionKey : partitionKeys) {
                dataset.getPartitionOutput(partitionKey).addPartition();
            }
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            List<PartitionDetail> partitions = partitionConsumer.consumePartitions().getPartitions();
            Assert.assertEquals(numPartitions, partitions.size());
            partitionConsumer.onFinish(partitions, false);
            partitions = partitionConsumer.consumePartitions().getPartitions();
            Assert.assertEquals(numPartitions, partitions.size());
            partitionConsumer.onFinish(partitions, false);
            // after two failed attempts, the partitions are now returned individually
            partitions = partitionConsumer.consumePartitions().getPartitions();
            Assert.assertEquals(1, partitions.size());
            partitionConsumer.onFinish(partitions, true);
            partitions = partitionConsumer.consumePartitions().getPartitions();
            Assert.assertEquals(1, partitions.size());
            partitionConsumer.onFinish(partitions, true);
            partitions = partitionConsumer.consumePartitions().getPartitions();
            Assert.assertEquals(1, partitions.size());
            partitionConsumer.onFinish(partitions, true);
        }
    });
}
Also used: ArrayList (java.util.ArrayList), PartitionedFileSet (io.cdap.cdap.api.dataset.lib.PartitionedFileSet), TransactionExecutor (org.apache.tephra.TransactionExecutor), TransactionAware (org.apache.tephra.TransactionAware), ConsumerConfiguration (io.cdap.cdap.api.dataset.lib.partitioned.ConsumerConfiguration), PartitionKey (io.cdap.cdap.api.dataset.lib.PartitionKey), ImmutableList (com.google.common.collect.ImmutableList), List (java.util.List), PartitionConsumer (io.cdap.cdap.api.dataset.lib.partitioned.PartitionConsumer), ConcurrentPartitionConsumer (io.cdap.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer), Test (org.junit.Test)
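
CustomConsumer is defined elsewhere in PartitionConsumerTest; the assertions above only pin down its observable behavior (whole batches are retried on failure until the retry budget is spent, after which partitions are returned individually). Wiring a consumer together outside the test harness, using only the builder options that appear in these tests, looks like this sketch (the values are arbitrary, and InMemoryStatePersistor is the test harness's in-memory persistor, so a real application would use a durable persistor instead):

ConsumerConfiguration config = ConsumerConfiguration.builder()
    .setMaxWorkingSetSize(1000) // arbitrary illustration value
    .setMaxRetries(3)
    .build();
PartitionConsumer consumer = new ConcurrentPartitionConsumer(dataset, new InMemoryStatePersistor(), config);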

Example 23 with PartitionedFileSet

Use of io.cdap.cdap.api.dataset.lib.PartitionedFileSet in project cdap by caskdata.

From the class PartitionedFileSetTest, method testInvalidPartitionKey.

@Test
public void testInvalidPartitionKey() throws Exception {
    final PartitionedFileSet pfs = dsFrameworkUtil.getInstance(pfsInstance);
    dsFrameworkUtil.newTransactionExecutor((TransactionAware) pfs).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            try {
                pfs.getPartitionOutput(PartitionKey.builder().addField("i", 1).addField("l", 2L).build());
                Assert.fail("should have thrown exception due to missing field");
            } catch (IllegalArgumentException e) {
                // expected
            }
            try {
                pfs.addPartition(PartitionKey.builder().addField("i", 1).addField("l", "2").addField("s", "a").build(), "some/location");
                Assert.fail("should have thrown exception due to incompatible field");
            } catch (IllegalArgumentException e) {
                // expected
            }
            try {
                pfs.addPartition(PartitionKey.builder().addField("i", 1).addField("l", 2L).addField("s", "a").addField("x", "x").build(), "some/location", ImmutableMap.of("a", "b"));
                Assert.fail("should have thrown exception due to extra field");
            } catch (IllegalArgumentException e) {
                // expected
            }
            pfs.addPartition(PartitionKey.builder().addField("i", 1).addField("l", 2L).addField("s", "a").build(), "some/location", ImmutableMap.of("a", "b"));
            try {
                pfs.addMetadata(PartitionKey.builder().addField("i", 1).addField("l", 2L).addField("s", "a").addField("x", "x").build(), ImmutableMap.of("abc", "xyz"));
                Assert.fail("should have thrown exception due to extra field");
            } catch (IllegalArgumentException e) {
                // expected
            }
            try {
                pfs.dropPartition(PartitionKey.builder().addField("i", 1).addField("l", 2L).addField("s", 0).build());
                Assert.fail("should have thrown exception due to incompatible field");
            } catch (IllegalArgumentException e) {
                // expected
            }
        }
    });
}
Also used: TransactionAware (org.apache.tephra.TransactionAware), PartitionedFileSet (io.cdap.cdap.api.dataset.lib.PartitionedFileSet), TransactionExecutor (org.apache.tephra.TransactionExecutor), DataSetException (io.cdap.cdap.api.dataset.DataSetException), PartitionNotFoundException (io.cdap.cdap.api.dataset.PartitionNotFoundException), PartitionAlreadyExistsException (io.cdap.cdap.api.dataset.lib.PartitionAlreadyExistsException), IOException (java.io.IOException), Test (org.junit.Test)
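
The failure cases above imply that the dataset is partitioned on an int field "i", a long field "l", and a string field "s", and that a key must supply exactly those fields with matching types. For contrast, a valid key under that assumed scheme:

PartitionKey validKey = PartitionKey.builder()
    .addField("i", 1)   // int field
    .addField("l", 2L)  // long field: 2L, not the string "2"
    .addField("s", "a") // string field; no extra fields such as "x"
    .build();
pfs.addPartition(validKey, "some/location");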

Example 24 with PartitionedFileSet

Use of io.cdap.cdap.api.dataset.lib.PartitionedFileSet in project cdap by caskdata.

From the class PartitionedFileSetTest, method testAddRemoveGetPartition.

@Test
public void testAddRemoveGetPartition() throws Exception {
    final PartitionedFileSet pfs = dsFrameworkUtil.getInstance(pfsInstance);
    final AtomicReference<Location> outputLocationRef = new AtomicReference<>();
    dsFrameworkUtil.newTransactionExecutor((TransactionAware) pfs).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            Location outputLocation = createPartition(pfs, PARTITION_KEY, "file");
            outputLocationRef.set(outputLocation);
            Assert.assertTrue(outputLocation.exists());
            Assert.assertNotNull(pfs.getPartition(PARTITION_KEY));
            Assert.assertTrue(pfs.getPartition(PARTITION_KEY).getLocation().exists());
            pfs.dropPartition(PARTITION_KEY);
            Assert.assertFalse(outputLocation.exists());
            Assert.assertNull(pfs.getPartition(PARTITION_KEY));
            pfs.dropPartition(PARTITION_KEY);
        }
    });
    // the files of the partition are dropped upon transaction commit
    Assert.assertFalse(outputLocationRef.get().exists());
}
Also used: TransactionAware (org.apache.tephra.TransactionAware), PartitionedFileSet (io.cdap.cdap.api.dataset.lib.PartitionedFileSet), AtomicReference (java.util.concurrent.atomic.AtomicReference), TransactionExecutor (org.apache.tephra.TransactionExecutor), DataSetException (io.cdap.cdap.api.dataset.DataSetException), PartitionNotFoundException (io.cdap.cdap.api.dataset.PartitionNotFoundException), PartitionAlreadyExistsException (io.cdap.cdap.api.dataset.lib.PartitionAlreadyExistsException), IOException (java.io.IOException), Location (org.apache.twill.filesystem.Location), Test (org.junit.Test)
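
The createPartition helper is defined elsewhere in PartitionedFileSetTest. A minimal hypothetical version, assuming it writes a marker file named fileName into the partition's output location and then registers the partition (requires io.cdap.cdap.api.dataset.lib.PartitionOutput and java.io.OutputStream in addition to the imports listed above):

private Location createPartition(PartitionedFileSet pfs, PartitionKey key, String fileName) throws IOException {
    PartitionOutput output = pfs.getPartitionOutput(key);
    Location partitionDir = output.getLocation();
    try (OutputStream out = partitionDir.append(fileName).getOutputStream()) {
        out.write(42); // arbitrary marker content
    }
    // registering the partition makes it visible to getPartition() and to consumers
    output.addPartition();
    return partitionDir;
}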

Example 25 with PartitionedFileSet

Use of io.cdap.cdap.api.dataset.lib.PartitionedFileSet in project cdap by caskdata.

From the class PartitionedFileSetTest, method testPartitionConsumingWithFilterAndLimit.

@Test
public void testPartitionConsumingWithFilterAndLimit() throws Exception {
    final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
    final TransactionAware txAwareDataset = (TransactionAware) dataset;
    final Set<PartitionKey> partitionKeys1 = Sets.newHashSet();
    for (int i = 0; i < 10; i++) {
        partitionKeys1.add(generateUniqueKey());
    }
    final Set<PartitionKey> partitionKeys2 = Sets.newHashSet();
    for (int i = 0; i < 15; i++) {
        partitionKeys2.add(generateUniqueKey());
    }
    final SimplePartitionConsumer partitionConsumer = new SimplePartitionConsumer(dataset);
    // add each partition in a separate transaction (consumption only happens at transaction borders)
    for (final PartitionKey partitionKey : partitionKeys1) {
        dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                dataset.getPartitionOutput(partitionKey).addPartition();
            }
        });
    }
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // The initial consumption returns the partitions corresponding to partitionKeys1, because only
            // those partitions have been added to the dataset at this point
            List<Partition> consumedPartitions = Lists.newArrayList();
            // with limit = 1, only one partition is returned, even though there are more unconsumed partitions
            Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(1));
            Assert.assertEquals(1, consumedPartitions.size());
            // ask for 5 more
            Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(5));
            Assert.assertEquals(6, consumedPartitions.size());
            // ask for 5 more, but there are only 4 more unconsumed partitions (size of partitionKeys1 is 10).
            Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(5));
            Assert.assertEquals(10, consumedPartitions.size());
            Set<PartitionKey> retrievedKeys = Sets.newHashSet();
            for (Partition consumedPartition : consumedPartitions) {
                retrievedKeys.add(consumedPartition.getPartitionKey());
            }
            Assert.assertEquals(partitionKeys1, retrievedKeys);
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            for (PartitionKey partitionKey : partitionKeys2) {
                dataset.getPartitionOutput(partitionKey).addPartition();
            }
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // using the same PartitionConsumer (which remembers the PartitionConsumerState) to consume again
            // returns only the newly added partitions, corresponding to partitionKeys2
            List<Partition> consumedPartitions = Lists.newArrayList();
            Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(1));
            // even though we set limit to 1 in the previous call to consumePartitions, we get all the elements of
            // partitionKeys2, because they were all added in the same transaction
            Set<PartitionKey> retrievedKeys = Sets.newHashSet();
            for (Partition consumedPartition : consumedPartitions) {
                retrievedKeys.add(consumedPartition.getPartitionKey());
            }
            Assert.assertEquals(partitionKeys2, retrievedKeys);
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // consuming again without adding any new partitions returns an empty result
            Assert.assertTrue(partitionConsumer.consumePartitions().isEmpty());
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // creating a new PartitionConsumer resets the consumption state.
            // test combination of filter and limit
            SimplePartitionConsumer newPartitionConsumer = new SimplePartitionConsumer(dataset);
            List<Partition> consumedPartitions = Lists.newArrayList();
            // the partitionFilter matches partition keys with "i" in [1, 7), of which there are 6
            final PartitionFilter partitionFilter = PartitionFilter.builder().addRangeCondition("i", 1, 7).build();
            final Predicate<PartitionDetail> predicate = new Predicate<PartitionDetail>() {

                @Override
                public boolean apply(PartitionDetail partitionDetail) {
                    return partitionFilter.match(partitionDetail.getPartitionKey());
                }
            };
            // applying the filter (which narrows the set to 6 partitions) with a limit of 4 yields 4 consumed partitions
            Iterables.addAll(consumedPartitions, newPartitionConsumer.consumePartitions(4, predicate));
            Assert.assertEquals(4, consumedPartitions.size());
            // a second call with a limit of 3 and the same filter returns the remaining 2 matching partitions
            Iterables.addAll(consumedPartitions, newPartitionConsumer.consumePartitions(3, predicate));
            Assert.assertEquals(6, consumedPartitions.size());
            // assert that the returned partitions have keys whose "i" values cover the range [1, 7)
            Set<Integer> expectedIFields = new HashSet<>();
            for (int i = 1; i < 7; i++) {
                expectedIFields.add(i);
            }
            Set<Integer> actualIFields = new HashSet<>();
            for (Partition consumedPartition : consumedPartitions) {
                actualIFields.add((Integer) consumedPartition.getPartitionKey().getField("i"));
            }
            Assert.assertEquals(expectedIFields, actualIFields);
        }
    });
}
Also used: Partition (io.cdap.cdap.api.dataset.lib.Partition), PartitionedFileSet (io.cdap.cdap.api.dataset.lib.PartitionedFileSet), ImmutableSet (com.google.common.collect.ImmutableSet), Set (java.util.Set), FileSet (io.cdap.cdap.api.dataset.lib.FileSet), HashSet (java.util.HashSet), TransactionExecutor (org.apache.tephra.TransactionExecutor), PartitionDetail (io.cdap.cdap.api.dataset.lib.PartitionDetail), DataSetException (io.cdap.cdap.api.dataset.DataSetException), PartitionNotFoundException (io.cdap.cdap.api.dataset.PartitionNotFoundException), PartitionAlreadyExistsException (io.cdap.cdap.api.dataset.lib.PartitionAlreadyExistsException), IOException (java.io.IOException), Predicate (io.cdap.cdap.api.Predicate), PartitionFilter (io.cdap.cdap.api.dataset.lib.PartitionFilter), TransactionAware (org.apache.tephra.TransactionAware), PartitionKey (io.cdap.cdap.api.dataset.lib.PartitionKey), List (java.util.List), Test (org.junit.Test)
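
generateUniqueKey() is another helper defined elsewhere in these test classes. Given the range filter on field "i" above, a hypothetical counter-based version that is consistent with the assertions (25 keys with "i" running from 1 to 25, of which [1, 7) matches 6) could be (requires java.util.concurrent.atomic.AtomicInteger):

private static final AtomicInteger KEY_COUNTER = new AtomicInteger(); // hypothetical counter

private PartitionKey generateUniqueKey() {
    int i = KEY_COUNTER.incrementAndGet();
    return PartitionKey.builder()
        .addField("i", i) // monotonically increasing, so range conditions on "i" behave predictably
        .addField("l", (long) i)
        .addField("s", Integer.toString(i))
        .build();
}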

Aggregations

PartitionedFileSet (io.cdap.cdap.api.dataset.lib.PartitionedFileSet): 112 usages
Test (org.junit.Test): 75 usages
PartitionKey (io.cdap.cdap.api.dataset.lib.PartitionKey): 53 usages
Location (org.apache.twill.filesystem.Location): 47 usages
TransactionAware (org.apache.tephra.TransactionAware): 44 usages
TransactionExecutor (org.apache.tephra.TransactionExecutor): 44 usages
PartitionDetail (io.cdap.cdap.api.dataset.lib.PartitionDetail): 28 usages
IOException (java.io.IOException): 26 usages
DataSetException (io.cdap.cdap.api.dataset.DataSetException): 24 usages
FileSet (io.cdap.cdap.api.dataset.lib.FileSet): 24 usages
List (java.util.List): 24 usages
PartitionNotFoundException (io.cdap.cdap.api.dataset.PartitionNotFoundException): 22 usages
PartitionAlreadyExistsException (io.cdap.cdap.api.dataset.lib.PartitionAlreadyExistsException): 22 usages
ConcurrentPartitionConsumer (io.cdap.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer): 22 usages
PartitionConsumer (io.cdap.cdap.api.dataset.lib.partitioned.PartitionConsumer): 22 usages
HashSet (java.util.HashSet): 19 usages
ImmutableList (com.google.common.collect.ImmutableList): 18 usages
ArrayList (java.util.ArrayList): 18 usages
TimePartitionedFileSet (io.cdap.cdap.api.dataset.lib.TimePartitionedFileSet): 16 usages
TransactionContext (org.apache.tephra.TransactionContext): 16 usages