Example 51 with PartitionKey

Use of co.cask.cdap.api.dataset.lib.PartitionKey in project cdap by caskdata.

From the class PartitionConsumerTest, method testSimpleConcurrency.

@Test
public void testSimpleConcurrency() throws Exception {
    final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
    final TransactionAware txAwareDataset = (TransactionAware) dataset;
    final Set<PartitionKey> partitionKeys = new HashSet<>();
    for (int i = 0; i < 10; i++) {
        partitionKeys.add(generateUniqueKey());
    }
    // create three ConcurrentPartitionConsumers that share the same consumer state
    InMemoryStatePersistor persistor = new InMemoryStatePersistor();
    ConsumerConfiguration configuration = ConsumerConfiguration.builder().setMaxRetries(3).build();
    final PartitionConsumer partitionConsumer1 = new ConcurrentPartitionConsumer(dataset, persistor, configuration);
    final PartitionConsumer partitionConsumer2 = new ConcurrentPartitionConsumer(dataset, persistor, configuration);
    final PartitionConsumer partitionConsumer3 = new ConcurrentPartitionConsumer(dataset, persistor, configuration);
    // add all ten keys to the partitioned fileset
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            for (final PartitionKey partitionKey : partitionKeys) {
                dataset.getPartitionOutput(partitionKey).addPartition();
            }
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // with limit = 1, the returned iterator is only size 1, even though there are more unconsumed partitions
            List<PartitionDetail> consumedBy1 = partitionConsumer1.consumePartitions(1).getPartitions();
            Assert.assertEquals(1, consumedBy1.size());
            // partitionConsumer2 asks for 10 partitions, but 1 is currently in progress by partitionConsumer1, so it only
            // gets the remaining 9 partitions
            List<PartitionDetail> consumedBy2 = partitionConsumer2.consumePartitions(10).getPartitions();
            Assert.assertEquals(9, consumedBy2.size());
            // partitionConsumer3 tries to consume partitions, but all are marked in-progress by partitionConsumers 1 and 2
            Assert.assertEquals(0, partitionConsumer3.consumePartitions().getPartitions().size());
            // partitionConsumer1 aborts its partition, so it then becomes available for partitionConsumer3
            partitionConsumer1.onFinish(consumedBy1, false);
            consumedBy1.clear();
            // queries with limit = 2, but only the one partition that partitionConsumer1 released is available
            List<PartitionDetail> consumedBy3 = partitionConsumer3.consumePartitions(2).getPartitions();
            Assert.assertEquals(1, consumedBy3.size());
            // partitionConsumer3 marks that it successfully processed its partitions
            partitionConsumer3.onFinish(consumedBy3, true);
            // test onFinishWithKeys API
            List<PartitionKey> keysConsumedBy2 = Lists.transform(consumedBy2, new Function<PartitionDetail, PartitionKey>() {

                @Override
                public PartitionKey apply(PartitionDetail input) {
                    return input.getPartitionKey();
                }
            });
            partitionConsumer2.onFinishWithKeys(keysConsumedBy2, true);
            // at this point, all partitions are processed, so no additional partitions are available for consumption
            Assert.assertEquals(0, partitionConsumer3.consumePartitions().getPartitions().size());
            List<PartitionDetail> allProcessedPartitions = new ArrayList<>();
            allProcessedPartitions.addAll(consumedBy1);
            allProcessedPartitions.addAll(consumedBy2);
            allProcessedPartitions.addAll(consumedBy3);
            // ordering may be different, since all the partitions were added in the same transaction
            Assert.assertEquals(partitionKeys, toKeys(allProcessedPartitions));
        }
    });
}
Also used : ConcurrentPartitionConsumer(co.cask.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer) PartitionedFileSet(co.cask.cdap.api.dataset.lib.PartitionedFileSet) TransactionExecutor(org.apache.tephra.TransactionExecutor) PartitionDetail(co.cask.cdap.api.dataset.lib.PartitionDetail) Function(com.google.common.base.Function) TransactionAware(org.apache.tephra.TransactionAware) ConsumerConfiguration(co.cask.cdap.api.dataset.lib.partitioned.ConsumerConfiguration) PartitionKey(co.cask.cdap.api.dataset.lib.PartitionKey) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) ConcurrentPartitionConsumer(co.cask.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer) PartitionConsumer(co.cask.cdap.api.dataset.lib.partitioned.PartitionConsumer) HashSet(java.util.HashSet) Test(org.junit.Test)
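
The worker-side pattern this test implies can be summarized in a short sketch. Below is a minimal, hypothetical consume/finish loop, assuming a dataset and persistor wired up as in the test (the transaction wiring via TransactionExecutor is omitted for brevity); processPartition(...) is a made-up application callback, while consumePartitions, onFinish, and setMaxRetries are the same consumer APIs exercised above.

// Hypothetical worker loop; 'dataset' and 'persistor' are assumed to exist as in the test.
ConsumerConfiguration configuration = ConsumerConfiguration.builder().setMaxRetries(3).build();
PartitionConsumer consumer = new ConcurrentPartitionConsumer(dataset, persistor, configuration);
List<PartitionDetail> batch = consumer.consumePartitions(10).getPartitions();
try {
    for (PartitionDetail partition : batch) {
        // made-up application callback that processes one partition
        processPartition(partition);
    }
    // success: permanently mark the batch as processed in the shared state
    consumer.onFinish(batch, true);
} catch (Exception e) {
    // failure: release the batch so any consumer sharing the persistor can retry it
    consumer.onFinish(batch, false);
}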

Example 52 with PartitionKey

Use of co.cask.cdap.api.dataset.lib.PartitionKey in project cdap by caskdata.

From the class PartitionKeyTest, method testBuilderGetter.

@Test
public void testBuilderGetter() {
    PartitionKey key = PartitionKey.builder()
        .addField("a", "value")
        .addField("b", 1L)
        .addField("c", -17)
        .addField("d", true)
        .addIntField("e", 42)
        .addLongField("f", 15)
        .addStringField("g", "ghijk")
        .build();
    Assert.assertEquals("value", key.getField("a"));
    Assert.assertEquals(1L, key.getField("b"));
    Assert.assertEquals(-17, key.getField("c"));
    Assert.assertEquals(true, key.getField("d"));
    Assert.assertEquals(42, key.getField("e"));
    Assert.assertEquals(15L, key.getField("f"));
    Assert.assertEquals("ghijk", key.getField("g"));
}
Also used : PartitionKey(co.cask.cdap.api.dataset.lib.PartitionKey) Test(org.junit.Test)
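
One property that the concurrency tests rely on, but this getter test does not show directly, is that PartitionKey has value semantics: building the same fields twice yields equal keys, which is what makes set comparisons such as Assert.assertEquals(partitionKeys, toKeys(allProcessedPartitions)) in Example 51 work. A minimal sketch (the field names and values are illustrative):

PartitionKey k1 = PartitionKey.builder().addIntField("year", 2015).addStringField("region", "us").build();
PartitionKey k2 = PartitionKey.builder().addIntField("year", 2015).addStringField("region", "us").build();
Set<PartitionKey> keys = new HashSet<>();
keys.add(k1);
// k2 equals k1, so the set deduplicates to a single entry
keys.add(k2);
Assert.assertEquals(1, keys.size());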

Example 53 with PartitionKey

Use of co.cask.cdap.api.dataset.lib.PartitionKey in project cdap by caskdata.

From the class TimePartitionedFileSetTest, method testOutputPartitionPath.

/**
   * Tests that the output file path is set correctly, based on the output partition time.
   */
@Test
public void testOutputPartitionPath() throws Exception {
    // test specifying output time
    Date date = DATE_FORMAT.parse("1/1/15 8:42 pm");
    Map<String, String> args = Maps.newHashMap();
    TimePartitionedFileSetArguments.setOutputPartitionTime(args, date.getTime());
    TimeZone timeZone = Calendar.getInstance().getTimeZone();
    TimePartitionedFileSetArguments.setOutputPathFormat(args, "yyyy-MM-dd/HH_mm", timeZone.getID());
    TimePartitionedFileSet ds = dsFrameworkUtil.getInstance(TPFS_INSTANCE, args);
    String outputPath = ds.getEmbeddedFileSet().getOutputLocation().toURI().getPath();
    Assert.assertTrue(outputPath.endsWith("2015-01-01/20_42"));
    Map<String, String> outputConfig = ds.getOutputFormatConfiguration();
    Assert.assertTrue(outputConfig.get(FileOutputFormat.OUTDIR).endsWith("2015-01-01/20_42"));
    // test specifying output time and partition key -> time should prevail
    PartitionKey key = PartitionKey.builder()
        .addIntField("year", 2014)
        .addIntField("month", 1)
        .addIntField("day", 1)
        .addIntField("hour", 20)
        .addIntField("minute", 54)
        .build();
    TimePartitionedFileSet ds1 = dsFrameworkUtil.getInstance(TPFS_INSTANCE, args);
    TimePartitionedFileSetArguments.setOutputPartitionKey(args, key);
    outputConfig = ds1.getOutputFormatConfiguration();
    Assert.assertTrue(outputConfig.get(FileOutputFormat.OUTDIR).endsWith("2015-01-01/20_42"));
    args.clear();
    TimePartitionedFileSetArguments.setOutputPartitionKey(args, key);
    TimePartitionedFileSet ds2 = dsFrameworkUtil.getInstance(TPFS_INSTANCE, args);
    outputConfig = ds2.getOutputFormatConfiguration();
    Assert.assertTrue(outputConfig.get(FileOutputFormat.OUTDIR).endsWith("54"));
    args.clear();
    TimePartitionedFileSet ds3 = dsFrameworkUtil.getInstance(TPFS_INSTANCE, args);
    try {
        ds3.getOutputFormatConfiguration();
        Assert.fail("getOutputFormatConfiguration should have failed with neither output time nor partition key");
    } catch (DataSetException e) {
    // expected
    }
}
Also used : TimeZone(java.util.TimeZone) DataSetException(co.cask.cdap.api.dataset.DataSetException) PartitionKey(co.cask.cdap.api.dataset.lib.PartitionKey) TimePartitionedFileSet(co.cask.cdap.api.dataset.lib.TimePartitionedFileSet) Date(java.util.Date) Test(org.junit.Test)
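
For reference, the runtime-argument setup that this test verifies can be written as a standalone sketch. The epoch timestamp and the "UTC" time-zone ID below are illustrative; setOutputPartitionTime and setOutputPathFormat are the same calls exercised above.

Map<String, String> args = new HashMap<>();
// 1420144920000L is 2015-01-01 20:42 UTC; any epoch-milliseconds value works
TimePartitionedFileSetArguments.setOutputPartitionTime(args, 1420144920000L);
// optional: control the directory layout of the written partition
TimePartitionedFileSetArguments.setOutputPathFormat(args, "yyyy-MM-dd/HH_mm", "UTC");
// with this configuration, the resolved output path ends in "2015-01-01/20_42"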

Example 54 with PartitionKey

Use of co.cask.cdap.api.dataset.lib.PartitionKey in project cdap by caskdata.

From the class CoreSchedulerServiceTest, method publishNotification.

private void publishNotification(TopicId topicId, ProgramId programId, String dataset) throws TopicNotFoundException, IOException, TransactionFailureException, AlreadyExistsException, BadRequestException {
    DatasetId datasetId = programId.getNamespaceId().dataset(dataset);
    PartitionKey partitionKey = PartitionKey.builder().addIntField("part1", 1).build();
    Notification notification = Notification.forPartitions(datasetId, ImmutableList.of(partitionKey));
    messagingService.publish(StoreRequestBuilder.of(topicId).addPayloads(GSON.toJson(notification)).build());
}
Also used : PartitionKey(co.cask.cdap.api.dataset.lib.PartitionKey) Notification(co.cask.cdap.proto.Notification) DatasetId(co.cask.cdap.proto.id.DatasetId)
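
Since Notification.forPartitions takes a list, several keys can be batched into a single notification. A small, hypothetical variation on the helper above (the second key is made up; the calls themselves are the ones shown in the method):

PartitionKey key1 = PartitionKey.builder().addIntField("part1", 1).build();
PartitionKey key2 = PartitionKey.builder().addIntField("part1", 2).build();
Notification notification = Notification.forPartitions(datasetId, ImmutableList.of(key1, key2));
messagingService.publish(StoreRequestBuilder.of(topicId).addPayloads(GSON.toJson(notification)).build());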

Example 55 with PartitionKey

Use of co.cask.cdap.api.dataset.lib.PartitionKey in project cdap by caskdata.

From the class PartitionConsumerTest, method testPartitionConsumingWithFilterAndLimit.

@Test
public void testPartitionConsumingWithFilterAndLimit() throws Exception {
    final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
    final TransactionAware txAwareDataset = (TransactionAware) dataset;
    final Set<PartitionKey> partitionKeys1 = new HashSet<>();
    for (int i = 0; i < 10; i++) {
        partitionKeys1.add(generateUniqueKey());
    }
    final Set<PartitionKey> partitionKeys2 = new HashSet<>();
    for (int i = 0; i < 15; i++) {
        partitionKeys2.add(generateUniqueKey());
    }
    final PartitionConsumer partitionConsumer = new ConcurrentPartitionConsumer(dataset, new InMemoryStatePersistor());
    // add the first set of partitions, each in its own transaction (consumption only happens at transaction borders)
    for (final PartitionKey partitionKey : partitionKeys1) {
        dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

            @Override
            public void apply() throws Exception {
                dataset.getPartitionOutput(partitionKey).addPartition();
            }
        });
    }
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // Initial consumption results in the partitions corresponding to partitionKeys1 to be consumed because only
            // those partitions are added to the dataset at this point
            List<Partition> consumedPartitions = new ArrayList<>();
            // with limit = 1, the returned iterator is only size 1, even though there are more unconsumed partitions
            Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(1).getPartitions());
            Assert.assertEquals(1, consumedPartitions.size());
            // ask for 5 more
            Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(5).getPartitions());
            Assert.assertEquals(6, consumedPartitions.size());
            // ask for 5 more, but there are only 4 more unconsumed partitions (size of partitionKeys1 is 10).
            Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(5).getPartitions());
            Assert.assertEquals(10, consumedPartitions.size());
            Assert.assertEquals(partitionKeys1, toKeys(consumedPartitions));
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            for (PartitionKey partitionKey : partitionKeys2) {
                dataset.getPartitionOutput(partitionKey).addPartition();
            }
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // using the same PartitionConsumer (which remembers the PartitionConsumerState) to consume additional
            // partitions results in only the newly added partitions (corresponding to partitionKeys2) being returned
            Assert.assertEquals(partitionKeys2, toKeys(partitionConsumer.consumePartitions().getPartitions()));
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // consuming the partitions again, without adding any new partitions returns an empty iterator
            Assert.assertTrue(partitionConsumer.consumePartitions().getPartitions().isEmpty());
        }
    });
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // creating a new PartitionConsumer resets the consumption state.
            // test combination of filter and limit
            // the partitionFilter will match partitionKeys [1, 7), of which there are 6
            final PartitionFilter partitionFilter = PartitionFilter.builder().addRangeCondition("i", 1, 7).build();
            final Predicate<PartitionDetail> predicate = new Predicate<PartitionDetail>() {

                @Override
                public boolean apply(PartitionDetail partitionDetail) {
                    return partitionFilter.match(partitionDetail.getPartitionKey());
                }
            };
            ConsumerConfiguration configuration = ConsumerConfiguration.builder().setPartitionPredicate(predicate).build();
            PartitionConsumer newPartitionConsumer = new ConcurrentPartitionConsumer(dataset, new InMemoryStatePersistor(), configuration);
            List<Partition> consumedPartitions = new ArrayList<>();
            // applying the filter (which narrows the set down to 6 elements) with a limit of 4 results in 4 consumed partitions
            Iterables.addAll(consumedPartitions, newPartitionConsumer.consumePartitions(4).getPartitions());
            Assert.assertEquals(4, consumedPartitions.size());
            // applying a limit of 3 with the same filter returns the remaining 2 elements that match the filter
            Iterables.addAll(consumedPartitions, newPartitionConsumer.consumePartitions(3).getPartitions());
            Assert.assertEquals(6, consumedPartitions.size());
            // assert that the returned partitions have partition keys whose i values cover the range [1, 7)
            Set<Integer> expectedIFields = new HashSet<>();
            for (int i = 1; i < 7; i++) {
                expectedIFields.add(i);
            }
            Set<Integer> actualIFields = new HashSet<>();
            for (Partition consumedPartition : consumedPartitions) {
                actualIFields.add((Integer) consumedPartition.getPartitionKey().getField("i"));
            }
            Assert.assertEquals(expectedIFields, actualIFields);
        }
    });
}
Also used : ConcurrentPartitionConsumer(co.cask.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer) ConsumablePartition(co.cask.cdap.api.dataset.lib.partitioned.ConsumablePartition) Partition(co.cask.cdap.api.dataset.lib.Partition) HashSet(java.util.HashSet) PartitionedFileSet(co.cask.cdap.api.dataset.lib.PartitionedFileSet) Set(java.util.Set) ConsumerWorkingSet(co.cask.cdap.api.dataset.lib.partitioned.ConsumerWorkingSet) PartitionedFileSet(co.cask.cdap.api.dataset.lib.PartitionedFileSet) TransactionExecutor(org.apache.tephra.TransactionExecutor) PartitionDetail(co.cask.cdap.api.dataset.lib.PartitionDetail) Predicate(co.cask.cdap.api.Predicate) PartitionFilter(co.cask.cdap.api.dataset.lib.PartitionFilter) TransactionAware(org.apache.tephra.TransactionAware) ConsumerConfiguration(co.cask.cdap.api.dataset.lib.partitioned.ConsumerConfiguration) PartitionKey(co.cask.cdap.api.dataset.lib.PartitionKey) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) ConcurrentPartitionConsumer(co.cask.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer) PartitionConsumer(co.cask.cdap.api.dataset.lib.partitioned.PartitionConsumer) HashSet(java.util.HashSet) Test(org.junit.Test)
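
Assuming co.cask.cdap.api.Predicate remains a single-method interface (its only use above is the one apply method), the anonymous class that adapts the PartitionFilter in the last transaction can be written as a lambda on Java 8+. A minimal sketch using only types that appear in the test:

final PartitionFilter filter = PartitionFilter.builder().addRangeCondition("i", 1, 7).build();
ConsumerConfiguration configuration = ConsumerConfiguration.builder()
    .setPartitionPredicate(detail -> filter.match(detail.getPartitionKey()))
    .build();
PartitionConsumer consumer = new ConcurrentPartitionConsumer(dataset, new InMemoryStatePersistor(), configuration);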

Aggregations

PartitionKey (co.cask.cdap.api.dataset.lib.PartitionKey): 59 usages
PartitionedFileSet (co.cask.cdap.api.dataset.lib.PartitionedFileSet): 28 usages
Test (org.junit.Test): 27 usages
TransactionAware (org.apache.tephra.TransactionAware): 17 usages
TransactionExecutor (org.apache.tephra.TransactionExecutor): 17 usages
IOException (java.io.IOException): 12 usages
HashMap (java.util.HashMap): 12 usages
PartitionDetail (co.cask.cdap.api.dataset.lib.PartitionDetail): 11 usages
ConcurrentPartitionConsumer (co.cask.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer): 11 usages
PartitionConsumer (co.cask.cdap.api.dataset.lib.partitioned.PartitionConsumer): 11 usages
ArrayList (java.util.ArrayList): 11 usages
List (java.util.List): 11 usages
HashSet (java.util.HashSet): 10 usages
DataSetException (co.cask.cdap.api.dataset.DataSetException): 9 usages
ImmutableList (com.google.common.collect.ImmutableList): 9 usages
PartitionNotFoundException (co.cask.cdap.api.dataset.PartitionNotFoundException): 7 usages
Partition (co.cask.cdap.api.dataset.lib.Partition): 7 usages
ConsumerConfiguration (co.cask.cdap.api.dataset.lib.partitioned.ConsumerConfiguration): 7 usages
TimePartitionedFileSet (co.cask.cdap.api.dataset.lib.TimePartitionedFileSet): 6 usages
Location (org.apache.twill.filesystem.Location): 6 usages