Example 11 with PartitionKey

Use of co.cask.cdap.api.dataset.lib.PartitionKey in project cdap by caskdata.

From the class PartitionedFileSetTest, method testMetadataForNonexistentPartition.

@Test
public void testMetadataForNonexistentPartition() throws Exception {
    PartitionedFileSet pfs = dsFrameworkUtil.getInstance(pfsInstance);
    PartitionKey key = generateUniqueKey();
    TransactionContext txContext = new TransactionContext(txClient, (TransactionAware) pfs);
    txContext.start();
    try {
        // didn't add any partitions to the dataset, so any partition key should throw a PartitionNotFoundException
        pfs.addMetadata(key, "metaKey", "metaValue");
        Assert.fail("Expected not to find key: " + key);
    } catch (PartitionNotFoundException e) {
        Assert.assertEquals(pfsInstance.getEntityName(), e.getPartitionedFileSetName());
        Assert.assertEquals(key, e.getPartitionKey());
    } finally {
        txContext.abort();
    }
}
Also used: PartitionNotFoundException (co.cask.cdap.api.dataset.PartitionNotFoundException), TransactionContext (org.apache.tephra.TransactionContext), PartitionKey (co.cask.cdap.api.dataset.lib.PartitionKey), PartitionedFileSet (co.cask.cdap.api.dataset.lib.PartitionedFileSet), Test (org.junit.Test)
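The helper generateUniqueKey() is referenced but not shown. A minimal sketch of what it could look like, assuming the dataset is partitioned on an int field "i", a long field "l", and a string field "s" (the field names and the static AtomicInteger counter are assumptions, not taken from the snippet; requires java.util.concurrent.atomic.AtomicInteger):

private static final AtomicInteger uniqueIndex = new AtomicInteger();

// derives every partition field from one incrementing counter, so each call yields a distinct key
private PartitionKey generateUniqueKey() {
    int index = uniqueIndex.incrementAndGet();
    return PartitionKey.builder()
        .addIntField("i", index)
        .addLongField("l", index)
        .addStringField("s", String.valueOf(index))
        .build();
}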

Example 12 with PartitionKey

Use of co.cask.cdap.api.dataset.lib.PartitionKey in project cdap by caskdata.

From the class PartitionKeyTest, method testEqualityHashCode.

@Test
public void testEqualityHashCode() {
    PartitionKey key1 = PartitionKey.builder().addField("a", "value").addField("b", 1L).addField("c", -17).build();
    PartitionKey key2 = PartitionKey.builder().addField("b", 1L).addField("c", -17).addField("a", "value").build();
    Assert.assertEquals(key1, key2);
    Assert.assertEquals(key1.hashCode(), key2.hashCode());
}
Also used: PartitionKey (co.cask.cdap.api.dataset.lib.PartitionKey), Test (org.junit.Test)
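The practical point of this contract: because equals() and hashCode() do not depend on the order in which fields were added, PartitionKey works reliably as a key in hash-based collections. A small illustrative sketch (the locations map and the path value are assumptions for the example, not part of the test):

Map<PartitionKey, String> locations = new HashMap<>();
locations.put(
    PartitionKey.builder().addField("a", "value").addField("b", 1L).addField("c", -17).build(),
    "/data/part-0");

// the same fields added in a different order compare equal and hash to the same bucket
PartitionKey lookup = PartitionKey.builder().addField("c", -17).addField("b", 1L).addField("a", "value").build();
String path = locations.get(lookup);  // returns "/data/part-0"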

Example 13 with PartitionKey

Use of co.cask.cdap.api.dataset.lib.PartitionKey in project cdap by caskdata.

From the class ClicksAndViewsMapReduce, method initialize.

@Override
public void initialize() throws Exception {
    MapReduceContext context = getContext();
    context.addInput(Input.ofStream(ClicksAndViews.CLICKS));
    context.addInput(Input.ofStream(ClicksAndViews.VIEWS));
    PartitionedFileSet joinedPFS = context.getDataset(ClicksAndViews.JOINED);
    PartitionKey outputPartitionKey = PartitionedFileSetArguments.getOutputPartitionKey(context.getRuntimeArguments(), joinedPFS.getPartitioning());
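    // prefer an output partition key supplied through the runtime arguments;
    // if the caller did not provide one, fall back to the run's logical start time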
    if (outputPartitionKey == null) {
        outputPartitionKey = PartitionKey.builder().addLongField("runtime", context.getLogicalStartTime()).build();
    }
    Map<String, String> outputArgs = new HashMap<>();
    PartitionedFileSetArguments.setOutputPartitionKey(outputArgs, outputPartitionKey);
    context.addOutput(Output.ofDataset(ClicksAndViews.JOINED, outputArgs));
    Job job = context.getHadoopJob();
    job.setMapperClass(ImpressionKeyingMapper.class);
    job.setReducerClass(JoiningReducer.class);
}
Also used: MapReduceContext (co.cask.cdap.api.mapreduce.MapReduceContext), HashMap (java.util.HashMap), PartitionKey (co.cask.cdap.api.dataset.lib.PartitionKey), PartitionedFileSet (co.cask.cdap.api.dataset.lib.PartitionedFileSet), Job (org.apache.hadoop.mapreduce.Job)
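The fallback above only triggers when no key arrives through the runtime arguments. On the caller side, the key is supplied with the same PartitionedFileSetArguments helper the snippet reads it back with; a minimal sketch (the timestamp value is an arbitrary example, and scoping the argument map to the JOINED dataset when starting the program is left out):

Map<String, String> outputArgs = new HashMap<>();
PartitionKey key = PartitionKey.builder().addLongField("runtime", 1451606400000L).build();
// serializes the key into the argument map that getOutputPartitionKey() later decodes
PartitionedFileSetArguments.setOutputPartitionKey(outputArgs, key);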

Example 14 with PartitionKey

Use of co.cask.cdap.api.dataset.lib.PartitionKey in project cdap by caskdata.

From the class ConcurrentPartitionConsumer, method untake.

@Override
public void untake(ConsumerWorkingSet workingSet, List<? extends PartitionKey> partitionKeys) {
    doExpiry(workingSet);
    for (PartitionKey key : partitionKeys) {
        ConsumablePartition consumablePartition = workingSet.lookup(key);
        // don't need to assertInProgress because untake() already does that
        consumablePartition.untake();
    }
}
Also used: PartitionKey (co.cask.cdap.api.dataset.lib.PartitionKey)

Example 15 with PartitionKey

Use of co.cask.cdap.api.dataset.lib.PartitionKey in project cdap by caskdata.

From the class ConcurrentPartitionConsumer, method commit.

/**
 * Removes the given partition keys from the working set, as they have been successfully processed.
 */
protected void commit(ConsumerWorkingSet workingSet, List<? extends PartitionKey> partitionKeys) {
    for (PartitionKey key : partitionKeys) {
        ConsumablePartition consumablePartition = workingSet.lookup(key);
        assertInProgress(consumablePartition);
        workingSet.remove(key);
    }
}
Also used: PartitionKey (co.cask.cdap.api.dataset.lib.PartitionKey)
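Examples 14 and 15 are the two halves of finishing a batch: commit() permanently drops keys that were processed successfully, while untake() returns failed keys to the working set so they can be attempted again. A minimal sketch of how the two could be dispatched (this finish() helper is hypothetical, not part of the CDAP API):

// hypothetical helper: route a finished batch to commit() or untake()
protected void finish(ConsumerWorkingSet workingSet, List<? extends PartitionKey> partitionKeys, boolean succeeded) {
    if (succeeded) {
        // success: remove the keys from the working set for good
        commit(workingSet, partitionKeys);
    } else {
        // failure: return the keys to the working set so another consumer can retry them
        untake(workingSet, partitionKeys);
    }
}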

Aggregations

PartitionKey (co.cask.cdap.api.dataset.lib.PartitionKey): 59
PartitionedFileSet (co.cask.cdap.api.dataset.lib.PartitionedFileSet): 28
Test (org.junit.Test): 27
TransactionAware (org.apache.tephra.TransactionAware): 17
TransactionExecutor (org.apache.tephra.TransactionExecutor): 17
IOException (java.io.IOException): 12
HashMap (java.util.HashMap): 12
PartitionDetail (co.cask.cdap.api.dataset.lib.PartitionDetail): 11
ConcurrentPartitionConsumer (co.cask.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer): 11
PartitionConsumer (co.cask.cdap.api.dataset.lib.partitioned.PartitionConsumer): 11
ArrayList (java.util.ArrayList): 11
List (java.util.List): 11
HashSet (java.util.HashSet): 10
DataSetException (co.cask.cdap.api.dataset.DataSetException): 9
ImmutableList (com.google.common.collect.ImmutableList): 9
PartitionNotFoundException (co.cask.cdap.api.dataset.PartitionNotFoundException): 7
Partition (co.cask.cdap.api.dataset.lib.Partition): 7
ConsumerConfiguration (co.cask.cdap.api.dataset.lib.partitioned.ConsumerConfiguration): 7
TimePartitionedFileSet (co.cask.cdap.api.dataset.lib.TimePartitionedFileSet): 6
Location (org.apache.twill.filesystem.Location): 6