Search in sources:

Example 1 with TimePartitionDetail

Use of io.cdap.cdap.api.dataset.lib.TimePartitionDetail in project cdap by caskdata.

The class TimePartitionedFileSetTest defines the method validateTimePartitions:

private void validateTimePartitions(TimePartitionedFileSet dataset, long startTime, long endTime, Map<Long, String> expected) {
    // validate that getPartitionsByTime() returns exactly the expected partitions
    Set<TimePartitionDetail> partitions = dataset.getPartitionsByTime(startTime, endTime);
    Assert.assertEquals(expected.size(), partitions.size());
    for (TimePartitionDetail partition : partitions) {
        // each partition's relative path must match the expected path for its time
        Assert.assertEquals(expected.get(partition.getTime()), partition.getRelativePath());
    }
}
Also used: TimePartitionDetail(io.cdap.cdap.api.dataset.lib.TimePartitionDetail)
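For context, a minimal sketch of how a test might drive this helper. The partition times and paths below are illustrative, not taken from the original test; `tpfs` stands in for an already-created TimePartitionedFileSet, and the calls are assumed to run inside a transaction, as in the tests on this page.

// Illustrative sketch (not from the original test): register two partitions,
// then check that a covering time range returns both and a disjoint range none.
Map<Long, String> expected = Maps.newHashMap();
long t1 = 1000L;
long t2 = 2000L;
tpfs.addPartition(t1, "path/one");  // 'tpfs' is an assumed TimePartitionedFileSet instance
tpfs.addPartition(t2, "path/two");
expected.put(t1, "path/one");
expected.put(t2, "path/two");
// a range covering both times should return both partitions
validateTimePartitions(tpfs, 0L, 3000L, expected);
// a range covering neither time should return none
validateTimePartitions(tpfs, 5000L, 6000L, Collections.<Long, String>emptyMap());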

Example 2 with TimePartitionDetail

Use of io.cdap.cdap.api.dataset.lib.TimePartitionDetail in project cdap by caskdata.

The class TimePartitionedFileSetTest defines the method testPartitionMetadata:

@Test
public void testPartitionMetadata() throws Exception {
    final TimePartitionedFileSet tpfs = dsFrameworkUtil.getInstance(TPFS_INSTANCE);
    TransactionAware txAware = (TransactionAware) tpfs;
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAware).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // make sure the dataset has no partitions
            validateTimePartitions(tpfs, 0L, MAX, Collections.<Long, String>emptyMap());
            Date date = DATE_FORMAT.parse("6/4/12 10:00 am");
            long time = date.getTime();
            // keep track of all the metadata added
            Map<String, String> allMetadata = Maps.newHashMap();
            Map<String, String> metadata = ImmutableMap.of("key1", "value1", "key2", "value3", "key100", "value4");
            tpfs.addPartition(time, "file", metadata);
            allMetadata.putAll(metadata);
            TimePartitionDetail partitionByTime = tpfs.getPartitionByTime(time);
            Assert.assertNotNull(partitionByTime);
            Assert.assertEquals(metadata, partitionByTime.getMetadata().asMap());
            tpfs.addMetadata(time, "key3", "value4");
            allMetadata.put("key3", "value4");
            // setMetadata overwrites the previous value when the key already exists
            tpfs.setMetadata(time, Collections.singletonMap("key3", "value5"));
            allMetadata.put("key3", "value5");
            Map<String, String> newMetadata = ImmutableMap.of("key4", "value4", "key5", "value5");
            tpfs.addMetadata(time, newMetadata);
            allMetadata.putAll(newMetadata);
            try {
                // attempting to update an existing key throws a DataSetException
                tpfs.addMetadata(time, "key3", "value5");
                Assert.fail("Expected not to be able to update an existing metadata entry");
            } catch (DataSetException expected) {
                // expected: addMetadata must not overwrite an existing key
            }
            partitionByTime = tpfs.getPartitionByTime(time);
            Assert.assertNotNull(partitionByTime);
            Assert.assertEquals(allMetadata, partitionByTime.getMetadata().asMap());
            // remove metadata entries; specifying metadata key that does not exist ('key6') does not cause an error
            tpfs.removeMetadata(time, ImmutableSet.of("key4", "key5", "key6"));
            allMetadata.remove("key4");
            allMetadata.remove("key5");
            partitionByTime = tpfs.getPartitionByTime(time);
            Assert.assertNotNull(partitionByTime);
            Assert.assertEquals(allMetadata, partitionByTime.getMetadata().asMap());
        }
    });
}
Also used: DataSetException(io.cdap.cdap.api.dataset.DataSetException) TransactionAware(org.apache.tephra.TransactionAware) TransactionExecutor(org.apache.tephra.TransactionExecutor) TimePartitionDetail(io.cdap.cdap.api.dataset.lib.TimePartitionDetail) TimePartitionedFileSet(io.cdap.cdap.api.dataset.lib.TimePartitionedFileSet) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) TransactionFailureException(org.apache.tephra.TransactionFailureException) UnauthorizedException(io.cdap.cdap.security.spi.authorization.UnauthorizedException) DatasetManagementException(io.cdap.cdap.api.dataset.DatasetManagementException) IOException(java.io.IOException) Date(java.util.Date) Test(org.junit.Test)
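The metadata calls above boil down to three rules: addMetadata only creates new entries and fails with a DataSetException on an existing key, setMetadata overwrites unconditionally, and removeMetadata silently ignores keys that do not exist. A condensed sketch of just those semantics, assuming a partition already exists at `time` on the dataset `tpfs` (the keys and values here are illustrative):

// Condensed, illustrative sketch of the metadata semantics exercised above;
// assumes a partition was already added at 'time' on the dataset 'tpfs'.
tpfs.addMetadata(time, "color", "red");                             // ok: new key
try {
    tpfs.addMetadata(time, "color", "blue");                        // key already exists
    Assert.fail("addMetadata must not overwrite an existing key");
} catch (DataSetException expected) {
    // expected
}
tpfs.setMetadata(time, Collections.singletonMap("color", "blue"));  // overwrites
tpfs.removeMetadata(time, ImmutableSet.of("color", "no-such-key")); // missing key is ignored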

Example 3 with TimePartitionDetail

Use of io.cdap.cdap.api.dataset.lib.TimePartitionDetail in project cdap by caskdata.

The class MapReduceWithPartitionedTest defines the method testTimePartitionedWithMR:

@Test
public void testTimePartitionedWithMR() throws Exception {
    final ApplicationWithPrograms app = deployApp(AppWithTimePartitionedFileSet.class);
    // write a value to the input table
    final Table table = datasetCache.getDataset(AppWithTimePartitionedFileSet.INPUT);
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) table).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            table.put(Bytes.toBytes("x"), AppWithTimePartitionedFileSet.ONLY_COLUMN, Bytes.toBytes("1"));
        }
    });
    final long time = DATE_FORMAT.parse("1/15/15 11:15 am").getTime();
    final long time5 = time + TimeUnit.MINUTES.toMillis(5);
    // run the partition writer m/r with this output partition time
    Map<String, String> runtimeArguments = Maps.newHashMap();
    Map<String, String> outputArgs = Maps.newHashMap();
    TimePartitionedFileSetArguments.setOutputPartitionTime(outputArgs, time);
    final ImmutableMap<String, String> assignedMetadata = ImmutableMap.of("region", "13", "data.source.name", "input", "data.source.type", "table");
    TimePartitionedFileSetArguments.setOutputPartitionMetadata(outputArgs, assignedMetadata);
    runtimeArguments.putAll(RuntimeArguments.addScope(Scope.DATASET, TIME_PARTITIONED, outputArgs));
    Assert.assertTrue(runProgram(app, AppWithTimePartitionedFileSet.PartitionWriter.class, new BasicArguments(runtimeArguments)));
    // this should have created a partition in the tpfs
    final TimePartitionedFileSet tpfs = datasetCache.getDataset(TIME_PARTITIONED);
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) tpfs).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            TimePartitionDetail partition = tpfs.getPartitionByTime(time);
            Assert.assertNotNull(partition);
            String path = partition.getRelativePath();
            Assert.assertNotNull(path);
            Assert.assertTrue(path.contains("2015-01-15/11-15"));
            Assert.assertEquals(assignedMetadata, partition.getMetadata().asMap());
        }
    });
    // delete the data in the input table and write a new row
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) table).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            table.delete(Bytes.toBytes("x"));
            table.put(Bytes.toBytes("y"), AppWithTimePartitionedFileSet.ONLY_COLUMN, Bytes.toBytes("2"));
        }
    });
    // now run the m/r again with a new partition time, say 5 minutes later
    TimePartitionedFileSetArguments.setOutputPartitionTime(outputArgs, time5);
    runtimeArguments.putAll(RuntimeArguments.addScope(Scope.DATASET, TIME_PARTITIONED, outputArgs));
    // make the MapReduce add the partition in destroy(), to validate that this does not fail the job
    runtimeArguments.put(AppWithTimePartitionedFileSet.COMPAT_ADD_PARTITION, "true");
    Assert.assertTrue(runProgram(app, AppWithTimePartitionedFileSet.PartitionWriter.class, new BasicArguments(runtimeArguments)));
    // this should have created a partition in the tpfs
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) tpfs).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            Partition partition = tpfs.getPartitionByTime(time5);
            Assert.assertNotNull(partition);
            String path = partition.getRelativePath();
            Assert.assertNotNull(path);
            Assert.assertTrue(path.contains("2015-01-15/11-20"));
        }
    });
    // now run a map/reduce that reads all the partitions
    runtimeArguments = Maps.newHashMap();
    Map<String, String> inputArgs = Maps.newHashMap();
    TimePartitionedFileSetArguments.setInputStartTime(inputArgs, time - TimeUnit.MINUTES.toMillis(5));
    TimePartitionedFileSetArguments.setInputEndTime(inputArgs, time5 + TimeUnit.MINUTES.toMillis(5));
    runtimeArguments.putAll(RuntimeArguments.addScope(Scope.DATASET, TIME_PARTITIONED, inputArgs));
    runtimeArguments.put(AppWithTimePartitionedFileSet.ROW_TO_WRITE, "a");
    Assert.assertTrue(runProgram(app, AppWithTimePartitionedFileSet.PartitionReader.class, new BasicArguments(runtimeArguments)));
    // this should have read both partitions - and written both x and y to row a
    final Table output = datasetCache.getDataset(AppWithTimePartitionedFileSet.OUTPUT);
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) output).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            Row row = output.get(Bytes.toBytes("a"));
            Assert.assertEquals("1", row.getString("x"));
            Assert.assertEquals("2", row.getString("y"));
        }
    });
    // now run a map/reduce that reads a range of the partitions, namely the first one
    TimePartitionedFileSetArguments.setInputStartTime(inputArgs, time - TimeUnit.MINUTES.toMillis(5));
    TimePartitionedFileSetArguments.setInputEndTime(inputArgs, time + TimeUnit.MINUTES.toMillis(2));
    runtimeArguments.putAll(RuntimeArguments.addScope(Scope.DATASET, TIME_PARTITIONED, inputArgs));
    runtimeArguments.put(AppWithTimePartitionedFileSet.ROW_TO_WRITE, "b");
    Assert.assertTrue(runProgram(app, AppWithTimePartitionedFileSet.PartitionReader.class, new BasicArguments(runtimeArguments)));
    // this should have read the first partition only - and written only x to row b
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) output).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            Row row = output.get(Bytes.toBytes("b"));
            Assert.assertEquals("1", row.getString("x"));
            Assert.assertNull(row.get("y"));
        }
    });
    // now run a map/reduce that reads no partitions (because the range matches nothing)
    TimePartitionedFileSetArguments.setInputStartTime(inputArgs, time - TimeUnit.MINUTES.toMillis(10));
    TimePartitionedFileSetArguments.setInputEndTime(inputArgs, time - TimeUnit.MINUTES.toMillis(9));
    runtimeArguments.putAll(RuntimeArguments.addScope(Scope.DATASET, TIME_PARTITIONED, inputArgs));
    runtimeArguments.put(AppWithTimePartitionedFileSet.ROW_TO_WRITE, "n");
    Assert.assertTrue(runProgram(app, AppWithTimePartitionedFileSet.PartitionReader.class, new BasicArguments(runtimeArguments)));
    // this should have read no partitions - and written nothing to row n
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) output).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            Row row = output.get(Bytes.toBytes("n"));
            Assert.assertTrue(row.isEmpty());
        }
    });
}
Also used: Partition(io.cdap.cdap.api.dataset.lib.Partition) Table(io.cdap.cdap.api.dataset.table.Table) TransactionExecutor(org.apache.tephra.TransactionExecutor) ApplicationWithPrograms(io.cdap.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms) TransactionAware(org.apache.tephra.TransactionAware) BasicArguments(io.cdap.cdap.internal.app.runtime.BasicArguments) TimePartitionDetail(io.cdap.cdap.api.dataset.lib.TimePartitionDetail) Row(io.cdap.cdap.api.dataset.table.Row) TimePartitionedFileSet(io.cdap.cdap.api.dataset.lib.TimePartitionedFileSet) Test(org.junit.Test)
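The recurring pattern in this test is how partition times reach the MapReduce job: they travel through dataset-scoped runtime arguments built with TimePartitionedFileSetArguments. A minimal sketch of that plumbing on its own, where the dataset name "tpfs" and the variable `partitionTime` are placeholders, not names from the original test:

// Illustrative sketch of the runtime-argument plumbing used above; the
// dataset name "tpfs" and 'partitionTime' are placeholders.
Map<String, String> outputArgs = Maps.newHashMap();
// the writer job creates a partition at exactly this time
TimePartitionedFileSetArguments.setOutputPartitionTime(outputArgs, partitionTime);

Map<String, String> inputArgs = Maps.newHashMap();
// the reader job sees every partition whose time falls in the configured range
TimePartitionedFileSetArguments.setInputStartTime(inputArgs, partitionTime - TimeUnit.HOURS.toMillis(1));
TimePartitionedFileSetArguments.setInputEndTime(inputArgs, partitionTime + TimeUnit.HOURS.toMillis(1));

// scope both maps to the dataset so only the TPFS named "tpfs" receives them
Map<String, String> runtimeArgs = Maps.newHashMap();
runtimeArgs.putAll(RuntimeArguments.addScope(Scope.DATASET, "tpfs", outputArgs));
runtimeArgs.putAll(RuntimeArguments.addScope(Scope.DATASET, "tpfs", inputArgs));

As the assertions in the test show, the resulting partitions surface under relative paths derived from the partition time (for example, 2015-01-15/11-15).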

Aggregations

TimePartitionDetail (io.cdap.cdap.api.dataset.lib.TimePartitionDetail): 3 uses
TimePartitionedFileSet (io.cdap.cdap.api.dataset.lib.TimePartitionedFileSet): 2 uses
TransactionAware (org.apache.tephra.TransactionAware): 2 uses
TransactionExecutor (org.apache.tephra.TransactionExecutor): 2 uses
Test (org.junit.Test): 2 uses
ImmutableMap (com.google.common.collect.ImmutableMap): 1 use
DataSetException (io.cdap.cdap.api.dataset.DataSetException): 1 use
DatasetManagementException (io.cdap.cdap.api.dataset.DatasetManagementException): 1 use
Partition (io.cdap.cdap.api.dataset.lib.Partition): 1 use
Row (io.cdap.cdap.api.dataset.table.Row): 1 use
Table (io.cdap.cdap.api.dataset.table.Table): 1 use
ApplicationWithPrograms (io.cdap.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms): 1 use
BasicArguments (io.cdap.cdap.internal.app.runtime.BasicArguments): 1 use
UnauthorizedException (io.cdap.cdap.security.spi.authorization.UnauthorizedException): 1 use
IOException (java.io.IOException): 1 use
Date (java.util.Date): 1 use
Map (java.util.Map): 1 use
TransactionFailureException (org.apache.tephra.TransactionFailureException): 1 use