
Example 21 with BasicArguments

Use of co.cask.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata.

The class MapReduceProgramRunnerTest, method testTransactionHandling:

/**
   * Tests that initialize() and getSplits() are called in the same transaction,
   * and with the same instance of the input dataset.
   */
@Test
public void testTransactionHandling() throws Exception {
    final ApplicationWithPrograms app = deployApp(AppWithTxAware.class);
    runProgram(app, AppWithTxAware.PedanticMapReduce.class, new BasicArguments(ImmutableMap.of("outputPath", TEMP_FOLDER_SUPPLIER.get().getPath() + "/output")));
}
Also used: ApplicationWithPrograms (co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms), BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments), Test (org.junit.Test)
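
For orientation, a minimal sketch (not part of the CDAP sources) of the pattern these tests share: BasicArguments simply wraps a string map of runtime arguments. The construction matches the test above; the getOption accessor is assumed from the Arguments interface that BasicArguments implements.

import co.cask.cdap.internal.app.runtime.BasicArguments;
import com.google.common.collect.ImmutableMap;

public class BasicArgumentsSketch {
    public static void main(String[] args) {
        // Same construction pattern as the test: a single "outputPath" option.
        BasicArguments arguments = new BasicArguments(ImmutableMap.of("outputPath", "/tmp/output"));
        // Options can be read back individually or as a map.
        System.out.println(arguments.getOption("outputPath")); // prints /tmp/output
        System.out.println(arguments.asMap());                 // prints {outputPath=/tmp/output}
    }
}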

Example 22 with BasicArguments

Use of co.cask.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata.

The class MapReduceWithMultipleInputsTest, method testMapperOutputTypeChecking:

@Test
public void testMapperOutputTypeChecking() throws Exception {
    final ApplicationWithPrograms app = deployApp(AppWithMapReduceUsingInconsistentMappers.class);
    // the mapreduce with consistent mapper types will succeed
    Assert.assertTrue(runProgram(app, AppWithMapReduceUsingInconsistentMappers.MapReduceWithConsistentMapperTypes.class, new BasicArguments()));
    // the mapreduce with mapper classes of inconsistent output types will fail, whether the mappers are set
    // through the CDAP APIs or directly on the job
    Assert.assertFalse(runProgram(app, AppWithMapReduceUsingInconsistentMappers.MapReduceWithInconsistentMapperTypes.class, new BasicArguments()));
    Assert.assertFalse(runProgram(app, AppWithMapReduceUsingInconsistentMappers.MapReduceWithInconsistentMapperTypes2.class, new BasicArguments()));
}
Also used: ApplicationWithPrograms (co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms), BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments), Test (org.junit.Test)
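
To make the "inconsistent output types" failure concrete, here is a hypothetical pair of plain Hadoop mappers (not the ones in AppWithMapReduceUsingInconsistentMappers): if both are attached to the same job, they declare conflicting map output value classes, so no single output value class can be configured and the run fails.

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Emits Text/IntWritable pairs.
class MapperA extends Mapper<LongWritable, Text, Text, IntWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        context.write(value, new IntWritable(1));
    }
}

// Emits Text/LongWritable pairs: same key type, but a conflicting output value type.
class MapperB extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        context.write(value, new LongWritable(1L));
    }
}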

Example 23 with BasicArguments

Use of co.cask.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata.

The class MapReduceWithMultipleInputsTest, method testAddingMultipleInputsWithSameAlias:

@Test
public void testAddingMultipleInputsWithSameAlias() throws Exception {
    final ApplicationWithPrograms app = deployApp(AppWithMapReduceUsingMultipleInputs.class);
    // this will fail because the MapReduce configures two inputs with the same alias
    Assert.assertFalse(runProgram(app, AppWithMapReduceUsingMultipleInputs.InvalidMapReduce.class, new BasicArguments()));
}
Also used: ApplicationWithPrograms (co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms), BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments), Test (org.junit.Test)
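
The misconfiguration the test asserts against looks roughly like the following sketch, assuming the CDAP 4.x AbstractMapReduce lifecycle and Input API from co.cask.cdap.api.data.batch; the dataset names are hypothetical. Aliases must uniquely identify an input, so registering two inputs under one alias makes the run fail.

import co.cask.cdap.api.data.batch.Input;
import co.cask.cdap.api.mapreduce.AbstractMapReduce;
import co.cask.cdap.api.mapreduce.MapReduceContext;

public class SameAliasSketch extends AbstractMapReduce {
    @Override
    public void initialize() throws Exception {
        MapReduceContext context = getContext();
        context.addInput(Input.ofDataset("inputA").alias("books"));
        // A second input under the alias "books" is invalid, so the run fails.
        context.addInput(Input.ofDataset("inputB").alias("books"));
    }
}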

Example 24 with BasicArguments

Use of co.cask.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata.

The class MapReduceWithPartitionedTest, method testPartitionedFileSetWithMR:

@Test
public void testPartitionedFileSetWithMR() throws Exception {
    final ApplicationWithPrograms app = deployApp(AppWithPartitionedFileSet.class);
    // write a value to the input table
    final Table table = datasetCache.getDataset(AppWithPartitionedFileSet.INPUT);
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) table).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            table.put(Bytes.toBytes("x"), AppWithPartitionedFileSet.ONLY_COLUMN, Bytes.toBytes("1"));
        }
    });
    // a partition key for the map/reduce output
    final PartitionKey keyX = PartitionKey.builder().addStringField("type", "x").addLongField("time", 150000L).build();
    // run the partition writer m/r with this output partition key
    Map<String, String> runtimeArguments = Maps.newHashMap();
    Map<String, String> outputArgs = Maps.newHashMap();
    PartitionedFileSetArguments.setOutputPartitionKey(outputArgs, keyX);
    runtimeArguments.putAll(RuntimeArguments.addScope(Scope.DATASET, PARTITIONED, outputArgs));
    Assert.assertTrue(runProgram(app, AppWithPartitionedFileSet.PartitionWriter.class, new BasicArguments(runtimeArguments)));
    // this should have created a partition in the partitioned file set
    final PartitionedFileSet dataset = datasetCache.getDataset(PARTITIONED);
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) dataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            Partition partition = dataset.getPartition(keyX);
            Assert.assertNotNull(partition);
            String path = partition.getRelativePath();
            Assert.assertTrue(path.contains("x"));
            Assert.assertTrue(path.contains("150000"));
        }
    });
    // delete the data in the input table and write a new row
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) table).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            table.delete(Bytes.toBytes("x"));
            table.put(Bytes.toBytes("y"), AppWithPartitionedFileSet.ONLY_COLUMN, Bytes.toBytes("2"));
        }
    });
    // a new partition key for the next map/reduce
    final PartitionKey keyY = PartitionKey.builder().addStringField("type", "y").addLongField("time", 200000L).build();
    // now run the m/r again with a new, later partition time
    PartitionedFileSetArguments.setOutputPartitionKey(outputArgs, keyY);
    runtimeArguments.putAll(RuntimeArguments.addScope(Scope.DATASET, PARTITIONED, outputArgs));
    Assert.assertTrue(runProgram(app, AppWithPartitionedFileSet.PartitionWriter.class, new BasicArguments(runtimeArguments)));
    // this should have created a second partition in the partitioned file set
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) dataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            Partition partition = dataset.getPartition(keyY);
            Assert.assertNotNull(partition);
            String path = partition.getRelativePath();
            Assert.assertNotNull(path);
            Assert.assertTrue(path.contains("y"));
            Assert.assertTrue(path.contains("200000"));
        }
    });
    // a partition filter that matches the outputs of both map/reduces
    PartitionFilter filterXY = PartitionFilter.builder().addRangeCondition("type", "x", "z").build();
    // now run a map/reduce that reads all the partitions
    runtimeArguments = Maps.newHashMap();
    Map<String, String> inputArgs = Maps.newHashMap();
    PartitionedFileSetArguments.setInputPartitionFilter(inputArgs, filterXY);
    runtimeArguments.putAll(RuntimeArguments.addScope(Scope.DATASET, PARTITIONED, inputArgs));
    runtimeArguments.put(AppWithPartitionedFileSet.ROW_TO_WRITE, "a");
    Assert.assertTrue(runProgram(app, AppWithPartitionedFileSet.PartitionReader.class, new BasicArguments(runtimeArguments)));
    // this should have read both partitions - and written both x and y to row a
    final Table output = datasetCache.getDataset(AppWithPartitionedFileSet.OUTPUT);
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) output).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            Row row = output.get(Bytes.toBytes("a"));
            Assert.assertEquals("1", row.getString("x"));
            Assert.assertEquals("2", row.getString("y"));
        }
    });
    // a partition filter that matches the output key of the first map/reduce
    PartitionFilter filterX = PartitionFilter.builder().addValueCondition("type", "x").addRangeCondition("time", null, 160000L).build();
    // now run a map/reduce that reads a range of the partitions, namely the first one
    inputArgs.clear();
    PartitionedFileSetArguments.setInputPartitionFilter(inputArgs, filterX);
    runtimeArguments.putAll(RuntimeArguments.addScope(Scope.DATASET, PARTITIONED, inputArgs));
    runtimeArguments.put(AppWithPartitionedFileSet.ROW_TO_WRITE, "b");
    Assert.assertTrue(runProgram(app, AppWithPartitionedFileSet.PartitionReader.class, new BasicArguments(runtimeArguments)));
    // this should have read the first partition only - and written only x to row b
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) output).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            Row row = output.get(Bytes.toBytes("b"));
            Assert.assertEquals("1", row.getString("x"));
            Assert.assertNull(row.get("y"));
        }
    });
    // a partition filter that matches no key
    PartitionFilter filterMT = PartitionFilter.builder().addValueCondition("type", "nosuchthing").build();
    // now run a map/reduce that reads an empty range of partitions (the filter matches nothing)
    inputArgs.clear();
    PartitionedFileSetArguments.setInputPartitionFilter(inputArgs, filterMT);
    runtimeArguments.putAll(RuntimeArguments.addScope(Scope.DATASET, PARTITIONED, inputArgs));
    runtimeArguments.put(AppWithPartitionedFileSet.ROW_TO_WRITE, "n");
    Assert.assertTrue(runProgram(app, AppWithPartitionedFileSet.PartitionReader.class, new BasicArguments(runtimeArguments)));
    // this should have read no partitions - and written nothing to row n
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) output).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() {
            Row row = output.get(Bytes.toBytes("n"));
            Assert.assertTrue(row.isEmpty());
        }
    });
}
Also used: Partition (co.cask.cdap.api.dataset.lib.Partition), Table (co.cask.cdap.api.dataset.table.Table), TransactionExecutor (org.apache.tephra.TransactionExecutor), TimePartitionedFileSet (co.cask.cdap.api.dataset.lib.TimePartitionedFileSet), PartitionedFileSet (co.cask.cdap.api.dataset.lib.PartitionedFileSet), PartitionFilter (co.cask.cdap.api.dataset.lib.PartitionFilter), ApplicationWithPrograms (co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms), TransactionAware (org.apache.tephra.TransactionAware), PartitionKey (co.cask.cdap.api.dataset.lib.PartitionKey), BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments), Row (co.cask.cdap.api.dataset.table.Row), Test (org.junit.Test)
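
The key mechanism in this test is argument scoping: RuntimeArguments.addScope, used above with Scope.DATASET, namespaces each key so it applies to only one dataset of the run. A minimal sketch of the expected shape, assuming the standard cdap-api packages; the raw key is a hypothetical stand-in for what PartitionedFileSetArguments produces.

import java.util.HashMap;
import java.util.Map;
import co.cask.cdap.api.common.RuntimeArguments;
import co.cask.cdap.api.common.Scope;

public class ScopedArgsSketch {
    public static void main(String[] args) {
        Map<String, String> outputArgs = new HashMap<>();
        outputArgs.put("output.partition.key.type", "x"); // hypothetical raw key
        // "partitioned" stands in for the PARTITIONED dataset name in the test.
        Map<String, String> scoped = RuntimeArguments.addScope(Scope.DATASET, "partitioned", outputArgs);
        // Keys come back prefixed with "<scope>.<name>.":
        // {dataset.partitioned.output.partition.key.type=x}
        System.out.println(scoped);
    }
}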

Example 25 with BasicArguments

Use of co.cask.cdap.internal.app.runtime.BasicArguments in project cdap by caskdata.

The class DefaultProgramWorkflowRunner, method getProgramRunnable:

/**
   * Gets a {@link Runnable} for the {@link Program}.
   *
   * @param name          name of the {@link Program}
   * @param programRunner the {@link ProgramRunner} used to run the program
   * @param program       the {@link Program}
   * @return a {@link Runnable} for this {@link Program}
   */
private Runnable getProgramRunnable(String name, final ProgramRunner programRunner, final Program program) {
    Map<String, String> systemArgumentsMap = new HashMap<>(workflowProgramOptions.getArguments().asMap());
    // Generate the new RunId here for the program running under Workflow
    systemArgumentsMap.put(ProgramOptionConstants.RUN_ID, RunIds.generate().getId());
    // Add Workflow specific system arguments to be passed to the underlying program
    systemArgumentsMap.put(ProgramOptionConstants.WORKFLOW_NAME, workflowSpec.getName());
    systemArgumentsMap.put(ProgramOptionConstants.WORKFLOW_RUN_ID, ProgramRunners.getRunId(workflowProgramOptions).getId());
    systemArgumentsMap.put(ProgramOptionConstants.WORKFLOW_NODE_ID, nodeId);
    systemArgumentsMap.put(ProgramOptionConstants.PROGRAM_NAME_IN_WORKFLOW, name);
    systemArgumentsMap.put(ProgramOptionConstants.WORKFLOW_TOKEN, GSON.toJson(token));
    final ProgramOptions options = new SimpleProgramOptions(
        program.getName(),
        new BasicArguments(Collections.unmodifiableMap(systemArgumentsMap)),
        new BasicArguments(RuntimeArguments.extractScope(
            program.getType().getScope(), name, workflowProgramOptions.getUserArguments().asMap())));
    return new Runnable() {

        @Override
        public void run() {
            try {
                runAndWait(programRunner, program, options);
            } catch (Exception e) {
                throw Throwables.propagate(e);
            }
        }
    };
}
Also used: HashMap (java.util.HashMap), SimpleProgramOptions (co.cask.cdap.internal.app.runtime.SimpleProgramOptions), BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments), ProgramOptions (co.cask.cdap.app.runtime.ProgramOptions), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException)
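
A condensed sketch of the options assembly above: system arguments carry workflow bookkeeping (run id, node id, and so on) while user arguments carry the runtime arguments extracted for the one child program. The constructor shape mirrors the SimpleProgramOptions call in the method; the literal keys and values here are made up.

import java.util.HashMap;
import java.util.Map;
import co.cask.cdap.app.runtime.ProgramOptions;
import co.cask.cdap.internal.app.runtime.BasicArguments;
import co.cask.cdap.internal.app.runtime.SimpleProgramOptions;

public class WorkflowOptionsSketch {
    public static void main(String[] args) {
        Map<String, String> systemArgs = new HashMap<>();
        systemArgs.put("runId", "run-1234");        // hypothetical stand-ins for the
        systemArgs.put("workflowNodeId", "node-1"); // ProgramOptionConstants keys above
        Map<String, String> userArgs = new HashMap<>();
        userArgs.put("threshold", "5");
        ProgramOptions options = new SimpleProgramOptions(
            "WordCount", new BasicArguments(systemArgs), new BasicArguments(userArgs));
        System.out.println(options.getUserArguments().asMap()); // {threshold=5}
    }
}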

Aggregations

Classes used together with BasicArguments across the 30 examples, with usage counts:

BasicArguments (co.cask.cdap.internal.app.runtime.BasicArguments): 30
ApplicationWithPrograms (co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms): 18
Test (org.junit.Test): 17
SimpleProgramOptions (co.cask.cdap.internal.app.runtime.SimpleProgramOptions): 11
ProgramDescriptor (co.cask.cdap.app.program.ProgramDescriptor): 10
ProgramController (co.cask.cdap.app.runtime.ProgramController): 9
File (java.io.File): 5
TransactionExecutor (org.apache.tephra.TransactionExecutor): 5
KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable): 4
AbstractListener (co.cask.cdap.internal.app.runtime.AbstractListener): 4
IOException (java.io.IOException): 4
TransactionAware (org.apache.tephra.TransactionAware): 4
Location (org.apache.twill.filesystem.Location): 4
FileSet (co.cask.cdap.api.dataset.lib.FileSet): 3
Arguments (co.cask.cdap.app.runtime.Arguments): 3
ProgramOptions (co.cask.cdap.app.runtime.ProgramOptions): 3
RandomEndpointStrategy (co.cask.cdap.common.discovery.RandomEndpointStrategy): 3
ImmutableMap (com.google.common.collect.ImmutableMap): 3
Discoverable (org.apache.twill.discovery.Discoverable): 3
DiscoveryServiceClient (org.apache.twill.discovery.DiscoveryServiceClient): 3