
Example 6 with TxRunnable

Use of io.cdap.cdap.api.TxRunnable in project cdap by caskdata.

From class DynamicSparkCompute, method lazyInit:

// when checkpointing is enabled, and Spark is loading DStream operations from an existing checkpoint,
// delegate will be null and the initialize() method won't have been called. So we need to instantiate
// the delegate and initialize it.
private void lazyInit(final JavaSparkContext jsc) throws Exception {
    if (delegate == null) {
        PluginFunctionContext pluginFunctionContext = dynamicDriverContext.getPluginFunctionContext();
        delegate = pluginFunctionContext.createPlugin();
        final StageSpec stageSpec = pluginFunctionContext.getStageSpec();
        final JavaSparkExecutionContext sec = dynamicDriverContext.getSparkExecutionContext();
        Transactionals.execute(sec, new TxRunnable() {

            @Override
            public void run(DatasetContext datasetContext) throws Exception {
                PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec);
                SparkExecutionPluginContext sparkPluginContext = new BasicSparkExecutionPluginContext(sec, jsc, datasetContext, pipelineRuntime, stageSpec);
                delegate.initialize(sparkPluginContext);
            }
        }, Exception.class);
    }
}
Also used: PluginFunctionContext (io.cdap.cdap.etl.spark.function.PluginFunctionContext), BasicSparkExecutionPluginContext (io.cdap.cdap.etl.spark.batch.BasicSparkExecutionPluginContext), SparkExecutionPluginContext (io.cdap.cdap.etl.api.batch.SparkExecutionPluginContext), PipelineRuntime (io.cdap.cdap.etl.common.PipelineRuntime), SparkPipelineRuntime (io.cdap.cdap.etl.spark.SparkPipelineRuntime), TxRunnable (io.cdap.cdap.api.TxRunnable), StageSpec (io.cdap.cdap.etl.proto.v2.spec.StageSpec), JavaSparkExecutionContext (io.cdap.cdap.api.spark.JavaSparkExecutionContext), DatasetContext (io.cdap.cdap.api.data.DatasetContext)
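Because TxRunnable has a single run(DatasetContext) method, the same transactional initialization can be written more compactly with a Java 8 lambda. A minimal sketch, assuming the same sec, jsc, stageSpec, and delegate values as above; the cast is needed because Transactionals.execute also has TxCallable overloads:

Transactionals.execute(sec, (TxRunnable) datasetContext -> {
    // Same body as the anonymous class: build the pipeline runtime and initialize
    // the delegate plugin inside a single transaction.
    PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec);
    delegate.initialize(new BasicSparkExecutionPluginContext(sec, jsc, datasetContext, pipelineRuntime, stageSpec));
}, Exception.class);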

Example 7 with TxRunnable

Use of io.cdap.cdap.api.TxRunnable in project cdap by caskdata.

From class DStreamCollection, method compute:

@Override
public <U> SparkCollection<U> compute(StageSpec stageSpec, SparkCompute<T, U> compute) throws Exception {
    SparkCompute<T, U> wrappedCompute = new DynamicSparkCompute<>(new DynamicDriverContext(stageSpec, sec, new NoopStageStatisticsCollector()), compute);
    Transactionals.execute(sec, new TxRunnable() {

        @Override
        public void run(DatasetContext datasetContext) throws Exception {
            PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec);
            SparkExecutionPluginContext sparkPluginContext = new BasicSparkExecutionPluginContext(sec, JavaSparkContext.fromSparkContext(stream.context().sparkContext()), datasetContext, pipelineRuntime, stageSpec);
            wrappedCompute.initialize(sparkPluginContext);
        }
    }, Exception.class);
    return wrap(stream.transform(new ComputeTransformFunction<>(sec, stageSpec, wrappedCompute)));
}
Also used: DynamicSparkCompute (io.cdap.cdap.etl.spark.streaming.function.DynamicSparkCompute), NoopStageStatisticsCollector (io.cdap.cdap.etl.common.NoopStageStatisticsCollector), ComputeTransformFunction (io.cdap.cdap.etl.spark.streaming.function.ComputeTransformFunction), PipelineRuntime (io.cdap.cdap.etl.common.PipelineRuntime), SparkPipelineRuntime (io.cdap.cdap.etl.spark.SparkPipelineRuntime), BasicSparkExecutionPluginContext (io.cdap.cdap.etl.spark.batch.BasicSparkExecutionPluginContext), SparkExecutionPluginContext (io.cdap.cdap.etl.api.batch.SparkExecutionPluginContext), TxRunnable (io.cdap.cdap.api.TxRunnable), DatasetContext (io.cdap.cdap.api.data.DatasetContext)
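For context, SparkCompute is the contract being wrapped here: initialize() runs once (inside the TxRunnable above, so it can touch datasets transactionally) while transform() runs on every micro-batch with no transaction active. A minimal, hypothetical implementation sketch, assuming the initialize/transform signatures of io.cdap.cdap.etl.api.batch.SparkCompute and an import of org.apache.spark.api.java.JavaRDD:

public class UpperCaseCompute extends SparkCompute<String, String> {

    @Override
    public void initialize(SparkExecutionPluginContext context) throws Exception {
        // One-time setup; when wrapped by DynamicSparkCompute this runs inside
        // the transaction started in compute() above.
    }

    @Override
    public JavaRDD<String> transform(SparkExecutionPluginContext context, JavaRDD<String> input) throws Exception {
        // Per-batch computation only; no transaction is active here.
        return input.map(String::toUpperCase);
    }
}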

Example 8 with TxRunnable

Use of io.cdap.cdap.api.TxRunnable in project cdap by caskdata.

From class CharCountProgram, method run:

@Override
public void run(final JavaSparkExecutionContext sec) throws Exception {
    JavaSparkContext sc = new JavaSparkContext();
    // Verify the codec is being set
    Preconditions.checkArgument("org.apache.spark.io.LZFCompressionCodec".equals(sc.getConf().get("spark.io.compression.codec")));
    // read the dataset
    JavaPairRDD<byte[], String> inputData = sec.fromDataset("keys");
    // create a new RDD with the same key but with a new value which is the length of the string
    final JavaPairRDD<byte[], byte[]> stringLengths = inputData.mapToPair(new PairFunction<Tuple2<byte[], String>, byte[], byte[]>() {

        @Override
        public Tuple2<byte[], byte[]> call(Tuple2<byte[], String> stringTuple2) throws Exception {
            return new Tuple2<>(stringTuple2._1(), Bytes.toBytes(stringTuple2._2().length()));
        }
    });
    // write a total count to a table (that emits a metric we can validate in the test case)
    sec.execute(new TxRunnable() {

        @Override
        public void run(DatasetContext context) throws Exception {
            long count = stringLengths.count();
            Table totals = context.getDataset("totals");
            totals.increment(new Increment("total").add("total", count));
// write the character counts to the dataset
            sec.saveAsDataset(stringLengths, "count");
        }
    });
}
Also used: Table (io.cdap.cdap.api.dataset.table.Table), Tuple2 (scala.Tuple2), TxRunnable (io.cdap.cdap.api.TxRunnable), Increment (io.cdap.cdap.api.dataset.table.Increment), JavaSparkContext (org.apache.spark.api.java.JavaSparkContext), DatasetContext (io.cdap.cdap.api.data.DatasetContext)
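A hypothetical follow-up, not part of the original program: reading the counter back in a separate transaction (e.g. from a test case), assuming the same "totals" Table and the Bytes utility used above:

sec.execute(new TxRunnable() {

    @Override
    public void run(DatasetContext context) throws Exception {
        Table totals = context.getDataset("totals");
        // Read the cell written by the increment; null means nothing was written yet.
        byte[] value = totals.get(Bytes.toBytes("total"), Bytes.toBytes("total"));
        long total = value == null ? 0L : Bytes.toLong(value);
        // e.g. assert on 'total' in the test case
    }
});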

Example 9 with TxRunnable

Use of io.cdap.cdap.api.TxRunnable in project cdap by caskdata.

From class SparkCSVToSpaceProgram, method run:

@Override
public void run(final JavaSparkExecutionContext sec) throws Exception {
    JavaSparkContext jsc = new JavaSparkContext();
    Map<String, String> fileSetArgs = new HashMap<>();
    final Metrics metrics = sec.getMetrics();
    FileSetArguments.addInputPath(fileSetArgs, sec.getRuntimeArguments().get("input.path"));
    JavaPairRDD<LongWritable, Text> input = sec.fromDataset(WorkflowAppWithLocalDatasets.CSV_FILESET_DATASET, fileSetArgs);
    final List<String> converted = input.values().map(new Function<Text, String>() {

        @Override
        public String call(Text input) throws Exception {
            String line = input.toString();
            metrics.count("num.lines", 1);
            return line.replaceAll(",", " ");
        }
    }).collect();
    sec.execute(new TxRunnable() {

        @Override
        public void run(DatasetContext context) throws Exception {
            Map<String, String> args = sec.getRuntimeArguments();
            String outputPath = args.get("output.path");
            Map<String, String> fileSetArgs = new HashMap<>();
            FileSetArguments.setOutputPath(fileSetArgs, outputPath);
            FileSet fileSet = context.getDataset(WorkflowAppWithLocalDatasets.CSV_FILESET_DATASET, fileSetArgs);
            try (PrintWriter writer = new PrintWriter(fileSet.getOutputLocation().getOutputStream())) {
                for (String line : converted) {
                    writer.write(line);
                    writer.println();
                }
            }
        }
    });
}
Also used: FileSet (io.cdap.cdap.api.dataset.lib.FileSet), HashMap (java.util.HashMap), Map (java.util.Map), Text (org.apache.hadoop.io.Text), Function (org.apache.spark.api.java.function.Function), Metrics (io.cdap.cdap.api.metrics.Metrics), TxRunnable (io.cdap.cdap.api.TxRunnable), JavaSparkContext (org.apache.spark.api.java.JavaSparkContext), LongWritable (org.apache.hadoop.io.LongWritable), DatasetContext (io.cdap.cdap.api.data.DatasetContext), PrintWriter (java.io.PrintWriter)
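The program reads "input.path" and "output.path" from its runtime arguments, so the caller must supply both when starting it. A hypothetical launch-side sketch (the paths and the sparkManager test helper are assumptions, not part of the example):

Map<String, String> args = new HashMap<>();
args.put("input.path", "/tmp/input.csv");   // hypothetical input location
args.put("output.path", "/tmp/output.txt"); // hypothetical output location
// e.g. in a CDAP test: sparkManager.start(args);
// or pass the same key/value pairs when starting the program via REST or CLI.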

Example 10 with TxRunnable

Use of io.cdap.cdap.api.TxRunnable in project cdap by caskdata.

From class SparkLogParser, method run:

@Override
public void run(JavaSparkExecutionContext sec) throws Exception {
    JavaSparkContext jsc = new JavaSparkContext();
    Map<String, String> runtimeArguments = sec.getRuntimeArguments();
    String inputFileSet = runtimeArguments.get("input");
    final String outputTable = runtimeArguments.get("output");
    JavaPairRDD<LongWritable, Text> input = sec.fromDataset(inputFileSet);
    final JavaPairRDD<String, String> aggregated = input.mapToPair(new PairFunction<Tuple2<LongWritable, Text>, LogKey, LogStats>() {

        @Override
        public Tuple2<LogKey, LogStats> call(Tuple2<LongWritable, Text> input) throws Exception {
            return SparkAppUsingGetDataset.parse(input._2());
        }
    }).reduceByKey(new Function2<LogStats, LogStats, LogStats>() {

        @Override
        public LogStats call(LogStats stats1, LogStats stats2) throws Exception {
            return stats1.aggregate(stats2);
        }
    }).mapPartitionsToPair(new PairFlatMapFunction<Iterator<Tuple2<LogKey, LogStats>>, String, String>() {

        @Override
        public Iterator<Tuple2<String, String>> call(Iterator<Tuple2<LogKey, LogStats>> itor) throws Exception {
            final Gson gson = new Gson();
            return Lists.newArrayList(Iterators.transform(itor, new Function<Tuple2<LogKey, LogStats>, Tuple2<String, String>>() {

                @Override
                public Tuple2<String, String> apply(Tuple2<LogKey, LogStats> input) {
                    return new Tuple2<>(gson.toJson(input._1()), gson.toJson(input._2()));
                }
            })).iterator();
        }
    });
    // Collect all data to the driver and write to the dataset directly. That's the intent of the test.
    sec.execute(new TxRunnable() {

        @Override
        public void run(DatasetContext context) throws Exception {
            KeyValueTable kvTable = context.getDataset(outputTable);
            for (Map.Entry<String, String> entry : aggregated.collectAsMap().entrySet()) {
                kvTable.write(entry.getKey(), entry.getValue());
            }
        }
    });
}
Also used: LogKey (io.cdap.cdap.spark.app.SparkAppUsingGetDataset.LogKey), LogStats (io.cdap.cdap.spark.app.SparkAppUsingGetDataset.LogStats), Gson (com.google.gson.Gson), Text (org.apache.hadoop.io.Text), Function2 (org.apache.spark.api.java.function.Function2), Function (com.google.common.base.Function), PairFlatMapFunction (org.apache.spark.api.java.function.PairFlatMapFunction), PairFunction (org.apache.spark.api.java.function.PairFunction), Tuple2 (scala.Tuple2), TxRunnable (io.cdap.cdap.api.TxRunnable), KeyValueTable (io.cdap.cdap.api.dataset.lib.KeyValueTable), Iterator (java.util.Iterator), JavaSparkContext (org.apache.spark.api.java.JavaSparkContext), LongWritable (org.apache.hadoop.io.LongWritable), DatasetContext (io.cdap.cdap.api.data.DatasetContext)
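The chain of anonymous classes above can be expressed more idiomatically with Java 8 lambdas and a method reference. A behavior-preserving sketch, assuming java.util.List and java.util.ArrayList are imported; as before, Gson is instantiated once per partition:

final JavaPairRDD<String, String> aggregated = input
    .mapToPair(t -> SparkAppUsingGetDataset.parse(t._2()))
    .reduceByKey(LogStats::aggregate)
    .mapPartitionsToPair(itor -> {
        Gson gson = new Gson();
        List<Tuple2<String, String>> result = new ArrayList<>();
        while (itor.hasNext()) {
            Tuple2<LogKey, LogStats> entry = itor.next();
            result.add(new Tuple2<>(gson.toJson(entry._1()), gson.toJson(entry._2())));
        }
        return result.iterator();
    });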

Aggregations

TxRunnable (io.cdap.cdap.api.TxRunnable): 32
DatasetContext (io.cdap.cdap.api.data.DatasetContext): 30
PipelineRuntime (io.cdap.cdap.etl.common.PipelineRuntime): 10
SparkPipelineRuntime (io.cdap.cdap.etl.spark.SparkPipelineRuntime): 10
JavaSparkContext (org.apache.spark.api.java.JavaSparkContext): 8
MacroEvaluator (io.cdap.cdap.api.macro.MacroEvaluator): 6
PluginContext (io.cdap.cdap.api.plugin.PluginContext): 6
SparkExecutionPluginContext (io.cdap.cdap.etl.api.batch.SparkExecutionPluginContext): 6
BasicArguments (io.cdap.cdap.etl.common.BasicArguments): 6
DefaultMacroEvaluator (io.cdap.cdap.etl.common.DefaultMacroEvaluator): 6
StageSpec (io.cdap.cdap.etl.proto.v2.spec.StageSpec): 6
TransactionFailureException (org.apache.tephra.TransactionFailureException): 6
Put (io.cdap.cdap.api.dataset.table.Put): 5
Table (io.cdap.cdap.api.dataset.table.Table): 5
SparkPipelinePluginContext (io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext): 5
IOException (java.io.IOException): 5
AtomicReference (java.util.concurrent.atomic.AtomicReference): 5
NoopStageStatisticsCollector (io.cdap.cdap.etl.common.NoopStageStatisticsCollector): 4
DataSetException (io.cdap.cdap.api.dataset.DataSetException): 3
FileSet (io.cdap.cdap.api.dataset.lib.FileSet): 3