
Example 6 with SparkPipelinePluginContext

Use of io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by cdapio.

From the class StreamingMultiSinkFunction, the method call:

@Override
public void call(JavaRDD<RecordInfo<Object>> data, Time batchTime) throws Exception {
    long logicalStartTime = batchTime.milliseconds();
    MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime, sec.getSecureStore(), sec.getServiceDiscoverer(), sec.getNamespace());
    PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(), phaseSpec.isStageLoggingEnabled(), phaseSpec.isProcessTimingEnabled());
    SparkBatchSinkFactory sinkFactory = new SparkBatchSinkFactory();
    PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, logicalStartTime);
    Map<String, SubmitterLifecycle<?>> stages = createStages(evaluator);
    // call prepareRun() on all the stages in the group
    // need to call it in an order that guarantees that inputs are called before outputs
    // this is because plugins can call getArguments().set() in the prepareRun() method,
    // which downstream stages should be able to read
    List<String> traversalOrder = new ArrayList<>(group.size());
    for (String stageName : phaseSpec.getPhase().getDag().getTopologicalOrder()) {
        if (group.contains(stageName)) {
            traversalOrder.add(stageName);
        }
    }
    for (String stageName : traversalOrder) {
        SubmitterLifecycle<?> plugin = stages.get(stageName);
        StageSpec stageSpec = phaseSpec.getPhase().getStage(stageName);
        try {
            prepareRun(pipelineRuntime, sinkFactory, stageSpec, plugin);
        } catch (Exception e) {
            LOG.error("Error preparing sink {} for the batch for time {}.", stageName, logicalStartTime, e);
            return;
        }
    }
    // run the actual transforms and sinks in this group
    boolean ranSuccessfully = true;
    try {
        MultiSinkFunction multiSinkFunction = new MultiSinkFunction(sec, phaseSpec, group, collectors);
        Set<String> outputNames = sinkFactory.writeCombinedRDD(data.flatMapToPair(multiSinkFunction), sec, sinkNames);
        sec.execute(new TxRunnable() {

            @Override
            public void run(DatasetContext context) throws Exception {
                for (String outputName : outputNames) {
                    ExternalDatasets.registerLineage(sec.getAdmin(), outputName, AccessType.WRITE, null, () -> context.getDataset(outputName));
                }
            }
        });
    } catch (Exception e) {
        LOG.error("Error writing to sinks {} for the batch for time {}.", sinkNames, logicalStartTime, e);
        ranSuccessfully = false;
    }
    // run onRunFinish() for each sink
    for (String stageName : traversalOrder) {
        SubmitterLifecycle<?> plugin = stages.get(stageName);
        StageSpec stageSpec = phaseSpec.getPhase().getStage(stageName);
        try {
            onRunFinish(pipelineRuntime, sinkFactory, stageSpec, plugin, ranSuccessfully);
        } catch (Exception e) {
            LOG.warn("Unable to execute onRunFinish for sink {}", stageName, e);
        }
    }
}
Also used: SubmitterLifecycle(io.cdap.cdap.etl.api.SubmitterLifecycle) MacroEvaluator(io.cdap.cdap.api.macro.MacroEvaluator) DefaultMacroEvaluator(io.cdap.cdap.etl.common.DefaultMacroEvaluator) PipelineRuntime(io.cdap.cdap.etl.common.PipelineRuntime) SparkPipelineRuntime(io.cdap.cdap.etl.spark.SparkPipelineRuntime) PluginContext(io.cdap.cdap.api.plugin.PluginContext) SparkPipelinePluginContext(io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext) ArrayList(java.util.ArrayList) MultiSinkFunction(io.cdap.cdap.etl.spark.function.MultiSinkFunction) TransactionFailureException(org.apache.tephra.TransactionFailureException) SparkBatchSinkFactory(io.cdap.cdap.etl.spark.batch.SparkBatchSinkFactory) TxRunnable(io.cdap.cdap.api.TxRunnable) StageSpec(io.cdap.cdap.etl.proto.v2.spec.StageSpec) BasicArguments(io.cdap.cdap.etl.common.BasicArguments) DatasetContext(io.cdap.cdap.api.data.DatasetContext)
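
A note on the traversal-order step above: prepareRun() may call getArguments().set(), and downstream stages must be able to read those values, so the group is prepared in the pipeline's topological order. The sketch below illustrates just that filtering step, using plain Java collections as hypothetical stand-ins for CDAP's PipelinePhase and Dag types; it is a minimal illustration, not CDAP code.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class TraversalOrderSketch {
    public static void main(String[] args) {
        // The full pipeline in topological order, as a Dag's sort might return it.
        List<String> topologicalOrder = Arrays.asList("source", "parse", "sinkA", "sinkB");
        // The subset of stages handled by this multi-sink group.
        Set<String> group = new HashSet<>(Arrays.asList("parse", "sinkA", "sinkB"));

        // Keep only group members, preserving pipeline order, so that any
        // arguments set in an upstream prepareRun() are visible downstream.
        List<String> traversalOrder = new ArrayList<>(group.size());
        for (String stageName : topologicalOrder) {
            if (group.contains(stageName)) {
                traversalOrder.add(stageName);
            }
        }
        System.out.println(traversalOrder); // prints [parse, sinkA, sinkB]
    }
}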

Example 7 with SparkPipelinePluginContext

Use of io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by cdapio.

From the class SparkStreamingPipelineRunner, the method getSource:

@Override
protected SparkCollection<RecordInfo<Object>> getSource(StageSpec stageSpec, FunctionCache.Factory functionCacheFactory, StageStatisticsCollector collector) throws Exception {
    StreamingSource<Object> source;
    if (checkpointsDisabled) {
        PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageSpec, sec, collector);
        source = pluginFunctionContext.createPlugin();
    } else {
        // Check for macros in any StreamingSource. If checkpoints are enabled,
        // Spark Streaming serializes all InputDStreams into the checkpoint, which means
        // the InputDStream is deserialized directly from the checkpoint instead of instantiated through CDAP.
        // This means there isn't any way for us to perform macro evaluation on sources when they are loaded from
        // checkpoints. We can work around this in all other pipeline stages by dynamically instantiating the
        // plugin in all DStream functions, but can't for InputDStreams because the InputDStream constructor
        // adds itself to the context dag. Yay for constructors with global side effects.
        // TODO: (HYDRATOR-1030) figure out how to do this at configure time instead of run time
        MacroEvaluator macroEvaluator = new ErrorMacroEvaluator("Due to spark limitations, macro evaluation is not allowed in streaming sources when checkpointing " + "is enabled.");
        PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(), spec.isStageLoggingEnabled(), spec.isProcessTimingEnabled());
        source = pluginContext.newPluginInstance(stageSpec.getName(), macroEvaluator);
    }
    DataTracer dataTracer = sec.getDataTracer(stageSpec.getName());
    StreamingContext sourceContext = new DefaultStreamingContext(stageSpec, sec, streamingContext);
    JavaDStream<Object> javaDStream = source.getStream(sourceContext);
    if (dataTracer.isEnabled()) {
        // transform() creates a new function for each RDD, so the limit applies to each RDD but not to the entire DStream.
        javaDStream = javaDStream.transform(new LimitingFunction<>(spec.getNumOfRecordsPreview()));
    }
    JavaDStream<RecordInfo<Object>> outputDStream = javaDStream.transform(new CountingTransformFunction<>(stageSpec.getName(), sec.getMetrics(), "records.out", dataTracer)).map(new WrapOutputTransformFunction<>(stageSpec.getName()));
    return new DStreamCollection<>(sec, functionCacheFactory, outputDStream);
}
Also used: DStreamCollection(io.cdap.cdap.etl.spark.streaming.DStreamCollection) PairDStreamCollection(io.cdap.cdap.etl.spark.streaming.PairDStreamCollection) StreamingContext(io.cdap.cdap.etl.api.streaming.StreamingContext) JavaStreamingContext(org.apache.spark.streaming.api.java.JavaStreamingContext) DefaultStreamingContext(io.cdap.cdap.etl.spark.streaming.DefaultStreamingContext) MacroEvaluator(io.cdap.cdap.api.macro.MacroEvaluator) PluginContext(io.cdap.cdap.api.plugin.PluginContext) SparkPipelinePluginContext(io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext) RecordInfo(io.cdap.cdap.etl.common.RecordInfo) CountingTransformFunction(io.cdap.cdap.etl.spark.streaming.function.CountingTransformFunction) PluginFunctionContext(io.cdap.cdap.etl.spark.function.PluginFunctionContext) DataTracer(io.cdap.cdap.api.preview.DataTracer) LimitingFunction(io.cdap.cdap.etl.spark.streaming.function.preview.LimitingFunction)
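
As the inline comment in getSource notes, wrapping the DStream with transform creates a new function per RDD, so the preview limit applies to each batch rather than the stream as a whole. The sketch below shows one way such a per-RDD limiter could be written against the public Spark Java API; PerRddLimiter is a hypothetical class, not CDAP's actual LimitingFunction.

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;

// Keeps at most 'limit' records of every RDD passed through transform().
// Because the function runs once per batch, the cap applies to each RDD
// independently, not to the DStream as a whole.
public class PerRddLimiter<T> implements Function<JavaRDD<T>, JavaRDD<T>> {
    private final long limit;

    public PerRddLimiter(long limit) {
        this.limit = limit;
    }

    @Override
    public JavaRDD<T> call(JavaRDD<T> rdd) {
        // zipWithIndex assigns each record an ordered index; filtering on it
        // keeps the first 'limit' records of this particular RDD.
        return rdd.zipWithIndex()
                  .filter(t -> t._2() < limit)
                  .map(t -> t._1());
    }
}

Usage would mirror the preview branch above, e.g. javaDStream.transform(new PerRddLimiter<>(100)).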

Example 8 with SparkPipelinePluginContext

Use of io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by cdapio.

From the class JavaSparkMainWrapper, the method run:

@Override
public void run(JavaSparkExecutionContext sec) throws Exception {
    String stageName = sec.getSpecification().getProperty(ExternalSparkProgram.STAGE_NAME);
    BatchPhaseSpec batchPhaseSpec = GSON.fromJson(sec.getSpecification().getProperty(Constants.PIPELINEID), BatchPhaseSpec.class);
    PipelinePluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(), batchPhaseSpec.isStageLoggingEnabled(), batchPhaseSpec.isProcessTimingEnabled());
    Class<?> mainClass = pluginContext.loadPluginClass(stageName);
    // if it's a CDAP JavaSparkMain, instantiate it and call the run method
    if (JavaSparkMain.class.isAssignableFrom(mainClass)) {
        MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(new BasicArguments(sec), sec.getLogicalStartTime(), sec.getSecureStore(), sec.getServiceDiscoverer(), sec.getNamespace());
        JavaSparkMain javaSparkMain = pluginContext.newPluginInstance(stageName, macroEvaluator);
        javaSparkMain.run(sec);
    } else {
        // otherwise, assume there is a 'main' method and call it
        String programArgs = getProgramArgs(sec, stageName);
        String[] args = programArgs == null ? RuntimeArguments.toPosixArray(sec.getRuntimeArguments()) : programArgs.split(" ");
        final Method mainMethod = mainClass.getMethod("main", String[].class);
        final Object[] methodArgs = new Object[1];
        methodArgs[0] = args;
        Caller caller = pluginContext.getCaller(stageName);
        caller.call(new Callable<Void>() {

            @Override
            public Void call() throws Exception {
                mainMethod.invoke(null, methodArgs);
                return null;
            }
        });
    }
}
Also used: MacroEvaluator(io.cdap.cdap.api.macro.MacroEvaluator) DefaultMacroEvaluator(io.cdap.cdap.etl.common.DefaultMacroEvaluator) Method(java.lang.reflect.Method) Caller(io.cdap.cdap.etl.common.plugin.Caller) BatchPhaseSpec(io.cdap.cdap.etl.batch.BatchPhaseSpec) JavaSparkMain(io.cdap.cdap.api.spark.JavaSparkMain) BasicArguments(io.cdap.cdap.etl.common.BasicArguments) PipelinePluginContext(io.cdap.cdap.etl.common.plugin.PipelinePluginContext) SparkPipelinePluginContext(io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext)
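
The else branch is the standard reflective main() dispatch: look up a public static main(String[]) on the plugin class and invoke it, wrapping the String[] in a single-element Object[] so the varargs invoke() does not spread it into multiple arguments. A self-contained sketch of that pattern, with a hypothetical Target class standing in for the loaded plugin class:

import java.lang.reflect.Method;

public class ReflectiveMainSketch {
    public static void main(String[] args) throws Exception {
        // Stand-in for the class returned by loadPluginClass(stageName).
        Class<?> mainClass = Target.class;
        String[] programArgs = {"--mode", "demo"};

        Method mainMethod = mainClass.getMethod("main", String[].class);
        // main takes one parameter (a String[]), so it must be wrapped in a
        // single-element Object[]; passing programArgs directly would be
        // spread by invoke() into multiple arguments.
        Object[] methodArgs = new Object[] { programArgs };
        // First argument is null because main is static.
        mainMethod.invoke(null, methodArgs);
    }

    public static class Target {
        public static void main(String[] args) {
            System.out.println("Target.main invoked with " + args.length + " args");
        }
    }
}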

Example 9 with SparkPipelinePluginContext

Use of io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by caskdata.

From the class JavaSparkMainWrapper, the method run. The code and imports are identical to Example 8; only the project fork differs.

Example 10 with SparkPipelinePluginContext

Use of io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by caskdata.

From the class SparkStreamingPipelineRunner, the method getSource. The code and imports are identical to Example 7; only the project fork differs.

Aggregations

SparkPipelinePluginContext (io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext): 14
MacroEvaluator (io.cdap.cdap.api.macro.MacroEvaluator): 12
PluginContext (io.cdap.cdap.api.plugin.PluginContext): 10
BasicArguments (io.cdap.cdap.etl.common.BasicArguments): 10
DefaultMacroEvaluator (io.cdap.cdap.etl.common.DefaultMacroEvaluator): 10
PipelineRuntime (io.cdap.cdap.etl.common.PipelineRuntime): 8
SparkPipelineRuntime (io.cdap.cdap.etl.spark.SparkPipelineRuntime): 8
TxRunnable (io.cdap.cdap.api.TxRunnable): 6
DatasetContext (io.cdap.cdap.api.data.DatasetContext): 6
PipelinePluginContext (io.cdap.cdap.etl.common.plugin.PipelinePluginContext): 4
StageSpec (io.cdap.cdap.etl.proto.v2.spec.StageSpec): 4
SparkBatchSinkFactory (io.cdap.cdap.etl.spark.batch.SparkBatchSinkFactory): 4
PluginFunctionContext (io.cdap.cdap.etl.spark.function.PluginFunctionContext): 4
TransactionPolicy (io.cdap.cdap.api.annotation.TransactionPolicy): 2
DataTracer (io.cdap.cdap.api.preview.DataTracer): 2
JavaSparkMain (io.cdap.cdap.api.spark.JavaSparkMain): 2
SparkClientContext (io.cdap.cdap.api.spark.SparkClientContext): 2
Alert (io.cdap.cdap.etl.api.Alert): 2
AlertPublisher (io.cdap.cdap.etl.api.AlertPublisher): 2
AlertPublisherContext (io.cdap.cdap.etl.api.AlertPublisherContext): 2