Use of co.cask.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by caskdata.
The class SparkStreamingPipelineRunner, method getSource.
@Override
protected SparkCollection<RecordInfo<Object>> getSource(StageSpec stageSpec,
                                                        StageStatisticsCollector collector) throws Exception {
  StreamingSource<Object> source;
  if (checkpointsDisabled) {
    PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageSpec, sec, collector);
    source = pluginFunctionContext.createPlugin();
  } else {
    // Check for macros in any StreamingSource. If checkpoints are enabled,
    // Spark Streaming will serialize all InputDStreams it created into the checkpoint, which means
    // the InputDStream is deserialized directly from the checkpoint instead of instantiated through CDAP.
    // This means there isn't any way for us to perform macro evaluation on sources when they are loaded from
    // checkpoints. We can work around this in all other pipeline stages by dynamically instantiating the
    // plugin in all DStream functions, but can't for InputDStreams because the InputDStream constructor
    // adds itself to the context dag. Yay for constructors with global side effects.
    // TODO: (HYDRATOR-1030) figure out how to do this at configure time instead of run time
    MacroEvaluator macroEvaluator = new ErrorMacroEvaluator(
      "Due to spark limitations, macro evaluation is not allowed in streaming sources when checkpointing is enabled.");
    PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                                 spec.isStageLoggingEnabled(),
                                                                 spec.isProcessTimingEnabled());
    source = pluginContext.newPluginInstance(stageSpec.getName(), macroEvaluator);
  }
  DataTracer dataTracer = sec.getDataTracer(stageSpec.getName());
  StreamingContext sourceContext = new DefaultStreamingContext(stageSpec, sec, streamingContext);
  JavaDStream<Object> javaDStream = source.getStream(sourceContext);
  if (dataTracer.isEnabled()) {
    // This creates a new function for each RDD, so the limit applies to each RDD
    // rather than to the entire DStream.
    javaDStream = javaDStream.transform(new LimitingFunction<>(spec.getNumOfRecordsPreview()));
  }
  JavaDStream<RecordInfo<Object>> outputDStream = javaDStream
    .transform(new CountingTransformFunction<>(stageSpec.getName(), sec.getMetrics(), "records.out", dataTracer))
    .map(new WrapOutputTransformFunction<>(stageSpec.getName()));
  return new DStreamCollection<>(sec, outputDStream);
}
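The pattern this page documents is the two-step plugin instantiation in the else branch: wrap the platform PluginContext from the Spark execution context in a SparkPipelinePluginContext (so plugin calls pick up metrics, stage logging, and process timing), then create the stage plugin by name with a MacroEvaluator. A minimal, hedged sketch of just that step follows; the import paths are assumptions based on the co.cask.cdap layout above and the helper class name is hypothetical.

// Sketch only: import paths may differ between CDAP versions; InstantiationSketch is a
// hypothetical helper, not a CDAP class.
import co.cask.cdap.api.macro.MacroEvaluator;
import co.cask.cdap.api.plugin.PluginContext;
import co.cask.cdap.api.spark.JavaSparkExecutionContext;
import co.cask.cdap.etl.spark.plugin.SparkPipelinePluginContext;

public final class InstantiationSketch {

  // Instantiates the plugin registered under stageName, evaluating its macros with the
  // given evaluator, using the same wrapping as the pipeline code above.
  public static <T> T newStagePlugin(JavaSparkExecutionContext sec, String stageName,
                                     MacroEvaluator macroEvaluator,
                                     boolean stageLoggingEnabled,
                                     boolean processTimingEnabled) throws Exception {
    PluginContext pluginContext = new SparkPipelinePluginContext(
      sec.getPluginContext(), sec.getMetrics(), stageLoggingEnabled, processTimingEnabled);
    return pluginContext.newPluginInstance(stageName, macroEvaluator);
  }
}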
Use of co.cask.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by caskdata.
The class JavaSparkMainWrapper, method run.
@Override
public void run(JavaSparkExecutionContext sec) throws Exception {
  String stageName = sec.getSpecification().getProperty(ExternalSparkProgram.STAGE_NAME);
  BatchPhaseSpec batchPhaseSpec = GSON.fromJson(sec.getSpecification().getProperty(Constants.PIPELINEID),
                                                BatchPhaseSpec.class);
  PipelinePluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                                       batchPhaseSpec.isStageLoggingEnabled(),
                                                                       batchPhaseSpec.isProcessTimingEnabled());
  Class<?> mainClass = pluginContext.loadPluginClass(stageName);
  // if it's a CDAP JavaSparkMain, instantiate it and call its run method
  if (JavaSparkMain.class.isAssignableFrom(mainClass)) {
    MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(new BasicArguments(sec), sec.getLogicalStartTime(),
                                                              sec.getSecureStore(), sec.getNamespace());
    JavaSparkMain javaSparkMain = pluginContext.newPluginInstance(stageName, macroEvaluator);
    javaSparkMain.run(sec);
  } else {
    // otherwise, assume there is a 'main' method and call it
    String programArgs = getProgramArgs(sec, stageName);
    String[] args = programArgs == null
      ? RuntimeArguments.toPosixArray(sec.getRuntimeArguments())
      : programArgs.split(" ");
    final Method mainMethod = mainClass.getMethod("main", String[].class);
    final Object[] methodArgs = new Object[1];
    methodArgs[0] = args;
    // invoke the static main method through the stage's Caller so it is wrapped like any other plugin call
    Caller caller = pluginContext.getCaller(stageName);
    caller.call(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        mainMethod.invoke(null, methodArgs);
        return null;
      }
    });
  }
}
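For context on the first branch: a user program only needs to implement JavaSparkMain for the wrapper above to instantiate it as a plugin and invoke run. A hedged sketch of such a program follows; the class name and the "input.path" argument are made up for illustration, and the import paths are assumed from the package layout used above.

// Hypothetical JavaSparkMain implementation that the first branch above would dispatch to.
import co.cask.cdap.api.spark.JavaSparkExecutionContext;
import co.cask.cdap.api.spark.JavaSparkMain;

public class SketchSparkMain implements JavaSparkMain {

  @Override
  public void run(JavaSparkExecutionContext sec) {
    // Runtime arguments are available the same way the wrapper reads them in the 'main' branch.
    String inputPath = sec.getRuntimeArguments().get("input.path");
    // ... Spark job body elided; only the entry-point shape matters for the wrapper above.
  }
}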
Use of co.cask.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by caskdata.
The class StreamingBatchSinkFunction, method call.
@Override
public Void call(JavaRDD<T> data, Time batchTime) throws Exception {
  if (data.isEmpty()) {
    return null;
  }
  final long logicalStartTime = batchTime.milliseconds();
  MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime,
                                                       sec.getSecureStore(), sec.getNamespace());
  PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                               stageSpec.isStageLoggingEnabled(),
                                                               stageSpec.isProcessTimingEnabled());
  final SparkBatchSinkFactory sinkFactory = new SparkBatchSinkFactory();
  final String stageName = stageSpec.getName();
  final BatchSink<Object, Object, Object> batchSink = pluginContext.newPluginInstance(stageName, evaluator);
  final PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, logicalStartTime);
  boolean isPrepared = false;
  boolean isDone = false;
  try {
    // prepare the sink in its own transaction
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkBatchSinkContext sinkContext =
          new SparkBatchSinkContext(sinkFactory, sec, datasetContext, pipelineRuntime, stageSpec);
        batchSink.prepareRun(sinkContext);
      }
    });
    isPrepared = true;
    // transform each record into key/value pairs and write them to the sink's outputs
    PluginFunctionContext pluginFunctionContext =
      new PluginFunctionContext(stageSpec, sec, pipelineRuntime.getArguments().asMap(), batchTime.milliseconds(),
                                new NoopStageStatisticsCollector());
    PairFlatMapFunc<T, Object, Object> sinkFunction = new BatchSinkFunction<T, Object, Object>(pluginFunctionContext);
    sinkFactory.writeFromRDD(data.flatMapToPair(Compat.convert(sinkFunction)), sec, stageName,
                             Object.class, Object.class);
    isDone = true;
    // report success to the sink
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkBatchSinkContext sinkContext =
          new SparkBatchSinkContext(sinkFactory, sec, datasetContext, pipelineRuntime, stageSpec);
        batchSink.onRunFinish(true, sinkContext);
      }
    });
  } catch (Exception e) {
    // log and swallow the error; the streaming job continues with the next batch
    LOG.error("Error writing to sink {} for the batch for time {}.", stageName, logicalStartTime, e);
  } finally {
    // if the sink was prepared but the write did not finish, report failure
    if (isPrepared && !isDone) {
      sec.execute(new TxRunnable() {
        @Override
        public void run(DatasetContext datasetContext) throws Exception {
          SparkBatchSinkContext sinkContext =
            new SparkBatchSinkContext(sinkFactory, sec, datasetContext, pipelineRuntime, stageSpec);
          batchSink.onRunFinish(false, sinkContext);
        }
      });
    }
  }
  return null;
}
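The isPrepared/isDone bookkeeping above encodes a small lifecycle contract: the sink is told the run succeeded only after the write completes, is told it failed if preparation succeeded but the write did not, and any error is logged rather than rethrown so the next micro-batch can proceed. The CDAP-free sketch below isolates that control flow; the Step and Finisher hooks are hypothetical stand-ins for the transactional prepareRun, write, and onRunFinish calls above.

// Hypothetical, self-contained sketch of the prepare/write/finish control flow used above.
final class SinkLifecycleSketch {

  interface Step { void run() throws Exception; }
  interface Finisher { void onFinish(boolean succeeded) throws Exception; }

  static void runBatch(Step prepare, Step write, Finisher finish) throws Exception {
    boolean isPrepared = false;
    boolean isDone = false;
    try {
      prepare.run();
      isPrepared = true;          // from here on, the sink must be told how the run ended
      write.run();
      isDone = true;              // the write completed, so report success
      finish.onFinish(true);
    } catch (Exception e) {
      // mirror the function above: log and continue rather than failing the streaming job
      System.err.println("batch failed: " + e);
    } finally {
      if (isPrepared && !isDone) {
        finish.onFinish(false);   // prepared but never completed: report failure
      }
    }
  }
}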
Use of co.cask.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by caskdata.
The class StreamingSparkSinkFunction, method call.
@Override
public Void call(JavaRDD<T> data, Time batchTime) throws Exception {
  if (data.isEmpty()) {
    return null;
  }
  final long logicalStartTime = batchTime.milliseconds();
  MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime,
                                                       sec.getSecureStore(), sec.getNamespace());
  final PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                                     stageSpec.isStageLoggingEnabled(),
                                                                     stageSpec.isProcessTimingEnabled());
  final PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, batchTime.milliseconds());
  final String stageName = stageSpec.getName();
  final SparkSink<T> sparkSink = pluginContext.newPluginInstance(stageName, evaluator);
  boolean isPrepared = false;
  boolean isDone = false;
  try {
    // prepare the sink in its own transaction
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkPluginContext context =
          new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, datasetContext, sec.getAdmin());
        sparkSink.prepareRun(context);
      }
    });
    isPrepared = true;
    final SparkExecutionPluginContext sparkExecutionPluginContext =
      new SparkStreamingExecutionContext(sec, JavaSparkContext.fromSparkContext(data.rdd().context()),
                                         logicalStartTime, stageSpec);
    // count incoming records; cache so repeated actions by the sink don't recompute (and recount) the data
    final JavaRDD<T> countedRDD =
      data.map(new CountingFunction<T>(stageName, sec.getMetrics(), "records.in", null)).cache();
    // hand the whole RDD to the SparkSink inside a transaction
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        sparkSink.run(sparkExecutionPluginContext, countedRDD);
      }
    });
    isDone = true;
    // report success to the sink
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkPluginContext context =
          new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, datasetContext, sec.getAdmin());
        sparkSink.onRunFinish(true, context);
      }
    });
  } catch (Exception e) {
    // log and swallow the error; the streaming job continues with the next batch
    LOG.error("Error while executing sink {} for the batch for time {}.", stageName, logicalStartTime, e);
  } finally {
    // if the sink was prepared but did not complete, report failure
    if (isPrepared && !isDone) {
      sec.execute(new TxRunnable() {
        @Override
        public void run(DatasetContext datasetContext) throws Exception {
          SparkPluginContext context =
            new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, datasetContext, sec.getAdmin());
          sparkSink.onRunFinish(false, context);
        }
      });
    }
  }
  return null;
}
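The two sink functions differ mainly in how the micro-batch reaches the plugin: StreamingBatchSinkFunction flattens each record into key/value pairs with flatMapToPair and delegates the write to output formats, while StreamingSparkSinkFunction hands the whole JavaRDD to the plugin's run method. The sketch below illustrates only that data-flow difference with plain Spark types (Spark 2.x Java API assumed; all names are hypothetical and no CDAP classes are involved).

// Hypothetical illustration of the two sink data flows above, using plain Spark types.
import java.util.Collections;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import scala.Tuple2;

final class SinkDataFlowSketch {

  // BatchSink-style: each record becomes zero or more key/value pairs that an output
  // format then writes (mirrors data.flatMapToPair(Compat.convert(sinkFunction)) above).
  static JavaPairRDD<String, String> batchSinkStyle(JavaRDD<String> records) {
    return records.flatMapToPair(
      record -> Collections.singletonList(new Tuple2<>(record, record)).iterator());
  }

  // SparkSink-style: the plugin gets the whole RDD and decides how to act on it
  // (mirrors sparkSink.run(sparkExecutionPluginContext, countedRDD) above).
  static long sparkSinkStyle(JavaRDD<String> records) {
    return records.count();
  }
}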
Use of co.cask.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by caskdata.
The class SparkStreamingPipelineRunner, method getSource.
@Override
protected SparkCollection<Tuple2<Boolean, Object>> getSource(StageInfo stageInfo) throws Exception {
  StreamingSource<Object> source;
  if (checkpointsDisabled) {
    PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageInfo, sec);
    source = pluginFunctionContext.createPlugin();
  } else {
    // Check for macros in any StreamingSource. If checkpoints are enabled,
    // Spark Streaming will serialize all InputDStreams it created into the checkpoint, which means
    // the InputDStream is deserialized directly from the checkpoint instead of instantiated through CDAP.
    // This means there isn't any way for us to perform macro evaluation on sources when they are loaded from
    // checkpoints. We can work around this in all other pipeline stages by dynamically instantiating the
    // plugin in all DStream functions, but can't for InputDStreams because the InputDStream constructor
    // adds itself to the context dag. Yay for constructors with global side effects.
    // TODO: (HYDRATOR-1030) figure out how to do this at configure time instead of run time
    MacroEvaluator macroEvaluator = new ErrorMacroEvaluator(
      "Due to spark limitations, macro evaluation is not allowed in streaming sources when checkpointing is enabled.");
    PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                                 spec.isStageLoggingEnabled(),
                                                                 spec.isProcessTimingEnabled());
    source = pluginContext.newPluginInstance(stageInfo.getName(), macroEvaluator);
  }
  DataTracer dataTracer = sec.getDataTracer(stageInfo.getName());
  StreamingContext sourceContext = new DefaultStreamingContext(stageInfo, sec, streamingContext);
  JavaDStream<Object> javaDStream = source.getStream(sourceContext);
  if (dataTracer.isEnabled()) {
    // This creates a new function for each RDD, so the limit applies to each RDD
    // rather than to the entire DStream.
    javaDStream = javaDStream.transform(new LimitingFunction<>(spec.getNumOfRecordsPreview()));
  }
  JavaDStream<Tuple2<Boolean, Object>> outputDStream = javaDStream
    .transform(new CountingTransformFunction<>(stageInfo.getName(), sec.getMetrics(), "records.out", dataTracer))
    .map(new WrapOutputTransformFunction<>());
  return new DStreamCollection<>(sec, outputDStream);
}
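As the comment in the preview branch notes, LimitingFunction is applied through transform, which runs once per micro-batch, so the record cap applies to each RDD rather than to the DStream as a whole. The hedged, CDAP-free sketch below shows one way such a per-batch limit can be expressed with plain Spark (Spark 2.x Java API assumed; this is not the actual LimitingFunction implementation).

// Hypothetical per-batch limiting, illustrating why the cap is per RDD and not per DStream.
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.streaming.api.java.JavaDStream;

final class PreviewLimitSketch {

  static <T> JavaDStream<T> limitPerBatch(JavaDStream<T> stream, long maxRecordsPerBatch) {
    // transform() is invoked for every micro-batch RDD, so the limit resets each batch
    return stream.transform(rdd -> limit(rdd, maxRecordsPerBatch));
  }

  private static <T> JavaRDD<T> limit(JavaRDD<T> rdd, long max) {
    // zipWithIndex assigns a stable index per element, which gives a cutoff point
    return rdd.zipWithIndex()
              .filter(t -> t._2() < max)
              .map(t -> t._1());
  }
}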