Use of co.cask.cdap.api.preview.DataTracer in project cdap by caskdata.
Example from the class SparkStreamingPipelineRunner, method getSource:
@Override
protected SparkCollection<RecordInfo<Object>> getSource(StageSpec stageSpec,
                                                        StageStatisticsCollector collector) throws Exception {
  StreamingSource<Object> source;
  if (checkpointsDisabled) {
    PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageSpec, sec, collector);
    source = pluginFunctionContext.createPlugin();
  } else {
    // check for macros in any StreamingSource. If checkpoints are enabled,
    // SparkStreaming will serialize all InputDStreams created in the checkpoint, which means
    // the InputDStream is deserialized directly from the checkpoint instead of instantiated through CDAP.
    // This means there isn't any way for us to perform macro evaluation on sources when they are loaded from
    // checkpoints. We can work around this in all other pipeline stages by dynamically instantiating the
    // plugin in all DStream functions, but can't for InputDStreams because the InputDStream constructor
    // adds itself to the context dag. Yay for constructors with global side effects.
    // TODO: (HYDRATOR-1030) figure out how to do this at configure time instead of run time
    MacroEvaluator macroEvaluator = new ErrorMacroEvaluator(
      "Due to spark limitations, macro evaluation is not allowed in streaming sources when checkpointing "
      + "is enabled.");
    PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                                 spec.isStageLoggingEnabled(),
                                                                 spec.isProcessTimingEnabled());
    source = pluginContext.newPluginInstance(stageSpec.getName(), macroEvaluator);
  }
  DataTracer dataTracer = sec.getDataTracer(stageSpec.getName());
  StreamingContext sourceContext = new DefaultStreamingContext(stageSpec, sec, streamingContext);
  JavaDStream<Object> javaDStream = source.getStream(sourceContext);
  if (dataTracer.isEnabled()) {
    // it will create a new function for each RDD, which would limit each RDD but not the entire DStream.
    javaDStream = javaDStream.transform(new LimitingFunction<>(spec.getNumOfRecordsPreview()));
  }
  JavaDStream<RecordInfo<Object>> outputDStream = javaDStream
    .transform(new CountingTransformFunction<>(stageSpec.getName(), sec.getMetrics(), "records.out", dataTracer))
    .map(new WrapOutputTransformFunction<>(stageSpec.getName()));
  return new DStreamCollection<>(sec, outputDStream);
}
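When the DataTracer reports that preview is enabled, the LimitingFunction above caps the number of records per micro-batch. The following is a minimal sketch of such a per-RDD limiter; the class name PerBatchLimit is hypothetical (it is not CDAP's LimitingFunction) and it assumes only the standard Spark Java API.

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;

// Hypothetical per-batch limiter for illustration; not the actual CDAP LimitingFunction.
public class PerBatchLimit<T> implements Function<JavaRDD<T>, JavaRDD<T>> {
  private final int limit;

  public PerBatchLimit(int limit) {
    this.limit = limit;
  }

  @Override
  public JavaRDD<T> call(JavaRDD<T> rdd) {
    // zipWithIndex numbers the elements of this batch; keep only the first `limit` of them.
    return rdd.zipWithIndex()
      .filter(indexed -> indexed._2() < limit)
      .map(indexed -> indexed._1());
  }
}

Because transform() invokes the supplied function once per batch RDD, a limiter like this caps each micro-batch independently rather than the stream as a whole, which is exactly what the comment in the snippet points out.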
A second example from the same class and method, a variant that takes a StageInfo and emits Tuple2<Boolean, Object> records:
@Override
protected SparkCollection<Tuple2<Boolean, Object>> getSource(StageInfo stageInfo) throws Exception {
  StreamingSource<Object> source;
  if (checkpointsDisabled) {
    PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageInfo, sec);
    source = pluginFunctionContext.createPlugin();
  } else {
    // check for macros in any StreamingSource. If checkpoints are enabled,
    // SparkStreaming will serialize all InputDStreams created in the checkpoint, which means
    // the InputDStream is deserialized directly from the checkpoint instead of instantiated through CDAP.
    // This means there isn't any way for us to perform macro evaluation on sources when they are loaded from
    // checkpoints. We can work around this in all other pipeline stages by dynamically instantiating the
    // plugin in all DStream functions, but can't for InputDStreams because the InputDStream constructor
    // adds itself to the context dag. Yay for constructors with global side effects.
    // TODO: (HYDRATOR-1030) figure out how to do this at configure time instead of run time
    MacroEvaluator macroEvaluator = new ErrorMacroEvaluator(
      "Due to spark limitations, macro evaluation is not allowed in streaming sources when checkpointing "
      + "is enabled.");
    PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                                 spec.isStageLoggingEnabled(),
                                                                 spec.isProcessTimingEnabled());
    source = pluginContext.newPluginInstance(stageInfo.getName(), macroEvaluator);
  }
  DataTracer dataTracer = sec.getDataTracer(stageInfo.getName());
  StreamingContext sourceContext = new DefaultStreamingContext(stageInfo, sec, streamingContext);
  JavaDStream<Object> javaDStream = source.getStream(sourceContext);
  if (dataTracer.isEnabled()) {
    // it will create a new function for each RDD, which would limit each RDD but not the entire DStream.
    javaDStream = javaDStream.transform(new LimitingFunction<>(spec.getNumOfRecordsPreview()));
  }
  JavaDStream<Tuple2<Boolean, Object>> outputDStream = javaDStream
    .transform(new CountingTransformFunction<>(stageInfo.getName(), sec.getMetrics(), "records.out", dataTracer))
    .map(new WrapOutputTransformFunction<>());
  return new DStreamCollection<>(sec, outputDStream);
}
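Both variants obtain the DataTracer from sec.getDataTracer(stageName) and hand it to CountingTransformFunction, which also emits the records.out metric. As a rough illustration of the two DataTracer calls the snippets rely on, isEnabled() and info(propertyName, value), the hypothetical TracingFunction below forwards each record of a batch to the tracer under the stage name; it is a sketch under those assumptions, not the actual CountingTransformFunction, and it omits the metrics counting.

import co.cask.cdap.api.preview.DataTracer;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;

// Illustrative sketch only. Assumes the DataTracer instance is serializable, as it must be when
// it is shipped into DStream functions the way the snippets above ship it into
// CountingTransformFunction.
public class TracingFunction<T> implements Function<JavaRDD<T>, JavaRDD<T>> {
  private final String stageName;
  private final DataTracer dataTracer;

  public TracingFunction(String stageName, DataTracer dataTracer) {
    this.stageName = stageName;
    this.dataTracer = dataTracer;
  }

  @Override
  public JavaRDD<T> call(JavaRDD<T> rdd) {
    if (!dataTracer.isEnabled()) {
      // Tracing is off outside preview runs, so the batch passes through untouched.
      return rdd;
    }
    return rdd.map(record -> {
      // Record the value so it shows up in the preview data for this stage.
      dataTracer.info(stageName, record);
      return record;
    });
  }
}

It would be wired in the same way as the snippets do with their transforms, for example javaDStream.transform(new TracingFunction<>(stageName, dataTracer)).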