Use of io.cdap.cdap.etl.spark.streaming.DStreamCollection in project cdap by caskdata:
class SparkStreamingPipelineRunner, method mergeJoinResults.
@Override
protected SparkCollection<Object> mergeJoinResults(
    StageSpec stageSpec, FunctionCache.Factory functionCacheFactory,
    SparkPairCollection<Object, List<JoinElement<Object>>> joinedInputs,
    StageStatisticsCollector collector) throws Exception {
  // Collapse the per-key lists of join elements back into a single stream of merged records.
  DynamicDriverContext driverContext = new DynamicDriverContext(stageSpec, sec, collector);
  JavaPairDStream<Object, List<JoinElement<Object>>> joinedPairs = joinedInputs.getUnderlying();
  JavaDStream<Object> merged =
      joinedPairs.transform(new DynamicJoinMerge<>(driverContext, functionCacheFactory.newCache()));
  return new DStreamCollection<>(sec, functionCacheFactory, merged);
}
Use of io.cdap.cdap.etl.spark.streaming.DStreamCollection in project cdap by caskdata:
class SparkStreamingPipelineRunner, method getSource.
@Override
protected SparkCollection<RecordInfo<Object>> getSource(
    StageSpec stageSpec, FunctionCache.Factory functionCacheFactory,
    StageStatisticsCollector collector) throws Exception {
  StreamingSource<Object> streamingSource;
  if (checkpointsDisabled) {
    // Without checkpointing the source plugin can be created through CDAP as usual,
    // so macros are evaluated normally.
    PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageSpec, sec, collector);
    streamingSource = pluginFunctionContext.createPlugin();
  } else {
    // With checkpointing enabled, Spark Streaming serializes every InputDStream into the
    // checkpoint and deserializes it directly on restart, bypassing CDAP instantiation —
    // so there is no opportunity to evaluate macros for sources restored from a checkpoint.
    // Other pipeline stages work around this by re-instantiating their plugin inside each
    // DStream function, but that is impossible for InputDStreams: their constructor
    // registers the stream with the streaming context (a global side effect), so they
    // cannot be created lazily. Hence macros are rejected for streaming sources here.
    // TODO: (HYDRATOR-1030) figure out how to do this at configure time instead of run time
    MacroEvaluator macroEvaluator = new ErrorMacroEvaluator(
        "Due to spark limitations, macro evaluation is not allowed in streaming sources when checkpointing "
            + "is enabled.");
    PluginContext pluginContext = new SparkPipelinePluginContext(
        sec.getPluginContext(), sec.getMetrics(),
        spec.isStageLoggingEnabled(), spec.isProcessTimingEnabled());
    streamingSource = pluginContext.newPluginInstance(stageSpec.getName(), macroEvaluator);
  }

  String stageName = stageSpec.getName();
  DataTracer dataTracer = sec.getDataTracer(stageName);
  StreamingContext sourceContext = new DefaultStreamingContext(stageSpec, sec, streamingContext);
  JavaDStream<Object> stream = streamingSource.getStream(sourceContext);
  if (dataTracer.isEnabled()) {
    // Preview mode: the limiting function is applied per RDD (per micro-batch),
    // so it caps each batch rather than the DStream as a whole.
    stream = stream.transform(new LimitingFunction<>(spec.getNumOfRecordsPreview()));
  }
  // Emit per-record metrics ("records.out") and wrap each record into a RecordInfo
  // tagged with the originating stage name.
  JavaDStream<RecordInfo<Object>> outputStream = stream
      .transform(new CountingTransformFunction<>(stageName, sec.getMetrics(), "records.out", dataTracer))
      .map(new WrapOutputTransformFunction<>(stageName));
  return new DStreamCollection<>(sec, functionCacheFactory, outputStream);
}
Aggregations