Use of io.cdap.cdap.etl.spark.function.MultiSinkFunction in project cdap by caskdata.
From the class BaseRDDCollection, the method createMultiStoreTask:
@Override
public Runnable createMultiStoreTask(PhaseSpec phaseSpec, Set<String> group, Set<String> sinks,
                                     Map<String, StageStatisticsCollector> collectors) {
  return new Runnable() {
    @Override
    public void run() {
      PairFlatMapFunction<T, String, KeyValue<Object, Object>> multiSinkFunction =
        (PairFlatMapFunction<T, String, KeyValue<Object, Object>>) new MultiSinkFunction(sec, phaseSpec, group, collectors);
      JavaPairRDD<String, KeyValue<Object, Object>> taggedOutput = rdd.flatMapToPair(multiSinkFunction);
      for (String outputName : sinkFactory.writeCombinedRDD(taggedOutput, sec, sinks)) {
        recordLineage(outputName);
      }
    }
  };
}
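To make the tagging step concrete, here is a minimal, self-contained sketch (not CDAP code) of the same pattern in Spark's plain Java API: each record is flat-mapped to (sinkName, record) pairs, so one pass over the RDD can feed several sinks before something like writeCombinedRDD routes each key to its destination. The sink names and record format are hypothetical, and Spark 2's Java API is assumed (PairFlatMapFunction returns an Iterator).

// TagAndCombineSketch.java -- illustrative only, assumes a Spark 2.x dependency on the classpath.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.PairFlatMapFunction;

import scala.Tuple2;

public class TagAndCombineSketch {

  public static void main(String[] args) {
    JavaSparkContext jsc = new JavaSparkContext("local[2]", "tag-and-combine-sketch");
    JavaRDD<String> records = jsc.parallelize(Arrays.asList("order:1", "refund:2", "order:3"));

    // Tag each record with the name of the sink that should receive it (hypothetical sink names).
    PairFlatMapFunction<String, String, String> tagBySink = record -> {
      List<Tuple2<String, String>> tagged = new ArrayList<>();
      String sinkName = record.startsWith("order:") ? "ordersSink" : "refundsSink";
      tagged.add(new Tuple2<>(sinkName, record));
      return tagged.iterator();
    };

    // One combined pair RDD carries the output for every sink in the group,
    // analogous to the taggedOutput RDD handed to writeCombinedRDD above.
    JavaPairRDD<String, String> taggedOutput = records.flatMapToPair(tagBySink);

    // For the sketch, just count records per sink instead of writing them out.
    taggedOutput.countByKey().forEach((sink, count) ->
      System.out.println(sink + " -> " + count + " record(s)"));

    jsc.stop();
  }
}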
Use of io.cdap.cdap.etl.spark.function.MultiSinkFunction in project cdap by caskdata.
From the class StreamingMultiSinkFunction, the method call:
@Override
public void call(JavaRDD<RecordInfo<Object>> data, Time batchTime) throws Exception {
  long logicalStartTime = batchTime.milliseconds();
  MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime,
                                                       sec.getSecureStore(), sec.getServiceDiscoverer(),
                                                       sec.getNamespace());
  PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                               phaseSpec.isStageLoggingEnabled(),
                                                               phaseSpec.isProcessTimingEnabled());
  SparkBatchSinkFactory sinkFactory = new SparkBatchSinkFactory();
  PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, logicalStartTime);
  Map<String, SubmitterLifecycle<?>> stages = createStages(evaluator);

  // call prepareRun() on all the stages in the group
  // need to call it in an order that guarantees that inputs are called before outputs
  // this is because plugins can call getArguments().set() in the prepareRun() method,
  // which downstream stages should be able to read
  List<String> traversalOrder = new ArrayList<>(group.size());
  for (String stageName : phaseSpec.getPhase().getDag().getTopologicalOrder()) {
    if (group.contains(stageName)) {
      traversalOrder.add(stageName);
    }
  }
  for (String stageName : traversalOrder) {
    SubmitterLifecycle<?> plugin = stages.get(stageName);
    StageSpec stageSpec = phaseSpec.getPhase().getStage(stageName);
    try {
      prepareRun(pipelineRuntime, sinkFactory, stageSpec, plugin);
    } catch (Exception e) {
      LOG.error("Error preparing sink {} for the batch for time {}.", stageName, logicalStartTime, e);
      return;
    }
  }

  // run the actual transforms and sinks in this group
  boolean ranSuccessfully = true;
  try {
    MultiSinkFunction multiSinkFunction = new MultiSinkFunction(sec, phaseSpec, group, collectors);
    Set<String> outputNames = sinkFactory.writeCombinedRDD(data.flatMapToPair(multiSinkFunction), sec, sinkNames);
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        for (String outputName : outputNames) {
          ExternalDatasets.registerLineage(sec.getAdmin(), outputName, AccessType.WRITE, null,
                                           () -> context.getDataset(outputName));
        }
      }
    });
  } catch (Exception e) {
    LOG.error("Error writing to sinks {} for the batch for time {}.", sinkNames, logicalStartTime, e);
    ranSuccessfully = false;
  }

  // run onRunFinish() for each sink
  for (String stageName : traversalOrder) {
    SubmitterLifecycle<?> plugin = stages.get(stageName);
    StageSpec stageSpec = phaseSpec.getPhase().getStage(stageName);
    try {
      onRunFinish(pipelineRuntime, sinkFactory, stageSpec, plugin, ranSuccessfully);
    } catch (Exception e) {
      LOG.warn("Unable to execute onRunFinish for sink {}", stageName, e);
    }
  }
}
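The method above follows a fixed per-batch lifecycle: prepareRun() for each grouped stage in topological order (so upstream stages can publish runtime arguments before downstream stages read them), then the combined write, then onRunFinish() for every stage with the success flag, whether or not the write succeeded. The sketch below illustrates only that ordering, using hypothetical types; it is not the CDAP SubmitterLifecycle API.

// MultiSinkLifecycleSketch.java -- illustrative only, all types and stage names are hypothetical.
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class MultiSinkLifecycleSketch {

  /** Hypothetical stand-in for a sink plugin's submitter lifecycle. */
  interface SinkLifecycle {
    void prepareRun(String stageName) throws Exception;

    void write(String stageName) throws Exception;

    void onRunFinish(String stageName, boolean succeeded);
  }

  static void runBatch(List<String> topologicalOrder, Set<String> group, SinkLifecycle lifecycle) {
    // Keep only the stages in this group, preserving topological order so upstream
    // stages can publish runtime arguments before downstream stages read them.
    List<String> traversalOrder = topologicalOrder.stream()
      .filter(group::contains)
      .collect(Collectors.toList());

    for (String stage : traversalOrder) {
      try {
        lifecycle.prepareRun(stage);
      } catch (Exception e) {
        // Mirrors the original: if any sink fails to prepare, skip the whole batch.
        System.err.println("Error preparing sink " + stage + ": " + e.getMessage());
        return;
      }
    }

    // Run the actual writes, remembering whether they succeeded.
    boolean ranSuccessfully = true;
    try {
      for (String stage : traversalOrder) {
        lifecycle.write(stage);
      }
    } catch (Exception e) {
      System.err.println("Error writing to sinks " + traversalOrder + ": " + e.getMessage());
      ranSuccessfully = false;
    }

    // onRunFinish() always runs, so each sink can commit or clean up per-batch state.
    for (String stage : traversalOrder) {
      lifecycle.onRunFinish(stage, ranSuccessfully);
    }
  }

  public static void main(String[] args) {
    Set<String> group = new LinkedHashSet<>(Arrays.asList("ordersSink", "refundsSink"));
    List<String> topologicalOrder = Arrays.asList("source", "ordersSink", "refundsSink");
    runBatch(topologicalOrder, group, new SinkLifecycle() {
      @Override
      public void prepareRun(String stage) {
        System.out.println("prepareRun " + stage);
      }

      @Override
      public void write(String stage) {
        System.out.println("write " + stage);
      }

      @Override
      public void onRunFinish(String stage, boolean succeeded) {
        System.out.println("onRunFinish " + stage + " succeeded=" + succeeded);
      }
    });
  }
}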