Use of co.cask.cdap.etl.common.StageStatisticsCollector in project cdap by caskdata.
The class SparkStreamingPipelineDriver, method run:
private JavaStreamingContext run(final DataStreamsPipelineSpec pipelineSpec, final PipelinePhase pipelinePhase,
                                 final JavaSparkExecutionContext sec,
                                 @Nullable final String checkpointDir) throws Exception {
  Function0<JavaStreamingContext> contextFunction = new Function0<JavaStreamingContext>() {
    @Override
    public JavaStreamingContext call() throws Exception {
      JavaStreamingContext jssc = new JavaStreamingContext(
        new JavaSparkContext(), Durations.milliseconds(pipelineSpec.getBatchIntervalMillis()));
      SparkStreamingPipelineRunner runner = new SparkStreamingPipelineRunner(sec, jssc, pipelineSpec, false);
      PipelinePluginContext pluginContext = new PipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                                      pipelineSpec.isStageLoggingEnabled(),
                                                                      pipelineSpec.isProcessTimingEnabled());
      // stage partitions and statistics collectors are passed as empty maps here.
      // Seems like they should be set at configure time instead of runtime, but that requires an API change.
      try {
        runner.runPipeline(pipelinePhase, StreamingSource.PLUGIN_TYPE, sec, new HashMap<String, Integer>(),
                           pluginContext, new HashMap<String, StageStatisticsCollector>());
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
      if (checkpointDir != null) {
        jssc.checkpoint(checkpointDir);
      }
      return jssc;
    }
  };
  return checkpointDir == null
    ? contextFunction.call()
    : StreamingCompat.getOrCreate(checkpointDir, contextFunction);
}
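The final line is the standard Spark Streaming checkpoint-recovery pattern: the factory runs only when no checkpoint exists yet; otherwise the context and its DStream graph are rebuilt from the checkpoint directory. Below is a minimal sketch of that pattern using Spark's own JavaStreamingContext.getOrCreate, which StreamingCompat.getOrCreate presumably wraps for cross-version compatibility; everything here outside Spark's API (class name, master, app name) is illustrative only.

import org.apache.spark.api.java.function.Function0;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class CheckpointRecoverySketch {
  static JavaStreamingContext createOrRecover(final String checkpointDir) throws Exception {
    Function0<JavaStreamingContext> factory = new Function0<JavaStreamingContext>() {
      @Override
      public JavaStreamingContext call() throws Exception {
        // runs only when checkpointDir holds no prior state: build the context,
        // define the DStream graph, then register the checkpoint directory
        JavaStreamingContext jssc = new JavaStreamingContext("local[2]", "sketch", Durations.seconds(1));
        jssc.checkpoint(checkpointDir);
        return jssc;
      }
    };
    // if a checkpoint already exists, Spark rebuilds the context and DStream graph
    // from it and never invokes the factory
    return JavaStreamingContext.getOrCreate(checkpointDir, factory);
  }
}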
Use of co.cask.cdap.etl.common.StageStatisticsCollector in project cdap by caskdata.
The class BatchSparkPipelineDriver, method updateWorkflowToken:
private void updateWorkflowToken(WorkflowToken token, Map<String, StageStatisticsCollector> collectors) {
  for (Map.Entry<String, StageStatisticsCollector> entry : collectors.entrySet()) {
    SparkStageStatisticsCollector collector = (SparkStageStatisticsCollector) entry.getValue();
    String keyPrefix = Constants.StageStatistics.PREFIX + "." + entry.getKey() + ".";

    String inputRecordKey = keyPrefix + Constants.StageStatistics.INPUT_RECORDS;
    token.put(inputRecordKey, String.valueOf(collector.getInputRecordCount()));

    String outputRecordKey = keyPrefix + Constants.StageStatistics.OUTPUT_RECORDS;
    token.put(outputRecordKey, String.valueOf(collector.getOutputRecordCount()));

    String errorRecordKey = keyPrefix + Constants.StageStatistics.ERROR_RECORDS;
    token.put(errorRecordKey, String.valueOf(collector.getErrorRecordCount()));
  }
}
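Each statistic lands in the workflow token under a key of the form prefix.stageName.suffix, so a downstream workflow node (for example, a condition) can read it back by name. A hypothetical illustration of the key scheme follows; the literal values of the Constants.StageStatistics fields are assumptions, not copied from CDAP.

public class TokenKeySketch {
  public static void main(String[] args) {
    String prefix = "stage.statistics";  // assumed value of Constants.StageStatistics.PREFIX
    String stageName = "parseLogs";      // example stage name
    String suffix = "input.records";     // assumed value of Constants.StageStatistics.INPUT_RECORDS
    String key = prefix + "." + stageName + "." + suffix;
    // a condition node could later read the count back via token.get(key)
    System.out.println(key);  // stage.statistics.parseLogs.input.records
  }
}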
Use of co.cask.cdap.etl.common.StageStatisticsCollector in project cdap by caskdata.
The class MapReduceTransformExecutorFactory, method getMultiOutputTransform:
private <IN, ERROR> TrackedMultiOutputTransform<IN, ERROR> getMultiOutputTransform(StageSpec stageSpec)
  throws Exception {
  String stageName = stageSpec.getName();
  DefaultMacroEvaluator macroEvaluator = new DefaultMacroEvaluator(arguments, taskContext.getLogicalStartTime(),
                                                                   taskContext, taskContext.getNamespace());
  SplitterTransform<IN, ERROR> splitterTransform = pluginInstantiator.newPluginInstance(stageName, macroEvaluator);
  TransformContext transformContext = createRuntimeContext(stageSpec);
  splitterTransform.initialize(transformContext);

  StageMetrics stageMetrics = new DefaultStageMetrics(metrics, stageName);
  TaskAttemptContext taskAttemptContext = (TaskAttemptContext) taskContext.getHadoopContext();
  StageStatisticsCollector collector = isPipelineContainsCondition
    ? new MapReduceStageStatisticsCollector(stageName, taskAttemptContext)
    : new NoopStageStatisticsCollector();
  return new TrackedMultiOutputTransform<>(splitterTransform, stageMetrics,
                                           taskContext.getDataTracer(stageName), collector);
}
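The ternary means stages only pay the statistics cost when the pipeline actually contains a condition; otherwise every increment is a no-op. A minimal sketch of what the no-op variant can look like, assuming StageStatisticsCollector exposes one increment method per counter written to the workflow token above (the method names are assumptions, and CDAP's actual NoopStageStatisticsCollector may differ):

// assumed interface methods; see the hedge in the paragraph above
public class NoopCollectorSketch implements StageStatisticsCollector {
  @Override
  public void incrementInputRecordCount() {
    // deliberately empty: with no condition in the pipeline, nothing reads the stats
  }

  @Override
  public void incrementOutputRecordCount() {
    // deliberately empty
  }

  @Override
  public void incrementErrorRecordCount() {
    // deliberately empty
  }
}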
Use of co.cask.cdap.etl.common.StageStatisticsCollector in project cdap by caskdata.
The class SparkPipelineRunner, method runPipeline:
public void runPipeline(PipelinePhase pipelinePhase, String sourcePluginType, JavaSparkExecutionContext sec,
                        Map<String, Integer> stagePartitions, PluginContext pluginContext,
                        Map<String, StageStatisticsCollector> collectors) throws Exception {
  MacroEvaluator macroEvaluator =
    new DefaultMacroEvaluator(new BasicArguments(sec), sec.getLogicalStartTime(), sec, sec.getNamespace());
  Map<String, EmittedRecords> emittedRecords = new HashMap<>();

  // should never happen, but removes warning
  if (pipelinePhase.getDag() == null) {
    throw new IllegalStateException("Pipeline phase has no connections.");
  }
  for (String stageName : pipelinePhase.getDag().getTopologicalOrder()) {
    StageSpec stageSpec = pipelinePhase.getStage(stageName);
    //noinspection ConstantConditions
    String pluginType = stageSpec.getPluginType();
    EmittedRecords.Builder emittedBuilder = EmittedRecords.builder();

    // don't want to do an additional filter for stages that can emit errors
    // but aren't connected to an ErrorTransform; similarly, don't want to do an
    // additional filter for alerts when the stage isn't connected to an AlertPublisher
    boolean hasErrorOutput = false;
    boolean hasAlertOutput = false;
    Set<String> outputs = pipelinePhase.getStageOutputs(stageSpec.getName());
    for (String output : outputs) {
      String outputPluginType = pipelinePhase.getStage(output).getPluginType();
      //noinspection ConstantConditions
      if (ErrorTransform.PLUGIN_TYPE.equals(outputPluginType)) {
        hasErrorOutput = true;
      } else if (AlertPublisher.PLUGIN_TYPE.equals(outputPluginType)) {
        hasAlertOutput = true;
      }
    }

    SparkCollection<Object> stageData = null;
    Map<String, SparkCollection<Object>> inputDataCollections = new HashMap<>();
    Set<String> stageInputs = pipelinePhase.getStageInputs(stageName);
    for (String inputStageName : stageInputs) {
      StageSpec inputStageSpec = pipelinePhase.getStage(inputStageName);
      if (inputStageSpec == null) {
        // means the input to this stage is in a separate phase. For example, it is an action.
        continue;
      }
      String port = null;
      // fetch the output port the input stage emits to this stage on, unless either
      // stage is a connector (connectors do not use output ports)
      if (!Constants.Connector.PLUGIN_TYPE.equals(inputStageSpec.getPluginType())
        && !Constants.Connector.PLUGIN_TYPE.equals(pluginType)) {
        port = inputStageSpec.getOutputPorts().get(stageName).getPort();
      }
      SparkCollection<Object> inputRecords = port == null
        ? emittedRecords.get(inputStageName).outputRecords
        : emittedRecords.get(inputStageName).outputPortRecords.get(port);
      inputDataCollections.put(inputStageName, inputRecords);
    }
    // initialize stageData as the union of all input collections
    if (!inputDataCollections.isEmpty()) {
      Iterator<SparkCollection<Object>> inputCollectionIter = inputDataCollections.values().iterator();
      stageData = inputCollectionIter.next();
      // don't union input records if we're joining or if we're processing errors
      while (!BatchJoiner.PLUGIN_TYPE.equals(pluginType) && !ErrorTransform.PLUGIN_TYPE.equals(pluginType)
        && inputCollectionIter.hasNext()) {
        stageData = stageData.union(inputCollectionIter.next());
      }
    }
    boolean isConnectorSource =
      Constants.Connector.PLUGIN_TYPE.equals(pluginType) && pipelinePhase.getSources().contains(stageName);
    boolean isConnectorSink =
      Constants.Connector.PLUGIN_TYPE.equals(pluginType) && pipelinePhase.getSinks().contains(stageName);
    StageStatisticsCollector collector =
      collectors.get(stageName) == null ? new NoopStageStatisticsCollector() : collectors.get(stageName);
    PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageSpec, sec, collector);
    if (stageData == null) {
      // this check is nested inside the null check to avoid warnings about stageData
      // possibly being null in the other else-if conditions
      if (sourcePluginType.equals(pluginType) || isConnectorSource) {
        SparkCollection<RecordInfo<Object>> combinedData = getSource(stageSpec, collector);
        emittedBuilder = addEmitted(emittedBuilder, pipelinePhase, stageSpec, combinedData,
                                    hasErrorOutput, hasAlertOutput);
      } else {
        throw new IllegalStateException(String.format("Stage '%s' has no input and is not a source.", stageName));
      }
    } else if (BatchSink.PLUGIN_TYPE.equals(pluginType) || isConnectorSink) {
      stageData.store(stageSpec, Compat.convert(new BatchSinkFunction(pluginFunctionContext)));
    } else if (Transform.PLUGIN_TYPE.equals(pluginType)) {
      SparkCollection<RecordInfo<Object>> combinedData = stageData.transform(stageSpec, collector);
      emittedBuilder = addEmitted(emittedBuilder, pipelinePhase, stageSpec, combinedData,
                                  hasErrorOutput, hasAlertOutput);
    } else if (SplitterTransform.PLUGIN_TYPE.equals(pluginType)) {
      SparkCollection<RecordInfo<Object>> combinedData = stageData.multiOutputTransform(stageSpec, collector);
      emittedBuilder = addEmitted(emittedBuilder, pipelinePhase, stageSpec, combinedData,
                                  hasErrorOutput, hasAlertOutput);
    } else if (ErrorTransform.PLUGIN_TYPE.equals(pluginType)) {
      // union all the errors coming into this stage
      SparkCollection<ErrorRecord<Object>> inputErrors = null;
      for (String inputStage : stageInputs) {
        SparkCollection<ErrorRecord<Object>> inputErrorsFromStage = emittedRecords.get(inputStage).errorRecords;
        if (inputErrorsFromStage == null) {
          continue;
        }
        inputErrors = inputErrors == null ? inputErrorsFromStage : inputErrors.union(inputErrorsFromStage);
      }
      if (inputErrors != null) {
        SparkCollection<RecordInfo<Object>> combinedData =
          inputErrors.flatMap(stageSpec, Compat.convert(new ErrorTransformFunction<>(pluginFunctionContext)));
        emittedBuilder = addEmitted(emittedBuilder, pipelinePhase, stageSpec, combinedData,
                                    hasErrorOutput, hasAlertOutput);
      }
    } else if (SparkCompute.PLUGIN_TYPE.equals(pluginType)) {
      SparkCompute<Object, Object> sparkCompute = pluginContext.newPluginInstance(stageName, macroEvaluator);
      emittedBuilder = emittedBuilder.setOutput(stageData.compute(stageSpec, sparkCompute));
    } else if (SparkSink.PLUGIN_TYPE.equals(pluginType)) {
      SparkSink<Object> sparkSink = pluginContext.newPluginInstance(stageName, macroEvaluator);
      stageData.store(stageSpec, sparkSink);
    } else if (BatchAggregator.PLUGIN_TYPE.equals(pluginType)) {
      Integer partitions = stagePartitions.get(stageName);
      SparkCollection<RecordInfo<Object>> combinedData = stageData.aggregate(stageSpec, partitions, collector);
      emittedBuilder = addEmitted(emittedBuilder, pipelinePhase, stageSpec, combinedData,
                                  hasErrorOutput, hasAlertOutput);
    } else if (BatchJoiner.PLUGIN_TYPE.equals(pluginType)) {
      BatchJoiner<Object, Object, Object> joiner = pluginContext.newPluginInstance(stageName, macroEvaluator);
      BatchJoinerRuntimeContext joinerRuntimeContext = pluginFunctionContext.createBatchRuntimeContext();
      joiner.initialize(joinerRuntimeContext);

      Map<String, SparkPairCollection<Object, Object>> preJoinStreams = new HashMap<>();
      for (Map.Entry<String, SparkCollection<Object>> inputStreamEntry : inputDataCollections.entrySet()) {
        String inputStage = inputStreamEntry.getKey();
        SparkCollection<Object> inputStream = inputStreamEntry.getValue();
        preJoinStreams.put(inputStage, addJoinKey(stageSpec, inputStage, inputStream, collector));
      }

      Set<String> remainingInputs = new HashSet<>(inputDataCollections.keySet());
      Integer numPartitions = stagePartitions.get(stageName);
      SparkPairCollection<Object, List<JoinElement<Object>>> joinedInputs = null;

      // inner join on required inputs
      for (final String inputStageName : joiner.getJoinConfig().getRequiredInputs()) {
        SparkPairCollection<Object, Object> preJoinCollection = preJoinStreams.get(inputStageName);
        if (joinedInputs == null) {
          joinedInputs = preJoinCollection.mapValues(new InitialJoinFunction<>(inputStageName));
        } else {
          JoinFlattenFunction<Object> joinFlattenFunction = new JoinFlattenFunction<>(inputStageName);
          joinedInputs = numPartitions == null
            ? joinedInputs.join(preJoinCollection).mapValues(joinFlattenFunction)
            : joinedInputs.join(preJoinCollection, numPartitions).mapValues(joinFlattenFunction);
        }
        remainingInputs.remove(inputStageName);
      }
      // outer join on non-required inputs: full outer if there were no required inputs,
      // left outer otherwise
      boolean isFullOuter = joinedInputs == null;
      for (final String inputStageName : remainingInputs) {
        SparkPairCollection<Object, Object> preJoinStream = preJoinStreams.get(inputStageName);
        if (joinedInputs == null) {
          joinedInputs = preJoinStream.mapValues(new InitialJoinFunction<>(inputStageName));
        } else if (isFullOuter) {
          OuterJoinFlattenFunction<Object> flattenFunction = new OuterJoinFlattenFunction<>(inputStageName);
          joinedInputs = numPartitions == null
            ? joinedInputs.fullOuterJoin(preJoinStream).mapValues(flattenFunction)
            : joinedInputs.fullOuterJoin(preJoinStream, numPartitions).mapValues(flattenFunction);
        } else {
          LeftJoinFlattenFunction<Object> flattenFunction = new LeftJoinFlattenFunction<>(inputStageName);
          joinedInputs = numPartitions == null
            ? joinedInputs.leftOuterJoin(preJoinStream).mapValues(flattenFunction)
            : joinedInputs.leftOuterJoin(preJoinStream, numPartitions).mapValues(flattenFunction);
        }
      }

      // should never happen, but removes warnings
      if (joinedInputs == null) {
        throw new IllegalStateException("There are no inputs into join stage " + stageName);
      }
      emittedBuilder = emittedBuilder.setOutput(mergeJoinResults(stageSpec, joinedInputs, collector).cache());
    } else if (Windower.PLUGIN_TYPE.equals(pluginType)) {
      Windower windower = pluginContext.newPluginInstance(stageName, macroEvaluator);
      emittedBuilder = emittedBuilder.setOutput(stageData.window(stageSpec, windower));
    } else if (AlertPublisher.PLUGIN_TYPE.equals(pluginType)) {
      // union all the alerts coming into this stage
      SparkCollection<Alert> inputAlerts = null;
      for (String inputStage : stageInputs) {
        SparkCollection<Alert> inputAlertsFromStage = emittedRecords.get(inputStage).alertRecords;
        if (inputAlertsFromStage == null) {
          continue;
        }
        inputAlerts = inputAlerts == null ? inputAlertsFromStage : inputAlerts.union(inputAlertsFromStage);
      }
      if (inputAlerts != null) {
        inputAlerts.publishAlerts(stageSpec, collector);
      }
    } else {
      throw new IllegalStateException(String.format("Stage %s is of unsupported plugin type %s.",
                                                    stageName, pluginType));
    }

    emittedRecords.put(stageName, emittedBuilder.build());
  }
}
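The overall shape of the method: because stages are visited in topological order, every input a stage needs has already been memoized in emittedRecords by the time that stage runs, and a stage feeding multiple consumers is computed once. A simplified, self-contained sketch of that traversal pattern follows; all names and types here are hypothetical stand-ins, not CDAP API.

import java.util.*;

public class DagTraversalSketch {
  public static void main(String[] args) {
    // stage -> its input stages, inserted in topological order (source -> transform -> sink)
    Map<String, List<String>> inputs = new LinkedHashMap<>();
    inputs.put("source", Collections.<String>emptyList());
    inputs.put("transform", Collections.singletonList("source"));
    inputs.put("sink", Collections.singletonList("transform"));

    Map<String, String> emitted = new HashMap<>();  // stands in for Map<String, EmittedRecords>
    for (String stage : inputs.keySet()) {          // LinkedHashMap iteration preserves that order
      List<String> upstream = inputs.get(stage);
      String data = upstream.isEmpty()
        ? "records(" + stage + ")"                          // a source produces its own data
        : "process(" + emitted.get(upstream.get(0)) + ")";  // others read memoized upstream output
      emitted.put(stage, data);
      System.out.println(stage + " -> " + data);
    }
  }
}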
Use of co.cask.cdap.etl.common.StageStatisticsCollector in project cdap by caskdata.
The class BatchSparkPipelineDriver, method run:
@Override
public void run(DatasetContext context) throws Exception {
  BatchPhaseSpec phaseSpec = GSON.fromJson(sec.getSpecification().getProperty(Constants.PIPELINEID),
                                           BatchPhaseSpec.class);
  Path configFile = sec.getLocalizationContext().getLocalFile("HydratorSpark.config").toPath();
  try (BufferedReader reader = Files.newBufferedReader(configFile, StandardCharsets.UTF_8)) {
    String object = reader.readLine();
    SparkBatchSourceSinkFactoryInfo sourceSinkInfo = GSON.fromJson(object, SparkBatchSourceSinkFactoryInfo.class);
    sourceFactory = sourceSinkInfo.getSparkBatchSourceFactory();
    sinkFactory = sourceSinkInfo.getSparkBatchSinkFactory();
    stagePartitions = sourceSinkInfo.getStagePartitions();
  }
  datasetContext = context;
  numOfRecordsPreview = phaseSpec.getNumOfRecordsPreview();
  PipelinePluginContext pluginContext = new PipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                                  phaseSpec.isStageLoggingEnabled(),
                                                                  phaseSpec.isProcessTimingEnabled());

  Map<String, StageStatisticsCollector> collectors = new HashMap<>();
  if (phaseSpec.pipelineContainsCondition()) {
    Iterator<StageSpec> iterator = phaseSpec.getPhase().iterator();
    while (iterator.hasNext()) {
      StageSpec spec = iterator.next();
      collectors.put(spec.getName(), new SparkStageStatisticsCollector(jsc));
    }
  }

  try {
    PipelinePluginInstantiator pluginInstantiator =
      new PipelinePluginInstantiator(pluginContext, sec.getMetrics(), phaseSpec, new SingleConnectorFactory());
    runPipeline(phaseSpec.getPhase(), BatchSource.PLUGIN_TYPE, sec, stagePartitions, pluginInstantiator, collectors);
  } finally {
    updateWorkflowToken(sec.getWorkflowToken(), collectors);
  }
}
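SparkStageStatisticsCollector is handed the JavaSparkContext, which suggests the counters are Spark accumulators: increments happen inside executor tasks and aggregate back to the driver, where updateWorkflowToken reads them after the run. Below is a sketch of an accumulator-backed collector under that assumption; the getter names match the snippets above, but the increment methods and the use of accumulators are assumptions, not CDAP's actual implementation.

import java.io.Serializable;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.util.LongAccumulator;

// assumes the StageStatisticsCollector interface sketched earlier; Serializable so
// instances can be captured by closures that run on executors
public class AccumulatorCollectorSketch implements StageStatisticsCollector, Serializable {
  private final LongAccumulator input;
  private final LongAccumulator output;
  private final LongAccumulator error;

  public AccumulatorCollectorSketch(JavaSparkContext jsc) {
    // accumulators merge per-task increments back to the driver
    this.input = jsc.sc().longAccumulator("input.records");
    this.output = jsc.sc().longAccumulator("output.records");
    this.error = jsc.sc().longAccumulator("error.records");
  }

  @Override
  public void incrementInputRecordCount() {
    input.add(1L);
  }

  @Override
  public void incrementOutputRecordCount() {
    output.add(1L);
  }

  @Override
  public void incrementErrorRecordCount() {
    error.add(1L);
  }

  // read on the driver once the job completes, e.g. by updateWorkflowToken
  public long getInputRecordCount() {
    return input.value();
  }

  public long getOutputRecordCount() {
    return output.value();
  }

  public long getErrorRecordCount() {
    return error.value();
  }
}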