Use of co.cask.cdap.etl.common.DefaultMacroEvaluator in project cdap by caskdata.
The class JavaSparkMainWrapper, method run:
@Override
public void run(JavaSparkExecutionContext sec) throws Exception {
  String stageName = sec.getSpecification().getProperty(ExternalSparkProgram.STAGE_NAME);
  BatchPhaseSpec batchPhaseSpec = GSON.fromJson(sec.getSpecification().getProperty(Constants.PIPELINEID),
                                                BatchPhaseSpec.class);
  PipelinePluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                                       batchPhaseSpec.isStageLoggingEnabled(),
                                                                       batchPhaseSpec.isProcessTimingEnabled());
  Class<?> mainClass = pluginContext.loadPluginClass(stageName);
  // if it's a CDAP JavaSparkMain, instantiate it and call the run method
  if (JavaSparkMain.class.isAssignableFrom(mainClass)) {
    MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(sec.getWorkflowToken(), sec.getRuntimeArguments(),
                                                              sec.getLogicalStartTime(), sec.getSecureStore(),
                                                              sec.getNamespace());
    JavaSparkMain javaSparkMain = pluginContext.newPluginInstance(stageName, macroEvaluator);
    javaSparkMain.run(sec);
  } else {
    // otherwise, assume there is a 'main' method and call it
    String programArgs = getProgramArgs(sec, stageName);
    String[] args = programArgs == null
      ? RuntimeArguments.toPosixArray(sec.getRuntimeArguments())
      : programArgs.split(" ");
    final Method mainMethod = mainClass.getMethod("main", String[].class);
    final Object[] methodArgs = new Object[1];
    methodArgs[0] = args;
    Caller caller = pluginContext.getCaller(stageName);
    caller.call(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        mainMethod.invoke(null, methodArgs);
        return null;
      }
    });
  }
}
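The branch on JavaSparkMain.class.isAssignableFrom(mainClass) is what routes a user-supplied Spark program through the macro-evaluated plugin instantiation. Below is a minimal sketch of such a program; the class name WordCountProgram and the input.path runtime argument are hypothetical, and it assumes the co.cask.cdap.api.spark.JavaSparkMain interface with a single run(JavaSparkExecutionContext) method.

import co.cask.cdap.api.spark.JavaSparkExecutionContext;
import co.cask.cdap.api.spark.JavaSparkMain;
import org.apache.spark.api.java.JavaSparkContext;

// Hypothetical user program: the wrapper above sees that it implements JavaSparkMain,
// builds it through pluginContext.newPluginInstance(stageName, macroEvaluator) so that
// macros in its configuration are resolved, and then calls run(sec) directly.
public class WordCountProgram implements JavaSparkMain {

  @Override
  public void run(JavaSparkExecutionContext sec) throws Exception {
    JavaSparkContext jsc = new JavaSparkContext();
    // 'input.path' is an assumed runtime argument for this sketch
    long lines = jsc.textFile(sec.getRuntimeArguments().get("input.path")).count();
    jsc.stop();
  }
}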
Use of co.cask.cdap.etl.common.DefaultMacroEvaluator in project cdap by caskdata.
The class StreamingBatchSinkFunction, method call:
@Override
public Void call(JavaRDD<T> data, Time batchTime) throws Exception {
  final long logicalStartTime = batchTime.milliseconds();
  MacroEvaluator evaluator = new DefaultMacroEvaluator(sec.getWorkflowToken(), sec.getRuntimeArguments(),
                                                       logicalStartTime, sec.getSecureStore(), sec.getNamespace());
  PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                               stageInfo.isStageLoggingEnabled(),
                                                               stageInfo.isProcessTimingEnabled());
  final SparkBatchSinkFactory sinkFactory = new SparkBatchSinkFactory();
  final String stageName = stageInfo.getName();
  final BatchSink<Object, Object, Object> batchSink = pluginContext.newPluginInstance(stageName, evaluator);
  boolean isPrepared = false;
  boolean isDone = false;
  try {
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkBatchSinkContext sinkContext =
          new SparkBatchSinkContext(sinkFactory, sec, datasetContext, logicalStartTime, stageInfo);
        batchSink.prepareRun(sinkContext);
      }
    });
    isPrepared = true;
    sinkFactory.writeFromRDD(data.flatMapToPair(sinkFunction), sec, stageName, Object.class, Object.class);
    isDone = true;
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkBatchSinkContext sinkContext =
          new SparkBatchSinkContext(sinkFactory, sec, datasetContext, logicalStartTime, stageInfo);
        batchSink.onRunFinish(true, sinkContext);
      }
    });
  } catch (Exception e) {
    LOG.error("Error writing to sink {} for the batch for time {}.", stageName, logicalStartTime, e);
  } finally {
    if (isPrepared && !isDone) {
      sec.execute(new TxRunnable() {
        @Override
        public void run(DatasetContext datasetContext) throws Exception {
          SparkBatchSinkContext sinkContext =
            new SparkBatchSinkContext(sinkFactory, sec, datasetContext, logicalStartTime, stageInfo);
          batchSink.onRunFinish(false, sinkContext);
        }
      });
    }
  }
  return null;
}
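The isPrepared/isDone flags drive the sink lifecycle: prepareRun runs before the micro-batch is written, onRunFinish(true, ...) runs after a successful write, and onRunFinish(false, ...) runs from the finally block when the write failed after a successful prepare. A sketch of a sink on the receiving end of that contract follows; the class name is hypothetical and the signatures assume the co.cask.cdap.etl.api.batch.BatchSink API of CDAP 4.x.

import co.cask.cdap.api.data.format.StructuredRecord;
import co.cask.cdap.api.dataset.lib.KeyValue;
import co.cask.cdap.etl.api.Emitter;
import co.cask.cdap.etl.api.batch.BatchSink;
import co.cask.cdap.etl.api.batch.BatchSinkContext;

// Hypothetical sink: the driver above calls prepareRun() inside a transaction, writes the
// RDD through SparkBatchSinkFactory, and finally calls onRunFinish() with the outcome.
public class NoOpSink extends BatchSink<StructuredRecord, Void, Void> {

  @Override
  public void prepareRun(BatchSinkContext context) throws Exception {
    // a real sink would register its outputs here, before the batch is written
  }

  @Override
  public void onRunFinish(boolean succeeded, BatchSinkContext context) {
    // commit or clean up partial output depending on 'succeeded'
  }

  @Override
  public void transform(StructuredRecord input, Emitter<KeyValue<Void, Void>> emitter) {
    // drop every record; a real sink would emit KeyValue pairs for its output format
  }
}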
Use of co.cask.cdap.etl.common.DefaultMacroEvaluator in project cdap by caskdata.
The class StreamingSparkSinkFunction, method call:
@Override
public Void call(JavaRDD<T> data, Time batchTime) throws Exception {
  final long logicalStartTime = batchTime.milliseconds();
  MacroEvaluator evaluator = new DefaultMacroEvaluator(sec.getWorkflowToken(), sec.getRuntimeArguments(),
                                                       logicalStartTime, sec.getSecureStore(), sec.getNamespace());
  final PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                                     stageInfo.isStageLoggingEnabled(),
                                                                     stageInfo.isProcessTimingEnabled());
  final String stageName = stageInfo.getName();
  final SparkSink<T> sparkSink = pluginContext.newPluginInstance(stageName, evaluator);
  boolean isPrepared = false;
  boolean isDone = false;
  try {
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkPluginContext context = new BasicSparkPluginContext(sec, datasetContext, stageInfo);
        sparkSink.prepareRun(context);
      }
    });
    isPrepared = true;
    final SparkExecutionPluginContext sparkExecutionPluginContext =
      new SparkStreamingExecutionContext(sec, JavaSparkContext.fromSparkContext(data.rdd().context()),
                                         logicalStartTime, stageInfo);
    final JavaRDD<T> countedRDD =
      data.map(new CountingFunction<T>(stageName, sec.getMetrics(), "records.in", null)).cache();
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        sparkSink.run(sparkExecutionPluginContext, countedRDD);
      }
    });
    isDone = true;
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkPluginContext context = new BasicSparkPluginContext(sec, datasetContext, stageInfo);
        sparkSink.onRunFinish(true, context);
      }
    });
  } catch (Exception e) {
    LOG.error("Error while executing sink {} for the batch for time {}.", stageName, logicalStartTime, e);
  } finally {
    if (isPrepared && !isDone) {
      sec.execute(new TxRunnable() {
        @Override
        public void run(DatasetContext datasetContext) throws Exception {
          SparkPluginContext context = new BasicSparkPluginContext(sec, datasetContext, stageInfo);
          sparkSink.onRunFinish(false, context);
        }
      });
    }
  }
  return null;
}
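Here the whole micro-batch RDD is handed to a SparkSink, with prepareRun/run/onRunFinish again lining up with the three transactional execute blocks. A sketch of a sink that would fit this driver follows; the class name is hypothetical and it assumes the co.cask.cdap.etl.api.batch.SparkSink API, where run(SparkExecutionPluginContext, JavaRDD) receives the batch.

import co.cask.cdap.api.data.format.StructuredRecord;
import co.cask.cdap.etl.api.batch.SparkExecutionPluginContext;
import co.cask.cdap.etl.api.batch.SparkPluginContext;
import co.cask.cdap.etl.api.batch.SparkSink;
import org.apache.spark.api.java.JavaRDD;

// Hypothetical sink: prepareRun() runs in the first transaction above, run() receives the
// counted RDD in the second, and onRunFinish() is called with true or false at the end.
public class CountingSparkSink extends SparkSink<StructuredRecord> {

  @Override
  public void prepareRun(SparkPluginContext context) throws Exception {
    // datasets or other resources needed by run() would be set up here
  }

  @Override
  public void run(SparkExecutionPluginContext context, JavaRDD<StructuredRecord> input) throws Exception {
    // a real sink would persist the records; this sketch only materializes the RDD
    long records = input.count();
  }

  @Override
  public void onRunFinish(boolean succeeded, SparkPluginContext context) {
    // finalize or roll back external state depending on 'succeeded'
  }
}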
Use of co.cask.cdap.etl.common.DefaultMacroEvaluator in project cdap by caskdata.
The class SparkPipelineRunner, method runPipeline:
public void runPipeline(PipelinePhase pipelinePhase, String sourcePluginType, JavaSparkExecutionContext sec,
                        Map<String, Integer> stagePartitions, PipelinePluginContext pluginContext) throws Exception {
  MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(sec.getWorkflowToken(), sec.getRuntimeArguments(),
                                                            sec.getLogicalStartTime(), sec, sec.getNamespace());
  Map<String, SparkCollection<Object>> stageDataCollections = new HashMap<>();
  Map<String, SparkCollection<ErrorRecord<Object>>> stageErrorCollections = new HashMap<>();
  // should never happen, but removes warning
  if (pipelinePhase.getDag() == null) {
    throw new IllegalStateException("Pipeline phase has no connections.");
  }
  for (String stageName : pipelinePhase.getDag().getTopologicalOrder()) {
    StageInfo stageInfo = pipelinePhase.getStage(stageName);
    //noinspection ConstantConditions
    String pluginType = stageInfo.getPluginType();
    // don't want to do an additional filter for stages that can emit errors,
    // but aren't connected to an ErrorTransform
    boolean hasErrorOutput = false;
    Set<String> outputs = pipelinePhase.getStageOutputs(stageInfo.getName());
    for (String output : outputs) {
      //noinspection ConstantConditions
      if (ErrorTransform.PLUGIN_TYPE.equals(pipelinePhase.getStage(output).getPluginType())) {
        hasErrorOutput = true;
        break;
      }
    }
    SparkCollection<Object> stageData = null;
    Map<String, SparkCollection<Object>> inputDataCollections = new HashMap<>();
    Set<String> stageInputs = stageInfo.getInputs();
    for (String inputStageName : stageInputs) {
      inputDataCollections.put(inputStageName, stageDataCollections.get(inputStageName));
    }
    // initialize the stageRDD as the union of all input RDDs.
    if (!inputDataCollections.isEmpty()) {
      Iterator<SparkCollection<Object>> inputCollectionIter = inputDataCollections.values().iterator();
      stageData = inputCollectionIter.next();
      // don't union inputs records if we're joining or if we're processing errors
      while (!BatchJoiner.PLUGIN_TYPE.equals(pluginType) && !ErrorTransform.PLUGIN_TYPE.equals(pluginType)
        && inputCollectionIter.hasNext()) {
        stageData = stageData.union(inputCollectionIter.next());
      }
    }
    SparkCollection<ErrorRecord<Object>> stageErrors = null;
    PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageInfo, sec);
    if (stageData == null) {
      // this if-else is nested inside the null check to avoid warnings about stageData possibly being
      // null in the other else-if conditions
      if (sourcePluginType.equals(pluginType)) {
        SparkCollection<Tuple2<Boolean, Object>> combinedData = getSource(stageInfo);
        if (hasErrorOutput) {
          // need to cache, otherwise the stage can be computed twice, once for output and once for errors.
          combinedData.cache();
          stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
        }
        stageData = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
      } else {
        throw new IllegalStateException(String.format("Stage '%s' has no input and is not a source.", stageName));
      }
    } else if (BatchSink.PLUGIN_TYPE.equals(pluginType)) {
      stageData.store(stageInfo, Compat.convert(new BatchSinkFunction(pluginFunctionContext)));
    } else if (Transform.PLUGIN_TYPE.equals(pluginType)) {
      SparkCollection<Tuple2<Boolean, Object>> combinedData = stageData.transform(stageInfo);
      if (hasErrorOutput) {
        // need to cache, otherwise the stage can be computed twice, once for output and once for errors.
        combinedData.cache();
        stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
      }
      stageData = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
    } else if (ErrorTransform.PLUGIN_TYPE.equals(pluginType)) {
      // union all the errors coming into this stage
      SparkCollection<ErrorRecord<Object>> inputErrors = null;
      for (String inputStage : stageInputs) {
        SparkCollection<ErrorRecord<Object>> inputErrorsFromStage = stageErrorCollections.get(inputStage);
        if (inputErrorsFromStage == null) {
          continue;
        }
        if (inputErrors == null) {
          inputErrors = inputErrorsFromStage;
        } else {
          inputErrors = inputErrors.union(inputErrorsFromStage);
        }
      }
      if (inputErrors != null) {
        SparkCollection<Tuple2<Boolean, Object>> combinedData =
          inputErrors.flatMap(stageInfo, Compat.convert(new ErrorTransformFunction<>(pluginFunctionContext)));
        if (hasErrorOutput) {
          // need to cache, otherwise the stage can be computed twice, once for output and once for errors.
          combinedData.cache();
          stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
        }
        stageData = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
      }
    } else if (SparkCompute.PLUGIN_TYPE.equals(pluginType)) {
      SparkCompute<Object, Object> sparkCompute = pluginContext.newPluginInstance(stageName, macroEvaluator);
      stageData = stageData.compute(stageInfo, sparkCompute);
    } else if (SparkSink.PLUGIN_TYPE.equals(pluginType)) {
      SparkSink<Object> sparkSink = pluginContext.newPluginInstance(stageName, macroEvaluator);
      stageData.store(stageInfo, sparkSink);
    } else if (BatchAggregator.PLUGIN_TYPE.equals(pluginType)) {
      Integer partitions = stagePartitions.get(stageName);
      SparkCollection<Tuple2<Boolean, Object>> combinedData = stageData.aggregate(stageInfo, partitions);
      if (hasErrorOutput) {
        // need to cache, otherwise the stage can be computed twice, once for output and once for errors.
        combinedData.cache();
        stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
      }
      stageData = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
    } else if (BatchJoiner.PLUGIN_TYPE.equals(pluginType)) {
      BatchJoiner<Object, Object, Object> joiner = pluginContext.newPluginInstance(stageName, macroEvaluator);
      BatchJoinerRuntimeContext joinerRuntimeContext = pluginFunctionContext.createBatchRuntimeContext();
      joiner.initialize(joinerRuntimeContext);
      Map<String, SparkPairCollection<Object, Object>> preJoinStreams = new HashMap<>();
      for (Map.Entry<String, SparkCollection<Object>> inputStreamEntry : inputDataCollections.entrySet()) {
        String inputStage = inputStreamEntry.getKey();
        SparkCollection<Object> inputStream = inputStreamEntry.getValue();
        preJoinStreams.put(inputStage, addJoinKey(stageInfo, inputStage, inputStream));
      }
      Set<String> remainingInputs = new HashSet<>();
      remainingInputs.addAll(inputDataCollections.keySet());
      Integer numPartitions = stagePartitions.get(stageName);
      SparkPairCollection<Object, List<JoinElement<Object>>> joinedInputs = null;
      // inner join on required inputs
      for (final String inputStageName : joiner.getJoinConfig().getRequiredInputs()) {
        SparkPairCollection<Object, Object> preJoinCollection = preJoinStreams.get(inputStageName);
        if (joinedInputs == null) {
          joinedInputs = preJoinCollection.mapValues(new InitialJoinFunction<>(inputStageName));
        } else {
          JoinFlattenFunction<Object> joinFlattenFunction = new JoinFlattenFunction<>(inputStageName);
          joinedInputs = numPartitions == null
            ? joinedInputs.join(preJoinCollection).mapValues(joinFlattenFunction)
            : joinedInputs.join(preJoinCollection, numPartitions).mapValues(joinFlattenFunction);
        }
        remainingInputs.remove(inputStageName);
      }
      // outer join on non-required inputs
      boolean isFullOuter = joinedInputs == null;
      for (final String inputStageName : remainingInputs) {
        SparkPairCollection<Object, Object> preJoinStream = preJoinStreams.get(inputStageName);
        if (joinedInputs == null) {
          joinedInputs = preJoinStream.mapValues(new InitialJoinFunction<>(inputStageName));
        } else {
          if (isFullOuter) {
            OuterJoinFlattenFunction<Object> flattenFunction = new OuterJoinFlattenFunction<>(inputStageName);
            joinedInputs = numPartitions == null
              ? joinedInputs.fullOuterJoin(preJoinStream).mapValues(flattenFunction)
              : joinedInputs.fullOuterJoin(preJoinStream, numPartitions).mapValues(flattenFunction);
          } else {
            LeftJoinFlattenFunction<Object> flattenFunction = new LeftJoinFlattenFunction<>(inputStageName);
            joinedInputs = numPartitions == null
              ? joinedInputs.leftOuterJoin(preJoinStream).mapValues(flattenFunction)
              : joinedInputs.leftOuterJoin(preJoinStream, numPartitions).mapValues(flattenFunction);
          }
        }
      }
      // should never happen, but removes warnings
      if (joinedInputs == null) {
        throw new IllegalStateException("There are no inputs into join stage " + stageName);
      }
      stageData = mergeJoinResults(stageInfo, joinedInputs).cache();
    } else if (Windower.PLUGIN_TYPE.equals(pluginType)) {
      Windower windower = pluginContext.newPluginInstance(stageName, macroEvaluator);
      stageData = stageData.window(stageInfo, windower);
    } else {
      throw new IllegalStateException(String.format("Stage %s is of unsupported plugin type %s.",
                                                    stageName, pluginType));
    }
    if (shouldCache(pipelinePhase, stageInfo)) {
      stageData = stageData.cache();
      if (stageErrors != null) {
        stageErrors = stageErrors.cache();
      }
    }
    stageDataCollections.put(stageName, stageData);
    stageErrorCollections.put(stageName, stageErrors);
  }
}
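Several branches above share the same pattern: a stage emits a combined SparkCollection<Tuple2<Boolean, Object>>, which is cached when an ErrorTransform is attached and then split into an output stream and an error stream by two flatMap passes (the OutputFilter and ErrorFilter functions in the real code). The plain-Spark sketch below illustrates that split outside the CDAP SparkCollection API; it assumes the Spark 2.x FlatMapFunction signature (returning an Iterator), and the class name, flag semantics, and sample data are hypothetical.

import java.util.Arrays;
import java.util.Collections;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

public class SplitSketch {

  public static void main(String[] args) {
    JavaSparkContext jsc = new JavaSparkContext(new SparkConf().setAppName("split-sketch").setMaster("local[1]"));
    // each record carries a flag; in this sketch 'true' marks an error record
    JavaRDD<Tuple2<Boolean, Object>> combined = jsc.parallelize(Arrays.asList(
      new Tuple2<Boolean, Object>(false, "good record"),
      new Tuple2<Boolean, Object>(true, "bad record")));
    // cache so the two passes below don't recompute the upstream stage
    combined.cache();
    JavaRDD<Object> outputs = combined.flatMap(
      t -> t._1() ? Collections.<Object>emptyIterator() : Collections.singleton(t._2()).iterator());
    JavaRDD<Object> errors = combined.flatMap(
      t -> t._1() ? Collections.singleton(t._2()).iterator() : Collections.<Object>emptyIterator());
    System.out.println(outputs.count() + " output records, " + errors.count() + " error records");
    jsc.stop();
  }
}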
Use of co.cask.cdap.etl.common.DefaultMacroEvaluator in project cdap by caskdata.
The class ETLMapReduce, method initialize:
@Override
public void initialize() throws Exception {
  MapReduceContext context = getContext();
  Map<String, String> properties = context.getSpecification().getProperties();
  if (Boolean.valueOf(properties.get(Constants.STAGE_LOGGING_ENABLED))) {
    LogStageInjector.start();
  }
  CompositeFinisher.Builder finishers = CompositeFinisher.builder();
  Job job = context.getHadoopJob();
  Configuration hConf = job.getConfiguration();
  hConf.setBoolean("mapreduce.map.speculative", false);
  hConf.setBoolean("mapreduce.reduce.speculative", false);
  // plugin name -> runtime args for that plugin
  Map<String, Map<String, String>> runtimeArgs = new HashMap<>();
  MacroEvaluator evaluator = new DefaultMacroEvaluator(context.getWorkflowToken(), context.getRuntimeArguments(),
                                                       context.getLogicalStartTime(), context,
                                                       context.getNamespace());
  BatchPhaseSpec phaseSpec = GSON.fromJson(properties.get(Constants.PIPELINEID), BatchPhaseSpec.class);
  for (Map.Entry<String, String> pipelineProperty : phaseSpec.getPipelineProperties().entrySet()) {
    hConf.set(pipelineProperty.getKey(), pipelineProperty.getValue());
  }
  PipelinePhase phase = phaseSpec.getPhase();
  PipelinePluginInstantiator pluginInstantiator = new PipelinePluginInstantiator(context, mrMetrics, phaseSpec);
  Map<String, String> inputAliasToStage = new HashMap<>();
  for (String sourceName : phaseSpec.getPhase().getSources()) {
    try {
      BatchConfigurable<BatchSourceContext> batchSource = pluginInstantiator.newPluginInstance(sourceName, evaluator);
      StageInfo stageInfo = phaseSpec.getPhase().getStage(sourceName);
      MapReduceBatchContext sourceContext = new MapReduceBatchContext(context, mrMetrics, stageInfo);
      batchSource.prepareRun(sourceContext);
      runtimeArgs.put(sourceName, sourceContext.getRuntimeArguments());
      for (String inputAlias : sourceContext.getInputNames()) {
        inputAliasToStage.put(inputAlias, sourceName);
      }
      finishers.add(batchSource, sourceContext);
    } catch (Exception e) {
      // Catch the Exception to generate a User Error Log for the Pipeline
      PIPELINE_LOG.error("Failed to initialize batch source '{}' with the error: {}. Please review your pipeline " +
                           "configuration and check the system logs for more details.",
                         sourceName, Throwables.getRootCause(e).getMessage(), Throwables.getRootCause(e));
      throw e;
    }
  }
  hConf.set(INPUT_ALIAS_KEY, GSON.toJson(inputAliasToStage));
  Map<String, SinkOutput> sinkOutputs = new HashMap<>();
  for (StageInfo stageInfo : Sets.union(phase.getStagesOfType(Constants.CONNECTOR_TYPE),
                                        phase.getStagesOfType(BatchSink.PLUGIN_TYPE))) {
    String sinkName = stageInfo.getName();
    // todo: add a better way to get info for all sinks
    if (!phase.getSinks().contains(sinkName)) {
      continue;
    }
    try {
      BatchConfigurable<BatchSinkContext> batchSink = pluginInstantiator.newPluginInstance(sinkName, evaluator);
      MapReduceBatchContext sinkContext = new MapReduceBatchContext(context, mrMetrics, stageInfo);
      batchSink.prepareRun(sinkContext);
      runtimeArgs.put(sinkName, sinkContext.getRuntimeArguments());
      finishers.add(batchSink, sinkContext);
      sinkOutputs.put(sinkName, new SinkOutput(sinkContext.getOutputNames(), stageInfo.getErrorDatasetName()));
    } catch (Exception e) {
      // Catch the Exception to generate a User Error Log for the Pipeline
      PIPELINE_LOG.error("Failed to initialize batch sink '{}' with the error: {}. Please review your pipeline " +
                           "configuration and check the system logs for more details.",
                         sinkName, Throwables.getRootCause(e).getMessage(), Throwables.getRootCause(e));
      throw e;
    }
  }
  finisher = finishers.build();
  hConf.set(SINK_OUTPUTS_KEY, GSON.toJson(sinkOutputs));
  // setup time partition for each error dataset
  for (StageInfo stageInfo : Sets.union(phase.getStagesOfType(Transform.PLUGIN_TYPE),
                                        phase.getStagesOfType(BatchSink.PLUGIN_TYPE))) {
    if (stageInfo.getErrorDatasetName() != null) {
      Map<String, String> args = new HashMap<>();
      args.put(FileSetProperties.OUTPUT_PROPERTIES_PREFIX + "avro.schema.output.key",
               Constants.ERROR_SCHEMA.toString());
      TimePartitionedFileSetArguments.setOutputPartitionTime(args, context.getLogicalStartTime());
      context.addOutput(Output.ofDataset(stageInfo.getErrorDatasetName(), args));
    }
  }
  job.setMapperClass(ETLMapper.class);
  Set<StageInfo> reducers = phaseSpec.getPhase().getStagesOfType(BatchAggregator.PLUGIN_TYPE,
                                                                 BatchJoiner.PLUGIN_TYPE);
  if (!reducers.isEmpty()) {
    job.setReducerClass(ETLReducer.class);
    String reducerName = reducers.iterator().next().getName();
    StageInfo stageInfo = phase.getStage(reducerName);
    Class<?> outputKeyClass;
    Class<?> outputValClass;
    try {
      if (!phaseSpec.getPhase().getStagesOfType(BatchAggregator.PLUGIN_TYPE).isEmpty()) {
        BatchAggregator aggregator = pluginInstantiator.newPluginInstance(reducerName, evaluator);
        DefaultAggregatorContext aggregatorContext = new DefaultAggregatorContext(context, mrMetrics, stageInfo);
        aggregator.prepareRun(aggregatorContext);
        finishers.add(aggregator, aggregatorContext);
        if (aggregatorContext.getNumPartitions() != null) {
          job.setNumReduceTasks(aggregatorContext.getNumPartitions());
        }
        outputKeyClass = aggregatorContext.getGroupKeyClass();
        outputValClass = aggregatorContext.getGroupValueClass();
        if (outputKeyClass == null) {
          outputKeyClass = TypeChecker.getGroupKeyClass(aggregator);
        }
        if (outputValClass == null) {
          outputValClass = TypeChecker.getGroupValueClass(aggregator);
        }
        hConf.set(MAP_KEY_CLASS, outputKeyClass.getName());
        hConf.set(MAP_VAL_CLASS, outputValClass.getName());
        job.setMapOutputKeyClass(getOutputKeyClass(reducerName, outputKeyClass));
        job.setMapOutputValueClass(getOutputValClass(reducerName, outputValClass));
      } else {
        // reducer type is joiner
        BatchJoiner batchJoiner = pluginInstantiator.newPluginInstance(reducerName, evaluator);
        DefaultJoinerContext joinerContext = new DefaultJoinerContext(context, mrMetrics, stageInfo);
        batchJoiner.prepareRun(joinerContext);
        finishers.add(batchJoiner, joinerContext);
        if (joinerContext.getNumPartitions() != null) {
          job.setNumReduceTasks(joinerContext.getNumPartitions());
        }
        outputKeyClass = joinerContext.getJoinKeyClass();
        Class<?> inputRecordClass = joinerContext.getJoinInputRecordClass();
        if (outputKeyClass == null) {
          outputKeyClass = TypeChecker.getJoinKeyClass(batchJoiner);
        }
        if (inputRecordClass == null) {
          inputRecordClass = TypeChecker.getJoinInputRecordClass(batchJoiner);
        }
        hConf.set(MAP_KEY_CLASS, outputKeyClass.getName());
        hConf.set(MAP_VAL_CLASS, inputRecordClass.getName());
        job.setMapOutputKeyClass(getOutputKeyClass(reducerName, outputKeyClass));
        getOutputValClass(reducerName, inputRecordClass);
        // for joiner plugin map output is tagged with stageName
        job.setMapOutputValueClass(TaggedWritable.class);
      }
    } catch (Exception e) {
      // Catch the Exception to generate a User Error Log for the Pipeline
      PIPELINE_LOG.error("Failed to initialize pipeline stage '{}' with the error: {}. Please review your pipeline " +
                           "configuration and check the system logs for more details.",
                         reducerName, Throwables.getRootCause(e).getMessage(), Throwables.getRootCause(e));
      throw e;
    }
  } else {
    job.setNumReduceTasks(0);
  }
  hConf.set(RUNTIME_ARGS_KEY, GSON.toJson(runtimeArgs));
}
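In every example on this page, DefaultMacroEvaluator is only ever handed to newPluginInstance, which substitutes macros in plugin properties at prepare time using the workflow token, runtime arguments, logical start time, secure store, and namespace passed to the constructor. The toy evaluator below illustrates the contract such an evaluator fulfills; it assumes the co.cask.cdap.api.macro.MacroEvaluator interface with lookup and evaluate methods, and is a sketch rather than the CDAP implementation.

import co.cask.cdap.api.macro.InvalidMacroException;
import co.cask.cdap.api.macro.MacroEvaluator;
import java.util.Map;

// Toy evaluator: lookup() serves simple ${key} macros from a map of runtime arguments,
// while macro functions such as logicalStartTime(...) or secure(...) are left unsupported.
public class MapBackedMacroEvaluator implements MacroEvaluator {

  private final Map<String, String> runtimeArguments;

  public MapBackedMacroEvaluator(Map<String, String> runtimeArguments) {
    this.runtimeArguments = runtimeArguments;
  }

  @Override
  public String lookup(String property) throws InvalidMacroException {
    String value = runtimeArguments.get(property);
    if (value == null) {
      throw new InvalidMacroException("No runtime argument for macro property '" + property + "'");
    }
    return value;
  }

  @Override
  public String evaluate(String macroFunction, String... arguments) throws InvalidMacroException {
    throw new InvalidMacroException("Macro function '" + macroFunction + "' is not supported by this sketch");
  }
}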