Use of io.cdap.cdap.etl.common.NoopStageStatisticsCollector in project cdap by caskdata.
The class DStreamCollection, method compute.
@Override
public <U> SparkCollection<U> compute(StageSpec stageSpec, SparkCompute<T, U> compute) throws Exception {
  SparkCompute<T, U> wrappedCompute =
    new DynamicSparkCompute<>(new DynamicDriverContext(stageSpec, sec, new NoopStageStatisticsCollector()), compute);
  Transactionals.execute(sec, new TxRunnable() {
    @Override
    public void run(DatasetContext datasetContext) throws Exception {
      PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec);
      SparkExecutionPluginContext sparkPluginContext =
        new BasicSparkExecutionPluginContext(sec, JavaSparkContext.fromSparkContext(stream.context().sparkContext()),
                                             datasetContext, pipelineRuntime, stageSpec);
      wrappedCompute.initialize(sparkPluginContext);
    }
  }, Exception.class);
  return wrap(stream.transform(new ComputeTransformFunction<>(sec, stageSpec, wrappedCompute)));
}
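NoopStageStatisticsCollector is a null-object implementation: DStreamCollection passes it where per-stage statistics are not being tracked, so downstream code can report statistics unconditionally instead of branching on a nullable collector. A minimal, self-contained sketch of that pattern follows; the interface and its method names are simplified stand-ins for illustration, not the actual CDAP StageStatisticsCollector API.

// Illustrative stand-ins for the CDAP types; the interface and method names
// below are assumptions for this sketch, not the real StageStatisticsCollector API.
interface StatisticsCollector {
  void incrementInputRecordCount();
  void incrementOutputRecordCount();
}

// Null-object implementation: safe to call unconditionally, so transform code
// needs no "is statistics collection enabled?" branching or null checks.
class NoopStatisticsCollector implements StatisticsCollector {
  @Override
  public void incrementInputRecordCount() {
    // intentionally empty
  }

  @Override
  public void incrementOutputRecordCount() {
    // intentionally empty
  }
}

public class NullObjectDemo {
  static void process(Iterable<String> records, StatisticsCollector collector) {
    for (String record : records) {
      collector.incrementInputRecordCount();  // no null check needed
      // ... transform the record here ...
      collector.incrementOutputRecordCount();
    }
  }

  public static void main(String[] args) {
    process(java.util.Arrays.asList("a", "b"), new NoopStatisticsCollector());
  }
}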
Use of io.cdap.cdap.etl.common.NoopStageStatisticsCollector in project cdap by caskdata.
The class DynamicDriverContext, method readExternal.
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
  serializationVersion = in.readUTF();
  stageSpec = (StageSpec) in.readObject();
  sec = (JavaSparkExecutionContext) in.readObject();
  // we intentionally do not serialize this context in order to ensure that the runtime arguments
  // and logical start time are picked up from the JavaSparkExecutionContext. If we serialized it,
  // the arguments and start time of the very first pipeline run would get serialized, then
  // used for every subsequent run that loads from the checkpoint.
  pluginFunctionContext = new PluginFunctionContext(stageSpec, sec, new NoopStageStatisticsCollector());
}
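The interesting part here is what is not serialized: the PluginFunctionContext is rebuilt from the freshly deserialized JavaSparkExecutionContext on every read. A self-contained Externalizable sketch of the same discipline follows, with simplified stand-in fields (String instead of StageSpec, a transient String instead of PluginFunctionContext); it is illustrative only, not the real DynamicDriverContext.

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;

// Simplified stand-in for DynamicDriverContext: String fields replace StageSpec
// and PluginFunctionContext, but the serialization discipline is the same.
public class DriverContextSketch implements Externalizable {
  private String serializationVersion = "1";
  private String stageSpec = "someStage";    // stands in for StageSpec
  private transient String derivedContext;   // stands in for PluginFunctionContext

  public DriverContextSketch() {
    // public no-arg constructor required by Externalizable
  }

  @Override
  public void writeExternal(ObjectOutput out) throws IOException {
    out.writeUTF(serializationVersion);
    out.writeObject(stageSpec);
    // derivedContext is deliberately not written
  }

  @Override
  public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    serializationVersion = in.readUTF();
    stageSpec = (String) in.readObject();
    // rebuilt on every read, so a run restored from a checkpoint sees its own
    // runtime state rather than whatever the first run happened to serialize
    derivedContext = "context-for-" + stageSpec;
  }
}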
Use of io.cdap.cdap.etl.common.NoopStageStatisticsCollector in project cdap by caskdata.
The class MapReduceTransformExecutorFactory, method getTransformation.
@SuppressWarnings("unchecked")
@Override
protected <IN, OUT> TrackedTransform<IN, OUT> getTransformation(StageSpec stageSpec) throws Exception {
  String stageName = stageSpec.getName();
  String pluginType = stageSpec.getPluginType();
  StageMetrics stageMetrics = new DefaultStageMetrics(metrics, stageName);
  TaskAttemptContext taskAttemptContext = (TaskAttemptContext) taskContext.getHadoopContext();
  StageStatisticsCollector collector = collectStageStatistics
    ? new MapReduceStageStatisticsCollector(stageName, taskAttemptContext)
    : new NoopStageStatisticsCollector();
  if (BatchAggregator.PLUGIN_TYPE.equals(pluginType)) {
    Object plugin = pluginInstantiator.newPluginInstance(stageName, macroEvaluator);
    BatchAggregator<?, ?, ?> batchAggregator;
    if (plugin instanceof BatchReducibleAggregator) {
      BatchReducibleAggregator<?, ?, ?, ?> reducibleAggregator = (BatchReducibleAggregator<?, ?, ?, ?>) plugin;
      batchAggregator = new AggregatorBridge<>(reducibleAggregator);
    } else {
      batchAggregator = (BatchAggregator<?, ?, ?>) plugin;
    }
    BatchRuntimeContext runtimeContext = createRuntimeContext(stageSpec);
    batchAggregator.initialize(runtimeContext);
    if (isMapPhase) {
      return getTrackedEmitKeyStep(
        new MapperAggregatorTransformation(batchAggregator, mapOutputKeyClassName, mapOutputValClassName),
        stageMetrics, getDataTracer(stageName), collector);
    } else {
      return getTrackedAggregateStep(
        new ReducerAggregatorTransformation(batchAggregator, mapOutputKeyClassName, mapOutputValClassName),
        stageMetrics, getDataTracer(stageName), collector);
    }
  } else if (BatchJoiner.PLUGIN_TYPE.equals(pluginType)) {
    Object plugin = pluginInstantiator.newPluginInstance(stageName, macroEvaluator);
    BatchJoiner<?, ?, ?> batchJoiner;
    Set<String> filterNullKeyStages = new HashSet<>();
    if (plugin instanceof BatchAutoJoiner) {
      BatchAutoJoiner autoJoiner = (BatchAutoJoiner) plugin;
      FailureCollector failureCollector = new LoggingFailureCollector(stageName, stageSpec.getInputSchemas());
      DefaultAutoJoinerContext context = DefaultAutoJoinerContext.from(stageSpec.getInputSchemas(), failureCollector);
      // the definition will be non-null due to validation by PipelinePhasePreparer at the start of the run
      JoinDefinition joinDefinition = autoJoiner.define(context);
      JoinCondition condition = joinDefinition.getCondition();
      // should never happen as it's checked at deployment time, but add this to be safe
      if (condition.getOp() != JoinCondition.Op.KEY_EQUALITY) {
        failureCollector.addFailure(
          String.format("Join stage '%s' uses a %s condition, which is not supported with the MapReduce engine.",
                        stageName, condition.getOp()),
          "Switch to a different execution engine.");
      }
      failureCollector.getOrThrowException();
      batchJoiner = new JoinerBridge(stageName, autoJoiner, joinDefinition);
      // this is the same as filtering out records that have a null key if they are from an optional stage
      if (condition.getOp() == JoinCondition.Op.KEY_EQUALITY && !((JoinCondition.OnKeys) condition).isNullSafe()) {
        filterNullKeyStages = joinDefinition.getStages().stream()
          .filter(s -> !s.isRequired())
          .map(JoinStage::getStageName)
          .collect(Collectors.toSet());
      }
    } else {
      batchJoiner = (BatchJoiner<?, ?, ?>) plugin;
    }
    BatchJoinerRuntimeContext runtimeContext = createRuntimeContext(stageSpec);
    batchJoiner.initialize(runtimeContext);
    if (isMapPhase) {
      return getTrackedEmitKeyStep(
        new MapperJoinerTransformation(batchJoiner, mapOutputKeyClassName, mapOutputValClassName, filterNullKeyStages),
        stageMetrics, getDataTracer(stageName), collector);
    } else {
      return getTrackedMergeStep(
        new ReducerJoinerTransformation(batchJoiner, mapOutputKeyClassName, mapOutputValClassName,
                                        runtimeContext.getInputSchemas().size()),
        stageMetrics, getDataTracer(stageName), collector);
    }
  }
  return super.getTransformation(stageSpec);
}
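Two patterns are at work above: the collectStageStatistics ternary selects either a real collector or the no-op, and AggregatorBridge adapts a BatchReducibleAggregator to the BatchAggregator shape the rest of the method expects. A compact sketch of that instanceof-plus-bridge dispatch follows, built on invented toy interfaces rather than the CDAP ones.

// Toy interfaces standing in for BatchAggregator and BatchReducibleAggregator;
// only the dispatch-and-bridge shape is taken from the method above.
interface Aggregator {
  String aggregate(String input);
}

interface ReducibleAggregator {
  String reduce(String left, String right);
}

// Adapter in the style of AggregatorBridge: exposes a ReducibleAggregator
// through the plain Aggregator interface so downstream code handles one type.
class AggregatorBridgeSketch implements Aggregator {
  private final ReducibleAggregator delegate;

  AggregatorBridgeSketch(ReducibleAggregator delegate) {
    this.delegate = delegate;
  }

  @Override
  public String aggregate(String input) {
    return delegate.reduce(input, input);  // toy reduction for the demo
  }
}

public class PluginDispatchDemo {
  // same instanceof dispatch as getTransformation: bridge one flavor, cast the other
  static Aggregator asAggregator(Object plugin) {
    if (plugin instanceof ReducibleAggregator) {
      return new AggregatorBridgeSketch((ReducibleAggregator) plugin);
    }
    return (Aggregator) plugin;
  }

  public static void main(String[] args) {
    ReducibleAggregator sum = (left, right) -> left + "+" + right;
    System.out.println(asAggregator(sum).aggregate("x"));  // prints x+x
  }
}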
Use of io.cdap.cdap.etl.common.NoopStageStatisticsCollector in project cdap by caskdata.
The class StreamingBatchSinkFunction, method call.
@Override
public void call(JavaRDD<T> data, Time batchTime) throws Exception {
  final long logicalStartTime = batchTime.milliseconds();
  MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime,
                                                       sec.getSecureStore(), sec.getServiceDiscoverer(),
                                                       sec.getNamespace());
  PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                               stageSpec.isStageLoggingEnabled(),
                                                               stageSpec.isProcessTimingEnabled());
  final SparkBatchSinkFactory sinkFactory = new SparkBatchSinkFactory();
  final String stageName = stageSpec.getName();
  final BatchSink<Object, Object, Object> batchSink = pluginContext.newPluginInstance(stageName, evaluator);
  final PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, logicalStartTime);
  boolean isPrepared = false;
  boolean isDone = false;
  try {
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkBatchSinkContext sinkContext =
          new SparkBatchSinkContext(sinkFactory, sec, datasetContext, pipelineRuntime, stageSpec);
        batchSink.prepareRun(sinkContext);
      }
    });
    isPrepared = true;
    PluginFunctionContext pluginFunctionContext =
      new PluginFunctionContext(stageSpec, sec, pipelineRuntime.getArguments().asMap(), batchTime.milliseconds(),
                                new NoopStageStatisticsCollector());
    Set<String> outputNames = sinkFactory.writeFromRDD(
      data.flatMapToPair(new BatchSinkFunction<T, Object, Object>(pluginFunctionContext, functionCache)),
      sec, stageName);
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        for (String outputName : outputNames) {
          ExternalDatasets.registerLineage(sec.getAdmin(), outputName, AccessType.WRITE, null,
                                           () -> context.getDataset(outputName));
        }
      }
    });
    isDone = true;
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkBatchSinkContext sinkContext =
          new SparkBatchSinkContext(sinkFactory, sec, datasetContext, pipelineRuntime, stageSpec);
        batchSink.onRunFinish(true, sinkContext);
      }
    });
  } catch (Exception e) {
    LOG.error("Error writing to sink {} for the batch at time {}.", stageName, logicalStartTime, e);
  } finally {
    if (isPrepared && !isDone) {
      sec.execute(new TxRunnable() {
        @Override
        public void run(DatasetContext datasetContext) throws Exception {
          SparkBatchSinkContext sinkContext =
            new SparkBatchSinkContext(sinkFactory, sec, datasetContext, pipelineRuntime, stageSpec);
          batchSink.onRunFinish(false, sinkContext);
        }
      });
    }
  }
}
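The isPrepared/isDone flags implement a three-phase sink lifecycle: prepareRun, the write itself, then onRunFinish(true), with the finally block guaranteeing onRunFinish(false) whenever the run was prepared but never completed. A stripped-down sketch of the same discipline follows, using a hypothetical Sink interface in place of CDAP's BatchSink, and rethrowing failures instead of logging them.

// Hypothetical Sink interface; only the flag-and-finally shape is taken from
// the method above, and failures are rethrown here rather than logged.
interface Sink {
  void prepareRun() throws Exception;
  void onRunFinish(boolean succeeded);
}

public class SinkLifecycleDemo {
  static void writeBatch(Sink sink, Runnable write) throws Exception {
    boolean isPrepared = false;
    boolean isDone = false;
    try {
      sink.prepareRun();
      isPrepared = true;
      write.run();               // the actual write
      isDone = true;
      sink.onRunFinish(true);    // success callback
    } finally {
      if (isPrepared && !isDone) {
        sink.onRunFinish(false); // the sink prepared but the batch failed
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Sink sink = new Sink() {
      @Override
      public void prepareRun() {
        System.out.println("prepare");
      }

      @Override
      public void onRunFinish(boolean succeeded) {
        System.out.println("finish: " + succeeded);
      }
    };
    writeBatch(sink, () -> System.out.println("write"));
  }
}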
Use of io.cdap.cdap.etl.common.NoopStageStatisticsCollector in project cdap by caskdata.
The class SparkPipelineRunner, method runPipeline.
public void runPipeline(PhaseSpec phaseSpec, String sourcePluginType, JavaSparkExecutionContext sec,
                        Map<String, Integer> stagePartitions, PluginContext pluginContext,
                        Map<String, StageStatisticsCollector> collectors, Set<String> uncombinableSinks,
                        boolean consolidateStages, boolean cacheFunctions) throws Exception {
  PipelinePhase pipelinePhase = phaseSpec.getPhase();
  BasicArguments arguments = new BasicArguments(sec);
  FunctionCache.Factory functionCacheFactory = FunctionCache.Factory.newInstance(cacheFunctions);
  MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(arguments, sec.getLogicalStartTime(),
                                                            sec.getSecureStore(), sec.getServiceDiscoverer(),
                                                            sec.getNamespace());
  Map<String, EmittedRecords> emittedRecords = new HashMap<>();
  // should never happen, but the check removes a nullability warning
  if (pipelinePhase.getDag() == null) {
    throw new IllegalStateException("Pipeline phase has no connections.");
  }
  Set<String> uncombinableStages = new HashSet<>(uncombinableSinks);
  for (String uncombinableType : UNCOMBINABLE_PLUGIN_TYPES) {
    pipelinePhase.getStagesOfType(uncombinableType).stream()
      .map(StageSpec::getName)
      .forEach(uncombinableStages::add);
  }
  CombinerDag groupedDag = new CombinerDag(pipelinePhase.getDag(), uncombinableStages);
  Map<String, Set<String>> groups = consolidateStages ? groupedDag.groupNodes() : Collections.emptyMap();
  if (!groups.isEmpty()) {
    LOG.debug("Stage consolidation is on.");
    int groupNum = 1;
    for (Set<String> group : groups.values()) {
      LOG.debug("Group{}: {}", groupNum, group);
      groupNum++;
    }
  }
  Set<String> branchers = new HashSet<>();
  for (String stageName : groupedDag.getNodes()) {
    if (groupedDag.getNodeOutputs(stageName).size() > 1) {
      branchers.add(stageName);
    }
  }
  Set<String> shufflers = pipelinePhase.getStagesOfType(BatchAggregator.PLUGIN_TYPE).stream()
    .map(StageSpec::getName)
    .collect(Collectors.toSet());
  Collection<Runnable> sinkRunnables = new ArrayList<>();
  for (String stageName : groupedDag.getTopologicalOrder()) {
    if (groups.containsKey(stageName)) {
      sinkRunnables.add(handleGroup(sec, phaseSpec, groups.get(stageName), groupedDag.getNodeInputs(stageName),
                                    emittedRecords, collectors));
      continue;
    }
    StageSpec stageSpec = pipelinePhase.getStage(stageName);
    String pluginType = stageSpec.getPluginType();
    EmittedRecords.Builder emittedBuilder = EmittedRecords.builder();
    // don't want to do an additional filter for stages that can emit errors
    // but aren't connected to an ErrorTransform; similarly, don't want to do
    // an additional filter for alerts when the stage isn't connected to an AlertPublisher
    boolean hasErrorOutput = false;
    boolean hasAlertOutput = false;
    Set<String> outputs = pipelinePhase.getStageOutputs(stageName);
    for (String output : outputs) {
      String outputPluginType = pipelinePhase.getStage(output).getPluginType();
      //noinspection ConstantConditions
      if (ErrorTransform.PLUGIN_TYPE.equals(outputPluginType)) {
        hasErrorOutput = true;
      } else if (AlertPublisher.PLUGIN_TYPE.equals(outputPluginType)) {
        hasAlertOutput = true;
      }
    }
    SparkCollection<Object> stageData = null;
    Map<String, SparkCollection<Object>> inputDataCollections = new HashMap<>();
    Set<String> stageInputs = pipelinePhase.getStageInputs(stageName);
    for (String inputStageName : stageInputs) {
      StageSpec inputStageSpec = pipelinePhase.getStage(inputStageName);
      if (inputStageSpec == null) {
        // means the input to this stage is in a separate phase. For example, it is an action.
        continue;
      }
      String port = null;
      // connector stages don't use output ports; everything else reads from the
      // port that the input stage assigned to this stage
      if (!Constants.Connector.PLUGIN_TYPE.equals(inputStageSpec.getPluginType())
          && !Constants.Connector.PLUGIN_TYPE.equals(pluginType)) {
        port = inputStageSpec.getOutputPorts().get(stageName).getPort();
      }
      SparkCollection<Object> inputRecords = port == null
        ? emittedRecords.get(inputStageName).outputRecords
        : emittedRecords.get(inputStageName).outputPortRecords.get(port);
      inputDataCollections.put(inputStageName, inputRecords);
    }
    // initialize the stageRDD as the union of all input RDDs
    if (!inputDataCollections.isEmpty()) {
      Iterator<SparkCollection<Object>> inputCollectionIter = inputDataCollections.values().iterator();
      stageData = inputCollectionIter.next();
      // don't union input records if we're joining or if we're processing errors
      while (!BatchJoiner.PLUGIN_TYPE.equals(pluginType) && !ErrorTransform.PLUGIN_TYPE.equals(pluginType)
             && inputCollectionIter.hasNext()) {
        stageData = stageData.union(inputCollectionIter.next());
      }
    }
    boolean isConnectorSource =
      Constants.Connector.PLUGIN_TYPE.equals(pluginType) && pipelinePhase.getSources().contains(stageName);
    boolean isConnectorSink =
      Constants.Connector.PLUGIN_TYPE.equals(pluginType) && pipelinePhase.getSinks().contains(stageName);
    StageStatisticsCollector collector =
      collectors.get(stageName) == null ? new NoopStageStatisticsCollector() : collectors.get(stageName);
    PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageSpec, sec, collector);
    if (stageData == null) {
      // this check is nested inside the null check above to avoid warnings about
      // stageData possibly being null in the other else-if conditions
      if (sourcePluginType.equals(pluginType) || isConnectorSource) {
        SparkCollection<RecordInfo<Object>> combinedData = getSource(stageSpec, functionCacheFactory, collector);
        emittedBuilder = addEmitted(emittedBuilder, pipelinePhase, stageSpec, combinedData, groupedDag,
                                    branchers, shufflers, hasErrorOutput, hasAlertOutput);
      } else {
        throw new IllegalStateException(String.format("Stage '%s' has no input and is not a source.", stageName));
      }
    } else if (BatchSink.PLUGIN_TYPE.equals(pluginType) || isConnectorSink) {
      sinkRunnables.add(stageData.createStoreTask(stageSpec,
                                                  new BatchSinkFunction(pluginFunctionContext,
                                                                        functionCacheFactory.newCache())));
    } else if (SparkSink.PLUGIN_TYPE.equals(pluginType)) {
      SparkSink<Object> sparkSink = pluginContext.newPluginInstance(stageName, macroEvaluator);
      sinkRunnables.add(stageData.createStoreTask(stageSpec, sparkSink));
    } else if (AlertPublisher.PLUGIN_TYPE.equals(pluginType)) {
      // union all the alerts coming into this stage
      SparkCollection<Alert> inputAlerts = null;
      for (String inputStage : stageInputs) {
        SparkCollection<Alert> inputAlertsFromStage = emittedRecords.get(inputStage).alertRecords;
        if (inputAlertsFromStage == null) {
          continue;
        }
        if (inputAlerts == null) {
          inputAlerts = inputAlertsFromStage;
        } else {
          inputAlerts = inputAlerts.union(inputAlertsFromStage);
        }
      }
      if (inputAlerts != null) {
        inputAlerts.publishAlerts(stageSpec, collector);
      }
    } else if (ErrorTransform.PLUGIN_TYPE.equals(pluginType)) {
      // union all the errors coming into this stage
      SparkCollection<ErrorRecord<Object>> inputErrors = null;
      for (String inputStage : stageInputs) {
        SparkCollection<ErrorRecord<Object>> inputErrorsFromStage = emittedRecords.get(inputStage).errorRecords;
        if (inputErrorsFromStage == null) {
          continue;
        }
        if (inputErrors == null) {
          inputErrors = inputErrorsFromStage;
        } else {
          inputErrors = inputErrors.union(inputErrorsFromStage);
        }
      }
      if (inputErrors != null) {
        SparkCollection<RecordInfo<Object>> combinedData =
          inputErrors.flatMap(stageSpec, new ErrorTransformFunction<Object, Object>(pluginFunctionContext,
                                                                                    functionCacheFactory.newCache()));
        emittedBuilder = addEmitted(emittedBuilder, pipelinePhase, stageSpec, combinedData, groupedDag,
                                    branchers, shufflers, hasErrorOutput, hasAlertOutput);
      }
    } else {
      Object plugin = pluginContext.newPluginInstance(stageName, macroEvaluator);
      Optional<EmittedRecords.Builder> declarativeBuilder =
        tryRelationalTransform(pipelinePhase, groupedDag, branchers, shufflers, stageName, stageSpec,
                               emittedBuilder, hasErrorOutput, hasAlertOutput, stageData, inputDataCollections,
                               plugin);
      if (declarativeBuilder.isPresent()) {
        emittedBuilder = declarativeBuilder.get();
      } else {
        emittedBuilder = transform(emittedBuilder, stagePartitions, pipelinePhase, functionCacheFactory,
                                   groupedDag, branchers, shufflers, stageName, stageSpec, pluginType,
                                   hasErrorOutput, hasAlertOutput, stageData, inputDataCollections, collector,
                                   pluginFunctionContext, plugin);
      }
    }
    emittedRecords.put(stageName, emittedBuilder.build());
  }
  boolean shouldWriteInParallel =
    Boolean.parseBoolean(sec.getRuntimeArguments().get("pipeline.spark.parallel.sinks.enabled"));
  if (!shouldWriteInParallel) {
    for (Runnable runnable : sinkRunnables) {
      runnable.run();
    }
    return;
  }
  Collection<Future> sinkFutures = new ArrayList<>(sinkRunnables.size());
  ExecutorService executorService = Executors.newFixedThreadPool(
    sinkRunnables.size(), new ThreadFactoryBuilder().setNameFormat("pipeline-sink-task").build());
  for (Runnable runnable : sinkRunnables) {
    sinkFutures.add(executorService.submit(runnable));
  }
  Throwable error = null;
  for (Future future : sinkFutures) {
    try {
      future.get();
    } catch (ExecutionException e) {
      error = e.getCause();
      break;
    } catch (InterruptedException e) {
      break;
    }
  }
  executorService.shutdownNow();
  if (error != null) {
    throw Throwables.propagate(error);
  }
}
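The tail of runPipeline either runs the accumulated sink tasks serially or fans them out on a fixed-size thread pool, surfacing the first failure and shutting the pool down either way. A self-contained sketch of that fan-out-and-propagate pattern follows; it is generic, not the CDAP code.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

// Generic fan-out-and-propagate sketch: submit every sink task, wait on each,
// surface the first failure, and always tear the pool down.
public class ParallelSinksDemo {
  static void runAll(Collection<Runnable> sinkRunnables) throws Exception {
    if (sinkRunnables.isEmpty()) {
      return;
    }
    ExecutorService pool = Executors.newFixedThreadPool(sinkRunnables.size());
    List<Future<?>> futures = new ArrayList<>();
    for (Runnable runnable : sinkRunnables) {
      futures.add(pool.submit(runnable));
    }
    try {
      for (Future<?> future : futures) {
        future.get();  // an ExecutionException here wraps the sink's own failure
      }
    } catch (ExecutionException e) {
      throw new RuntimeException(e.getCause());  // propagate the first failure
    } finally {
      pool.shutdownNow();  // cancels remaining work on failure; harmless otherwise
    }
  }
}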