Use of org.apache.beam.runners.core.metrics.MetricsPusher in project beam by apache.
In class FlinkRunner, method createPipelineResult:
static PipelineResult createPipelineResult(JobExecutionResult result, PipelineOptions options) {
  String resultClassName = result.getClass().getCanonicalName();
  if (resultClassName.equals("org.apache.flink.core.execution.DetachedJobExecutionResult")) {
    LOG.info("Pipeline submitted in Detached mode");
    // no metricsPusher because metrics are not supported in detached mode
    return new FlinkDetachedRunnerResult();
  } else {
    LOG.info("Execution finished in {} msecs", result.getNetRuntime());
    Map<String, Object> accumulators = result.getAllAccumulatorResults();
    if (accumulators != null && !accumulators.isEmpty()) {
      LOG.info("Final accumulator values:");
      for (Map.Entry<String, Object> entry : result.getAllAccumulatorResults().entrySet()) {
        LOG.info("{} : {}", entry.getKey(), entry.getValue());
      }
    }
    FlinkRunnerResult flinkRunnerResult =
        new FlinkRunnerResult(accumulators, result.getNetRuntime());
    MetricsPusher metricsPusher =
        new MetricsPusher(
            flinkRunnerResult.getMetricsContainerStepMap(),
            options.as(MetricsOptions.class),
            flinkRunnerResult);
    metricsPusher.start();
    return flinkRunnerResult;
  }
}
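The MetricsPusher started above only has somewhere to push if the pipeline options carry a metrics sink. Below is a minimal sketch of such a configuration; the MetricsHttpSink class (from Beam's runners-extensions metrics module), its package, the endpoint URL, and the push period value are illustrative assumptions rather than values taken from the code above.

import org.apache.beam.runners.extensions.metrics.MetricsHttpSink; // assumed package/module
import org.apache.beam.sdk.metrics.MetricsOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;

public class MetricsPushConfiguration {
  public static MetricsOptions buildOptions(String[] args) {
    MetricsOptions options =
        PipelineOptionsFactory.fromArgs(args).withValidation().as(MetricsOptions.class);
    // Sink class that the MetricsPusher will hand metric results to on each push.
    options.setMetricsSink(MetricsHttpSink.class);
    // Example endpoint; adjust for your environment (assumption, not from the code above).
    options.setMetricsHttpSinkUrl("http://localhost:8080/beam-metrics");
    // Period between two pushes, in seconds (assumed unit; check your Beam version).
    options.setMetricsPushPeriod(5L);
    return options;
  }
}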
Use of org.apache.beam.runners.core.metrics.MetricsPusher in project beam by apache.
In class SparkPipelineRunner, method run:
@Override
public PortablePipelineResult run(RunnerApi.Pipeline pipeline, JobInfo jobInfo) {
  SparkPortablePipelineTranslator translator;
  boolean isStreaming = pipelineOptions.isStreaming() || hasUnboundedPCollections(pipeline);
  if (isStreaming) {
    translator = new SparkStreamingPortablePipelineTranslator();
  } else {
    translator = new SparkBatchPortablePipelineTranslator();
  }
  // Expand any splittable DoFns within the graph to enable sizing and splitting of bundles.
  Pipeline pipelineWithSdfExpanded =
      ProtoOverrides.updateTransform(
          PTransformTranslation.PAR_DO_TRANSFORM_URN,
          pipeline,
          SplittableParDoExpander.createSizedReplacement());
  // Don't let the fuser fuse any subcomponents of native transforms.
  Pipeline trimmedPipeline =
      TrivialNativeTransformExpander.forKnownUrns(pipelineWithSdfExpanded, translator.knownUrns());
  // Fused pipeline proto.
  // TODO: Consider supporting partially-fused graphs.
  RunnerApi.Pipeline fusedPipeline =
      trimmedPipeline.getComponents().getTransformsMap().values().stream()
              .anyMatch(proto -> ExecutableStage.URN.equals(proto.getSpec().getUrn()))
          ? trimmedPipeline
          : GreedyPipelineFuser.fuse(trimmedPipeline).toPipeline();
  prepareFilesToStage(pipelineOptions);
  PortablePipelineResult result;
  final JavaSparkContext jsc = SparkContextFactory.getSparkContext(pipelineOptions);
  final long startTime = Instant.now().getMillis();
  EventLoggingListener eventLoggingListener =
      startEventLoggingListener(jsc, pipelineOptions, startTime);
  // Initialize accumulators.
  AggregatorsAccumulator.init(pipelineOptions, jsc);
  MetricsEnvironment.setMetricsSupported(true);
  MetricsAccumulator.init(pipelineOptions, jsc);
  final SparkTranslationContext context =
      translator.createTranslationContext(jsc, pipelineOptions, jobInfo);
  final ExecutorService executorService = Executors.newSingleThreadExecutor();
  LOG.info(String.format("Running job %s on Spark master %s", jobInfo.jobId(), jsc.master()));
  if (isStreaming) {
    final JavaStreamingContext jssc =
        ((SparkStreamingTranslationContext) context).getStreamingContext();
    jssc.addStreamingListener(
        new JavaStreamingListenerWrapper(
            new AggregatorsAccumulator.AccumulatorCheckpointingSparkListener()));
    jssc.addStreamingListener(
        new JavaStreamingListenerWrapper(
            new MetricsAccumulator.AccumulatorCheckpointingSparkListener()));
    // Register user-defined listeners.
    for (JavaStreamingListener listener :
        pipelineOptions.as(SparkContextOptions.class).getListeners()) {
      LOG.info("Registered listener {}.", listener.getClass().getSimpleName());
      jssc.addStreamingListener(new JavaStreamingListenerWrapper(listener));
    }
    // Register Watermarks listener to broadcast the advanced WMs.
    jssc.addStreamingListener(
        new JavaStreamingListenerWrapper(
            new GlobalWatermarkHolder.WatermarkAdvancingStreamingListener()));
    jssc.checkpoint(pipelineOptions.getCheckpointDir());
    // Obtain timeout from options.
    Long timeout =
        pipelineOptions.as(SparkPortableStreamingPipelineOptions.class).getStreamingTimeoutMs();
    final Future<?> submissionFuture =
        executorService.submit(
            () -> {
              translator.translate(fusedPipeline, context);
              LOG.info(
                  String.format(
                      "Job %s: Pipeline translated successfully. Computing outputs",
                      jobInfo.jobId()));
              context.computeOutputs();
              jssc.start();
              try {
                jssc.awaitTerminationOrTimeout(timeout);
              } catch (InterruptedException e) {
                LOG.warn("Streaming context interrupted, shutting down.", e);
              }
              jssc.stop();
              LOG.info(String.format("Job %s finished.", jobInfo.jobId()));
            });
    result = new SparkPipelineResult.PortableStreamingMode(submissionFuture, jssc);
  } else {
    final Future<?> submissionFuture =
        executorService.submit(
            () -> {
              translator.translate(fusedPipeline, context);
              LOG.info(
                  String.format(
                      "Job %s: Pipeline translated successfully. Computing outputs",
                      jobInfo.jobId()));
              context.computeOutputs();
              LOG.info(String.format("Job %s finished.", jobInfo.jobId()));
            });
    result = new SparkPipelineResult.PortableBatchMode(submissionFuture, jsc);
  }
  executorService.shutdown();
  result.waitUntilFinish();
  MetricsPusher metricsPusher =
      new MetricsPusher(
          MetricsAccumulator.getInstance().value(),
          pipelineOptions.as(MetricsOptions.class),
          result);
  metricsPusher.start();
  if (eventLoggingListener != null) {
    eventLoggingListener.onApplicationStart(
        SparkCompat.buildSparkListenerApplicationStart(jsc, pipelineOptions, startTime, result));
    eventLoggingListener.onApplicationEnd(
        new SparkListenerApplicationEnd(Instant.now().getMillis()));
    eventLoggingListener.stop();
  }
  return result;
}
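After waitUntilFinish returns, the same metrics that the MetricsPusher pushes periodically can also be queried directly from the returned PipelineResult. A minimal, illustrative sketch follows; the namespace filter and the printed fields are placeholders, not values from the code above.

import org.apache.beam.sdk.PipelineResult;
import org.apache.beam.sdk.metrics.MetricNameFilter;
import org.apache.beam.sdk.metrics.MetricQueryResults;
import org.apache.beam.sdk.metrics.MetricResult;
import org.apache.beam.sdk.metrics.MetricsFilter;

public class MetricsQueries {
  // Logs all counters in a (placeholder) namespace from a finished pipeline.
  static void logCounters(PipelineResult result) {
    MetricQueryResults metrics =
        result.metrics().queryMetrics(
            MetricsFilter.builder()
                .addNameFilter(MetricNameFilter.inNamespace("my.namespace")) // placeholder
                .build());
    for (MetricResult<Long> counter : metrics.getCounters()) {
      // getAttempted() is used because not every runner supports committed metrics.
      System.out.println(counter.getName() + " attempted=" + counter.getAttempted());
    }
  }
}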
Use of org.apache.beam.runners.core.metrics.MetricsPusher in project beam by apache.
In class FlinkPipelineRunner, method createPortablePipelineResult:
private PortablePipelineResult createPortablePipelineResult(JobExecutionResult result, PipelineOptions options) {
  String resultClassName = result.getClass().getCanonicalName();
  if (resultClassName.equals("org.apache.flink.core.execution.DetachedJobExecutionResult")) {
    LOG.info("Pipeline submitted in Detached mode");
    // no metricsPusher because metrics are not supported in detached mode
    return new FlinkPortableRunnerResult.Detached();
  } else {
    LOG.info("Execution finished in {} msecs", result.getNetRuntime());
    Map<String, Object> accumulators = result.getAllAccumulatorResults();
    if (accumulators != null && !accumulators.isEmpty()) {
      LOG.info("Final accumulator values:");
      for (Map.Entry<String, Object> entry : result.getAllAccumulatorResults().entrySet()) {
        LOG.info("{} : {}", entry.getKey(), entry.getValue());
      }
    }
    FlinkPortableRunnerResult flinkRunnerResult =
        new FlinkPortableRunnerResult(accumulators, result.getNetRuntime());
    MetricsPusher metricsPusher =
        new MetricsPusher(
            flinkRunnerResult.getMetricsContainerStepMap(),
            options.as(MetricsOptions.class),
            flinkRunnerResult);
    metricsPusher.start();
    return flinkRunnerResult;
  }
}
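On the receiving side, the MetricsPusher writes to a user-supplied implementation of the MetricsSink interface registered through MetricsOptions. The sketch below is a hypothetical sink that just prints counters; it assumes the interface exposes a single writeMetrics(MetricQueryResults) method and that sinks may expose a constructor taking MetricsOptions (as the built-in sinks do). Check both assumptions against the Beam version in use.

import org.apache.beam.sdk.metrics.MetricQueryResults;
import org.apache.beam.sdk.metrics.MetricResult;
import org.apache.beam.sdk.metrics.MetricsOptions;
import org.apache.beam.sdk.metrics.MetricsSink;

// Hypothetical sink: prints every counter it receives on each push.
public class LoggingMetricsSink implements MetricsSink {

  public LoggingMetricsSink() {}

  // The built-in sinks take the pipeline's MetricsOptions; assumed, not verified here.
  public LoggingMetricsSink(MetricsOptions options) {}

  @Override
  public void writeMetrics(MetricQueryResults metricQueryResults) {
    for (MetricResult<Long> counter : metricQueryResults.getCounters()) {
      System.out.println(counter.getName() + " = " + counter.getAttempted());
    }
  }
}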
Use of org.apache.beam.runners.core.metrics.MetricsPusher in project beam by apache.
In class SparkRunner, method run:
@Override
public SparkPipelineResult run(final Pipeline pipeline) {
  LOG.info("Executing pipeline using the SparkRunner.");
  final SparkPipelineResult result;
  final Future<?> startPipeline;
  final SparkPipelineTranslator translator;
  final ExecutorService executorService = Executors.newSingleThreadExecutor();
  MetricsEnvironment.setMetricsSupported(true);
  // Visit the pipeline to determine the translation mode.
  detectTranslationMode(pipeline);
  // TODO(BEAM-10670): Use SDF read as default when we address performance issue.
  if (!ExperimentalOptions.hasExperiment(pipeline.getOptions(), "beam_fn_api")) {
    SplittableParDo.convertReadBasedSplittableDoFnsToPrimitiveReadsIfNecessary(pipeline);
  }
  pipeline.replaceAll(SparkTransformOverrides.getDefaultOverrides(pipelineOptions.isStreaming()));
  prepareFilesToStage(pipelineOptions);
  final long startTime = Instant.now().getMillis();
  EventLoggingListener eventLoggingListener = null;
  JavaSparkContext jsc = null;
  if (pipelineOptions.isStreaming()) {
    CheckpointDir checkpointDir = new CheckpointDir(pipelineOptions.getCheckpointDir());
    SparkRunnerStreamingContextFactory streamingContextFactory =
        new SparkRunnerStreamingContextFactory(pipeline, pipelineOptions, checkpointDir);
    final JavaStreamingContext jssc =
        JavaStreamingContext.getOrCreate(
            checkpointDir.getSparkCheckpointDir().toString(), streamingContextFactory);
    jsc = jssc.sparkContext();
    eventLoggingListener = startEventLoggingListener(jsc, pipelineOptions, startTime);
    // Checkpoint aggregator/metrics values.
    jssc.addStreamingListener(
        new JavaStreamingListenerWrapper(
            new AggregatorsAccumulator.AccumulatorCheckpointingSparkListener()));
    jssc.addStreamingListener(
        new JavaStreamingListenerWrapper(
            new MetricsAccumulator.AccumulatorCheckpointingSparkListener()));
    // Register user-defined listeners.
    for (JavaStreamingListener listener :
        pipelineOptions.as(SparkContextOptions.class).getListeners()) {
      LOG.info("Registered listener {}.", listener.getClass().getSimpleName());
      jssc.addStreamingListener(new JavaStreamingListenerWrapper(listener));
    }
    // Register Watermarks listener to broadcast the advanced WMs.
    jssc.addStreamingListener(
        new JavaStreamingListenerWrapper(new WatermarkAdvancingStreamingListener()));
    // initAccumulators is called here even though it is also called in
    // SparkRunnerStreamingContextFactory, because the factory is not invoked when resuming
    // from a checkpoint. When not resuming from a checkpoint, initAccumulators runs twice,
    // which is fine since it is idempotent.
    initAccumulators(pipelineOptions, jssc.sparkContext());
    startPipeline =
        executorService.submit(
            () -> {
              LOG.info("Starting streaming pipeline execution.");
              jssc.start();
            });
    executorService.shutdown();
    result = new SparkPipelineResult.StreamingMode(startPipeline, jssc);
  } else {
    jsc = SparkContextFactory.getSparkContext(pipelineOptions);
    eventLoggingListener = startEventLoggingListener(jsc, pipelineOptions, startTime);
    final EvaluationContext evaluationContext =
        new EvaluationContext(jsc, pipeline, pipelineOptions);
    translator = new TransformTranslator.Translator();
    // Update the cache candidates.
    updateCacheCandidates(pipeline, translator, evaluationContext);
    initAccumulators(pipelineOptions, jsc);
    startPipeline =
        executorService.submit(
            () -> {
              pipeline.traverseTopologically(new Evaluator(translator, evaluationContext));
              evaluationContext.computeOutputs();
              LOG.info("Batch pipeline execution complete.");
            });
    executorService.shutdown();
    result = new SparkPipelineResult.BatchMode(startPipeline, jsc);
  }
  if (pipelineOptions.getEnableSparkMetricSinks()) {
    registerMetricsSource(pipelineOptions.getAppName());
  }
  // It would have been better to create the MetricsPusher from runner-core, but we need the
  // runner-specific MetricsContainerStepMap.
  MetricsPusher metricsPusher =
      new MetricsPusher(
          MetricsAccumulator.getInstance().value(),
          pipelineOptions.as(MetricsOptions.class),
          result);
  metricsPusher.start();
  if (eventLoggingListener != null && jsc != null) {
    eventLoggingListener.onApplicationStart(
        SparkCompat.buildSparkListenerApplicationStart(jsc, pipelineOptions, startTime, result));
    eventLoggingListener.onApplicationEnd(
        new SparkListenerApplicationEnd(Instant.now().getMillis()));
    eventLoggingListener.stop();
  }
  return result;
}
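The MetricsContainerStepMap handed to the MetricsPusher here (via MetricsAccumulator) is populated by user metrics declared in pipeline code. A minimal, hypothetical DoFn that increments a counter shows the kind of metric that ends up being pushed; the class, namespace, and counter name are illustrative.

import org.apache.beam.sdk.metrics.Counter;
import org.apache.beam.sdk.metrics.Metrics;
import org.apache.beam.sdk.transforms.DoFn;

// Hypothetical transform: counts every element it sees; the resulting counter is what
// MetricsAccumulator collects and MetricsPusher periodically pushes to the configured sink.
public class CountingFn extends DoFn<String, String> {
  private final Counter elements = Metrics.counter(CountingFn.class, "elements");

  @ProcessElement
  public void processElement(@Element String element, OutputReceiver<String> out) {
    elements.inc();
    out.output(element);
  }
}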
Use of org.apache.beam.runners.core.metrics.MetricsPusher in project beam by apache.
In class SparkStructuredStreamingRunner, method run:
@Override
public SparkStructuredStreamingPipelineResult run(final Pipeline pipeline) {
  MetricsEnvironment.setMetricsSupported(true);
  LOG.info(
      "*** SparkStructuredStreamingRunner is based on spark structured streaming framework and is no more \n"
          + " based on RDD/DStream API. See\n"
          + " https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html\n"
          + " It is still experimental, its coverage of the Beam model is partial. ***");
  // clear state of Aggregators, Metrics and Watermarks if exists.
  AggregatorsAccumulator.clear();
  MetricsAccumulator.clear();
  final AbstractTranslationContext translationContext = translatePipeline(pipeline);
  final ExecutorService executorService = Executors.newSingleThreadExecutor();
  final Future<?> submissionFuture =
      executorService.submit(
          () -> {
            // TODO initialise other services: checkpointing, metrics system, listeners, ...
            translationContext.startPipeline();
          });
  executorService.shutdown();
  // TODO: Streaming.
  SparkStructuredStreamingPipelineResult result =
      new SparkStructuredStreamingPipelineResult(
          submissionFuture, translationContext.getSparkSession());
  if (options.getEnableSparkMetricSinks()) {
    registerMetricsSource(options.getAppName());
  }
  MetricsPusher metricsPusher =
      new MetricsPusher(
          MetricsAccumulator.getInstance().value(), options.as(MetricsOptions.class), result);
  metricsPusher.start();
  if (options.getTestMode()) {
    result.waitUntilFinish();
    result.stop();
  }
  return result;
}
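A minimal launch sketch for this runner is shown below. The package names and the setters setTestMode and setEnableSparkMetricSinks are inferred from the getters used in run() above and may differ by Beam version; the pipeline body itself is omitted.

import org.apache.beam.runners.spark.structuredstreaming.SparkStructuredStreamingPipelineOptions;
import org.apache.beam.runners.spark.structuredstreaming.SparkStructuredStreamingRunner;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.options.PipelineOptionsFactory;

public class StructuredStreamingLauncher {
  public static void main(String[] args) {
    SparkStructuredStreamingPipelineOptions options =
        PipelineOptionsFactory.fromArgs(args)
            .withValidation()
            .as(SparkStructuredStreamingPipelineOptions.class);
    options.setRunner(SparkStructuredStreamingRunner.class);
    // Setters inferred from the getters used in run(); check your Beam version.
    options.setTestMode(true); // makes run() block on waitUntilFinish, per the code above
    options.setEnableSparkMetricSinks(true);

    Pipeline pipeline = Pipeline.create(options);
    // ... build the pipeline here ...
    pipeline.run();
  }
}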