
Example 1 with EventLoggingListener

Use of org.apache.spark.scheduler.EventLoggingListener in project beam by apache.

The class SparkCommon, method startEventLoggingListener.

/**
 * Starts an EventLoggingListener to save Beam Metrics on Spark's History Server if event logging
 * is enabled.
 *
 * @return The associated EventLoggingListener, or null if event logging is disabled.
 */
@Nullable
public static EventLoggingListener startEventLoggingListener(final JavaSparkContext jsc, SparkPipelineOptions pipelineOptions, long startTime) {
    EventLoggingListener eventLoggingListener = null;
    try {
        if (jsc.getConf().getBoolean("spark.eventLog.enabled", false)) {
            eventLoggingListener =
                new EventLoggingListener(
                    jsc.getConf().getAppId(),
                    scala.Option.apply("1"),
                    new URI(jsc.getConf().get("spark.eventLog.dir", null)),
                    jsc.getConf(),
                    jsc.hadoopConfiguration());
            eventLoggingListener.initializeLogIfNecessary(false, false);
            eventLoggingListener.start();
            scala.collection.immutable.Map<String, String> logUrlMap =
                new scala.collection.immutable.HashMap<>();
            // getAllWithPrefix strips the prefix, so the "spark.master" entry itself returns ("", masterUrl).
            Tuple2<String, String>[] sparkMasters = jsc.getConf().getAllWithPrefix("spark.master");
            Tuple2<String, String>[] sparkExecutors = jsc.getConf().getAllWithPrefix("spark.executor.id");
            // Replay an ExecutorAdded event per executor so the History Server can attribute metrics.
            for (Tuple2<String, String> sparkExecutor : sparkExecutors) {
                eventLoggingListener.onExecutorAdded(
                    new SparkListenerExecutorAdded(
                        startTime, sparkExecutor._2(), new ExecutorInfo(sparkMasters[0]._2(), 0, logUrlMap)));
            }
            return eventLoggingListener;
        }
    } catch (URISyntaxException e) {
        throw new RuntimeException("The URI syntax in the Spark config \"spark.eventLog.dir\" is not correct", e);
    }
    return eventLoggingListener;
}
Also used : URISyntaxException(java.net.URISyntaxException) URI(java.net.URI) ExecutorInfo(org.apache.spark.scheduler.cluster.ExecutorInfo) Tuple2(scala.Tuple2) SparkListenerExecutorAdded(org.apache.spark.scheduler.SparkListenerExecutorAdded) EventLoggingListener(org.apache.spark.scheduler.EventLoggingListener) Nullable(org.checkerframework.checker.nullness.qual.Nullable)
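
A minimal sketch of how a driver might invoke this helper; the config values, class name, and cleanup here are illustrative assumptions, not part of the Beam source. Event logging must be enabled in the SparkConf, or the helper returns null per the guard above.

import static org.apache.beam.runners.spark.util.SparkCommon.startEventLoggingListener;

import org.apache.beam.runners.spark.SparkPipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.scheduler.EventLoggingListener;
import org.joda.time.Instant;

public class EventLoggingSketch {
    public static void main(String[] args) {
        // Hypothetical local config; spark.eventLog.dir must already exist.
        SparkConf conf = new SparkConf()
            .setMaster("local[2]")
            .setAppName("event-logging-sketch")
            .set("spark.eventLog.enabled", "true")
            .set("spark.eventLog.dir", "/tmp/spark-events");
        JavaSparkContext jsc = new JavaSparkContext(conf);
        SparkPipelineOptions options = PipelineOptionsFactory.as(SparkPipelineOptions.class);
        long startTime = Instant.now().getMillis();

        // Returns null when spark.eventLog.enabled is false.
        EventLoggingListener listener = startEventLoggingListener(jsc, options, startTime);
        try {
            // ... run the pipeline ...
        } finally {
            if (listener != null) {
                listener.stop(); // flush and close the event log file
            }
            jsc.stop();
        }
    }
}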

Example 2 with EventLoggingListener

Use of org.apache.spark.scheduler.EventLoggingListener in project beam by apache.

The class SparkPipelineRunner, method run.

@Override
public PortablePipelineResult run(RunnerApi.Pipeline pipeline, JobInfo jobInfo) {
    SparkPortablePipelineTranslator translator;
    boolean isStreaming = pipelineOptions.isStreaming() || hasUnboundedPCollections(pipeline);
    if (isStreaming) {
        translator = new SparkStreamingPortablePipelineTranslator();
    } else {
        translator = new SparkBatchPortablePipelineTranslator();
    }
    // Expand any splittable DoFns within the graph to enable sizing and splitting of bundles.
    Pipeline pipelineWithSdfExpanded = ProtoOverrides.updateTransform(PTransformTranslation.PAR_DO_TRANSFORM_URN, pipeline, SplittableParDoExpander.createSizedReplacement());
    // Don't let the fuser fuse any subcomponents of native transforms.
    Pipeline trimmedPipeline = TrivialNativeTransformExpander.forKnownUrns(pipelineWithSdfExpanded, translator.knownUrns());
    // Fused pipeline proto.
    // TODO: Consider supporting partially-fused graphs.
    RunnerApi.Pipeline fusedPipeline =
        trimmedPipeline.getComponents().getTransformsMap().values().stream()
                .anyMatch(proto -> ExecutableStage.URN.equals(proto.getSpec().getUrn()))
            ? trimmedPipeline
            : GreedyPipelineFuser.fuse(trimmedPipeline).toPipeline();
    prepareFilesToStage(pipelineOptions);
    PortablePipelineResult result;
    final JavaSparkContext jsc = SparkContextFactory.getSparkContext(pipelineOptions);
    final long startTime = Instant.now().getMillis();
    EventLoggingListener eventLoggingListener = startEventLoggingListener(jsc, pipelineOptions, startTime);
    // Initialize accumulators.
    AggregatorsAccumulator.init(pipelineOptions, jsc);
    MetricsEnvironment.setMetricsSupported(true);
    MetricsAccumulator.init(pipelineOptions, jsc);
    final SparkTranslationContext context = translator.createTranslationContext(jsc, pipelineOptions, jobInfo);
    final ExecutorService executorService = Executors.newSingleThreadExecutor();
    LOG.info(String.format("Running job %s on Spark master %s", jobInfo.jobId(), jsc.master()));
    if (isStreaming) {
        final JavaStreamingContext jssc = ((SparkStreamingTranslationContext) context).getStreamingContext();
        jssc.addStreamingListener(new JavaStreamingListenerWrapper(new AggregatorsAccumulator.AccumulatorCheckpointingSparkListener()));
        jssc.addStreamingListener(new JavaStreamingListenerWrapper(new MetricsAccumulator.AccumulatorCheckpointingSparkListener()));
        // Register user-defined listeners.
        for (JavaStreamingListener listener : pipelineOptions.as(SparkContextOptions.class).getListeners()) {
            LOG.info("Registered listener {}." + listener.getClass().getSimpleName());
            jssc.addStreamingListener(new JavaStreamingListenerWrapper(listener));
        }
        // Register Watermarks listener to broadcast the advanced WMs.
        jssc.addStreamingListener(new JavaStreamingListenerWrapper(new GlobalWatermarkHolder.WatermarkAdvancingStreamingListener()));
        jssc.checkpoint(pipelineOptions.getCheckpointDir());
        // Obtain timeout from options.
        Long timeout = pipelineOptions.as(SparkPortableStreamingPipelineOptions.class).getStreamingTimeoutMs();
        final Future<?> submissionFuture = executorService.submit(() -> {
            translator.translate(fusedPipeline, context);
            LOG.info(String.format("Job %s: Pipeline translated successfully. Computing outputs", jobInfo.jobId()));
            context.computeOutputs();
            jssc.start();
            try {
                jssc.awaitTerminationOrTimeout(timeout);
            } catch (InterruptedException e) {
                LOG.warn("Streaming context interrupted, shutting down.", e);
            }
            jssc.stop();
            LOG.info(String.format("Job %s finished.", jobInfo.jobId()));
        });
        result = new SparkPipelineResult.PortableStreamingMode(submissionFuture, jssc);
    } else {
        final Future<?> submissionFuture = executorService.submit(() -> {
            translator.translate(fusedPipeline, context);
            LOG.info(String.format("Job %s: Pipeline translated successfully. Computing outputs", jobInfo.jobId()));
            context.computeOutputs();
            LOG.info(String.format("Job %s finished.", jobInfo.jobId()));
        });
        result = new SparkPipelineResult.PortableBatchMode(submissionFuture, jsc);
    }
    executorService.shutdown();
    result.waitUntilFinish();
    MetricsPusher metricsPusher = new MetricsPusher(MetricsAccumulator.getInstance().value(), pipelineOptions.as(MetricsOptions.class), result);
    metricsPusher.start();
    if (eventLoggingListener != null) {
        eventLoggingListener.onApplicationStart(SparkCompat.buildSparkListenerApplicationStart(jsc, pipelineOptions, startTime, result));
        eventLoggingListener.onApplicationEnd(new SparkListenerApplicationEnd(Instant.now().getMillis()));
        eventLoggingListener.stop();
    }
    return result;
}
Also used : MetricsAccumulator(org.apache.beam.runners.spark.metrics.MetricsAccumulator) ArtifactApi(org.apache.beam.model.jobmanagement.v1.ArtifactApi) LoggerFactory(org.slf4j.LoggerFactory) GreedyPipelineFuser(org.apache.beam.runners.core.construction.graph.GreedyPipelineFuser) PortablePipelineRunner(org.apache.beam.runners.jobsubmission.PortablePipelineRunner) SparkCompat(org.apache.beam.runners.spark.util.SparkCompat) Future(java.util.concurrent.Future) JobInfo(org.apache.beam.runners.fnexecution.provisioning.JobInfo) SparkListenerApplicationEnd(org.apache.spark.scheduler.SparkListenerApplicationEnd) SparkStreamingPortablePipelineTranslator(org.apache.beam.runners.spark.translation.SparkStreamingPortablePipelineTranslator) CmdLineParser(org.kohsuke.args4j.CmdLineParser) PTransformTranslation(org.apache.beam.runners.core.construction.PTransformTranslation) SparkPortablePipelineTranslator(org.apache.beam.runners.spark.translation.SparkPortablePipelineTranslator) Struct(org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.Struct) UUID(java.util.UUID) TrivialNativeTransformExpander(org.apache.beam.runners.core.construction.graph.TrivialNativeTransformExpander) Option(org.kohsuke.args4j.Option) ExecutableStage(org.apache.beam.runners.core.construction.graph.ExecutableStage) Executors(java.util.concurrent.Executors) MetricsPusher(org.apache.beam.runners.core.metrics.MetricsPusher) CmdLineException(org.kohsuke.args4j.CmdLineException) ProtoOverrides(org.apache.beam.runners.core.construction.graph.ProtoOverrides) AggregatorsAccumulator(org.apache.beam.runners.spark.aggregators.AggregatorsAccumulator) JavaStreamingContext(org.apache.spark.streaming.api.java.JavaStreamingContext) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) PipelineOptionsTranslation(org.apache.beam.runners.core.construction.PipelineOptionsTranslation) PipelineOptionsFactory(org.apache.beam.sdk.options.PipelineOptionsFactory) SparkCommon.startEventLoggingListener(org.apache.beam.runners.spark.util.SparkCommon.startEventLoggingListener) SparkBatchPortablePipelineTranslator(org.apache.beam.runners.spark.translation.SparkBatchPortablePipelineTranslator) PortablePipelineResult(org.apache.beam.runners.jobsubmission.PortablePipelineResult) SparkTranslationContext(org.apache.beam.runners.spark.translation.SparkTranslationContext) PipelineTranslatorUtils.hasUnboundedPCollections(org.apache.beam.runners.fnexecution.translation.PipelineTranslatorUtils.hasUnboundedPCollections) GlobalWatermarkHolder(org.apache.beam.runners.spark.util.GlobalWatermarkHolder) JavaStreamingListenerWrapper(org.apache.spark.streaming.api.java.JavaStreamingListenerWrapper) ExecutorService(java.util.concurrent.ExecutorService) RunnerApi(org.apache.beam.model.pipeline.v1.RunnerApi) JavaStreamingListener(org.apache.spark.streaming.api.java.JavaStreamingListener) Logger(org.slf4j.Logger) PortablePipelineJarUtils(org.apache.beam.runners.jobsubmission.PortablePipelineJarUtils) SparkStreamingTranslationContext(org.apache.beam.runners.spark.translation.SparkStreamingTranslationContext) SparkContextFactory(org.apache.beam.runners.spark.translation.SparkContextFactory) SplittableParDoExpander(org.apache.beam.runners.core.construction.graph.SplittableParDoExpander) MetricsEnvironment(org.apache.beam.sdk.metrics.MetricsEnvironment) MetricsOptions(org.apache.beam.sdk.metrics.MetricsOptions) Pipeline(org.apache.beam.model.pipeline.v1.RunnerApi.Pipeline) SparkCommonPipelineOptions.prepareFilesToStage(org.apache.beam.runners.spark.SparkCommonPipelineOptions.prepareFilesToStage) Preconditions(org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions) Instant(org.joda.time.Instant) Nullable(edu.umd.cs.findbugs.annotations.Nullable) EventLoggingListener(org.apache.spark.scheduler.EventLoggingListener) FileSystems(org.apache.beam.sdk.io.FileSystems)
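
Both the batch and streaming branches above share one pattern: the blocking translate/compute work is submitted to a single-thread executor, the executor is shut down immediately, and the returned result wraps the future. A stripped-down sketch of that pattern, with the pipeline work replaced by a placeholder:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmissionSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executorService = Executors.newSingleThreadExecutor();

        // Submit the blocking work so run() can hand back a result object immediately.
        Future<?> submissionFuture = executorService.submit(() -> {
            // In the runner: translator.translate(fusedPipeline, context);
            //                context.computeOutputs();
            System.out.println("pipeline work runs here");
        });

        // shutdown() stops accepting new tasks but lets the submitted one finish,
        // matching the executorService.shutdown() call before waitUntilFinish().
        executorService.shutdown();

        // waitUntilFinish() on the result ultimately blocks on this future.
        submissionFuture.get();
    }
}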

Example 3 with EventLoggingListener

Use of org.apache.spark.scheduler.EventLoggingListener in project beam by apache.

The class SparkRunner, method run.

@Override
public SparkPipelineResult run(final Pipeline pipeline) {
    LOG.info("Executing pipeline using the SparkRunner.");
    final SparkPipelineResult result;
    final Future<?> startPipeline;
    final SparkPipelineTranslator translator;
    final ExecutorService executorService = Executors.newSingleThreadExecutor();
    MetricsEnvironment.setMetricsSupported(true);
    // visit the pipeline to determine the translation mode
    detectTranslationMode(pipeline);
    // TODO(BEAM-10670): Use SDF read as default when we address performance issue.
    if (!ExperimentalOptions.hasExperiment(pipeline.getOptions(), "beam_fn_api")) {
        SplittableParDo.convertReadBasedSplittableDoFnsToPrimitiveReadsIfNecessary(pipeline);
    }
    pipeline.replaceAll(SparkTransformOverrides.getDefaultOverrides(pipelineOptions.isStreaming()));
    prepareFilesToStage(pipelineOptions);
    final long startTime = Instant.now().getMillis();
    EventLoggingListener eventLoggingListener = null;
    JavaSparkContext jsc = null;
    if (pipelineOptions.isStreaming()) {
        CheckpointDir checkpointDir = new CheckpointDir(pipelineOptions.getCheckpointDir());
        SparkRunnerStreamingContextFactory streamingContextFactory = new SparkRunnerStreamingContextFactory(pipeline, pipelineOptions, checkpointDir);
        final JavaStreamingContext jssc = JavaStreamingContext.getOrCreate(checkpointDir.getSparkCheckpointDir().toString(), streamingContextFactory);
        jsc = jssc.sparkContext();
        eventLoggingListener = startEventLoggingListener(jsc, pipelineOptions, startTime);
        // Checkpoint aggregator/metrics values
        jssc.addStreamingListener(new JavaStreamingListenerWrapper(new AggregatorsAccumulator.AccumulatorCheckpointingSparkListener()));
        jssc.addStreamingListener(new JavaStreamingListenerWrapper(new MetricsAccumulator.AccumulatorCheckpointingSparkListener()));
        // register user-defined listeners.
        for (JavaStreamingListener listener : pipelineOptions.as(SparkContextOptions.class).getListeners()) {
            LOG.info("Registered listener {}." + listener.getClass().getSimpleName());
            jssc.addStreamingListener(new JavaStreamingListenerWrapper(listener));
        }
        // register Watermarks listener to broadcast the advanced WMs.
        jssc.addStreamingListener(new JavaStreamingListenerWrapper(new WatermarkAdvancingStreamingListener()));
        // initAccumulators is called here even though SparkRunnerStreamingContextFactory also
        // calls it, because the factory is not invoked when resuming from a checkpoint. (When
        // not resuming, initAccumulators runs twice, which is fine since it is idempotent.)
        initAccumulators(pipelineOptions, jssc.sparkContext());
        startPipeline = executorService.submit(() -> {
            LOG.info("Starting streaming pipeline execution.");
            jssc.start();
        });
        executorService.shutdown();
        result = new SparkPipelineResult.StreamingMode(startPipeline, jssc);
    } else {
        jsc = SparkContextFactory.getSparkContext(pipelineOptions);
        eventLoggingListener = startEventLoggingListener(jsc, pipelineOptions, startTime);
        final EvaluationContext evaluationContext = new EvaluationContext(jsc, pipeline, pipelineOptions);
        translator = new TransformTranslator.Translator();
        // update the cache candidates
        updateCacheCandidates(pipeline, translator, evaluationContext);
        initAccumulators(pipelineOptions, jsc);
        startPipeline = executorService.submit(() -> {
            pipeline.traverseTopologically(new Evaluator(translator, evaluationContext));
            evaluationContext.computeOutputs();
            LOG.info("Batch pipeline execution complete.");
        });
        executorService.shutdown();
        result = new SparkPipelineResult.BatchMode(startPipeline, jsc);
    }
    if (pipelineOptions.getEnableSparkMetricSinks()) {
        registerMetricsSource(pipelineOptions.getAppName());
    }
    // It would have been better to create the MetricsPusher in runner-core, but we need the
    // runner-specific MetricsContainerStepMap.
    MetricsPusher metricsPusher = new MetricsPusher(MetricsAccumulator.getInstance().value(), pipelineOptions.as(MetricsOptions.class), result);
    metricsPusher.start();
    if (eventLoggingListener != null && jsc != null) {
        eventLoggingListener.onApplicationStart(SparkCompat.buildSparkListenerApplicationStart(jsc, pipelineOptions, startTime, result));
        eventLoggingListener.onApplicationEnd(new SparkListenerApplicationEnd(Instant.now().getMillis()));
        eventLoggingListener.stop();
    }
    return result;
}
Also used : MetricsOptions(org.apache.beam.sdk.metrics.MetricsOptions) JavaStreamingListenerWrapper(org.apache.spark.streaming.api.java.JavaStreamingListenerWrapper) JavaStreamingListener(org.apache.spark.streaming.api.java.JavaStreamingListener) TransformEvaluator(org.apache.beam.runners.spark.translation.TransformEvaluator) SparkRunnerStreamingContextFactory(org.apache.beam.runners.spark.translation.streaming.SparkRunnerStreamingContextFactory) JavaStreamingContext(org.apache.spark.streaming.api.java.JavaStreamingContext) TransformTranslator(org.apache.beam.runners.spark.translation.TransformTranslator) SparkListenerApplicationEnd(org.apache.spark.scheduler.SparkListenerApplicationEnd) ExecutorService(java.util.concurrent.ExecutorService) WatermarkAdvancingStreamingListener(org.apache.beam.runners.spark.util.GlobalWatermarkHolder.WatermarkAdvancingStreamingListener) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) EvaluationContext(org.apache.beam.runners.spark.translation.EvaluationContext) MetricsPusher(org.apache.beam.runners.core.metrics.MetricsPusher) SparkPipelineTranslator(org.apache.beam.runners.spark.translation.SparkPipelineTranslator) CheckpointDir(org.apache.beam.runners.spark.translation.streaming.Checkpoint.CheckpointDir) SparkCommon.startEventLoggingListener(org.apache.beam.runners.spark.util.SparkCommon.startEventLoggingListener) EventLoggingListener(org.apache.spark.scheduler.EventLoggingListener)
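
The streaming branch above recovers its context with JavaStreamingContext.getOrCreate, which is also why initAccumulators is repeated outside the factory. A minimal sketch of that recovery pattern, with a hypothetical checkpoint path and batch interval:

import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaStreamingContext;

public class CheckpointRecoverySketch {
    public static void main(String[] args) {
        String checkpointDir = "/tmp/beam-spark-checkpoint"; // hypothetical path

        // getOrCreate restores the context (and its DStream graph) from the checkpoint
        // directory if one exists; otherwise the factory below builds a fresh context.
        // The factory does not run on recovery, which mirrors the initAccumulators
        // comment in the runner code above.
        JavaStreamingContext jssc = JavaStreamingContext.getOrCreate(checkpointDir, () -> {
            SparkConf conf = new SparkConf()
                .setMaster("local[2]")
                .setAppName("checkpoint-recovery-sketch");
            JavaStreamingContext created = new JavaStreamingContext(conf, Durations.seconds(1));
            created.checkpoint(checkpointDir); // enable metadata checkpointing
            return created;
        });

        // Register DStreams and output operations before starting; a context with no
        // output operations refuses to start.
        // jssc.start();
        // jssc.awaitTermination();
        jssc.stop();
    }
}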

Aggregations

EventLoggingListener (org.apache.spark.scheduler.EventLoggingListener) 3
ExecutorService (java.util.concurrent.ExecutorService) 2
MetricsPusher (org.apache.beam.runners.core.metrics.MetricsPusher) 2
SparkCommon.startEventLoggingListener (org.apache.beam.runners.spark.util.SparkCommon.startEventLoggingListener) 2
MetricsOptions (org.apache.beam.sdk.metrics.MetricsOptions) 2
Nullable (edu.umd.cs.findbugs.annotations.Nullable) 1
URI (java.net.URI) 1
URISyntaxException (java.net.URISyntaxException) 1
UUID (java.util.UUID) 1
Executors (java.util.concurrent.Executors) 1
Future (java.util.concurrent.Future) 1
ArtifactApi (org.apache.beam.model.jobmanagement.v1.ArtifactApi) 1
RunnerApi (org.apache.beam.model.pipeline.v1.RunnerApi) 1
Pipeline (org.apache.beam.model.pipeline.v1.RunnerApi.Pipeline) 1
PTransformTranslation (org.apache.beam.runners.core.construction.PTransformTranslation) 1
PipelineOptionsTranslation (org.apache.beam.runners.core.construction.PipelineOptionsTranslation) 1
ExecutableStage (org.apache.beam.runners.core.construction.graph.ExecutableStage) 1
GreedyPipelineFuser (org.apache.beam.runners.core.construction.graph.GreedyPipelineFuser) 1
ProtoOverrides (org.apache.beam.runners.core.construction.graph.ProtoOverrides) 1
SplittableParDoExpander (org.apache.beam.runners.core.construction.graph.SplittableParDoExpander) 1