Example 1 with PublishSubject

Use of rx.subjects.PublishSubject in project Singularity by HubSpot.

The class SingularityMesosSchedulerClient, method connect.

/**
 * Sets up the connection and blocks while waiting for calls from the
 * mesos master.
 */
private void connect(URI mesosMasterURI, FrameworkInfo frameworkInfo, SingularityMesosScheduler scheduler) throws URISyntaxException {
    MesosClientBuilder<Call, Event> clientBuilder = ProtobufMesosClientBuilder.schedulerUsingProtos()
            .mesosUri(mesosMasterURI)
            .applicationUserAgentEntry(UserAgentEntries.userAgentEntryForMavenArtifact("com.hubspot.singularity", "SingularityService"))
            .onBackpressureBuffer(scheduler.getEventBufferSize(), () -> {
                String message = String.format("Overflow of event buffer (%s), singularity could not keep up!", scheduler.getEventBufferSize());
                scheduler.onUncaughtException(new EventBufferOverflowException(message));
            }, BackpressureOverflow.ON_OVERFLOW_ERROR);
    Call subscribeCall = Call.newBuilder()
            .setType(Call.Type.SUBSCRIBE)
            .setFrameworkId(frameworkInfo.getId())
            .setSubscribe(Call.Subscribe.newBuilder().setFrameworkInfo(frameworkInfo).build())
            .build();
    MesosClientBuilder<Call, Event> subscribe = clientBuilder.subscribe(subscribeCall);
    subscribe.processStream(unicastEvents -> {
        final Observable<Event> events = unicastEvents.share();
        events.filter(event -> event.getType() == Event.Type.ERROR).map(event -> event.getError().getMessage()).subscribe(scheduler::error, scheduler::onUncaughtException);
        events.filter(event -> event.getType() == Event.Type.FAILURE).map(Event::getFailure).subscribe(scheduler::failure, scheduler::onUncaughtException);
        events.filter(event -> event.getType() == Event.Type.HEARTBEAT).subscribe(scheduler::heartbeat, scheduler::onUncaughtException);
        events.filter(event -> event.getType() == Event.Type.INVERSE_OFFERS).map(event -> event.getInverseOffers().getInverseOffersList()).subscribe(scheduler::inverseOffers, scheduler::onUncaughtException);
        events.filter(event -> event.getType() == Event.Type.MESSAGE).map(Event::getMessage).subscribe(scheduler::message, scheduler::onUncaughtException);
        events.filter(event -> event.getType() == Event.Type.OFFERS).map(event -> event.getOffers().getOffersList()).subscribe(scheduler::resourceOffers, scheduler::onUncaughtException);
        events.filter(event -> event.getType() == Event.Type.RESCIND).map(event -> event.getRescind().getOfferId()).subscribe(scheduler::rescind, scheduler::onUncaughtException);
        events.filter(event -> event.getType() == Event.Type.RESCIND_INVERSE_OFFER).map(event -> event.getRescindInverseOffer().getInverseOfferId()).subscribe(scheduler::rescindInverseOffer, scheduler::onUncaughtException);
        events.filter(event -> event.getType() == Event.Type.SUBSCRIBED).map(Event::getSubscribed).subscribe(subscribed -> {
            this.frameworkId = subscribed.getFrameworkId();
            scheduler.subscribed(subscribed);
        }, scheduler::onUncaughtException);
        events.filter(event -> event.getType() == Event.Type.UPDATE).map(event -> event.getUpdate().getStatus()).filter(status -> {
            if (!status.hasAgentId() || !status.getAgentId().hasValue()) {
                LOG.warn("Filtering out status update without agentId {}", status);
                return false;
            } else {
                return true;
            }
        }).subscribe(scheduler::statusUpdate, scheduler::onUncaughtException);
        // This subject is responsible for sending calls to the mesos master.
        PublishSubject<Optional<SinkOperation<Call>>> p = PublishSubject.create();
        // toSerialized() handles the fact that calls may be added from different threads.
        publisher = p.toSerialized();
        return publisher;
    });
    MesosClient<Call, Event> client = clientBuilder.build();
    openStream = client.openStream();
    try {
        openStream.await();
    } catch (Throwable t) {
        LOG.error("Observable was unexpectedly closed", t);
        scheduler.onConnectException(t);
    }
}
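
The pattern worth isolating above is the outbound sink: calls to the master are pushed through a PublishSubject wrapped with toSerialized(), so producers on any thread get a safe Observer. Below is a minimal sketch of that pattern on its own, with a String payload standing in for the protobuf Call type (the class and payload names are illustrative, not from the Singularity source):

import rx.subjects.PublishSubject;
import rx.subjects.Subject;

public class SerializedSinkSketch {
    public static void main(String[] args) {
        PublishSubject<String> subject = PublishSubject.create();
        // toSerialized() makes onNext safe to call from multiple threads;
        // a bare PublishSubject assumes serialized access per the Rx contract.
        Subject<String, String> sink = subject.toSerialized();
        sink.subscribe(call -> System.out.println("sending: " + call));
        sink.onNext("SUBSCRIBE");
        sink.onNext("ACKNOWLEDGE");
        sink.onCompleted();
    }
}

Without toSerialized(), concurrent onNext calls would violate the Observable contract; the serialized wrapper queues overlapping emissions instead.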

Example 2 with PublishSubject

Use of rx.subjects.PublishSubject in project azure-tools-for-java by Microsoft.

The class SparkBatchJobDebuggerRunner, method execute.

/**
 * Executes the Spark remote debugging action. Refer to the
 * {@link GenericDebuggerRunner#execute(ExecutionEnvironment)} implementation;
 * some internal APIs are leveraged.
 *
 * @param environment the execution environment
 * @throws ExecutionException the exception in execution
 */
@Override
public void execute(final ExecutionEnvironment environment) throws ExecutionException {
    final RunProfileState state = environment.getState();
    if (state == null) {
        return;
    }
    final Operation operation = environment.getUserData(TelemetryKeys.OPERATION);
    final AsyncPromise<ExecutionEnvironment> jobDriverEnvReady = new AsyncPromise<>();
    final SparkBatchRemoteDebugState submissionState = (SparkBatchRemoteDebugState) state;
    final SparkSubmitModel submitModel = submissionState.getSubmitModel();
    // Create the SSH debug session first
    final SparkBatchDebugSession session;
    try {
        session = SparkBatchDebugSession.factoryByAuth(getSparkJobUrl(submitModel), submitModel.getAdvancedConfigModel()).open().verifyCertificate();
    } catch (final Exception e) {
        final ExecutionException exp = new ExecutionException("Failed to create SSH session for debugging. " + ExceptionUtils.getRootCauseMessage(e));
        EventUtil.logErrorClassNameOnlyWithComplete(operation, ErrorType.systemError, exp, null, null);
        throw exp;
    }
    final Project project = submitModel.getProject();
    final ExecutionManager executionManager = ExecutionManager.getInstance(project);
    final IdeaSchedulers schedulers = new IdeaSchedulers(project);
    final PublishSubject<SparkBatchJobSubmissionEvent> debugEventSubject = PublishSubject.create();
    final ISparkBatchDebugJob sparkDebugBatch = (ISparkBatchDebugJob) submissionState.getSparkBatch().clone();
    final PublishSubject<SparkLogLine> ctrlSubject = (PublishSubject<SparkLogLine>) sparkDebugBatch.getCtrlSubject();
    final SparkBatchJobRemoteDebugProcess driverDebugProcess = new SparkBatchJobRemoteDebugProcess(
            schedulers,
            session,
            sparkDebugBatch,
            submitModel.getArtifactPath().orElseThrow(() -> new ExecutionException("No artifact selected")),
            submitModel.getSubmissionParameter().getMainClassName(),
            submitModel.getAdvancedConfigModel(),
            ctrlSubject);
    final SparkBatchJobDebugProcessHandler driverDebugHandler = new SparkBatchJobDebugProcessHandler(project, driverDebugProcess, debugEventSubject);
    // Prepare an independent submission console
    final ConsoleViewImpl submissionConsole = new ConsoleViewImpl(project, true);
    final RunContentDescriptor submissionDesc = new RunContentDescriptor(
            submissionConsole,
            driverDebugHandler,
            submissionConsole.getComponent(),
            String.format("Submit %s to cluster %s",
                    submitModel.getSubmissionParameter().getMainClassName(),
                    submitModel.getSubmissionParameter().getClusterName()));
    // Show the submission console view
    ExecutionManager.getInstance(project).getContentManager().showRunContent(environment.getExecutor(), submissionDesc);
    // Use the submission console to display the deployment control messages
    final Subscription jobSubscription = ctrlSubject.subscribe(typedMessage -> {
        final String line = typedMessage.getRawLog() + "\n";
        switch(typedMessage.getMessageInfoType()) {
            case Error:
                submissionConsole.print(line, ConsoleViewContentType.ERROR_OUTPUT);
                break;
            case Info:
                submissionConsole.print(line, ConsoleViewContentType.NORMAL_OUTPUT);
                break;
            case Log:
                submissionConsole.print(line, ConsoleViewContentType.SYSTEM_OUTPUT);
                break;
            case Warning:
                submissionConsole.print(line, ConsoleViewContentType.LOG_WARNING_OUTPUT);
                break;
        }
    }, err -> {
        submissionConsole.print(ExceptionUtils.getRootCauseMessage(err), ConsoleViewContentType.ERROR_OUTPUT);
        final String errMsg = "The Spark job remote debug is cancelled due to " + ExceptionUtils.getRootCauseMessage(err);
        jobDriverEnvReady.setError(errMsg);
        EventUtil.logErrorClassNameOnlyWithComplete(operation, ErrorType.systemError, new UncheckedExecutionException(errMsg, err), null, null);
    }, () -> {
        if (Optional.ofNullable(driverDebugHandler.getUserData(ProcessHandler.TERMINATION_REQUESTED)).orElse(false)) {
            final String errMsg = "The Spark job remote debug is cancelled by user.";
            jobDriverEnvReady.setError(errMsg);
            final Map<String, String> props = ImmutableMap.of("isDebugCancelled", "true");
            EventUtil.logErrorClassNameOnlyWithComplete(operation, ErrorType.userError, new ExecutionException(errMsg), props, null);
        }
    });
    // Subscribe to debug events; doAfterTerminate closes the SSH session after completion or error
    debugEventSubject.subscribeOn(Schedulers.io()).doAfterTerminate(session::close).subscribe(debugEvent -> {
        try {
            if (debugEvent instanceof SparkBatchRemoteDebugHandlerReadyEvent) {
                final SparkBatchRemoteDebugHandlerReadyEvent handlerReadyEvent = (SparkBatchRemoteDebugHandlerReadyEvent) debugEvent;
                final SparkBatchDebugJobJdbPortForwardedEvent jdbReadyEvent = handlerReadyEvent.getJdbPortForwardedEvent();
                if (!jdbReadyEvent.getLocalJdbForwardedPort().isPresent()) {
                    return;
                }
                final int localPort = jdbReadyEvent.getLocalJdbForwardedPort().get();
                final ExecutionEnvironment forkEnv = forkEnvironment(environment, jdbReadyEvent.getRemoteHost().orElse("unknown"), jdbReadyEvent.isDriver());
                final RunProfile runProfile = forkEnv.getRunProfile();
                if (!(runProfile instanceof LivySparkBatchJobRunConfiguration)) {
                    ctrlSubject.onError(new UnsupportedOperationException("Only supports LivySparkBatchJobRunConfiguration type, but got type " + runProfile.getClass().getCanonicalName()));
                    return;
                }
                // Reuse the driver's Spark batch job
                ((LivySparkBatchJobRunConfiguration) runProfile).setSparkRemoteBatch(sparkDebugBatch);
                final SparkBatchRemoteDebugState forkState = jdbReadyEvent.isDriver() ? submissionState : (SparkBatchRemoteDebugState) forkEnv.getState();
                if (forkState == null) {
                    return;
                }
                // Set the debug connection to localhost and local forwarded port to the state
                forkState.setRemoteConnection(new RemoteConnection(true, "localhost", Integer.toString(localPort), false));
                // Prepare the debug tab console view UI
                SparkJobLogConsoleView jobOutputView = new SparkJobLogConsoleView(project);
                // Get YARN container log URL port
                int containerLogUrlPort = ((SparkBatchRemoteDebugJob) driverDebugProcess.getSparkJob()).getYarnContainerLogUrlPort().toBlocking().single();
                // Parse container ID and host URL from driver console view
                jobOutputView.getSecondaryConsoleView().addMessageFilter((line, entireLength) -> {
                    Matcher matcher = Pattern.compile("Launching container (\\w+).* on host ([a-zA-Z_0-9-.]+)", Pattern.CASE_INSENSITIVE).matcher(line);
                    while (matcher.find()) {
                        String containerId = matcher.group(1);
                        // TODO: get port from somewhere else rather than hard code here
                        URI hostUri = URI.create(String.format("http://%s:%d", matcher.group(2), containerLogUrlPort));
                        debugEventSubject.onNext(new SparkBatchJobExecutorCreatedEvent(hostUri, containerId));
                    }
                    return null;
                });
                jobOutputView.attachToProcess(handlerReadyEvent.getDebugProcessHandler());
                ExecutionResult result = new DefaultExecutionResult(jobOutputView, handlerReadyEvent.getDebugProcessHandler());
                forkState.setExecutionResult(result);
                forkState.setConsoleView(jobOutputView.getSecondaryConsoleView());
                forkState.setRemoteProcessCtrlLogHandler(handlerReadyEvent.getDebugProcessHandler());
                if (jdbReadyEvent.isDriver()) {
                    // Let the debug console view handle the control log
                    jobSubscription.unsubscribe();
                    // Resolve job driver promise, handle the driver VM attaching separately
                    jobDriverEnvReady.setResult(forkEnv);
                } else {
                    // Start Executor debugging
                    executionManager.startRunProfile(forkEnv, () -> toIdeaPromise(attachAndDebug(forkEnv, forkState)));
                }
            } else if (debugEvent instanceof SparkBatchJobExecutorCreatedEvent) {
                SparkBatchJobExecutorCreatedEvent executorCreatedEvent = (SparkBatchJobExecutorCreatedEvent) debugEvent;
                final String containerId = executorCreatedEvent.getContainerId();
                final SparkBatchRemoteDebugJob debugJob = (SparkBatchRemoteDebugJob) driverDebugProcess.getSparkJob();
                URI internalHostUri = executorCreatedEvent.getHostUri();
                URI executorLogUrl = debugJob.convertToPublicLogUri(internalHostUri).map(uri -> uri.resolve(String.format("node/containerlogs/%s/livy", containerId))).toBlocking().singleOrDefault(internalHostUri);
                // Create an Executor Debug Process
                SparkBatchJobRemoteDebugExecutorProcess executorDebugProcess = new SparkBatchJobRemoteDebugExecutorProcess(schedulers, debugJob, internalHostUri.getHost(), driverDebugProcess.getDebugSession(), executorLogUrl.toString());
                SparkBatchJobDebugProcessHandler executorDebugHandler = new SparkBatchJobDebugProcessHandler(project, executorDebugProcess, debugEventSubject);
                executorDebugHandler.getRemoteDebugProcess().start();
            }
        } catch (final ExecutionException e) {
            EventUtil.logErrorClassNameOnlyWithComplete(operation, ErrorType.systemError, new UncheckedExecutionException(e), null, null);
            throw new UncheckedExecutionException(e);
        }
    });
    driverDebugHandler.getRemoteDebugProcess().start();
    // Driver-side execution: leverage IntelliJ's AsyncPromise to wait until the Spark app is deployed
    executionManager.startRunProfile(environment, () -> jobDriverEnvReady.thenAsync(driverEnv -> toIdeaPromise(attachAndDebug(driverEnv, state))));
}
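
A second role for PublishSubject appears above: debugEventSubject acts as an in-process event bus. Process handlers publish lifecycle events onto it, and the subscriber dispatches on the concrete event type, sometimes publishing follow-up events back onto the same bus (as the console message filter does for executor-created events). Here is a minimal sketch of that shape, with hypothetical event classes standing in for the Spark ones:

import rx.subjects.PublishSubject;

public class DebugEventBusSketch {
    interface DebugEvent {}
    static class HandlerReadyEvent implements DebugEvent {}
    static class ExecutorCreatedEvent implements DebugEvent {}

    public static void main(String[] args) {
        PublishSubject<DebugEvent> bus = PublishSubject.create();
        bus.subscribe(event -> {
            // Dispatch on the concrete event type, as the runner above does.
            if (event instanceof HandlerReadyEvent) {
                System.out.println("attach the driver debugger");
            } else if (event instanceof ExecutorCreatedEvent) {
                System.out.println("start an executor debug process");
            }
        });
        // Producers on any side of the pipeline can feed the same bus.
        bus.onNext(new HandlerReadyEvent());
        bus.onNext(new ExecutorCreatedEvent());
    }
}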

Example 3 with PublishSubject

Use of rx.subjects.PublishSubject in project azure-tools-for-java by Microsoft.

The class SparkBatchJobRunner, method doExecute.

@Nullable
@Override
protected RunContentDescriptor doExecute(@NotNull RunProfileState state, @NotNull ExecutionEnvironment environment) throws ExecutionException {
    final SparkBatchRemoteRunProfileState submissionState = (SparkBatchRemoteRunProfileState) state;
    final SparkSubmitModel submitModel = submissionState.getSubmitModel();
    final Project project = submitModel.getProject();
    // Prepare the run table console view UI
    final SparkJobLogConsoleView jobOutputView = new SparkJobLogConsoleView(project);
    final String artifactPath = submitModel.getArtifactPath().orElse(null);
    assert artifactPath != null : "artifactPath should be checked in LivySparkBatchJobRunConfiguration::checkSubmissionConfigurationBeforeRun";
    // To address issue https://github.com/microsoft/azure-tools-for-java/issues/4021.
    // In this issue, when the user clicks the rerun button, we are still using the legacy ctrlSubject, which
    // has already sent the "onComplete" message when the job finished the previous time. To avoid this, we
    // clone a new Spark batch job instance to re-initialize everything in the object.
    final ISparkBatchJob sparkBatch = submissionState.getSparkBatch().clone();
    final PublishSubject<SparkLogLine> ctrlSubject = (PublishSubject<SparkLogLine>) sparkBatch.getCtrlSubject();
    final SparkBatchJobRemoteProcess remoteProcess = new SparkBatchJobRemoteProcess(
            new IdeaSchedulers(project),
            sparkBatch,
            artifactPath,
            submitModel.getSubmissionParameter().getMainClassName(),
            ctrlSubject);
    final SparkBatchJobRunProcessHandler processHandler = new SparkBatchJobRunProcessHandler(remoteProcess, "Package and deploy the job to Spark cluster", null);
    // After attaching, the console view can read the process input streams and display them
    jobOutputView.attachToProcess(processHandler);
    remoteProcess.start();
    final Operation operation = environment.getUserData(TelemetryKeys.OPERATION);
    // After we define a new AnAction class, IntelliJ constructs a new AnAction instance for us.
    // Using one action instance keeps behaviours like isEnabled() consistent.
    final SparkBatchJobDisconnectAction disconnectAction = (SparkBatchJobDisconnectAction) ActionManager.getInstance().getAction("Actions.SparkJobDisconnect");
    disconnectAction.init(remoteProcess, operation);
    sendTelemetryForParameters(submitModel, operation);
    final ExecutionResult result = new DefaultExecutionResult(jobOutputView, processHandler, Separator.getInstance(), disconnectAction);
    submissionState.setExecutionResult(result);
    final ConsoleView consoleView = jobOutputView.getSecondaryConsoleView();
    submissionState.setConsoleView(consoleView);
    addConsoleViewFilter(remoteProcess.getSparkJob(), consoleView);
    submissionState.setRemoteProcessCtrlLogHandler(processHandler);
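    // The empty onNext below is deliberate; this subscription only reacts to terminal events.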
    ctrlSubject.subscribe(messageWithType -> {
    }, err -> disconnectAction.setEnabled(false), () -> disconnectAction.setEnabled(false));
    return super.doExecute(state, environment);
}
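
The trailing three-argument subscribe is the idiom to note: the onNext handler is empty, and the subscription exists only so the control subject's terminal events (error or completion) can drive UI state, here disabling the disconnect action. A minimal sketch of that usage, assuming nothing beyond rx.subjects.PublishSubject:

import rx.subjects.PublishSubject;

public class CtrlSubjectSketch {
    public static void main(String[] args) {
        PublishSubject<String> ctrlSubject = PublishSubject.create();
        ctrlSubject.subscribe(
                line -> System.out.println("log: " + line),            // onNext: display the control log
                err -> System.out.println("disable disconnect action"), // onError
                () -> System.out.println("disable disconnect action")); // onCompleted
        ctrlSubject.onNext("job submitted");
        // A PublishSubject terminates exactly once; after this, only terminal events are replayed.
        ctrlSubject.onCompleted();
    }
}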
