Use of com.microsoft.azuretools.telemetrywrapper.Operation in project azure-tools-for-java by Microsoft.
The class WebAppDeployDialog, method createButton.
private void createButton(Composite container) {
    Composite composite = new Composite(container, SWT.NONE);
    composite.setLayout(new RowLayout(SWT.VERTICAL));
    composite.setLayoutData(new GridData(SWT.FILL, SWT.FILL, false, false, 1, 1));
    Button btnCreate = new Button(composite, SWT.NONE);
    btnCreate.addSelectionListener(new SelectionAdapter() {
        @Override
        public void widgetSelected(SelectionEvent e) {
            sendTelemetry("CREATE");
            EventUtil.logEvent(EventType.info, WEBAPP, OPEN_CREATEWEBAPP_DIALOG, buildProperties());
            createAppService(project);
        }
    });
    btnCreate.setText("Create...");
    btnDelete = new Button(composite, SWT.NONE);
    btnDelete.setEnabled(false);
    btnDelete.addSelectionListener(new SelectionAdapter() {
        @Override
        public void widgetSelected(SelectionEvent e) {
            sendTelemetry("DELETE");
            deleteAppService();
        }
    });
    btnDelete.setText("Delete...");
    Button btnRefresh = new Button(composite, SWT.NONE);
    btnRefresh.addSelectionListener(new SelectionAdapter() {
        @Override
        public void widgetSelected(SelectionEvent e) {
            sendTelemetry("REFRESH");
            EventUtil.executeWithLog(WEBAPP, REFRESH_METADATA, (operation) -> {
                table.removeAll();
                fillAppServiceDetails();
                doFillTable(true);
            });
        }
    });
    btnRefresh.setText("Refresh");
    btnDeployToRoot = new Button(composite, SWT.CHECK);
    btnDeployToRoot.setSelection(true);
    btnDeployToRoot.setText("Deploy to root");
    int size = btnDeployToRoot.computeSize(SWT.DEFAULT, SWT.DEFAULT).x;
    btnCreate.setLayoutData(new RowData(size, SWT.DEFAULT));
    btnDelete.setLayoutData(new RowData(size, SWT.DEFAULT));
    btnRefresh.setLayoutData(new RowData(size, SWT.DEFAULT));
    btnDeployToRoot.setLayoutData(new RowData(size, SWT.DEFAULT));
}
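The Refresh handler above uses the wrapper's convenience form: EventUtil.executeWithLog creates and starts an Operation, runs the callback, logs any thrown exception against that operation, and completes it, so the callback body needs no telemetry plumbing of its own. A minimal sketch of that pattern in isolation; "my-service" and "refresh-metadata" are illustrative placeholders, not the project's constants:

import com.microsoft.azuretools.telemetrywrapper.EventUtil;

void refreshWithTelemetry() {
    // The wrapper opens the telemetry span, runs this lambda, records any
    // failure against the span, and closes it when the lambda returns.
    EventUtil.executeWithLog("my-service", "refresh-metadata", (operation) -> {
        // ... reload the model and refresh the view here ...
    });
}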
Use of com.microsoft.azuretools.telemetrywrapper.Operation in project azure-tools-for-java by Microsoft.
The class SettingsStep, method onFinish.
@Override
public boolean onFinish() {
    final boolean isNewResourceGroup = createNewRadioButton.isSelected();
    final String resourceGroupName = isNewResourceGroup
            ? resourceGrpField.getText()
            : resourceGrpCombo.getSelectedItem().toString();
    Operation operation = TelemetryManager.createOperation(VM, CREATE_VM);
    final AzureString title = AzureOperationBundle.title("vm.create", model.getName());
    AzureTaskManager.getInstance().runInBackground(new AzureTask(project, title, false, () -> {
        final ProgressIndicator progressIndicator = ProgressManager.getInstance().getProgressIndicator();
        progressIndicator.setIndeterminate(true);
        try {
            operation.start();
            String certificate = model.getCertificate();
            byte[] certData = new byte[0];
            if (!certificate.isEmpty()) {
                File certFile = new File(certificate);
                if (certFile.exists()) {
                    // try-with-resources closes the stream; no explicit finally block is needed
                    try (FileInputStream certStream = new FileInputStream(certFile)) {
                        certData = new byte[(int) certFile.length()];
                        if (certStream.read(certData) != certData.length) {
                            throw new Exception("Unable to process certificate: could not read the full file.");
                        }
                    }
                }
            }
            // Create the storage account when the user chooses to create a new one
            if (Objects.nonNull(model.getStorageAccountConfig())) {
                model.setStorageAccount(CreateStorageAccountAction.createStorageAccount(model.getStorageAccountConfig()));
            }
            final com.microsoft.azure.management.compute.VirtualMachine vm = AzureSDKManager.createVirtualMachine(
                    model.getSubscription().getId(), model.getName(), resourceGroupName,
                    createNewRadioButton.isSelected(), model.getSize(), model.getRegion().getName(),
                    model.getVirtualMachineImage(), model.getKnownMachineImage(), model.isKnownMachineImage(),
                    model.getStorageAccount(), model.getVirtualNetwork(), model.getNewNetwork(),
                    model.isWithNewNetwork(), model.getSubnet(), model.getPublicIpAddress(),
                    model.isWithNewPip(), model.getAvailabilitySet(), model.isWithNewAvailabilitySet(),
                    model.getUserName(), model.getPassword(),
                    certData.length > 0 ? new String(certData) : null);
            AzureTaskManager.getInstance().runLater(() -> {
                try {
                    parent.addChildNode(new com.microsoft.tooling.msservices.serviceexplorer.azure.vmarm.VMNode(
                            parent, model.getSubscription().getId(), vm));
                } catch (AzureCmdException e) {
                    String msg = "An error occurred while attempting to refresh the list of virtual machines.";
                    DefaultLoader.getUIHelper().showException(msg, e,
                            "Azure Services Explorer - Error Refreshing VM List", false, true);
                    AzurePlugin.log(msg, e);
                }
            });
        } catch (Exception e) {
            EventUtil.logError(operation, ErrorType.userError, e, null, null);
            String msg = "An error occurred while attempting to create the specified virtual machine."
                    + "<br>" + String.format(message("webappExpMsg"), e.getMessage());
            DefaultLoader.getUIHelper().showException(msg, e, message("errTtl"), false, true);
            AzurePlugin.log(msg, e);
        } finally {
            operation.complete();
        }
    }));
    return super.onFinish();
}
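Here the Operation lifecycle is managed by hand because the work runs inside a background task: create the operation up front, call start() before the work, log any failure against it, and call complete() in a finally block so the telemetry span always closes. A minimal sketch of that lifecycle, with placeholder service and operation names:

import com.microsoft.azuretools.telemetrywrapper.ErrorType;
import com.microsoft.azuretools.telemetrywrapper.EventUtil;
import com.microsoft.azuretools.telemetrywrapper.Operation;
import com.microsoft.azuretools.telemetrywrapper.TelemetryManager;

void runTracked() {
    // "my-service" and "create-resource" are illustrative names, not project constants.
    Operation operation = TelemetryManager.createOperation("my-service", "create-resource");
    try {
        operation.start();     // begin the tracked span
        // ... do the actual work here; any thrown exception is recorded below ...
    } catch (Exception e) {
        // Attach the failure to this operation before the span is closed.
        EventUtil.logError(operation, ErrorType.userError, e, null, null);
    } finally {
        operation.complete();  // always close the span, success or failure
    }
}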
Use of com.microsoft.azuretools.telemetrywrapper.Operation in project azure-tools-for-java by Microsoft.
The class SparkBatchJobDebuggerRunner, method execute.
/**
 * Execute the Spark remote debugging action; refer to the
 * {@link GenericDebuggerRunner#execute(ExecutionEnvironment)} implementation.
 * Some internal APIs are leveraged.
 *
 * @param environment the execution environment
 * @throws ExecutionException the exception in execution
 */
@Override
public void execute(final ExecutionEnvironment environment) throws ExecutionException {
    final RunProfileState state = environment.getState();
    if (state == null) {
        return;
    }
    final Operation operation = environment.getUserData(TelemetryKeys.OPERATION);
    final AsyncPromise<ExecutionEnvironment> jobDriverEnvReady = new AsyncPromise<>();
    final SparkBatchRemoteDebugState submissionState = (SparkBatchRemoteDebugState) state;
    final SparkSubmitModel submitModel = submissionState.getSubmitModel();
    // Create the SSH debug session first
    final SparkBatchDebugSession session;
    try {
        session = SparkBatchDebugSession
                .factoryByAuth(getSparkJobUrl(submitModel), submitModel.getAdvancedConfigModel())
                .open()
                .verifyCertificate();
    } catch (final Exception e) {
        final ExecutionException exp = new ExecutionException(
                "Failed to create SSH session for debugging. " + ExceptionUtils.getRootCauseMessage(e));
        EventUtil.logErrorClassNameOnlyWithComplete(operation, ErrorType.systemError, exp, null, null);
        throw exp;
    }
    final Project project = submitModel.getProject();
    final ExecutionManager executionManager = ExecutionManager.getInstance(project);
    final IdeaSchedulers schedulers = new IdeaSchedulers(project);
    final PublishSubject<SparkBatchJobSubmissionEvent> debugEventSubject = PublishSubject.create();
    final ISparkBatchDebugJob sparkDebugBatch = (ISparkBatchDebugJob) submissionState.getSparkBatch().clone();
    final PublishSubject<SparkLogLine> ctrlSubject =
            (PublishSubject<SparkLogLine>) sparkDebugBatch.getCtrlSubject();
    final SparkBatchJobRemoteDebugProcess driverDebugProcess = new SparkBatchJobRemoteDebugProcess(
            schedulers,
            session,
            sparkDebugBatch,
            submitModel.getArtifactPath().orElseThrow(() -> new ExecutionException("No artifact selected")),
            submitModel.getSubmissionParameter().getMainClassName(),
            submitModel.getAdvancedConfigModel(),
            ctrlSubject);
    final SparkBatchJobDebugProcessHandler driverDebugHandler =
            new SparkBatchJobDebugProcessHandler(project, driverDebugProcess, debugEventSubject);
    // Prepare an independent submission console
    final ConsoleViewImpl submissionConsole = new ConsoleViewImpl(project, true);
    final RunContentDescriptor submissionDesc = new RunContentDescriptor(
            submissionConsole,
            driverDebugHandler,
            submissionConsole.getComponent(),
            String.format("Submit %s to cluster %s",
                    submitModel.getSubmissionParameter().getMainClassName(),
                    submitModel.getSubmissionParameter().getClusterName()));
    // Show the submission console view
    ExecutionManager.getInstance(project).getContentManager()
            .showRunContent(environment.getExecutor(), submissionDesc);
    // Use the submission console to display the deployment ctrl messages
    final Subscription jobSubscription = ctrlSubject.subscribe(typedMessage -> {
        final String line = typedMessage.getRawLog() + "\n";
        switch (typedMessage.getMessageInfoType()) {
            case Error:
                submissionConsole.print(line, ConsoleViewContentType.ERROR_OUTPUT);
                break;
            case Info:
                submissionConsole.print(line, ConsoleViewContentType.NORMAL_OUTPUT);
                break;
            case Log:
                submissionConsole.print(line, ConsoleViewContentType.SYSTEM_OUTPUT);
                break;
            case Warning:
                submissionConsole.print(line, ConsoleViewContentType.LOG_WARNING_OUTPUT);
                break;
        }
    }, err -> {
        submissionConsole.print(ExceptionUtils.getRootCauseMessage(err), ConsoleViewContentType.ERROR_OUTPUT);
        final String errMsg = "The Spark job remote debug is cancelled due to "
                + ExceptionUtils.getRootCauseMessage(err);
        jobDriverEnvReady.setError(errMsg);
        EventUtil.logErrorClassNameOnlyWithComplete(
                operation, ErrorType.systemError, new UncheckedExecutionException(errMsg, err), null, null);
    }, () -> {
        if (Optional.ofNullable(driverDebugHandler.getUserData(ProcessHandler.TERMINATION_REQUESTED))
                .orElse(false)) {
            final String errMsg = "The Spark job remote debug is cancelled by user.";
            jobDriverEnvReady.setError(errMsg);
            final Map<String, String> props = ImmutableMap.of("isDebugCancelled", "true");
            EventUtil.logErrorClassNameOnlyWithComplete(
                    operation, ErrorType.userError, new ExecutionException(errMsg), props, null);
        }
    });
    // doAfterTerminate: close the SSH session once the event stream completes or errors
    debugEventSubject.subscribeOn(Schedulers.io()).doAfterTerminate(session::close).subscribe(debugEvent -> {
        try {
            if (debugEvent instanceof SparkBatchRemoteDebugHandlerReadyEvent) {
                final SparkBatchRemoteDebugHandlerReadyEvent handlerReadyEvent =
                        (SparkBatchRemoteDebugHandlerReadyEvent) debugEvent;
                final SparkBatchDebugJobJdbPortForwardedEvent jdbReadyEvent =
                        handlerReadyEvent.getJdbPortForwardedEvent();
                if (!jdbReadyEvent.getLocalJdbForwardedPort().isPresent()) {
                    return;
                }
                final int localPort = jdbReadyEvent.getLocalJdbForwardedPort().get();
                final ExecutionEnvironment forkEnv = forkEnvironment(
                        environment,
                        jdbReadyEvent.getRemoteHost().orElse("unknown"),
                        jdbReadyEvent.isDriver());
                final RunProfile runProfile = forkEnv.getRunProfile();
                if (!(runProfile instanceof LivySparkBatchJobRunConfiguration)) {
                    ctrlSubject.onError(new UnsupportedOperationException(
                            "Only supports LivySparkBatchJobRunConfiguration type, but got type "
                                    + runProfile.getClass().getCanonicalName()));
                    return;
                }
                // Reuse the driver's Spark batch job
                ((LivySparkBatchJobRunConfiguration) runProfile).setSparkRemoteBatch(sparkDebugBatch);
                final SparkBatchRemoteDebugState forkState = jdbReadyEvent.isDriver()
                        ? submissionState
                        : (SparkBatchRemoteDebugState) forkEnv.getState();
                if (forkState == null) {
                    return;
                }
                // Point the state's debug connection at localhost and the locally forwarded port
                forkState.setRemoteConnection(
                        new RemoteConnection(true, "localhost", Integer.toString(localPort), false));
                // Prepare the debug tab console view UI
                SparkJobLogConsoleView jobOutputView = new SparkJobLogConsoleView(project);
                // Get the YARN container log URL port
                int containerLogUrlPort = ((SparkBatchRemoteDebugJob) driverDebugProcess.getSparkJob())
                        .getYarnContainerLogUrlPort()
                        .toBlocking()
                        .single();
                // Parse the container ID and host URL from the driver console view
                jobOutputView.getSecondaryConsoleView().addMessageFilter((line, entireLength) -> {
                    Matcher matcher = Pattern.compile(
                            "Launching container (\\w+).* on host ([a-zA-Z_0-9-.]+)",
                            Pattern.CASE_INSENSITIVE).matcher(line);
                    while (matcher.find()) {
                        String containerId = matcher.group(1);
                        // TODO: get port from somewhere else rather than hard code here
                        URI hostUri = URI.create(String.format(
                                "http://%s:%d", matcher.group(2), containerLogUrlPort));
                        debugEventSubject.onNext(new SparkBatchJobExecutorCreatedEvent(hostUri, containerId));
                    }
                    return null;
                });
                jobOutputView.attachToProcess(handlerReadyEvent.getDebugProcessHandler());
                ExecutionResult result = new DefaultExecutionResult(
                        jobOutputView, handlerReadyEvent.getDebugProcessHandler());
                forkState.setExecutionResult(result);
                forkState.setConsoleView(jobOutputView.getSecondaryConsoleView());
                forkState.setRemoteProcessCtrlLogHandler(handlerReadyEvent.getDebugProcessHandler());
                if (jdbReadyEvent.isDriver()) {
                    // Let the debug console view handle the control log
                    jobSubscription.unsubscribe();
                    // Resolve the job driver promise; handle the driver VM attaching separately
                    jobDriverEnvReady.setResult(forkEnv);
                } else {
                    // Start executor debugging
                    executionManager.startRunProfile(forkEnv,
                            () -> toIdeaPromise(attachAndDebug(forkEnv, forkState)));
                }
            } else if (debugEvent instanceof SparkBatchJobExecutorCreatedEvent) {
                SparkBatchJobExecutorCreatedEvent executorCreatedEvent =
                        (SparkBatchJobExecutorCreatedEvent) debugEvent;
                final String containerId = executorCreatedEvent.getContainerId();
                final SparkBatchRemoteDebugJob debugJob =
                        (SparkBatchRemoteDebugJob) driverDebugProcess.getSparkJob();
                URI internalHostUri = executorCreatedEvent.getHostUri();
                URI executorLogUrl = debugJob.convertToPublicLogUri(internalHostUri)
                        .map(uri -> uri.resolve(String.format("node/containerlogs/%s/livy", containerId)))
                        .toBlocking()
                        .singleOrDefault(internalHostUri);
                // Create an executor debug process
                SparkBatchJobRemoteDebugExecutorProcess executorDebugProcess =
                        new SparkBatchJobRemoteDebugExecutorProcess(
                                schedulers,
                                debugJob,
                                internalHostUri.getHost(),
                                driverDebugProcess.getDebugSession(),
                                executorLogUrl.toString());
                SparkBatchJobDebugProcessHandler executorDebugHandler =
                        new SparkBatchJobDebugProcessHandler(project, executorDebugProcess, debugEventSubject);
                executorDebugHandler.getRemoteDebugProcess().start();
            }
        } catch (final ExecutionException e) {
            EventUtil.logErrorClassNameOnlyWithComplete(
                    operation, ErrorType.systemError, new UncheckedExecutionException(e), null, null);
            throw new UncheckedExecutionException(e);
        }
    });
    driverDebugHandler.getRemoteDebugProcess().start();
    // Driver-side execution: leverage the IntelliJ AsyncPromise to wait for the Spark app to be deployed
    executionManager.startRunProfile(environment,
            () -> jobDriverEnvReady.thenAsync(driverEnv -> toIdeaPromise(attachAndDebug(driverEnv, state))));
}
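Unlike the previous example, this runner never creates the Operation itself: it picks up the one stashed on the ExecutionEnvironment under TelemetryKeys.OPERATION and reports failures through EventUtil.logErrorClassNameOnlyWithComplete, which, going by its name, records only the exception class name and completes the operation in one call. A minimal sketch of that hand-off; executeTracked and attachDebugger are hypothetical, and the imports are assumed to match the snippet above:

public void executeTracked(final ExecutionEnvironment environment) throws ExecutionException {
    // The Operation was created upstream and travels with the environment as user data.
    final Operation operation = environment.getUserData(TelemetryKeys.OPERATION);
    try {
        attachDebugger(environment);
    } catch (final Exception e) {
        final ExecutionException exp = new ExecutionException("Attach failed. " + e.getMessage());
        // Log by exception class name only, then complete the operation in one call.
        EventUtil.logErrorClassNameOnlyWithComplete(operation, ErrorType.systemError, exp, null, null);
        throw exp;
    }
}

private void attachDebugger(ExecutionEnvironment environment) throws Exception {
    // hypothetical helper: the actual attach/debug work goes here
}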
Use of com.microsoft.azuretools.telemetrywrapper.Operation in project azure-tools-for-java by Microsoft.
The class AzureRunProfileState, method execute.
@Nullable
@Override
public ExecutionResult execute(Executor executor, @NotNull ProgramRunner programRunner) throws ExecutionException {
    final RunProcessHandler processHandler = new RunProcessHandler();
    processHandler.addDefaultListener();
    ConsoleView consoleView = TextConsoleBuilderFactory.getInstance().createBuilder(this.project).getConsole();
    processHandler.startNotify();
    consoleView.attachToProcess(processHandler);
    final Operation operation = createOperation();
    final Disposable subscribe = Mono.fromCallable(() -> {
        try {
            operation.start();
            return this.executeSteps(processHandler, operation);
        } finally {
            // Once the operation is done, whether it succeeded or not,
            // setText should not throw a new exception
            processHandler.setProcessTerminatedHandler(RunProcessHandler.DO_NOTHING);
        }
    }).subscribeOn(Schedulers.boundedElastic()).subscribe((res) -> {
        this.sendTelemetry(operation, null);
        this.onSuccess(res, processHandler);
    }, (err) -> {
        err.printStackTrace();
        this.sendTelemetry(operation, err);
        this.onFail(err, processHandler);
    });
    processHandler.addProcessListener(new ProcessAdapter() {
        @Override
        public void processTerminated(@NotNull ProcessEvent event) {
            subscribe.dispose();
        }
    });
    return new DefaultExecutionResult(consoleView, processHandler);
}
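This state class pairs operation.start() on the worker thread with a private sendTelemetry helper invoked from both the success and error callbacks. A sketch of an equivalent hand-off that inlines the wrapper API instead of the private helper; doDeploy and the service/operation names are illustrative:

Operation operation = TelemetryManager.createOperation("my-service", "deploy");
Disposable subscription = Mono.fromCallable(() -> {
    operation.start();   // begin the span on the worker thread
    return doDeploy();   // hypothetical workload returning a result
}).subscribeOn(Schedulers.boundedElastic()).subscribe(res -> {
    operation.complete();                 // success: close the span
}, err -> {
    EventUtil.logError(operation, ErrorType.systemError, new Exception(err), null, null);
    operation.complete();                 // failure: log against the span, then close it
});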
Use of com.microsoft.azuretools.telemetrywrapper.Operation in project azure-tools-for-java by Microsoft.
The class SparkBatchJobRunner, method doExecute.
@Nullable
@Override
protected RunContentDescriptor doExecute(@NotNull RunProfileState state, @NotNull ExecutionEnvironment environment) throws ExecutionException {
    final SparkBatchRemoteRunProfileState submissionState = (SparkBatchRemoteRunProfileState) state;
    final SparkSubmitModel submitModel = submissionState.getSubmitModel();
    final Project project = submitModel.getProject();
    // Prepare the run table console view UI
    final SparkJobLogConsoleView jobOutputView = new SparkJobLogConsoleView(project);
    final String artifactPath = submitModel.getArtifactPath().orElse(null);
    assert artifactPath != null : "artifactPath should be checked in LivySparkBatchJobRunConfiguration::checkSubmissionConfigurationBeforeRun";
    // To address issue https://github.com/microsoft/azure-tools-for-java/issues/4021:
    // when the user clicks the rerun button, the legacy ctrlSubject would otherwise be reused even though
    // it already sent its "onComplete" message when the job finished the previous time. To avoid this,
    // we clone a new Spark batch job instance to re-initialize everything in the object.
    final ISparkBatchJob sparkBatch = submissionState.getSparkBatch().clone();
    final PublishSubject<SparkLogLine> ctrlSubject = (PublishSubject<SparkLogLine>) sparkBatch.getCtrlSubject();
    final SparkBatchJobRemoteProcess remoteProcess = new SparkBatchJobRemoteProcess(
            new IdeaSchedulers(project),
            sparkBatch,
            artifactPath,
            submitModel.getSubmissionParameter().getMainClassName(),
            ctrlSubject);
    final SparkBatchJobRunProcessHandler processHandler = new SparkBatchJobRunProcessHandler(
            remoteProcess, "Package and deploy the job to Spark cluster", null);
    // After attaching, the console view can read the process input streams and display them
    jobOutputView.attachToProcess(processHandler);
    remoteProcess.start();
    final Operation operation = environment.getUserData(TelemetryKeys.OPERATION);
    // IntelliJ constructs one AnAction instance per registered action class;
    // reusing that single instance keeps behaviours such as isEnabled() consistent
    final SparkBatchJobDisconnectAction disconnectAction =
            (SparkBatchJobDisconnectAction) ActionManager.getInstance().getAction("Actions.SparkJobDisconnect");
    disconnectAction.init(remoteProcess, operation);
    sendTelemetryForParameters(submitModel, operation);
    final ExecutionResult result = new DefaultExecutionResult(
            jobOutputView, processHandler, Separator.getInstance(), disconnectAction);
    submissionState.setExecutionResult(result);
    final ConsoleView consoleView = jobOutputView.getSecondaryConsoleView();
    submissionState.setConsoleView(consoleView);
    addConsoleViewFilter(remoteProcess.getSparkJob(), consoleView);
    submissionState.setRemoteProcessCtrlLogHandler(processHandler);
    ctrlSubject.subscribe(
            messageWithType -> { },
            err -> disconnectAction.setEnabled(false),
            () -> disconnectAction.setEnabled(false));
    return super.doExecute(state, environment);
}
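The disconnect action outlives doExecute, so the Operation is handed to it through init and the action can log against the same telemetry span whenever the user eventually clicks it. A sketch of that hand-off in a hypothetical action class; only the Operation wiring mirrors the runner above, and disconnect is a placeholder:

public class MyDisconnectAction extends AnAction {
    private Operation operation;

    public void init(Operation operation) {
        // Keep the span so events fired much later can still be attributed to it.
        this.operation = operation;
    }

    @Override
    public void actionPerformed(@NotNull AnActionEvent event) {
        try {
            disconnect();
        } catch (Exception e) {
            EventUtil.logError(operation, ErrorType.userError, e, null, null);
        } finally {
            operation.complete(); // close the span opened by the runner
        }
    }

    private void disconnect() {
        // hypothetical disconnect logic
    }
}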