Use of com.microsoft.azuretools.azurecommons.helpers.NotNull in project azure-tools-for-java by Microsoft.
The class IDEHelperImpl, method buildArtifact.
@NotNull
@Override
public ListenableFuture<String> buildArtifact(@NotNull ProjectDescriptor projectDescriptor,
                                              @NotNull ArtifactDescriptor artifactDescriptor) {
    try {
        final Project project = findOpenProject(projectDescriptor);
        final Artifact artifact = findProjectArtifact(project, artifactDescriptor);
        final SettableFuture<String> future = SettableFuture.create();
        Futures.addCallback(buildArtifact(project, artifact, false), new FutureCallback<>() {
            @Override
            public void onSuccess(@Nullable Boolean succeeded) {
                if (succeeded != null && succeeded) {
                    future.set(artifact.getOutputFilePath());
                } else {
                    future.setException(new AzureCmdException("An error occurred while building the artifact"));
                }
            }

            @Override
            public void onFailure(Throwable throwable) {
                // Unwrap ExecutionException so the root cause is reported.
                if (throwable instanceof ExecutionException) {
                    future.setException(new AzureCmdException("An error occurred while building the artifact",
                            throwable.getCause()));
                } else {
                    future.setException(new AzureCmdException("An error occurred while building the artifact",
                            throwable));
                }
            }
        }, MoreExecutors.directExecutor());
        return future;
    } catch (final AzureCmdException e) {
        return Futures.immediateFailedFuture(e);
    }
}
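A minimal sketch of how a caller might consume the returned future; ideHelper, projectDescriptor, and artifactDescriptor are assumed to be available from the caller's context and are not part of the original snippet.

ListenableFuture<String> artifactFuture = ideHelper.buildArtifact(projectDescriptor, artifactDescriptor);
Futures.addCallback(artifactFuture, new FutureCallback<String>() {
    @Override
    public void onSuccess(String outputFilePath) {
        // On success the future resolves to the built artifact's output path.
        System.out.println("Artifact built: " + outputFilePath);
    }

    @Override
    public void onFailure(Throwable t) {
        // Failures surface as the AzureCmdException set on the future above.
        System.err.println("Build failed: " + t.getMessage());
    }
}, MoreExecutors.directExecutor());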
Use of com.microsoft.azuretools.azurecommons.helpers.NotNull in project azure-tools-for-java by Microsoft.
The class ContainerRegistryMvpModel, method getContainerRegistry.
/**
 * Get an Azure Container Registry (ACR) by its resource ID.
 */
@NotNull
public Registry getContainerRegistry(String sid, String id) throws Exception {
    Azure azure = AuthMethodManager.getInstance().getAzureClient(sid);
    Registries registries = azure.containerRegistries();
    if (registries == null) {
        throw new Exception(CANNOT_GET_REGISTRY + id);
    }
    Registry registry = registries.getById(id);
    if (registry == null) {
        throw new Exception(CANNOT_GET_REGISTRY + id);
    }
    return registry;
}
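A hedged usage sketch follows; the subscription ID and registry resource ID below are illustrative placeholders in the standard Azure resource-ID format, and the loginServerUrl() accessor is assumed from the Azure management SDK's Registry interface.

String subscriptionId = "00000000-0000-0000-0000-000000000000"; // placeholder
String registryId = "/subscriptions/" + subscriptionId
        + "/resourceGroups/my-rg/providers/Microsoft.ContainerRegistry/registries/myRegistry"; // placeholder
try {
    Registry registry = containerRegistryMvpModel.getContainerRegistry(subscriptionId, registryId);
    System.out.println("Login server: " + registry.loginServerUrl());
} catch (Exception e) {
    // Thrown when either the registries client or the registry itself cannot be resolved.
    System.err.println(e.getMessage());
}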
Use of com.microsoft.azuretools.azurecommons.helpers.NotNull in project azure-tools-for-java by Microsoft.
The class CosmosSparkBatchJob, method deploy.
@NotNull
@Override
public Observable<? extends ISparkBatchJob> deploy(@NotNull String artifactPath) {
    return getCosmosSparkCluster().flatMap(cluster -> {
        try {
            if (cluster.getStorageAccount() == null) {
                // TODO: May use an interactive session to upload
                return Observable.empty();
            }
            File localFile = new File(artifactPath);
            URI remoteUri = URI.create(cluster.getStorageAccount().getDefaultContainerOrRootPath())
                    .resolve("SparkSubmission/")
                    .resolve(JobUtils.getFormatPathByDate() + "/")
                    .resolve(localFile.getName());
            ctrlInfo(String.format("Begin uploading file %s to Azure Data Lake store %s ...", artifactPath, remoteUri));
            getSubmissionParameter().setFilePath(remoteUri.toString());
            return cluster.uploadToStorage(localFile, remoteUri);
        } catch (Exception e) {
            return Observable.error(e);
        }
    }).doOnNext(size -> ctrlInfo(String.format("Uploaded %d bytes to Azure Data Lake store successfully.", size)))
      .map(path -> this);
}
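A short usage sketch, assuming an already-configured job instance (cosmosSparkBatchJob here is hypothetical) and RxJava 1 style subscription; the artifact path is a placeholder.

cosmosSparkBatchJob.deploy("/path/to/my-spark-job.jar")
        .subscribe(
                job -> System.out.println("Artifact deployed; job is ready to submit."),
                err -> System.err.println("Deploy failed: " + err.getMessage()));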
Use of com.microsoft.azuretools.azurecommons.helpers.NotNull in project azure-tools-for-java by Microsoft.
The class CosmosServerlessSparkBatchJob, method getSubmissionLog.
@NotNull
@Override
public Observable<SparkLogLine> getSubmissionLog() {
    final ImmutableSet<String> ignoredEmptyLines = ImmutableSet.of("stdout:", "stderr:", "yarn diagnostics:");
    final int GET_LIVY_URL_REPEAT_DELAY_MILLISECONDS = 3000;
    final int MAX_LOG_LINES_PER_REQUEST = 128;
    final int GET_LOG_REPEAT_DELAY_MILLISECONDS = 1000;
    // We need to call getSparkBatchJobRequest() repeatedly, since the "livyServerApi" field does not
    // always exist in the response; it only appears for a while, and before then we can't read it.
    ctrlInfo("Trying to get Livy URL...");
    return getSparkBatchJobRequest()
            .flatMap(batchResp -> getJobSchedulerState(batchResp) == null
                    ? Observable.error(new IOException("Failed to get scheduler state of the job."))
                    : Observable.just(batchResp))
            .retryWhen(err -> err
                    .zipWith(Observable.range(1, getRetriesMax()), (n, i) -> i)
                    .delay(getDelaySeconds(), TimeUnit.SECONDS))
            .repeatWhen(ob -> ob.delay(GET_LIVY_URL_REPEAT_DELAY_MILLISECONDS, TimeUnit.MILLISECONDS))
            .takeUntil(batchResp -> isJobEnded(batchResp) || StringUtils.isNotEmpty(getLivyAPI(batchResp)))
            .filter(batchResp -> isJobEnded(batchResp) || StringUtils.isNotEmpty(getLivyAPI(batchResp)))
            .flatMap(job -> {
                if (isJobEnded(job)) {
                    final String jobState = getJobState(job);
                    final String schedulerState = getJobSchedulerState(job);
                    final String message = String.format("Job scheduler state: %s. Job running state: %s.", schedulerState, jobState);
                    return Observable.just(new SparkLogLine(TOOL, Info, message));
                } else {
                    return Observable.just(job)
                            .doOnNext(batchResp -> {
                                ctrlInfo("Successfully got Livy URL: " + batchResp.properties().livyServerAPI());
                                ctrlInfo("Trying to retrieve Livy submission logs...");
                                // Testing shows the batch ID isn't provided until the job is in the running state.
                                // However, since only one Spark job runs on the cluster, the batch ID should always be 0.
                                setBatchId(0);
                            })
                            .map(batchResp -> batchResp.properties().livyServerAPI())
                            .flatMap(livyUrl -> Observable
                                    .defer(() -> getSubmissionLogRequest(livyUrl, getBatchId(), getLogStartIndex(), MAX_LOG_LINES_PER_REQUEST))
                                    .map(sparkJobLog -> Optional.ofNullable(sparkJobLog.getLog())
                                            .orElse(Collections.<String>emptyList()))
                                    .doOnNext(logs -> setLogStartIndex(getLogStartIndex() + logs.size()))
                                    .map(logs -> logs.stream()
                                            .filter(logLine -> !ignoredEmptyLines.contains(logLine.trim().toLowerCase()))
                                            .collect(Collectors.toList()))
                                    .flatMap(logLines -> {
                                        if (logLines.size() > 0) {
                                            return Observable.just(Triple.of(logLines,
                                                    SparkBatchJobState.STARTING.toString(),
                                                    SchedulerState.SCHEDULED.toString()));
                                        } else {
                                            return getSparkBatchJobRequest()
                                                    .map(batchResp -> Triple.of(logLines, getJobState(batchResp), getJobSchedulerState(batchResp)));
                                        }
                                    })
                                    .onErrorResumeNext(errors -> getSparkBatchJobRequest()
                                            .delay(getDelaySeconds(), TimeUnit.SECONDS)
                                            .map(batchResp -> Triple.of(new ArrayList<>(), getJobState(batchResp), getJobSchedulerState(batchResp))))
                                    .repeatWhen(ob -> ob.delay(GET_LOG_REPEAT_DELAY_MILLISECONDS, TimeUnit.MILLISECONDS))
                                    .takeUntil(logAndStatesTriple -> {
                                        String jobRunningState = logAndStatesTriple.getMiddle();
                                        String jobSchedulerState = logAndStatesTriple.getRight();
                                        return jobRunningState != null && !jobRunningState.equalsIgnoreCase(SparkBatchJobState.STARTING.toString())
                                                || jobSchedulerState != null && jobSchedulerState.equalsIgnoreCase(SchedulerState.ENDED.toString());
                                    })
                                    .flatMap(logAndStatesTriple -> {
                                        final String jobRunningState = logAndStatesTriple.getMiddle();
                                        final String jobSchedulerState = logAndStatesTriple.getRight();
                                        if (jobRunningState != null && !jobRunningState.equalsIgnoreCase(SparkBatchJobState.STARTING.toString())
                                                || jobSchedulerState != null && jobSchedulerState.equalsIgnoreCase(SchedulerState.ENDED.toString())) {
                                            // The job has left the STARTING state (or the scheduler ended), so stop streaming logs.
                                            final String message = String.format("Job scheduler state: %s. Job running state: %s.", jobSchedulerState, jobRunningState);
                                            return Observable.just(new SparkLogLine(TOOL, Info, message));
                                        } else {
                                            return Observable.from(logAndStatesTriple.getLeft())
                                                    .map(line -> new SparkLogLine(LIVY, Log, line));
                                        }
                                    }));
                }
            });
}
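A usage sketch for streaming the submission log to the console, assuming an already-configured job instance (serverlessSparkBatchJob here is hypothetical); it relies on SparkLogLine's toString() rather than any specific accessor, since none is shown in the snippet.

serverlessSparkBatchJob.getSubmissionLog()
        .subscribe(
                logLine -> System.out.println(logLine), // each emitted item is one log line
                err -> System.err.println("Failed to fetch submission log: " + err.getMessage()),
                () -> System.out.println("Submission log stream completed."));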
Use of com.microsoft.azuretools.azurecommons.helpers.NotNull in project azure-tools-for-java by Microsoft.
The class SparkBatchSubmission, method getUserAgentPerRequest.
/**
 * Generate a User-Agent header value for an HTTP request, tagged with a request ID.
 *
 * @param isMapToInstallID true to record the mapping between the request ID and the installation ID
 * @return the unique User-Agent string
 */
@NotNull
private String getUserAgentPerRequest(boolean isMapToInstallID) {
    String loadingClass = SparkBatchSubmission.class.getClassLoader().getClass().getName().toLowerCase();
    String userAgentSource = loadingClass.contains("intellij") ? "Azure Toolkit for IntelliJ "
            : (loadingClass.contains("eclipse") ? "Azure Toolkit for Eclipse " : "Azure HDInsight Java SDK ");
    // Prefer the Application Insights session ID; fall back to a random UUID.
    String requestId = AppInsightsClient.getConfigurationSessionId() == null
            ? UUID.randomUUID().toString()
            : AppInsightsClient.getConfigurationSessionId();
    if (isMapToInstallID) {
        new AppInsightsHttpRequestInstallIdMapRecord(requestId, getInstallationID()).post();
    }
    return userAgentSource + requestId;
}
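Since the method is private, a caller-side example isn't applicable; the sketch below only illustrates, with Apache HttpClient, how a value in this format would typically be attached to an outgoing request. The endpoint URL is a placeholder, and this wiring is an assumption, not the class's actual request code.

// Illustrative only: attach a User-Agent in the format produced above to a request.
HttpGet request = new HttpGet("https://example-livy-endpoint/batches"); // placeholder URL
request.setHeader("User-Agent", "Azure Toolkit for IntelliJ " + UUID.randomUUID());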