Use of com.google.cloud.dataproc.v1beta2.Job in project strimzi-kafka-operator by strimzi: class OauthAbstractST, method tearDownEach.
/**
 * Per-test cleanup: deletes every Job whose name contains the cluster name
 * registered for the finished test, waiting for each deletion to complete.
 *
 * @param extensionContext JUnit context used to look up the test's cluster name
 * @throws Exception propagated from the deletion helper
 */
@AfterEach
void tearDownEach(ExtensionContext extensionContext) throws Exception {
    // Hoist the lookup out of the stream filter and guard against a missing
    // mapping: the original called contains(null) and threw an NPE, aborting
    // the rest of the teardown for tests with no registered cluster name.
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    if (clusterName == null) {
        LOGGER.info("No cluster name mapped for test {}, skipping job cleanup", extensionContext.getDisplayName());
        return;
    }
    List<Job> clusterJobList = kubeClient().getJobList().getItems().stream()
        .filter(job -> job.getMetadata().getName().contains(clusterName))
        .collect(Collectors.toList());
    for (Job job : clusterJobList) {
        LOGGER.info("Deleting {} job", job.getMetadata().getName());
        JobUtils.deleteJobWithWait(job.getMetadata().getNamespace(), job.getMetadata().getName());
    }
}
Use of com.google.cloud.dataproc.v1beta2.Job in project hugegraph-computer by hugegraph: class ComputerJobController, method observeComponent.
/**
 * Takes a snapshot of the cluster resources belonging to the given compute
 * job: the master and worker Jobs (plus their pods, when the Job exists)
 * and the shared ConfigMap. Absent resources are recorded as null.
 *
 * @param computerJob the custom resource whose components are observed
 * @return the observed component snapshot
 */
private ComputerJobComponent observeComponent(HugeGraphComputerJob computerJob) {
    ComputerJobComponent component = new ComputerJobComponent();
    component.computerJob(computerJob);

    String ns = computerJob.getMetadata().getNamespace();
    String crName = computerJob.getMetadata().getName();

    // Master Job and, when it exists, the pods it spawned.
    Job masterJob = this.getResourceByName(ns, KubeUtil.masterJobName(crName), Job.class);
    component.masterJob(masterJob);
    if (masterJob != null) {
        component.masterPods(this.getPodsByJob(masterJob));
    }

    // Worker Job and, when it exists, the pods it spawned.
    Job workerJob = this.getResourceByName(ns, KubeUtil.workerJobName(crName), Job.class);
    component.workerJob(workerJob);
    if (workerJob != null) {
        component.workerPods(this.getPodsByJob(workerJob));
    }

    // Shared ConfigMap for the job (null when not created yet).
    component.configMap(this.getResourceByName(ns, KubeUtil.configMapName(crName), ConfigMap.class));
    return component;
}
Use of com.google.cloud.dataproc.v1beta2.Job in project hugegraph-computer by hugegraph: class ComputerJobDeployer, method reconcileComponent.
/**
 * Brings the cluster state of one compute job in line with the desired spec.
 * Each component (ConfigMap, master Job, worker Job) is deleted when it is
 * observed but no longer desired, created when desired but absent, and left
 * untouched when both sides agree.
 *
 * @param namespace namespace the components live in
 * @param desired   the components the spec asks for
 * @param observed  the components currently present in the cluster
 */
private void reconcileComponent(String namespace, ComputerJobComponent desired, ComputerJobComponent observed) {
    // Re-scope the client only when the target namespace differs from the
    // client's default namespace.
    final KubernetesClient client;
    if (!Objects.equals(this.kubeClient.getNamespace(), namespace)) {
        client = this.kubeClient.inNamespace(namespace);
    } else {
        client = this.kubeClient;
    }

    // The create/delete/no-op decision was duplicated three times; the shared
    // logic now lives in reconcileResource below.
    this.reconcileResource("ConfigMap", desired.configMap(), observed.configMap(),
            resource -> client.configMaps().delete(resource),
            resource -> KubeUtil.ignoreExists(() -> client.configMaps().create(resource)));

    this.reconcileResource("MasterJob", desired.masterJob(), observed.masterJob(),
            resource -> client.batch().v1().jobs().delete(resource),
            resource -> KubeUtil.ignoreExists(() -> client.batch().v1().jobs().create(resource)));

    this.reconcileResource("WorkerJob", desired.workerJob(), observed.workerJob(),
            resource -> client.batch().v1().jobs().delete(resource),
            resource -> KubeUtil.ignoreExists(() -> client.batch().v1().jobs().create(resource)));
}

/**
 * Applies the shared reconcile decision for one resource kind: delete when
 * only observed, create when only desired, log and do nothing when both exist.
 * Consumer is fully qualified to avoid requiring a new import.
 *
 * @param kind     resource kind name, used for the no-action debug log
 * @param desired  desired resource, or null when none is wanted
 * @param observed observed resource, or null when none exists
 * @param deleter  deletes the observed resource
 * @param creator  creates the desired resource (ignoring already-exists)
 */
private <T> void reconcileResource(String kind, T desired, T observed,
                                   java.util.function.Consumer<T> deleter,
                                   java.util.function.Consumer<T> creator) {
    if (desired == null && observed != null) {
        deleter.accept(observed);
    } else if (desired != null && observed == null) {
        creator.accept(desired);
    } else if (desired != null) {
        // Both present: nothing to reconcile for this component.
        LOG.debug("{} already exists, no action", kind);
    }
}
Use of com.google.cloud.dataproc.v1beta2.Job in project pravega by pravega: class RemoteSequential, method isTestRunning.
/**
 * Reports whether a metronome job is still running: true while the job has
 * no history yet, or while its history records neither a success nor a
 * failure.
 *
 * @param jobId  id of the job to poll
 * @param client metronome client used to fetch the job status
 * @return true when the job has not yet finished a run
 */
private boolean isTestRunning(final String jobId, final Metronome client) {
    Job status = client.getJob(jobId);
    if (status.getHistory() == null) {
        return true;
    }
    // A completed run shows up as a non-zero success or failure count.
    return status.getHistory().getSuccessCount() == 0
            && status.getHistory().getFailureCount() == 0;
}
Use of com.google.cloud.dataproc.v1beta2.Job in project pravega by pravega: class RemoteSequential, method startTestExecution.
/**
 * Schedules the given test method as a metronome job on the cluster and
 * returns a future that completes when the remote run finishes. The future
 * completes exceptionally when the job cannot be triggered or when the
 * remote run records a failure; the job is always deleted afterwards.
 *
 * @param testMethod the test method to execute remotely
 * @return a future tracking the remote execution
 */
@Override
public CompletableFuture<Void> startTestExecution(Method testMethod) {
    Exceptions.handleInterrupted(() -> TimeUnit.SECONDS.sleep(60));
    // This will be removed once issue https://github.com/pravega/pravega/issues/1665 is resolved.
    log.debug("Starting test execution for method: {}", testMethod);
    final Metronome client = AuthEnabledMetronomeClient.getClient();
    String className = testMethod.getDeclaringClass().getName();
    String methodName = testMethod.getName();
    // All jobIds should be lowercase for metronome. Locale.ROOT avoids
    // locale-sensitive case mapping (e.g. the Turkish dotless-i).
    String jobId = (methodName + ".testJob").toLowerCase(java.util.Locale.ROOT);
    return CompletableFuture.runAsync(() -> {
        client.createJob(newJob(jobId, className, methodName));
        Response response = client.triggerJobRun(jobId);
        if (response.status() != CREATED.getStatusCode()) {
            throw new TestFrameworkException(TestFrameworkException.Type.ConnectionFailed, "Error while starting " + "test " + testMethod);
        } else {
            // Parameterized logging instead of eager string concatenation.
            log.info("Created job succeeded with: {}", response);
        }
    }).thenCompose(v2 -> waitForJobCompletion(jobId, client)).<Void>thenApply(v1 -> {
        if (client.getJob(jobId).getHistory().getFailureCount() != 0) {
            throw new AssertionError("Test failed, detailed logs can be found at " + "https://MasterIP/mesos, under metronome framework tasks. MethodName: " + methodName);
        }
        return null;
    }).whenComplete((v, ex) -> {
        // Delete the job once execution is complete, regardless of outcome.
        deleteJob(jobId, client);
        if (ex != null) {
            // Pass the throwable as the final argument so SLF4J logs the
            // stack trace; the original dropped it entirely.
            log.error("Error while executing the test. ClassName: {}, MethodName: {}", className, methodName, ex);
        }
    });
}
Aggregations