use of com.google.cloud.scheduler.v1.Job in project java-docs-samples by GoogleCloudPlatform.
the class ListJobs method listJobs.
// Lists the jobs for a given location.
public static void listJobs(String projectId, String location) throws IOException {
    // Initialize client that will be used to send requests. This client only needs to be created
    // once, and can be reused for multiple requests.
    try (TranscoderServiceClient transcoderServiceClient = TranscoderServiceClient.create()) {
        var listJobsRequest =
            ListJobsRequest.newBuilder()
                .setParent(LocationName.of(projectId, location).toString())
                .build();
        // Send the list jobs request and process the response.
        TranscoderServiceClient.ListJobsPagedResponse response =
            transcoderServiceClient.listJobs(listJobsRequest);
        System.out.println("Jobs:");
        for (Job job : response.iterateAll()) {
            System.out.println(job.getName());
        }
    }
}
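A minimal caller for the snippet above might look like the following; the project ID and location values are placeholders, and TranscoderServiceClient authenticates through Application Default Credentials, which must be configured in the environment.

import java.io.IOException;

public class ListJobsExample {
    public static void main(String[] args) throws IOException {
        // Placeholder values; replace with your own project and location.
        String projectId = "my-project-id";
        String location = "us-central1";
        ListJobs.listJobs(projectId, location);
    }
}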
use of com.google.cloud.scheduler.v1.Job in project hugegraph-computer by hugegraph.
the class ComputerJobDeployer method reconcileComponent.
private void reconcileComponent(String namespace, ComputerJobComponent desired,
                                ComputerJobComponent observed) {
    ConfigMap desiredConfigMap = desired.configMap();
    ConfigMap observedConfigMap = observed.configMap();
    final KubernetesClient client;
    if (!Objects.equals(this.kubeClient.getNamespace(), namespace)) {
        client = this.kubeClient.inNamespace(namespace);
    } else {
        client = this.kubeClient;
    }
    if (desiredConfigMap == null && observedConfigMap != null) {
        client.configMaps().delete(observedConfigMap);
    } else if (desiredConfigMap != null && observedConfigMap == null) {
        KubeUtil.ignoreExists(() -> client.configMaps().create(desiredConfigMap));
    }
    if (desiredConfigMap != null && observedConfigMap != null) {
        LOG.debug("ConfigMap already exists, no action");
    }
    Job desiredMasterJob = desired.masterJob();
    Job observedMasterJob = observed.masterJob();
    if (desiredMasterJob == null && observedMasterJob != null) {
        client.batch().v1().jobs().delete(observedMasterJob);
    } else if (desiredMasterJob != null && observedMasterJob == null) {
        KubeUtil.ignoreExists(() -> client.batch().v1().jobs().create(desiredMasterJob));
    }
    if (desiredMasterJob != null && observedMasterJob != null) {
        LOG.debug("MasterJob already exists, no action");
    }
    Job desiredWorkerJob = desired.workerJob();
    Job observedWorkerJob = observed.workerJob();
    if (desiredWorkerJob == null && observedWorkerJob != null) {
        client.batch().v1().jobs().delete(observedWorkerJob);
    } else if (desiredWorkerJob != null && observedWorkerJob == null) {
        KubeUtil.ignoreExists(() -> client.batch().v1().jobs().create(desiredWorkerJob));
    }
    if (desiredWorkerJob != null && observedWorkerJob != null) {
        LOG.debug("WorkerJob already exists, no action");
    }
}
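The create calls above are wrapped in KubeUtil.ignoreExists so that reconciliation does not fail when another reconcile pass has already created the resource. A minimal sketch of such a helper, assuming it simply swallows the HTTP 409 Conflict that the fabric8 client raises on duplicate creation (an illustration, not the hugegraph-computer implementation):

import io.fabric8.kubernetes.client.KubernetesClientException;
import java.util.function.Supplier;

public final class KubeUtilSketch {
    // Runs the supplier and treats "already exists" (HTTP 409) as success.
    public static <T> T ignoreExists(Supplier<T> supplier) {
        try {
            return supplier.get();
        } catch (KubernetesClientException e) {
            if (e.getCode() == 409) {
                return null; // Resource already exists; nothing to do.
            }
            throw e;
        }
    }
}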
use of com.google.cloud.scheduler.v1.Job in project hugegraph-computer by hugegraph.
the class ComputerJobController method observeComponent.
private ComputerJobComponent observeComponent(HugeGraphComputerJob computerJob) {
    ComputerJobComponent observed = new ComputerJobComponent();
    observed.computerJob(computerJob);
    String namespace = computerJob.getMetadata().getNamespace();
    String crName = computerJob.getMetadata().getName();
    String masterName = KubeUtil.masterJobName(crName);
    Job master = this.getResourceByName(namespace, masterName, Job.class);
    observed.masterJob(master);
    if (master != null) {
        List<Pod> masterPods = this.getPodsByJob(master);
        observed.masterPods(masterPods);
    }
    String workerName = KubeUtil.workerJobName(crName);
    Job worker = this.getResourceByName(namespace, workerName, Job.class);
    observed.workerJob(worker);
    if (worker != null) {
        List<Pod> workerPods = this.getPodsByJob(worker);
        observed.workerPods(workerPods);
    }
    String configMapName = KubeUtil.configMapName(crName);
    ConfigMap configMap = this.getResourceByName(namespace, configMapName, ConfigMap.class);
    observed.configMap(configMap);
    return observed;
}
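getResourceByName and getPodsByJob are helpers of the controller and are not shown here. With the fabric8 client, Pods belonging to a Job are usually selected through the "job-name" label that the Job controller stamps on its Pods; a possible sketch of getPodsByJob under that assumption (kubeClient is a placeholder field name, not necessarily the hugegraph-computer one):

// Hypothetical sketch; assumes an io.fabric8.kubernetes.client.KubernetesClient field named kubeClient.
private List<Pod> getPodsByJob(Job job) {
    String namespace = job.getMetadata().getNamespace();
    String jobName = job.getMetadata().getName();
    // Pods created for a Job carry the "job-name" label set to the Job's name.
    return this.kubeClient.pods()
                          .inNamespace(namespace)
                          .withLabel("job-name", jobName)
                          .list()
                          .getItems();
}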
use of com.google.cloud.scheduler.v1.Job in project strimzi-kafka-operator by strimzi.
the class JobUtils method logCurrentJobStatus.
/**
 * Logs the current status of the Job and of its Pods.
 * @param jobName - name of the Job whose status should be logged
 * @param namespace - namespace/project where the Job is running
 */
public static void logCurrentJobStatus(String jobName, String namespace) {
    Job currentJob = kubeClient().getJob(namespace, jobName);
    if (currentJob != null && currentJob.getStatus() != null) {
        List<String> log = new ArrayList<>(asList(Constants.JOB, " status:\n"));
        List<JobCondition> conditions = currentJob.getStatus().getConditions();
        log.add("\tActive: " + currentJob.getStatus().getActive());
        log.add("\n\tFailed: " + currentJob.getStatus().getFailed());
        log.add("\n\tReady: " + currentJob.getStatus().getReady());
        log.add("\n\tSucceeded: " + currentJob.getStatus().getSucceeded());
        if (conditions != null) {
            List<String> conditionList = new ArrayList<>();
            for (JobCondition condition : conditions) {
                if (condition.getMessage() != null) {
                    conditionList.add("\t\tType: " + condition.getType() + "\n");
                    conditionList.add("\t\tMessage: " + condition.getMessage() + "\n");
                }
            }
            if (!conditionList.isEmpty()) {
                log.add("\n\tConditions:\n");
                log.addAll(conditionList);
            }
        }
        log.add("\n\nPods with conditions and messages:\n\n");
        for (Pod pod : kubeClient().namespace(currentJob.getMetadata().getNamespace())
                .listPodsByPrefixInName(jobName)) {
            log.add(pod.getMetadata().getName() + ":");
            List<String> podConditions = new ArrayList<>();
            for (PodCondition podCondition : pod.getStatus().getConditions()) {
                if (podCondition.getMessage() != null) {
                    podConditions.add("\n\tType: " + podCondition.getType() + "\n");
                    podConditions.add("\tMessage: " + podCondition.getMessage() + "\n");
                }
            }
            if (podConditions.isEmpty()) {
                log.add("\n\t<EMPTY>");
            } else {
                log.addAll(podConditions);
            }
            log.add("\n\n");
        }
        LOGGER.info("{}", String.join("", log).strip());
    }
}
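A typical call site is a test helper that dumps the Job status before failing an assertion or timing out; a minimal hypothetical usage with placeholder names:

// Hypothetical usage: log the Job status when a test Job does not complete in time.
String namespace = "my-namespace";   // placeholder
String jobName = "producer-job";     // placeholder
JobUtils.logCurrentJobStatus(jobName, namespace);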
use of com.google.cloud.scheduler.v1.Job in project strimzi-kafka-operator by strimzi.
the class OauthAbstractST method tearDownEach.
@AfterEach
void tearDownEach(ExtensionContext extensionContext) throws Exception {
    List<Job> clusterJobList = kubeClient().getJobList().getItems().stream()
        .filter(job -> job.getMetadata().getName()
            .contains(mapWithClusterNames.get(extensionContext.getDisplayName())))
        .collect(Collectors.toList());
    for (Job job : clusterJobList) {
        LOGGER.info("Deleting {} job", job.getMetadata().getName());
        JobUtils.deleteJobWithWait(job.getMetadata().getNamespace(), job.getMetadata().getName());
    }
}
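JobUtils.deleteJobWithWait is not shown in this section. A plausible sketch, assuming a fabric8 KubernetesClient and a simple poll loop (names and timeout are assumptions, not the Strimzi implementation):

// Hypothetical sketch of a delete-and-wait helper; not the Strimzi implementation.
static void deleteJobWithWait(KubernetesClient client, String namespace, String name)
        throws InterruptedException {
    client.batch().v1().jobs().inNamespace(namespace).withName(name).delete();
    // Poll until the Job is gone or a timeout is reached.
    long deadline = System.currentTimeMillis() + 60_000;
    while (client.batch().v1().jobs().inNamespace(namespace).withName(name).get() != null) {
        if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("Timed out waiting for Job " + name + " to be deleted");
        }
        Thread.sleep(1_000);
    }
}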