Example 21 with Job

use of com.google.cloud.scheduler.v1beta1.Job in project java-scheduler by googleapis.

the class ITSystemTest method runJobTest.

@Test
public void runJobTest() throws Exception {
    RunJobRequest jobRequest = RunJobRequest.newBuilder().setName(JOB_NAME).build();
    ApiFuture<Job> jobFuture = client.runJobCallable().futureCall(jobRequest);
    // ApiFuture.get() blocks until the call completes, so no busy-wait loop is needed.
    assertJobDetails(jobFuture.get());
}
Also used : RunJobRequest(com.google.cloud.scheduler.v1beta1.RunJobRequest) Job(com.google.cloud.scheduler.v1beta1.Job) Test(org.junit.Test)
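
For comparison, a sketch of the blocking convenience method, which returns the Job directly instead of an ApiFuture (the fully qualified job name below is hypothetical):

import com.google.cloud.scheduler.v1beta1.CloudSchedulerClient;
import com.google.cloud.scheduler.v1beta1.Job;

public class RunJobSketch {
    public static void main(String[] args) throws Exception {
        try (CloudSchedulerClient client = CloudSchedulerClient.create()) {
            // runJob blocks until the RPC completes and returns the Job.
            Job job = client.runJob(
                "projects/my-project/locations/us-central1/jobs/test-job");
            System.out.println(job.getState()); // e.g. ENABLED
        }
    }
}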

Example 22 with Job

use of com.google.cloud.scheduler.v1.Job in project java-scheduler by googleapis.

the class ITSystemTest method getJobTest.

@Test
public void getJobTest() {
    Job job = client.getJob(JOB_NAME);
    assertJobDetails(job);
}
Also used : Job(com.google.cloud.scheduler.v1.Job) Test(org.junit.Test)
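
JOB_NAME in these tests is a fully qualified resource name; a minimal standalone sketch of the same lookup, using the client library's JobName helper (project, location, and job id below are hypothetical):

import com.google.cloud.scheduler.v1.CloudSchedulerClient;
import com.google.cloud.scheduler.v1.Job;
import com.google.cloud.scheduler.v1.JobName;

public class GetJobSketch {
    public static void main(String[] args) throws Exception {
        // Builds "projects/my-project/locations/us-central1/jobs/test-job".
        String jobName = JobName.of("my-project", "us-central1", "test-job").toString();
        try (CloudSchedulerClient client = CloudSchedulerClient.create()) {
            Job job = client.getJob(jobName); // blocking unary call
            System.out.println(job.getName());
        }
    }
}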

Example 23 with Job

use of io.fabric8.kubernetes.api.model.batch.v1.Job in project cmcc-operator by T-Systems-MMS.

the class DefaultTargetState method isReady.

private boolean isReady(String jobName) {
    jobName = concatOptional(cmcc.getSpec().getDefaults().getNamePrefix(), jobName);
    Job job = kubernetesClient.batch().v1().jobs().inNamespace(cmcc.getMetadata().getNamespace()).withName(jobName).get();
    boolean ready = job != null && job.getStatus() != null && job.getStatus().getSucceeded() != null && job.getStatus().getSucceeded() > 0;
    if (ready) {
        log.debug("job {}: has succeeded", jobName);
    } else {
        log.debug("job {}: waiting for successful completion", jobName);
    }
    return ready;
}
Also used : Job(io.fabric8.kubernetes.api.model.batch.v1.Job)
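
A sketch of a blocking alternative using fabric8's waitUntilCondition, which lets the client wait for the success condition instead of callers polling isReady in a loop (the helper name and timeout below are hypothetical):

import io.fabric8.kubernetes.api.model.batch.v1.Job;
import io.fabric8.kubernetes.client.KubernetesClient;
import java.util.concurrent.TimeUnit;

class JobWait {
    // Blocks until the Job reports at least one succeeded pod, or times out.
    static Job awaitSuccess(KubernetesClient client, String namespace, String jobName) {
        return client.batch().v1().jobs()
            .inNamespace(namespace)
            .withName(jobName)
            .waitUntilCondition(
                job -> job != null && job.getStatus() != null
                    && job.getStatus().getSucceeded() != null
                    && job.getStatus().getSucceeded() > 0,
                10, TimeUnit.MINUTES);
    }
}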

Example 24 with Job

use of io.fabric8.kubernetes.api.model.batch.v1.Job in project stackgres by ongres.

the class DbOpsJob method createJob.

@Override
public Job createJob(StackGresDbOpsContext context) {
    final StackGresDbOps dbOps = context.getSource();
    final String retries = String.valueOf(DbOpsUtil.getCurrentRetry(dbOps));
    List<EnvVar> runEnvVars = getRunEnvVars(context);
    List<EnvVar> setResultEnvVars = getSetResultEnvVars(context);
    final String namespace = dbOps.getMetadata().getNamespace();
    final String name = dbOps.getMetadata().getName();
    final Map<String, String> labels = dbOpsLabelFactory.dbOpsPodLabels(context.getSource());
    final String timeout = DbOpsUtil.getTimeout(dbOps);
    return new JobBuilder()
        .withNewMetadata()
        .withNamespace(namespace)
        .withName(jobName(dbOps))
        .withLabels(labels)
        .endMetadata()
        .withNewSpec()
        .withBackoffLimit(0)
        .withCompletions(1)
        .withParallelism(1)
        .withNewTemplate()
        .withNewMetadata()
        .withNamespace(namespace)
        .withName(jobName(dbOps))
        .withLabels(labels)
        .endMetadata()
        .withNewSpec()
        .withSecurityContext(podSecurityFactory.createResource(context))
        .withRestartPolicy("Never")
        .withServiceAccountName(DbOpsRole.roleName(context))
        .withInitContainers(new ContainerBuilder()
            .withName("set-dbops-running")
            .withImage(getSetResultImage())
            .withImagePullPolicy("IfNotPresent")
            .withEnv(ImmutableList.<EnvVar>builder()
                .addAll(clusterEnvironmentVariables.listResources(context))
                .add(
                    new EnvVarBuilder().withName("OP_NAME")
                        .withValue(dbOps.getSpec().getOp()).build(),
                    new EnvVarBuilder().withName("NORMALIZED_OP_NAME")
                        .withValue(UPPERCASE_PATTERN.matcher(dbOps.getSpec().getOp())
                            .replaceAll(result -> " " + result.group(1).toLowerCase(Locale.US)))
                        .build(),
                    new EnvVarBuilder().withName("KEBAB_OP_NAME")
                        .withValue(UPPERCASE_PATTERN.matcher(dbOps.getSpec().getOp())
                            .replaceAll(result -> "-" + result.group(1).toLowerCase(Locale.US)))
                        .build(),
                    new EnvVarBuilder().withName("CLUSTER_NAMESPACE").withValue(namespace).build(),
                    new EnvVarBuilder().withName("DB_OPS_NAME").withValue(name).build(),
                    new EnvVarBuilder().withName("DB_OPS_CRD_NAME")
                        .withValue(CustomResource.getCRDName(StackGresDbOps.class)).build(),
                    new EnvVarBuilder().withName("CURRENT_RETRY").withValue(retries).build())
                .addAll(Seq.of(DbOpsStatusCondition.values())
                    .map(c -> new EnvVarBuilder()
                        .withName("CONDITION_" + c.name())
                        .withValue(conditions.get(c))
                        .build())
                    .toList())
                .build())
            .withCommand("/bin/sh", "-ex",
                ClusterStatefulSetPath.LOCAL_BIN_SET_DBOPS_RUNNING_SH_PATH.path())
            .withVolumeMounts(
                ClusterStatefulSetVolumeConfig.TEMPLATES.volumeMount(context,
                    volumeMountBuilder -> volumeMountBuilder
                        .withSubPath(ClusterStatefulSetPath.LOCAL_BIN_SET_DBOPS_RUNNING_SH_PATH.filename())
                        .withMountPath(ClusterStatefulSetPath.LOCAL_BIN_SET_DBOPS_RUNNING_SH_PATH.path())
                        .withReadOnly(true)),
                ClusterStatefulSetVolumeConfig.TEMPLATES.volumeMount(context,
                    volumeMountBuilder -> volumeMountBuilder
                        .withSubPath(ClusterStatefulSetPath.LOCAL_BIN_SHELL_UTILS_PATH.filename())
                        .withMountPath(ClusterStatefulSetPath.LOCAL_BIN_SHELL_UTILS_PATH.path())
                        .withReadOnly(true)))
            .build())
        .withContainers(
            new ContainerBuilder()
                .withName("run-dbops")
                .withImage(getRunImage(context))
                .withImagePullPolicy("IfNotPresent")
                .withEnv(ImmutableList.<EnvVar>builder()
                    .addAll(clusterEnvironmentVariables.listResources(context))
                    .add(
                        new EnvVarBuilder().withName("OP_NAME")
                            .withValue(dbOps.getSpec().getOp()).build(),
                        new EnvVarBuilder().withName("EXCLUSIVE_OP")
                            .withValue(String.valueOf(isExclusiveOp())).build(),
                        new EnvVarBuilder().withName("NORMALIZED_OP_NAME")
                            .withValue(UPPERCASE_PATTERN.matcher(dbOps.getSpec().getOp())
                                .replaceAll(result -> " " + result.group(1).toLowerCase(Locale.US)))
                            .build(),
                        new EnvVarBuilder().withName("KEBAB_OP_NAME")
                            .withValue(UPPERCASE_PATTERN.matcher(dbOps.getSpec().getOp())
                                .replaceAll(result -> "-" + result.group(1).toLowerCase(Locale.US)))
                            .build(),
                        new EnvVarBuilder().withName("RUN_SCRIPT_PATH")
                            .withValue(Optional.ofNullable(getRunScript())
                                .map(ClusterStatefulSetPath::path).orElse(""))
                            .build(),
                        new EnvVarBuilder().withName("TIMEOUT").withValue(timeout).build())
                    .addAll(runEnvVars)
                    .build())
                .withCommand("/bin/sh", "-ex",
                    ClusterStatefulSetPath.LOCAL_BIN_RUN_DBOPS_SH_PATH.path())
                .withVolumeMounts(
                    ClusterStatefulSetVolumeConfig.SHARED.volumeMount(context),
                    ClusterStatefulSetVolumeConfig.TEMPLATES.volumeMount(context,
                        volumeMountBuilder -> volumeMountBuilder
                            .withSubPath(ClusterStatefulSetPath.LOCAL_BIN_RUN_DBOPS_SH_PATH.filename())
                            .withMountPath(ClusterStatefulSetPath.LOCAL_BIN_RUN_DBOPS_SH_PATH.path())
                            .withReadOnly(true)),
                    ClusterStatefulSetVolumeConfig.TEMPLATES.volumeMount(context,
                        volumeMountBuilder -> volumeMountBuilder
                            .withSubPath(ClusterStatefulSetPath.LOCAL_BIN_SHELL_UTILS_PATH.filename())
                            .withMountPath(ClusterStatefulSetPath.LOCAL_BIN_SHELL_UTILS_PATH.path())
                            .withReadOnly(true)),
                    ClusterStatefulSetVolumeConfig.TEMPLATES.volumeMount(context,
                        volumeMountBuilder -> volumeMountBuilder
                            .withSubPath(getRunScript().filename())
                            .withMountPath(getRunScript().path())
                            .withReadOnly(true)))
                .build(),
            new ContainerBuilder()
                .withName("set-dbops-result")
                .withImage(StackGresComponent.KUBECTL.findLatestImageName())
                .withImagePullPolicy("IfNotPresent")
                .withEnv(ImmutableList.<EnvVar>builder()
                    .addAll(clusterEnvironmentVariables.listResources(context))
                    .add(
                        new EnvVarBuilder().withName("OP_NAME")
                            .withValue(dbOps.getSpec().getOp()).build(),
                        new EnvVarBuilder().withName("NORMALIZED_OP_NAME")
                            .withValue(UPPERCASE_PATTERN.matcher(dbOps.getSpec().getOp())
                                .replaceAll(result -> " " + result.group(1).toLowerCase(Locale.US)))
                            .build(),
                        new EnvVarBuilder().withName("KEBAB_OP_NAME")
                            .withValue(UPPERCASE_PATTERN.matcher(dbOps.getSpec().getOp())
                                .replaceAll(result -> "-" + result.group(1).toLowerCase(Locale.US)))
                            .build(),
                        new EnvVarBuilder().withName("SET_RESULT_SCRIPT_PATH")
                            .withValue(Optional.ofNullable(getSetResultScript())
                                .map(ClusterStatefulSetPath::path).orElse(""))
                            .build(),
                        new EnvVarBuilder().withName("CLUSTER_NAMESPACE").withValue(namespace).build(),
                        new EnvVarBuilder().withName("DB_OPS_NAME").withValue(name).build(),
                        new EnvVarBuilder().withName("DB_OPS_CRD_NAME")
                            .withValue(CustomResource.getCRDName(StackGresDbOps.class)).build(),
                        new EnvVarBuilder().withName("JOB_POD_LABELS")
                            .withValue(Seq.seq(labels)
                                .append(Tuple.tuple("job-name", jobName(dbOps)))
                                .map(t -> t.v1 + "=" + t.v2)
                                .toString(","))
                            .build())
                    .addAll(Seq.of(DbOpsStatusCondition.values())
                        .map(c -> new EnvVarBuilder()
                            .withName("CONDITION_" + c.name())
                            .withValue(conditions.get(c))
                            .build())
                        .toList())
                    .addAll(setResultEnvVars)
                    .build())
                .withCommand("/bin/sh", "-ex",
                    ClusterStatefulSetPath.LOCAL_BIN_SET_DBOPS_RESULT_SH_PATH.path())
                .withVolumeMounts(
                    ClusterStatefulSetVolumeConfig.SHARED.volumeMount(context),
                    ClusterStatefulSetVolumeConfig.TEMPLATES.volumeMount(context,
                        volumeMountBuilder -> volumeMountBuilder
                            .withSubPath(ClusterStatefulSetPath.LOCAL_BIN_SET_DBOPS_RESULT_SH_PATH.filename())
                            .withMountPath(ClusterStatefulSetPath.LOCAL_BIN_SET_DBOPS_RESULT_SH_PATH.path())
                            .withReadOnly(true)),
                    ClusterStatefulSetVolumeConfig.TEMPLATES.volumeMount(context,
                        volumeMountBuilder -> volumeMountBuilder
                            .withSubPath(ClusterStatefulSetPath.LOCAL_BIN_SHELL_UTILS_PATH.filename())
                            .withMountPath(ClusterStatefulSetPath.LOCAL_BIN_SHELL_UTILS_PATH.path())
                            .withReadOnly(true)))
                .addAllToVolumeMounts(Optional.ofNullable(getSetResultScript())
                    .map(script -> ClusterStatefulSetVolumeConfig.TEMPLATES.volumeMount(context,
                        volumeMountBuilder -> volumeMountBuilder
                            .withSubPath(script.filename())
                            .withMountPath(script.path())
                            .withReadOnly(true)))
                    .stream()
                    .collect(Collectors.toList()))
                .build())
        .withVolumes(
            ClusterStatefulSetVolumeConfig.SHARED.volume(context),
            new VolumeBuilder(ClusterStatefulSetVolumeConfig.TEMPLATES.volume(context))
                .editConfigMap()
                .withDefaultMode(0555) // NOPMD
                .endConfigMap()
                .build())
        .endSpec()
        .endTemplate()
        .endSpec()
        .build();
}
Also used : PodSecurityContext(io.fabric8.kubernetes.api.model.PodSecurityContext) EnvVar(io.fabric8.kubernetes.api.model.EnvVar) StackGresCluster(io.stackgres.common.crd.sgcluster.StackGresCluster) Seq(org.jooq.lambda.Seq) StackGresDbOps(io.stackgres.common.crd.sgdbops.StackGresDbOps) Inject(javax.inject.Inject) StackGresDbOpsContext(io.stackgres.operator.conciliation.dbops.StackGresDbOpsContext) Tuple2(org.jooq.lambda.tuple.Tuple2) ImmutableList(com.google.common.collect.ImmutableList) DbOpsEnvironmentVariables(io.stackgres.operator.cluster.factory.DbOpsEnvironmentVariables) ClusterStatefulSetPath(io.stackgres.common.ClusterStatefulSetPath) Locale(java.util.Locale) Map(java.util.Map) LabelFactoryForDbOps(io.stackgres.common.LabelFactoryForDbOps) ContainerBuilder(io.fabric8.kubernetes.api.model.ContainerBuilder) DbOpsUtil(io.stackgres.common.DbOpsUtil) Job(io.fabric8.kubernetes.api.model.batch.v1.Job) ResourceFactory(io.stackgres.operator.conciliation.factory.ResourceFactory) JobBuilder(io.fabric8.kubernetes.api.model.batch.v1.JobBuilder) Unchecked(org.jooq.lambda.Unchecked) ImmutableMap(com.google.common.collect.ImmutableMap) LabelFactoryForCluster(io.stackgres.common.LabelFactoryForCluster) EnvVarBuilder(io.fabric8.kubernetes.api.model.EnvVarBuilder) VolumeBuilder(io.fabric8.kubernetes.api.model.VolumeBuilder) Collectors(java.util.stream.Collectors) JsonMapper(com.fasterxml.jackson.databind.json.JsonMapper) DbOpsStatusCondition(io.stackgres.common.crd.sgdbops.DbOpsStatusCondition) StackGresComponent(io.stackgres.common.StackGresComponent) List(java.util.List) ClusterStatefulSetVolumeConfig(io.stackgres.operator.conciliation.factory.cluster.patroni.ClusterStatefulSetVolumeConfig) Tuple(org.jooq.lambda.tuple.Tuple) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) CustomResource(io.fabric8.kubernetes.client.CustomResource)
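
Stripped of the StackGres specifics, the builder chain above reduces to a short pattern; a minimal sketch with hypothetical names and image:

import io.fabric8.kubernetes.api.model.batch.v1.Job;
import io.fabric8.kubernetes.api.model.batch.v1.JobBuilder;

class JobSketch {
    static Job minimalJob() {
        return new JobBuilder()
            .withNewMetadata().withNamespace("demo").withName("demo-job").endMetadata()
            .withNewSpec()
            // fail fast: no Job-level retries, one pod, one completion
            .withBackoffLimit(0).withCompletions(1).withParallelism(1)
            .withNewTemplate()
            .withNewSpec()
            .withRestartPolicy("Never")
            .addNewContainer()
            .withName("main")
            .withImage("busybox")
            .withCommand("/bin/sh", "-c", "echo done")
            .endContainer()
            .endSpec()
            .endTemplate()
            .endSpec()
            .build();
    }
}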

Example 25 with Job

use of io.fabric8.kubernetes.api.model.batch.v1.Job in project marduk by entur.

the class KubernetesJobRunner method runJob.

/**
 * Run a Kubernetes job
 *
 * @param cronJobName   name of the CronJob used as a template
 * @param jobNamePrefix prefix for the Kubernetes job name
 * @param envVars       environment variables to be provided to the job
 * @param timestamp     timestamp used to create a unique name for the Kubernetes job.
 */
public void runJob(String cronJobName, String jobNamePrefix, List<EnvVar> envVars, String timestamp) {
    try (final KubernetesClient kubernetesClient = new DefaultKubernetesClient()) {
        String jobName = jobNamePrefix + '-' + timestamp;
        final Job job = retrieveOrCreateJob(jobName, cronJobName, envVars, kubernetesClient);
        final CountDownLatch watchLatch = new CountDownLatch(1);
        MardukPodWatcher mardukPodWatcher = new MardukPodWatcher(job, watchLatch, jobName);
        try (Watch watch = kubernetesClient.pods().inNamespace(kubernetesNamespace).withLabel("job-name", jobName).watch(mardukPodWatcher)) {
            boolean jobCompletedBeforeTimeout = watchLatch.await(jobTimeoutSecond, TimeUnit.SECONDS);
            if (!jobCompletedBeforeTimeout) {
                throw new KubernetesJobRunnerException("Timeout while waiting for the Graph Builder job " + jobName + " to complete.");
            }
            JobStatus status = kubernetesClient.batch().v1().jobs().inNamespace(kubernetesNamespace).withName(jobName).get().getStatus();
            LOGGER.debug("Kubernetes Job status on completion: {}", status);
            // test the pod status rather than the job status since the job status may be out of sync with the pod status
            if (mardukPodWatcher.isSucceeded()) {
                LOGGER.info("The Graph Builder job {} completed successfully.", jobName);
            } else if (mardukPodWatcher.isKubernetesClientError()) {
                throw new KubernetesJobRunnerException("Kubernetes client error while watching the Graph Builder job " + jobName);
            } else {
                throw new KubernetesJobRunnerException("The Graph Builder job " + jobName + " failed.");
            }
        } catch (KubernetesClientException e) {
            throw new KubernetesJobRunnerException("Could not watch pod", e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new KubernetesJobRunnerException("Interrupted while watching pod", e);
        } finally {
            // Delete job after completion unless there was a Kubernetes error that can be retried
            if (!mardukPodWatcher.isKubernetesClientError() && deleteJobAfterCompletion) {
                LOGGER.info("Deleting job {} after completion.", jobName);
                deleteKubernetesJob(kubernetesClient, job);
                LOGGER.info("Deleted job {} after completion.", jobName);
            }
        }
    }
}
Also used : JobStatus(io.fabric8.kubernetes.api.model.batch.v1.JobStatus) KubernetesClient(io.fabric8.kubernetes.client.KubernetesClient) DefaultKubernetesClient(io.fabric8.kubernetes.client.DefaultKubernetesClient) Watch(io.fabric8.kubernetes.client.Watch) CronJob(io.fabric8.kubernetes.api.model.batch.v1beta1.CronJob) Job(io.fabric8.kubernetes.api.model.batch.v1.Job) CountDownLatch(java.util.concurrent.CountDownLatch) KubernetesClientException(io.fabric8.kubernetes.client.KubernetesClientException)
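
MardukPodWatcher is project-specific; a minimal sketch of the same watch-and-latch pattern against fabric8's Watcher API (class and field names below are hypothetical):

import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.client.Watcher;
import io.fabric8.kubernetes.client.WatcherException;
import java.util.concurrent.CountDownLatch;

// Counts down the latch once a watched pod reaches a terminal phase.
class PodCompletionWatcher implements Watcher<Pod> {
    private final CountDownLatch latch;
    private volatile boolean succeeded;

    PodCompletionWatcher(CountDownLatch latch) {
        this.latch = latch;
    }

    @Override
    public void eventReceived(Action action, Pod pod) {
        String phase = pod.getStatus() == null ? null : pod.getStatus().getPhase();
        if ("Succeeded".equals(phase) || "Failed".equals(phase)) {
            succeeded = "Succeeded".equals(phase);
            latch.countDown();
        }
    }

    @Override
    public void onClose(WatcherException cause) {
        latch.countDown(); // unblock the waiting thread on client errors too
    }

    boolean isSucceeded() {
        return succeeded;
    }
}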

Aggregations

Job (org.pentaho.platform.api.scheduler2.Job): 94
Test (org.junit.Test): 89
Job (io.fabric8.kubernetes.api.model.batch.v1.Job): 38
Serializable (java.io.Serializable): 25
ArrayList (java.util.ArrayList): 24
SimpleJobTrigger (org.pentaho.platform.api.scheduler2.SimpleJobTrigger): 21
Job (com.google.cloud.talent.v4beta1.Job): 20
HashMap (java.util.HashMap): 20
JobScheduleRequest (org.pentaho.platform.web.http.api.resources.JobScheduleRequest): 19
ComplexJobTrigger (org.pentaho.platform.api.scheduler2.ComplexJobTrigger): 18
SchedulerException (org.pentaho.platform.api.scheduler2.SchedulerException): 17
JobServiceClient (com.google.cloud.talent.v4beta1.JobServiceClient): 16
Date (java.util.Date): 14
IJobFilter (org.pentaho.platform.api.scheduler2.IJobFilter): 14
Job (com.google.cloud.video.transcoder.v1.Job): 13
TranscoderServiceClient (com.google.cloud.video.transcoder.v1.TranscoderServiceClient): 13
JobBuilder (io.fabric8.kubernetes.api.model.batch.v1.JobBuilder): 13
IJobTrigger (org.pentaho.platform.api.scheduler2.IJobTrigger): 12
Map (java.util.Map): 11
Test (org.junit.jupiter.api.Test): 10