use of the Kubernetes batch Job (io.fabric8.kubernetes.api.model.batch.v1.Job — the "transcoder.v1.Job" label appears to be a mis-tag) in project strimzi-kafka-operator by strimzi.
the class OauthAbstractST method tearDownEach.
/**
 * Removes every Kubernetes Job whose name references the cluster that was
 * used by the just-finished test case, waiting for each deletion to complete.
 */
@AfterEach
void tearDownEach(ExtensionContext extensionContext) throws Exception {
    // The test's display name maps to the cluster name its Jobs contain.
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    List<Job> jobsForCluster = kubeClient().getJobList().getItems().stream()
            .filter(item -> item.getMetadata().getName().contains(clusterName))
            .collect(Collectors.toList());
    for (Job jobToDelete : jobsForCluster) {
        LOGGER.info("Deleting {} job", jobToDelete.getMetadata().getName());
        JobUtils.deleteJobWithWait(jobToDelete.getMetadata().getNamespace(), jobToDelete.getMetadata().getName());
    }
}
use of the Kubernetes batch Job (io.fabric8.kubernetes.api.model.batch.v1.Job — the "transcoder.v1.Job" label appears to be a mis-tag) in project hugegraph-computer by hugegraph.
the class ComputerJobController method observeComponent.
/**
 * Takes a snapshot of the cluster resources currently owned by the given CR:
 * the master/worker Jobs (plus their Pods, when the Job exists) and the ConfigMap.
 *
 * @param computerJob the custom resource being reconciled
 * @return the observed component set; absent resources are left null inside it
 */
private ComputerJobComponent observeComponent(HugeGraphComputerJob computerJob) {
    ComputerJobComponent component = new ComputerJobComponent();
    component.computerJob(computerJob);

    String ns = computerJob.getMetadata().getNamespace();
    String crName = computerJob.getMetadata().getName();

    Job masterJob = this.getResourceByName(ns, KubeUtil.masterJobName(crName), Job.class);
    component.masterJob(masterJob);
    if (masterJob != null) {
        // Pods can only be resolved through an existing Job.
        component.masterPods(this.getPodsByJob(masterJob));
    }

    Job workerJob = this.getResourceByName(ns, KubeUtil.workerJobName(crName), Job.class);
    component.workerJob(workerJob);
    if (workerJob != null) {
        component.workerPods(this.getPodsByJob(workerJob));
    }

    component.configMap(this.getResourceByName(ns, KubeUtil.configMapName(crName), ConfigMap.class));
    return component;
}
use of the Kubernetes batch Job (io.fabric8.kubernetes.api.model.batch.v1.Job — the "transcoder.v1.Job" label appears to be a mis-tag) in project hugegraph-computer by hugegraph.
the class ComputerJobDeployer method reconcileComponent.
/**
 * Reconciles the observed cluster state toward the desired state for the
 * ConfigMap, master Job and worker Job of one computer job.
 * <p>
 * For each resource: delete it when no longer desired, create it when missing
 * (ignoring "already exists" races), and do nothing when both sides agree.
 *
 * @param namespace namespace the CR lives in; a namespaced client view is used
 *                  when it differs from the shared client's default namespace
 * @param desired   the resources that should exist
 * @param observed  the resources that currently exist (nulls for absent ones)
 */
private void reconcileComponent(String namespace, ComputerJobComponent desired, ComputerJobComponent observed) {
    final KubernetesClient client;
    if (!Objects.equals(this.kubeClient.getNamespace(), namespace)) {
        client = this.kubeClient.inNamespace(namespace);
    } else {
        client = this.kubeClient;
    }
    // The create/delete/no-op decision was triplicated verbatim; share it via a
    // generic helper parameterized with the resource-specific client calls.
    this.reconcileResource("ConfigMap", desired.configMap(), observed.configMap(),
                           item -> client.configMaps().delete(item),
                           item -> client.configMaps().create(item));
    this.reconcileResource("MasterJob", desired.masterJob(), observed.masterJob(),
                           item -> client.batch().v1().jobs().delete(item),
                           item -> client.batch().v1().jobs().create(item));
    this.reconcileResource("WorkerJob", desired.workerJob(), observed.workerJob(),
                           item -> client.batch().v1().jobs().delete(item),
                           item -> client.batch().v1().jobs().create(item));
}

/**
 * Applies the standard reconcile decision for one resource kind.
 *
 * @param kind     human-readable resource kind, used only for the debug log
 * @param desired  the resource that should exist, or null if it should not
 * @param observed the resource that currently exists, or null if absent
 * @param deleteFn deletes the observed resource
 * @param createFn creates the desired resource; wrapped in ignoreExists to
 *                 tolerate a concurrent creator winning the race
 */
private <T> void reconcileResource(String kind, T desired, T observed,
                                   java.util.function.Consumer<T> deleteFn,
                                   java.util.function.Function<T, ?> createFn) {
    if (desired == null && observed != null) {
        deleteFn.accept(observed);
    } else if (desired != null && observed == null) {
        KubeUtil.ignoreExists(() -> createFn.apply(desired));
    } else if (desired != null) {
        // Both present: nothing to do (same message text as before, built from kind).
        LOG.debug("{} already exists, no action", kind);
    }
}
use of the Dataproc Job (com.google.cloud.dataproc.v1.Job — the "transcoder.v1.Job" label appears to be a mis-tag) in project cdap by cdapio.
the class DataprocRuntimeJobManager method launch.
/**
 * Launches the given program run as a Hadoop job on a Dataproc cluster.
 *
 * Steps: build the local launcher files, upload them concurrently to the run's
 * GCS directory (bucket/cdap-job/run-id), build the submit request, and submit
 * it. If the job id already exists the resubmission is ignored. On any other
 * failure the uploaded GCS files are deleted and a failure metric is emitted;
 * the local temp directory is always removed.
 *
 * @param runtimeJobInfo describes the program run and the files to localize
 * @throws Exception if staging, upload, or job submission fails
 */
@Override
public void launch(RuntimeJobInfo runtimeJobInfo) throws Exception {
// NOTE(review): this.bucket appears to be a raw config value normalized here — confirm.
String bucket = DataprocUtils.getBucketName(this.bucket);
ProgramRunInfo runInfo = runtimeJobInfo.getProgramRunInfo();
LOG.debug("Launching run {} with following configurations: cluster {}, project {}, region {}, bucket {}.", runInfo.getRun(), clusterName, projectId, region, bucket);
// TODO: CDAP-16408 use fixed directory for caching twill, application, artifact jars
File tempDir = Files.createTempDirectory("dataproc.launcher").toFile();
// on dataproc bucket the run root will be <bucket>/cdap-job/<runid>/. All the files for this run will be copied
// under that base dir.
String runRootPath = getPath(DataprocUtils.CDAP_GCS_ROOT, runInfo.getRun());
try {
// step 1: build twill.jar and launcher.jar and add them to files to be copied to gcs
List<LocalFile> localFiles = getRuntimeLocalFiles(runtimeJobInfo.getLocalizeFiles(), tempDir);
// step 2: upload all the necessary files to gcs so that those files are available to dataproc job
// Uploads run concurrently via the provisioner context; the futures are joined below.
List<Future<LocalFile>> uploadFutures = new ArrayList<>();
for (LocalFile fileToUpload : localFiles) {
String targetFilePath = getPath(runRootPath, fileToUpload.getName());
uploadFutures.add(provisionerContext.execute(() -> uploadFile(bucket, targetFilePath, fileToUpload)).toCompletableFuture());
}
// Block until every upload has completed; any upload failure propagates here
// and is handled by the catch block (which cleans up the GCS path).
List<LocalFile> uploadedFiles = new ArrayList<>();
for (Future<LocalFile> uploadFuture : uploadFutures) {
uploadedFiles.add(uploadFuture.get());
}
// step 3: build the hadoop job request to be submitted to dataproc
SubmitJobRequest request = getSubmitJobRequest(runtimeJobInfo, uploadedFiles);
// step 4: submit hadoop job to dataproc
try {
Job job = getJobControllerClient().submitJob(request);
LOG.debug("Successfully submitted hadoop job {} to cluster {}.", job.getReference().getJobId(), clusterName);
} catch (AlreadyExistsException ex) {
// the job id already exists, ignore the job.
LOG.warn("The dataproc job {} already exists. Ignoring resubmission of the job.", request.getJob().getReference().getJobId());
}
DataprocUtils.emitMetric(provisionerContext, region, "provisioner.submitJob.response.count");
} catch (Exception e) {
// delete all uploaded gcs files in case of exception
DataprocUtils.deleteGCSPath(getStorageClient(), bucket, runRootPath);
// Same metric name as the success path; the exception overload presumably tags it as a failure.
DataprocUtils.emitMetric(provisionerContext, region, "provisioner.submitJob.response.count", e);
throw new Exception(String.format("Error while launching job %s on cluster %s", getJobId(runInfo), clusterName), e);
} finally {
// delete local temp directory
deleteDirectoryContents(tempDir);
}
}
use of the Kubernetes batch Job (io.fabric8.kubernetes.api.model.batch.v1.Job — the "transcoder.v1.Job" label appears to be a mis-tag) in project cmcc-operator by T-Systems-MMS.
the class DefaultTargetState method isReady.
/**
 * Checks whether the named Job has completed successfully in the CR's namespace.
 *
 * @param jobName job name without the configured prefix
 * @return true once the Job reports at least one succeeded pod
 */
private boolean isReady(String jobName) {
    // Query under the fully-prefixed name the deployer used when creating the Job.
    String fullName = concatOptional(cmcc.getSpec().getDefaults().getNamePrefix(), jobName);
    Job batchJob = kubernetesClient.batch().v1().jobs()
            .inNamespace(cmcc.getMetadata().getNamespace())
            .withName(fullName)
            .get();
    // Guard each level: the Job, its status, and the succeeded counter may all be absent.
    boolean succeeded = batchJob != null
            && batchJob.getStatus() != null
            && batchJob.getStatus().getSucceeded() != null
            && batchJob.getStatus().getSucceeded() > 0;
    if (succeeded) {
        log.debug("job {}: has succeeded", fullName);
    } else {
        log.debug("job {}: waiting for successful completion", fullName);
    }
    return succeeded;
}
Aggregations