Use of org.pentaho.platform.api.scheduler2.Job in project pentaho-platform by pentaho.
From the class QuartzSchedulerTest, method testSetJobNextRunToTheFuture.
@Test
public void testSetJobNextRunToTheFuture() {
  Trigger trigger = Mockito.mock(Trigger.class);
  Job job = new Job();
  QuartzScheduler quartzScheduler = new QuartzScheduler();
  long nowDate = new Date().getTime();
  long futureDate = nowDate + 1000000000;
  Mockito.when(trigger.getNextFireTime()).thenReturn(new Date(futureDate));
  Mockito.when(trigger.getFireTimeAfter(any())).thenReturn(new Date(nowDate));
  quartzScheduler.setJobNextRun(job, trigger);
  assertEquals(new Date(futureDate), job.getNextRun());
}
Use of org.pentaho.platform.api.scheduler2.Job in project pentaho-platform by pentaho.
From the class QuartzSchedulerTest, method testSetJobNextRunToThePast.
@Test
public void testSetJobNextRunToThePast() {
  Trigger trigger = Mockito.mock(Trigger.class);
  Job job = new Job();
  QuartzScheduler quartzScheduler = new QuartzScheduler();
  long nowDate = new Date().getTime();
  long pastDate = nowDate - 1000000000;
  Mockito.when(trigger.getNextFireTime()).thenReturn(new Date(pastDate));
  Mockito.when(trigger.getFireTimeAfter(any())).thenReturn(new Date(nowDate));
  quartzScheduler.setJobNextRun(job, trigger);
  assertEquals(new Date(nowDate), job.getNextRun());
}
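Taken together, the two tests above pin down the expected behaviour of setJobNextRun: if Quartz reports a next fire time in the future it is used as-is, while a fire time that already lies in the past (for example after a misfire) is replaced by the next fire time computed from "now". A minimal sketch of logic consistent with these tests follows; the actual QuartzScheduler implementation may differ in details such as null handling.

// Sketch only: behaviour inferred from the two tests above, not the
// verbatim pentaho-platform implementation.
void setJobNextRun(Job job, Trigger trigger) {
  Date nextFire = trigger.getNextFireTime();
  Date now = new Date();
  if (nextFire == null || nextFire.before(now)) {
    // the reported fire time is stale, ask Quartz for the next one after now
    job.setNextRun(trigger.getFireTimeAfter(now));
  } else {
    job.setNextRun(nextFire);
  }
}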
Use of org.pentaho.platform.api.scheduler2.Job in project pentaho-platform by pentaho.
From the class JobAdapter, method unmarshal.
public Job unmarshal(JaxbSafeJob jaxbSafeJob) throws Exception {
  if (jaxbSafeJob == null) {
    return null;
  }
  Job job = new Job();
  try {
    job.setJobTrigger(jaxbSafeJob.jobTrigger);
    job.setJobParams(toProperMap(jaxbSafeJob.jobParams));
    job.setLastRun(jaxbSafeJob.lastRun);
    job.setNextRun(jaxbSafeJob.nextRun);
    job.setSchedulableClass(jaxbSafeJob.schedulableClass);
    job.setJobId(jaxbSafeJob.jobId);
    job.setUserName(jaxbSafeJob.userName);
    job.setJobName(jaxbSafeJob.jobName);
    job.setState(jaxbSafeJob.state);
  } catch (Throwable t) {
    // no message bundle since this is a development error case
    logger.error("Error unmarshalling job", t); //$NON-NLS-1$
    return null;
  }
  return job;
}
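A JAXB XmlAdapter also needs the opposite direction. A hedged sketch of what the corresponding marshal method could look like is shown below; the field names mirror the JaxbSafeJob fields used above, while the toJaxbSafeMap helper is hypothetical (a counterpart to toProperMap), so the real pentaho-platform implementation may differ.

// Sketch only: symmetric counterpart to unmarshal above, for illustration.
// toJaxbSafeMap is a hypothetical helper mirroring toProperMap.
public JaxbSafeJob marshal(Job job) throws Exception {
  if (job == null) {
    return null;
  }
  JaxbSafeJob jaxbSafeJob = new JaxbSafeJob();
  jaxbSafeJob.jobTrigger = job.getJobTrigger();
  jaxbSafeJob.jobParams = toJaxbSafeMap(job.getJobParams());
  jaxbSafeJob.lastRun = job.getLastRun();
  jaxbSafeJob.nextRun = job.getNextRun();
  jaxbSafeJob.schedulableClass = job.getSchedulableClass();
  jaxbSafeJob.jobId = job.getJobId();
  jaxbSafeJob.userName = job.getUserName();
  jaxbSafeJob.jobName = job.getJobName();
  jaxbSafeJob.state = job.getState();
  return jaxbSafeJob;
}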
Use of org.pentaho.platform.api.scheduler2.Job in project pentaho-platform by pentaho.
From the class SolutionImportHandlerIT, method testImportSchedules.
@Test
public void testImportSchedules() throws PlatformImportException, SchedulerException {
  SolutionImportHandler importHandler = new SolutionImportHandler(Collections.emptyList());
  importHandler = spy(importHandler);
  List<JobScheduleRequest> requests = new ArrayList<>(4);
  requests.add(createJobScheduleRequest("NORMAL", JobState.NORMAL));
  requests.add(createJobScheduleRequest("PAUSED", JobState.PAUSED));
  requests.add(createJobScheduleRequest("PAUSED", JobState.COMPLETE));
  requests.add(createJobScheduleRequest("PAUSED", JobState.ERROR));
  doReturn(new ArrayList<Job>()).when(importHandler).getAllJobs(any());
  importHandler.importSchedules(requests);
  List<Job> jobs = scheduler.getJobs(job -> true);
  assertEquals(4, jobs.size());
  for (Job job : jobs) {
    assertEquals(job.getJobName(), job.getState().toString());
  }
}
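The test relies on a small createJobScheduleRequest helper to build its input. A plausible sketch of such a helper is shown below; the setter names are assumptions, and the real helper in SolutionImportHandlerIT may also configure a trigger and other fields.

// Sketch only: minimal helper consistent with how the test reads back the
// job name and state; setJobName/setJobState are assumed setter names.
private JobScheduleRequest createJobScheduleRequest(String name, JobState state) {
  JobScheduleRequest request = new JobScheduleRequest();
  request.setJobName(name);
  request.setJobState(state);
  return request;
}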
Use of com.google.cloud.dataproc.v1.Job in project cdap by caskdata.
From the class DataprocRuntimeJobManager, method launch.
@Override
public void launch(RuntimeJobInfo runtimeJobInfo) throws Exception {
  String bucket = DataprocUtils.getBucketName(this.bucket);
  ProgramRunInfo runInfo = runtimeJobInfo.getProgramRunInfo();
  LOG.debug("Launching run {} with following configurations: cluster {}, project {}, region {}, bucket {}.",
            runInfo.getRun(), clusterName, projectId, region, bucket);
  // TODO: CDAP-16408 use fixed directory for caching twill, application, artifact jars
  File tempDir = Files.createTempDirectory("dataproc.launcher").toFile();
  // on dataproc bucket the run root will be <bucket>/cdap-job/<runid>/. All the files for this run will be copied
  // under that base dir.
  String runRootPath = getPath(DataprocUtils.CDAP_GCS_ROOT, runInfo.getRun());
  try {
    // step 1: build twill.jar and launcher.jar and add them to files to be copied to gcs
    List<LocalFile> localFiles = getRuntimeLocalFiles(runtimeJobInfo.getLocalizeFiles(), tempDir);
    // step 2: upload all the necessary files to gcs so that those files are available to dataproc job
    List<Future<LocalFile>> uploadFutures = new ArrayList<>();
    for (LocalFile fileToUpload : localFiles) {
      String targetFilePath = getPath(runRootPath, fileToUpload.getName());
      uploadFutures.add(
        provisionerContext.execute(() -> uploadFile(bucket, targetFilePath, fileToUpload)).toCompletableFuture());
    }
    List<LocalFile> uploadedFiles = new ArrayList<>();
    for (Future<LocalFile> uploadFuture : uploadFutures) {
      uploadedFiles.add(uploadFuture.get());
    }
    // step 3: build the hadoop job request to be submitted to dataproc
    SubmitJobRequest request = getSubmitJobRequest(runtimeJobInfo, uploadedFiles);
    // step 4: submit hadoop job to dataproc
    try {
      Job job = getJobControllerClient().submitJob(request);
      LOG.debug("Successfully submitted hadoop job {} to cluster {}.", job.getReference().getJobId(), clusterName);
    } catch (AlreadyExistsException ex) {
      // the job id already exists, ignore the job.
      LOG.warn("The dataproc job {} already exists. Ignoring resubmission of the job.",
               request.getJob().getReference().getJobId());
    }
    DataprocUtils.emitMetric(provisionerContext, region, "provisioner.submitJob.response.count");
  } catch (Exception e) {
    // delete all uploaded gcs files in case of exception
    DataprocUtils.deleteGCSPath(getStorageClient(), bucket, runRootPath);
    DataprocUtils.emitMetric(provisionerContext, region, "provisioner.submitJob.response.count", e);
    throw new Exception(String.format("Error while launching job %s on cluster %s", getJobId(runInfo), clusterName), e);
  } finally {
    // delete local temp directory
    deleteDirectoryContents(tempDir);
  }
}
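Both the run root and the per-file target paths above are assembled with a getPath helper that joins path components for the GCS object names (for example getPath(runRootPath, fileToUpload.getName())). A minimal sketch of that kind of join is shown below; the actual helper in DataprocRuntimeJobManager may handle separators or empty components differently.

// Sketch only: joins path components with "/" the way the snippet uses it,
// e.g. getPath(DataprocUtils.CDAP_GCS_ROOT, runInfo.getRun()).
private static String getPath(String... pathSubComponents) {
  return String.join("/", pathSubComponents);
}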