Use of com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt in the project titus-control-plane by Netflix.
From the class CassandraJobStoreTest, method testStoreTask.
@Test
public void testStoreTask() {
    // Persist a job, then a task belonging to it, and verify the task round-trips
    // through the store unchanged.
    JobStore jobStore = getJobStore();
    Job<BatchJobExt> batchJob = createBatchJobObject();
    jobStore.init().await();
    jobStore.storeJob(batchJob).await();

    // Sanity-check that the stored job itself is retrievable before adding the task.
    Pair<List<Job<?>>, Integer> retrievedJobs = jobStore.retrieveJobs().toBlocking().first();
    checkRetrievedJob(batchJob, retrievedJobs.getLeft().get(0));

    Task storedTask = createTaskObject(batchJob);
    jobStore.storeTask(storedTask).await();
    Task loadedTask = jobStore.retrieveTask(storedTask.getId()).toBlocking().first();
    checkRetrievedTask(storedTask, loadedTask);
}
Use of com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt in the project titus-control-plane by Netflix.
From the class CassandraJobStoreTest, method testRetrieveTasksForJob.
@Test
public void testRetrieveTasksForJob() {
    // Store one job with one task, then retrieve the task by job id.
    JobStore jobStore = getJobStore();
    Job<BatchJobExt> batchJob = createBatchJobObject();
    jobStore.init().await();
    jobStore.storeJob(batchJob).await();

    Pair<List<Job<?>>, Integer> retrievedJobs = jobStore.retrieveJobs().toBlocking().first();
    checkRetrievedJob(batchJob, retrievedJobs.getLeft().get(0));

    Task storedTask = createTaskObject(batchJob);
    jobStore.storeTask(storedTask).await();
    Pair<List<Task>, Integer> retrievedTasks = jobStore.retrieveTasksForJob(batchJob.getId()).toBlocking().first();
    checkRetrievedTask(storedTask, retrievedTasks.getLeft().get(0));

    // An active (never deleted) task must not show up in the archive table.
    Task archivedTask = jobStore.retrieveArchivedTasksForJob(batchJob.getId()).toBlocking().firstOrDefault(null);
    assertThat(archivedTask).isNull();
}
Use of com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt in the project titus-control-plane by Netflix.
From the class CassandraJobStoreTest, method testActiveJobIdDistribution.
/**
 * Create enough jobs to evenly be bucketed across multiple rows. Delete 1 job per bucket. Add back enough jobs to fill
 * in the deleted jobs plus an extra bucket worth as a new bucket was created when reaching the max of all the original buckets.
 */
@Test
public void testActiveJobIdDistribution() {
    int totalJobs = 100;
    int expectedBuckets = totalJobs / MAX_BUCKET_SIZE;
    Session session = cassandraCqlUnit.getSession();
    JobStore jobStore = getJobStore(session);
    jobStore.init().await();

    // Phase 1: fill every bucket to capacity.
    List<Job<?>> storedJobs = new ArrayList<>();
    List<Completable> storeOps = new ArrayList<>();
    for (int i = 0; i < totalJobs; i++) {
        Job<BatchJobExt> batchJob = createBatchJobObject();
        storedJobs.add(batchJob);
        storeOps.add(jobStore.storeJob(batchJob));
    }
    Completable.merge(Observable.from(storeOps), MAX_CONCURRENCY).await();

    Pair<List<Job<?>>, Integer> retrieved = jobStore.retrieveJobs().toBlocking().first();
    assertThat(retrieved.getLeft()).hasSize(totalJobs);
    assertItemsPerBucket(session, expectedBuckets, MAX_BUCKET_SIZE);

    // Phase 2: remove exactly one job from each bucket (stride over the created order).
    int jobsRemoved = 0;
    storeOps = new ArrayList<>();
    for (int idx = 0; idx < totalJobs; idx += MAX_BUCKET_SIZE) {
        storeOps.add(jobStore.deleteJob(storedJobs.get(idx)));
        jobsRemoved++;
    }
    Completable.merge(Observable.from(storeOps), MAX_CONCURRENCY).await();
    assertItemsPerBucket(session, expectedBuckets, MAX_BUCKET_SIZE - 1);

    // Phase 3: refill the holes plus one extra bucket's worth of jobs; a new
    // bucket should be opened once all original buckets are full again.
    storeOps = new ArrayList<>();
    for (int i = 0; i < jobsRemoved + MAX_BUCKET_SIZE; i++) {
        Job<BatchJobExt> batchJob = createBatchJobObject();
        storeOps.add(jobStore.storeJob(batchJob));
    }
    Completable.merge(Observable.from(storeOps), MAX_CONCURRENCY).await();

    retrieved = jobStore.retrieveJobs().toBlocking().first();
    assertThat(retrieved.getLeft()).hasSize(totalJobs + MAX_BUCKET_SIZE);
    assertItemsPerBucket(session, expectedBuckets + 1, MAX_BUCKET_SIZE);
}
Use of com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt in the project titus-control-plane by Netflix.
From the class DefaultPodAffinityFactoryTest, method testEbsVolumeAzAffinity.
@Test
public void testEbsVolumeAzAffinity() {
    // A job with an EBS volume should produce a required node-affinity term
    // pinning the pod to the volume's availability zone.
    List<EbsVolume> ebsVolumes = JobEbsVolumeGenerator.jobEbsVolumes(1).toList();
    Map<String, String> volumeAttributes = JobEbsVolumeGenerator.jobEbsVolumesToAttributes(ebsVolumes);
    EbsVolume firstVolume = ebsVolumes.get(0);

    Job<BatchJobExt> job = JobGenerator.oneBatchJob();
    job = job.toBuilder()
            .withJobDescriptor(JobFunctions.jobWithEbsVolumes(job.getJobDescriptor(), ebsVolumes, volumeAttributes))
            .build();

    Pair<V1Affinity, Map<String, String>> affinityWithAnnotations = factory.buildV1Affinity(
            job,
            JobEbsVolumeGenerator.appendEbsVolumeAttribute(JobGenerator.oneBatchTask(), firstVolume.getVolumeId()));

    V1NodeSelector nodeSelector = affinityWithAnnotations.getLeft()
            .getNodeAffinity()
            .getRequiredDuringSchedulingIgnoredDuringExecution();
    assertThat(nodeSelector.getNodeSelectorTerms()).hasSize(1);
    V1NodeSelectorRequirement expression = nodeSelector.getNodeSelectorTerms().get(0).getMatchExpressions().get(0);
    assertThat(expression.getKey()).isEqualTo(KubeConstants.NODE_LABEL_ZONE);
    assertThat(expression.getValues().get(0)).isEqualTo(firstVolume.getVolumeAvailabilityZone());
}
Use of com.netflix.titus.api.jobmanager.model.job.ext.BatchJobExt in the project titus-control-plane by Netflix.
From the class DefaultPodAffinityFactoryTest, method newGpuJob.
// Builds a batch job requesting one GPU and carrying the given hard scheduling
// constraints; everything else is taken from the generator's default batch job.
private Job<BatchJobExt> newGpuJob(Map<String, String> hardConstraints) {
    Job<BatchJobExt> template = JobGenerator.oneBatchJob();
    JobDescriptor<BatchJobExt> descriptor = template.getJobDescriptor();
    Container container = descriptor.getContainer();
    Container gpuContainer = container.toBuilder()
            .withContainerResources(container.getContainerResources().toBuilder().withGpu(1).build())
            .withHardConstraints(hardConstraints)
            .build();
    return template.toBuilder()
            .withJobDescriptor(descriptor.toBuilder().withContainer(gpuContainer).build())
            .build();
}
Aggregations