Use of com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates.startTasksInNewJob in project titus-control-plane by Netflix.
From class V3JobSchedulingAndRebootTest, method submitBatchJobAndRebootTitusMaster:
@Test(timeout = LONG_TEST_TIMEOUT_MS)
public void submitBatchJobAndRebootTitusMaster() {
    // Start a one-task batch job and wait until its task is running.
    jobsScenarioBuilder.schedule(ONE_TASK_BATCH_JOB, jobScenarioBuilder ->
            jobScenarioBuilder.template(ScenarioTemplates.startTasksInNewJob())
    );
    jobsScenarioBuilder.stop();
    // Reboot TitusMaster and verify that the job and its task are recovered.
    titusStackResource.getMaster().reboot();
    JobsScenarioBuilder newJobsScenarioBuilder = new JobsScenarioBuilder(titusStackResource.getOperations());
    newJobsScenarioBuilder.assertJobs(jobs -> jobs.size() == 1)
            .takeJob(0)
            .assertJob(JobAsserts.jobInState(JobState.Accepted))
            .assertTasks(tasks -> tasks.size() == 1);
}
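All of the usages on this page share the same core pattern: schedule a job through the scenario builder, then apply the startTasksInNewJob() template to drive the new job's tasks to a running state before making further assertions. A minimal sketch of just that pattern, reusing the jobsScenarioBuilder field and ONE_TASK_BATCH_JOB descriptor from the snippet above (the method name and timeout value here are illustrative, not from the project):

@Test(timeout = 30_000)
public void startTasksInNewJobMinimalUsage() {
    // Schedule a one-task batch job and wait for the startTasksInNewJob()
    // template's expectations (task creation and startup) to be satisfied.
    jobsScenarioBuilder.schedule(ONE_TASK_BATCH_JOB, jobScenarioBuilder ->
            jobScenarioBuilder.template(ScenarioTemplates.startTasksInNewJob())
    );
}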
Use of com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates.startTasksInNewJob in project titus-control-plane by Netflix.
From class JobIpAllocationsTest, method testWaitingTaskContext:
/**
 * Tests that a job waiting for an in-use IP allocation has updated task context fields.
 */
@Test(timeout = 30_000)
// TODO Read static IP allocation status from pod message field, and add it to the task context.
@Ignore
public void testWaitingTaskContext() throws Exception {
    JobDescriptor<ServiceJobExt> firstIpJobDescriptor = ONE_TASK_SERVICE_JOB;
    JobDescriptor<ServiceJobExt> secondIpJobDescriptor = firstIpJobDescriptor.but(j -> j.getJobGroupInfo().toBuilder().withSequence("v001"));

    // Schedule the first task and ensure it's in the correct zone with the correct task context
    jobsScenarioBuilder.schedule(firstIpJobDescriptor, jobScenarioBuilder -> jobScenarioBuilder
            .template(ScenarioTemplates.startTasksInNewJob())
            .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectTaskContext(TaskAttributes.TASK_ATTRIBUTES_IP_ALLOCATION_ID, getIpAllocationIdFromJob(0, firstIpJobDescriptor)))
            .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectZoneId(getZoneFromJobIpAllocation(0, firstIpJobDescriptor)))
    );
    String firstJobId = jobsScenarioBuilder.takeJob(0).getJobId();

    // Schedule the second task and ensure it's blocked on the first task
    jobsScenarioBuilder.schedule(secondIpJobDescriptor, jobScenarioBuilder -> jobScenarioBuilder
            .template(ScenarioTemplates.jobAccepted())
            .expectAllTasksCreated()
            .allTasks(taskScenarioBuilder -> taskScenarioBuilder.expectStateUpdates(TaskStatus.TaskState.Accepted))
    );
    String secondJobId = jobsScenarioBuilder.takeJob(1).getJobId();

    // Query the gRPC endpoint and ensure the first task does not have a waiting task context field.
    TaskQueryResult firstTaskQueryResult = client.findTasks(TaskQuery.newBuilder()
            .setPage(Page.newBuilder().setPageSize(100).build())
            .putFilteringCriteria("jobIds", firstJobId)
            .build());
    assertThat(firstTaskQueryResult.getItemsCount()).isEqualTo(1);
    firstTaskQueryResult.getItemsList().forEach(task ->
            assertThat(task.getTaskContextMap()).doesNotContainKeys(TaskAttributes.TASK_ATTRIBUTES_IN_USE_IP_ALLOCATION));
    String firstTaskId = firstTaskQueryResult.getItems(0).getId();

    // Query the gRPC endpoint and ensure the second task has a waiting task context field.
    TaskQueryResult secondTaskQueryResult = client.findTasks(TaskQuery.newBuilder()
            .setPage(Page.newBuilder().setPageSize(100).build())
            .putFilteringCriteria("jobIds", secondJobId)
            .build());
    assertThat(secondTaskQueryResult.getItemsCount()).isEqualTo(1);
    secondTaskQueryResult.getItemsList().forEach(task ->
            assertThat(task.getTaskContextMap()).contains(new AbstractMap.SimpleImmutableEntry<>(TaskAttributes.TASK_ATTRIBUTES_IN_USE_IP_ALLOCATION, firstTaskId)));

    // Observe the second job and ensure the streamed task has a waiting task context field.
    boolean verified = false;
    Iterator<JobChangeNotification> it = client.observeJob(JobId.newBuilder().setId(secondJobId).build());
    while (it.hasNext()) {
        JobChangeNotification jobChangeNotification = it.next();
        if (jobChangeNotification.hasTaskUpdate()) {
            Map<String, String> taskContext = jobChangeNotification.getTaskUpdate().getTask().getTaskContextMap();
            assertThat(taskContext).contains(new AbstractMap.SimpleImmutableEntry<>(TaskAttributes.TASK_ATTRIBUTES_IN_USE_IP_ALLOCATION, firstTaskId));
            verified = true;
        } else if (jobChangeNotification.hasSnapshotEnd()) {
            break;
        }
    }
    assertThat(verified).isTrue();
}
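The final block above scans the observeJob event stream only up to the snapshot-end marker, asserting on every task update it sees. If several tests need that same check, the loop can be factored into a small helper; the following is a sketch mirroring the loop's exact semantics, with a hypothetical method name (not part of the project):

// Hypothetical helper: asserts that every task update observed before the
// snapshot-end marker carries the given task context entry, and that at
// least one such update was seen.
private static void assertSnapshotTaskContextEntry(Iterator<JobChangeNotification> events, String key, String value) {
    boolean verified = false;
    while (events.hasNext()) {
        JobChangeNotification notification = events.next();
        if (notification.hasTaskUpdate()) {
            Map<String, String> taskContext = notification.getTaskUpdate().getTask().getTaskContextMap();
            assertThat(taskContext).contains(new AbstractMap.SimpleImmutableEntry<>(key, value));
            verified = true;
        } else if (notification.hasSnapshotEnd()) {
            break;
        }
    }
    assertThat(verified).isTrue();
}

With that helper, the verification loop in the test reduces to a single call: assertSnapshotTaskContextEntry(it, TaskAttributes.TASK_ATTRIBUTES_IN_USE_IP_ALLOCATION, firstTaskId).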
Use of com.netflix.titus.master.integration.v3.scenario.ScenarioTemplates.startTasksInNewJob in project titus-control-plane by Netflix.
From class JobSubmitAndControlBasicTest, method testSubmitBatchJobWithEfsMount:
/**
 * Verifies batch job submission with the expected state transitions, and that the agent receives the proper EFS mount data.
 */
@Test(timeout = 30_000)
public void testSubmitBatchJobWithEfsMount() {
    EfsMount efsMount1 = ContainersGenerator.efsMounts().getValue().toBuilder().withMountPoint("/data/logs").build();
    EfsMount efsMount2 = ContainersGenerator.efsMounts().skip(1).getValue().toBuilder().withMountPoint("/data").build();
    List<EfsMount> efsMounts = asList(efsMount1, efsMount2);
    List<EfsMount> expectedOrder = asList(efsMount2, efsMount1);
    JobDescriptor<BatchJobExt> jobWithEfs = ONE_TASK_BATCH_JOB.but(jd ->
            jd.getContainer().but(c -> c.getContainerResources().toBuilder().withEfsMounts(efsMounts))
    );
    jobsScenarioBuilder.schedule(jobWithEfs, jobScenarioBuilder -> jobScenarioBuilder
            .template(ScenarioTemplates.startTasksInNewJob())
            .assertEachPod(podWithEfsMounts(expectedOrder), "Container not assigned the expected EFS mount")
            .allTasks(ScenarioTemplates.completeTask())
            .template(ScenarioTemplates.jobFinished())
            .expectJobEventStreamCompletes()
    );
}
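Note the reversal between efsMounts and expectedOrder: the job is submitted with the nested mount point /data/logs listed first, but the pod is expected to present the parent mount /data first. One plausible reading is that mounts are ordered so a parent mount point is established before any mount nested under it; for these two values, a plain sort by mount point produces exactly that order. A hypothetical stand-alone illustration of this ordering rule, assuming EfsMount exposes a getMountPoint() getter and reusing the efsMounts and expectedOrder lists from the test above:

// Sorting by mount point places the parent "/data" before the nested
// "/data/logs", matching the expectedOrder list in the test above.
List<EfsMount> sorted = efsMounts.stream()
        .sorted(Comparator.comparing(EfsMount::getMountPoint))
        .collect(Collectors.toList());
assertThat(sorted).isEqualTo(expectedOrder);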