Use of io.mantisrx.runtime.descriptor.SchedulingInfo in project mantis by Netflix: class JobScaleUpDownTests, method testJobScaleUp.
// ////////////////////// Scale up Tests ////////////////////////////////////

/**
 * Verifies that a scale-up request against a scalable single-stage job succeeds:
 * the response is SUCCESS with the new worker count, the new worker is persisted,
 * and the scheduler is asked to place it.
 */
@Test
public void testJobScaleUp() throws Exception {
    // Redundant throws of InvalidJobException removed: `Exception` already covers them.
    final TestKit probe = new TestKit(system);

    // Scaling strategies (CPU + DataDrop) so the stage is actually scalable.
    Map<ScalingReason, Strategy> smap = new HashMap<>();
    smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null));
    smap.put(ScalingReason.DataDrop, new Strategy(ScalingReason.DataDrop, 0.0, 2.0, null));
    SchedulingInfo sInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .multiWorkerScalableStageWithConstraints(
                    1,
                    new MachineDefinition(1.0, 1.0, 1.0, 3),
                    Lists.newArrayList(),
                    Lists.newArrayList(),
                    new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap))
            .build();

    String clusterName = "testJobScaleUp";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(
            system, probe, clusterName, sInfo, schedulerMock, jobStoreMock, lifecycleEventPublisher);

    // send scale up request: stage 1 from 1 worker to 2
    jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(clusterName + "-1", 1, 2, "", ""), probe.getRef());
    JobClusterManagerProto.ScaleStageResponse scaleResp =
            probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
    System.out.println("ScaleupResp " + scaleResp.message);
    assertEquals(SUCCESS, scaleResp.responseCode);
    assertEquals(2, scaleResp.getActualNumWorkers());

    verify(jobStoreMock, times(1)).storeNewJob(any());
    // initial worker
    verify(jobStoreMock, times(1)).storeNewWorkers(any(), any());
    // scale up worker
    verify(jobStoreMock, times(1)).storeNewWorker(any());
    verify(jobStoreMock, times(6)).updateWorker(any());
    verify(jobStoreMock, times(3)).updateJob(any());
    // initial worker + job master and scale up worker
    verify(schedulerMock, times(3)).scheduleWorker(any());
}
Use of io.mantisrx.runtime.descriptor.SchedulingInfo in project mantis by Netflix: class JobScaleUpDownTests, method testJobScaleUpFailsIfNoScaleStrategy.
/**
 * A scale-up request against a stage configured without any scaling strategies
 * must be rejected with CLIENT_ERROR, and no additional worker may be
 * persisted or scheduled.
 */
@Test
public void testJobScaleUpFailsIfNoScaleStrategy() throws Exception {
    final TestKit probe = new TestKit(system);

    // Deliberately empty strategy map: the stage has no way to scale.
    Map<ScalingReason, Strategy> emptyStrategies = new HashMap<>();
    SchedulingInfo sInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .multiWorkerScalableStageWithConstraints(
                    1,
                    new MachineDefinition(1.0, 1.0, 1.0, 3),
                    Lists.newArrayList(),
                    Lists.newArrayList(),
                    new StageScalingPolicy(1, 0, 10, 1, 1, 0, emptyStrategies))
            .build();

    String clusterName = "testJobScaleUpFailsIfNoScaleStrategy";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(
            system, probe, clusterName, sInfo, schedulerMock, jobStoreMock, lifecycleEventPublisher);

    // Attempt to scale stage 1 up to 2 workers; expect rejection.
    jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(clusterName + "-1", 1, 2, "", ""), probe.getRef());
    JobClusterManagerProto.ScaleStageResponse scaleResp =
            probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
    System.out.println("ScaleupResp " + scaleResp.message);

    assertEquals(CLIENT_ERROR, scaleResp.responseCode);
    assertEquals(0, scaleResp.getActualNumWorkers());

    verify(jobStoreMock, times(1)).storeNewJob(any());
    // initial worker
    verify(jobStoreMock, times(1)).storeNewWorkers(any(), any());
    // no scale up worker happened
    verify(jobStoreMock, times(0)).storeNewWorker(any());
    verify(jobStoreMock, times(3)).updateWorker(any());
    verify(jobStoreMock, times(3)).updateJob(any());
    // initial worker only
    verify(schedulerMock, times(1)).scheduleWorker(any());
}
Use of io.mantisrx.runtime.descriptor.SchedulingInfo in project mantis by Netflix: class WorkerRegistryV2Test, method testJobShutdown.
/**
 * Verifies that killing a job eventually removes all of its workers from the
 * WorkerRegistryV2 that is kept up to date via lifecycle events.
 *
 * Fixes two defects in the original: (1) the catch block only printed the
 * exception, so the test passed even when setup or the kill sequence threw;
 * (2) the 100-iteration poll loop had no delay between checks, making it a
 * single instantaneous check in practice.
 */
@Test
public void testJobShutdown() {
    WorkerRegistryV2 workerRegistryV2 = new WorkerRegistryV2();
    LifecycleEventPublisher eventPublisher = new LifecycleEventPublisherImpl(
            new AuditEventSubscriberLoggingImpl(),
            new StatusEventSubscriberLoggingImpl(),
            new DummyWorkerEventSubscriberImpl(workerRegistryV2));

    Map<StageScalingPolicy.ScalingReason, StageScalingPolicy.Strategy> smap = new HashMap<>();
    smap.put(StageScalingPolicy.ScalingReason.CPU,
            new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.CPU, 0.5, 0.75, null));
    smap.put(StageScalingPolicy.ScalingReason.DataDrop,
            new StageScalingPolicy.Strategy(StageScalingPolicy.ScalingReason.DataDrop, 0.0, 2.0, null));
    SchedulingInfo sInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .multiWorkerScalableStageWithConstraints(
                    1,
                    new MachineDefinition(1.0, 1.0, 1.0, 3),
                    Lists.newArrayList(),
                    Lists.newArrayList(),
                    new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap))
            .build();
    String clusterName = "testJobShutdown";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    try {
        ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(
                system, probe, clusterName, sInfo, schedulerMock, jobStoreMock, eventPublisher);
        // 1 stage worker + job master
        assertEquals(2, workerRegistryV2.getNumRunningWorkers());

        jobActor.tell(new JobClusterProto.KillJobRequest(
                        new JobId(clusterName, 1), "test reason", JobCompletedReason.Normal, "nj", probe.getRef()),
                probe.getRef());
        probe.expectMsgClass(JobClusterProto.KillJobResponse.class);

        // Poll with a short sleep (up to ~10s) until the registry drains;
        // worker removal happens asynchronously via lifecycle events.
        boolean drained = false;
        for (int i = 0; i < 100; i++) {
            if (workerRegistryV2.getNumRunningWorkers() == 0) {
                drained = true;
                break;
            }
            Thread.sleep(100);
        }
        assertTrue(drained);
    } catch (Exception e) {
        e.printStackTrace();
        // Propagate as a failure instead of silently passing the test.
        throw new AssertionError("testJobShutdown failed with unexpected exception", e);
    }
}
Use of io.mantisrx.runtime.descriptor.SchedulingInfo in project mantis by Netflix: class JobDefinitionResolverTest, method versionSchedPresentTest.
/**
 * Resolver behavior when version and/or scheduling info are supplied on the
 * submitted job definition: supplied values win, while artifact, labels and
 * parameters are inherited from the cluster definition.
 *
 * The duplicated catch blocks (InvalidJobException and Exception with
 * identical bodies) are collapsed into a single catch.
 */
@Test
public void versionSchedPresentTest() {
    String clusterName = "versionSchedPresentTest";
    List<Label> labels = new ArrayList<>();
    Label label = new Label("l1", "lv1");
    labels.add(label);
    List<Parameter> parameters = new ArrayList<>();
    Parameter parameter = new Parameter("paramName", "paramValue");
    parameters.add(parameter);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels, parameters);
    IJobClusterMetadata jobClusterMetadata = new JobClusterMetadataImpl(fakeJobCluster, 1, false);
    String version = "0.0.1";
    JobConstraints softConstraints = JobConstraints.ExclusiveHost;
    List<JobConstraints> constraintsList = new ArrayList<>();
    constraintsList.add(softConstraints);
    SchedulingInfo schedulingInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .singleWorkerStageWithConstraints(DEFAULT_MACHINE_DEFINITION, Lists.newArrayList(), constraintsList)
            .build();

    // Both version and scheduling info are specified.
    try {
        JobDefinition givenJobDefn = new JobDefinition.Builder()
                .withName(clusterName)
                .withSchedulingInfo(schedulingInfo)
                .withVersion(version)
                .build();
        JobDefinitionResolver resolver = new JobDefinitionResolver();
        JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
        // artifact will get populated using the given version.
        assertEquals(DEFAULT_ARTIFACT_NAME, resolvedJobDefinition.getArtifactName());
        // scheduling info will be the one specified by us
        assertEquals(schedulingInfo, resolvedJobDefinition.getSchedulingInfo());
        // version should match what we set.
        assertEquals(version, resolvedJobDefinition.getVersion());
        // assert the parameters and labels are inherited since they were not specified
        assertEquals(1, resolvedJobDefinition.getLabels().size());
        assertEquals(label, resolvedJobDefinition.getLabels().get(0));
        assertEquals(1, resolvedJobDefinition.getParameters().size());
        assertEquals(parameter, resolvedJobDefinition.getParameters().get(0));
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }

    // Only version is specified.
    try {
        JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).withVersion(version).build();
        JobDefinitionResolver resolver = new JobDefinitionResolver();
        JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
        // assert the artifact is inherited
        assertEquals(DEFAULT_ARTIFACT_NAME, resolvedJobDefinition.getArtifactName());
        // assert the scheduling info is inherited
        assertEquals(SINGLE_WORKER_SCHED_INFO, resolvedJobDefinition.getSchedulingInfo());
        // assert the version is the one we gave
        assertEquals(version, resolvedJobDefinition.getVersion());
        // assert the parameters and labels are inherited since they were not specified
        assertEquals(1, resolvedJobDefinition.getLabels().size());
        assertEquals(label, resolvedJobDefinition.getLabels().get(0));
        assertEquals(1, resolvedJobDefinition.getParameters().size());
        assertEquals(parameter, resolvedJobDefinition.getParameters().get(0));
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
Use of io.mantisrx.runtime.descriptor.SchedulingInfo in project mantis by Netflix: class JobDefinitionResolverTest, method SchedPresentTest.
/**
 * Resolver behavior when only scheduling info — or nothing at all — is
 * supplied on the submitted job definition: unspecified fields (artifact,
 * version, labels, parameters, scheduling info) are inherited from the
 * cluster definition; a literal "null" version is treated as unspecified.
 *
 * The duplicated catch blocks (InvalidJobException and Exception with
 * identical bodies) are collapsed into a single catch, and the "dfeault"
 * comment typos are fixed.
 */
@Test
public void SchedPresentTest() {
    String clusterName = "SchedPresentTest";
    List<Label> labels = new ArrayList<>();
    Label label = new Label("l1", "lv1");
    labels.add(label);
    List<Parameter> parameters = new ArrayList<>();
    Parameter parameter = new Parameter("paramName", "paramValue");
    parameters.add(parameter);
    final JobClusterDefinitionImpl fakeJobCluster = createFakeJobClusterDefn(clusterName, labels, parameters);
    IJobClusterMetadata jobClusterMetadata = new JobClusterMetadataImpl(fakeJobCluster, 1, false);
    JobConstraints softConstraints = JobConstraints.ExclusiveHost;
    List<JobConstraints> constraintsList = new ArrayList<>();
    constraintsList.add(softConstraints);
    SchedulingInfo schedulingInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .singleWorkerStageWithConstraints(DEFAULT_MACHINE_DEFINITION, Lists.newArrayList(), constraintsList)
            .build();

    // Only scheduling info is specified.
    try {
        JobDefinition givenJobDefn = new JobDefinition.Builder()
                .withName(clusterName)
                .withSchedulingInfo(schedulingInfo)
                .build();
        JobDefinitionResolver resolver = new JobDefinitionResolver();
        JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
        // artifact will get populated using the given version.
        assertEquals(DEFAULT_ARTIFACT_NAME, resolvedJobDefinition.getArtifactName());
        // scheduling info will be the one specified by us
        assertEquals(schedulingInfo, resolvedJobDefinition.getSchedulingInfo());
        // version should match the latest on the cluster
        assertEquals(DEFAULT_VERSION, resolvedJobDefinition.getVersion());
        // assert the parameters and labels are inherited since they were not specified
        assertEquals(1, resolvedJobDefinition.getLabels().size());
        assertEquals(label, resolvedJobDefinition.getLabels().get(0));
        assertEquals(1, resolvedJobDefinition.getParameters().size());
        assertEquals(parameter, resolvedJobDefinition.getParameters().get(0));
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }

    // NOTHING is specified.
    try {
        JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).build();
        JobDefinitionResolver resolver = new JobDefinitionResolver();
        JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
        // assert the artifact is inherited
        assertEquals(DEFAULT_ARTIFACT_NAME, resolvedJobDefinition.getArtifactName());
        // assert the scheduling info is inherited
        assertEquals(SINGLE_WORKER_SCHED_INFO, resolvedJobDefinition.getSchedulingInfo());
        // assert the version is the default one.
        assertEquals(DEFAULT_VERSION, resolvedJobDefinition.getVersion());
        // assert the parameters and labels are inherited since they were not specified
        assertEquals(1, resolvedJobDefinition.getLabels().size());
        assertEquals(label, resolvedJobDefinition.getLabels().get(0));
        assertEquals(1, resolvedJobDefinition.getParameters().size());
        assertEquals(parameter, resolvedJobDefinition.getParameters().get(0));
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }

    // NOTHING is specified (version given as the literal string "null").
    try {
        JobDefinition givenJobDefn = new JobDefinition.Builder().withName(clusterName).withVersion("null").build();
        JobDefinitionResolver resolver = new JobDefinitionResolver();
        JobDefinition resolvedJobDefinition = resolver.getResolvedJobDefinition("user", givenJobDefn, jobClusterMetadata);
        // assert the artifact is inherited
        assertEquals(DEFAULT_ARTIFACT_NAME, resolvedJobDefinition.getArtifactName());
        // assert the scheduling info is inherited
        assertEquals(SINGLE_WORKER_SCHED_INFO, resolvedJobDefinition.getSchedulingInfo());
        // assert the version is the default one.
        assertEquals(DEFAULT_VERSION, resolvedJobDefinition.getVersion());
        // assert the parameters and labels are inherited since they were not specified
        assertEquals(1, resolvedJobDefinition.getLabels().size());
        assertEquals(label, resolvedJobDefinition.getLabels().get(0));
        assertEquals(1, resolvedJobDefinition.getParameters().size());
        assertEquals(parameter, resolvedJobDefinition.getParameters().get(0));
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
Aggregations