Search in sources :

Example 1 with JsonProcessingException

Use of io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException in project mantis by Netflix.

The class LabelUtilsTest, method testSerDe.

@Test
public void testSerDe() {
    Label l = new Label("k1", "v1");
    ObjectMapper mapper = new ObjectMapper();
    try {
        // serializing a simple key/value Label should not throw
        System.out.println(mapper.writeValueAsString(l));
    } catch (JsonProcessingException e) {
        fail("serialization failed: " + e.getMessage());
    }
}
Also used : Label(io.mantisrx.common.Label) JsonProcessingException(io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException) ObjectMapper(io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test)
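
The test above exercises only serialization. A minimal round-trip sketch (not from the Mantis sources) is shown below; it assumes Label deserializes symmetrically under Jackson, so the JSON produced by writeValueAsString can be read back with readValue.

import io.mantisrx.common.Label;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;

public class LabelRoundTrip {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        Label label = new Label("k1", "v1");
        try {
            // writeValueAsString narrows its checked exception to JsonProcessingException
            String json = mapper.writeValueAsString(label);
            System.out.println(json);
            // readValue may also throw the broader IOException, of which
            // JsonProcessingException is a subclass; assumes Label is Jackson-deserializable
            Label back = mapper.readValue(json, Label.class);
            System.out.println(back);
        } catch (JsonProcessingException e) {
            e.printStackTrace();
        }
    }
}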

Example 2 with JsonProcessingException

Use of io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException in project mantis by Netflix.

The class VirtualMachineMasterServiceMesosImpl, method createTaskInfo.

private Collection<TaskInfo> createTaskInfo(Protos.SlaveID slaveID, final LaunchTaskRequest launchTaskRequest) throws LaunchTaskException {
    final ScheduleRequest scheduleRequest = launchTaskRequest.getScheduleRequest();
    String name = scheduleRequest.getWorkerId().getJobCluster() + " (stage: " + scheduleRequest.getStageNum() + " of " + scheduleRequest.getJobMetadata().getTotalStages() + ")";
    TaskID taskId = TaskID.newBuilder().setValue(scheduleRequest.getWorkerId().getId()).build();
    MachineDefinition machineDefinition = scheduleRequest.getMachineDefinition();
    // grab ports within range
    List<Integer> ports = launchTaskRequest.getPorts().getAllPorts();
    TaskInfo taskInfo = null;
    try {
        TaskInfo.Builder taskInfoBuilder = TaskInfo.newBuilder();
        ExecuteStageRequest executeStageRequest = new ExecuteStageRequest(
                scheduleRequest.getWorkerId().getJobCluster(),
                scheduleRequest.getWorkerId().getJobId(),
                scheduleRequest.getWorkerId().getWorkerIndex(),
                scheduleRequest.getWorkerId().getWorkerNum(),
                scheduleRequest.getJobMetadata().getJobJarUrl(),
                scheduleRequest.getStageNum(),
                scheduleRequest.getJobMetadata().getTotalStages(),
                ports,
                getTimeoutSecsToReportStart(),
                launchTaskRequest.getPorts().getMetricsPort(),
                scheduleRequest.getJobMetadata().getParameters(),
                scheduleRequest.getJobMetadata().getSchedulingInfo(),
                scheduleRequest.getDurationType(),
                scheduleRequest.getJobMetadata().getSubscriptionTimeoutSecs(),
                scheduleRequest.getJobMetadata().getMinRuntimeSecs() - (System.currentTimeMillis() - scheduleRequest.getJobMetadata().getMinRuntimeSecs()),
                launchTaskRequest.getPorts());
        taskInfoBuilder.setName(name)
                .setTaskId(taskId)
                .setSlaveId(slaveID)
                .addResources(Resource.newBuilder().setName("cpus").setType(Value.Type.SCALAR)
                        .setScalar(Value.Scalar.newBuilder().setValue(machineDefinition.getCpuCores())))
                .addResources(Resource.newBuilder().setName("mem").setType(Value.Type.SCALAR)
                        .setScalar(Value.Scalar.newBuilder().setValue(machineDefinition.getMemoryMB())))
                .addResources(Resource.newBuilder().setName("disk").setType(Value.Type.SCALAR)
                        .setScalar(Value.Scalar.newBuilder().setValue(machineDefinition.getDiskMB())))
                .addResources(Resource.newBuilder().setName("network").setType(Value.Type.SCALAR)
                        .setScalar(Value.Scalar.newBuilder().setValue(machineDefinition.getNetworkMbps())))
                .setExecutor(createMantisWorkerExecutor(executeStageRequest, launchTaskRequest,
                        machineDefinition.getMemoryMB(), machineDefinition.getCpuCores()))
                .setData(ByteString.copyFrom(mapper.writeValueAsBytes(executeStageRequest)));
        if (!ports.isEmpty()) {
            for (Integer port : ports) {
                // add ports
                taskInfoBuilder.addResources(Resource.newBuilder().setName("ports").setType(Value.Type.RANGES).setRanges(Value.Ranges.newBuilder().addRange(Value.Range.newBuilder().setBegin(port).setEnd(port))));
            }
        }
        taskInfo = taskInfoBuilder.build();
    } catch (JsonProcessingException e) {
        throw new LaunchTaskException("Failed to build a TaskInfo instance: " + e.getMessage(), e);
    }
    List<TaskInfo> tasks = new ArrayList<>(1);
    tasks.add(taskInfo);
    return tasks;
}
Also used : TaskID(org.apache.mesos.Protos.TaskID) MachineDefinition(io.mantisrx.runtime.MachineDefinition) ScheduleRequest(io.mantisrx.server.master.scheduler.ScheduleRequest) ArrayList(java.util.ArrayList) LaunchTaskException(io.mantisrx.server.master.LaunchTaskException) ByteString(com.google.protobuf.ByteString) ExecuteStageRequest(io.mantisrx.server.core.ExecuteStageRequest) TaskInfo(org.apache.mesos.Protos.TaskInfo) JsonProcessingException(io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException)
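
For context, here is a sketch of how the worker side might decode the data field back into an ExecuteStageRequest. The helper below is hypothetical (not from the Mantis sources); it assumes the same shaded mapper settings are used for reading as for the writeValueAsBytes call above.

// Hypothetical helper: mirror of mapper.writeValueAsBytes(executeStageRequest) above.
private ExecuteStageRequest decodeExecuteStageRequest(TaskInfo taskInfo) throws IOException {
    // TaskInfo.getData() returns the ByteString set via setData(...)
    byte[] data = taskInfo.getData().toByteArray();
    return mapper.readValue(data, ExecuteStageRequest.class);
}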

Example 3 with JsonProcessingException

Use of io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException in project mantis by Netflix.

The class SimpleSchedulerObserver, method main.

public static void main(String[] args) {
    objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    objectMapper.registerModule(new Jdk8Module());
    try {
        Args.parse(SimpleSchedulerObserver.class, args);
    } catch (IllegalArgumentException e) {
        Args.usage(SimpleSchedulerObserver.class);
        System.exit(1);
    }
    Properties properties = new Properties();
    try (InputStream inputStream = new FileInputStream(propFile)) {
        properties.load(inputStream);
    } catch (IOException e) {
        e.printStackTrace();
    }
    System.out.println("Listening to scheduling assignments with jobId=" + jobId);
    final CountDownLatch latch = new CountDownLatch(1);
    SimpleSchedulerObserver schedulerObserver = new SimpleSchedulerObserver(properties);
    final AtomicReference<JobAssignmentResult> ref = new AtomicReference<>(null);
    schedulerObserver.getObservable(jobId).filter(new Func1<JobAssignmentResult, Boolean>() {

        @Override
        public Boolean call(JobAssignmentResult jobAssignmentResult) {
            if (jobAssignmentResult == null)
                return false;
            if (jobAssignmentResult.isIdentical(ref.get()))
                return false;
            ref.set(jobAssignmentResult);
            return true;
        }
    }).doOnNext(new Action1<JobAssignmentResult>() {

        @Override
        public void call(JobAssignmentResult jobAssignmentResult) {
            System.out.println("Failures for job " + jobAssignmentResult.getJobId() + ":");
            for (JobAssignmentResult.Failure failure : jobAssignmentResult.getFailures()) {
                try {
                    System.out.println("  " + objectMapper.writeValueAsString(failure));
                } catch (JsonProcessingException e) {
                    e.printStackTrace();
                }
            }
        }
    }).doOnCompleted(new Action0() {

        @Override
        public void call() {
            latch.countDown();
        }
    }).doOnError(new Action1<Throwable>() {

        @Override
        public void call(Throwable throwable) {
            throwable.printStackTrace();
            latch.countDown();
        }
    }).subscribe();
    System.out.println("Subscribed.");
    try {
        latch.await();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}
Also used : Action0(rx.functions.Action0) Action1(rx.functions.Action1) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) AtomicReference(java.util.concurrent.atomic.AtomicReference) IOException(java.io.IOException) Properties(java.util.Properties) CountDownLatch(java.util.concurrent.CountDownLatch) JobAssignmentResult(io.mantisrx.server.core.JobAssignmentResult) Jdk8Module(io.mantisrx.shaded.com.fasterxml.jackson.datatype.jdk8.Jdk8Module) JsonProcessingException(io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException)
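
As a readability note, the anonymous Func1/Action1/Action0 classes above can be collapsed into lambdas on Java 8+. The following sketch is intended to be behavior-preserving, including the side effect in the filter:

schedulerObserver.getObservable(jobId)
        .filter(result -> {
            // skip nulls and assignments identical to the last one seen
            if (result == null || result.isIdentical(ref.get())) {
                return false;
            }
            ref.set(result);
            return true;
        })
        .doOnNext(result -> {
            System.out.println("Failures for job " + result.getJobId() + ":");
            for (JobAssignmentResult.Failure failure : result.getFailures()) {
                try {
                    System.out.println("  " + objectMapper.writeValueAsString(failure));
                } catch (JsonProcessingException e) {
                    e.printStackTrace();
                }
            }
        })
        .doOnCompleted(latch::countDown)
        .doOnError(throwable -> {
            throwable.printStackTrace();
            latch.countDown();
        })
        .subscribe();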

Example 4 with JsonProcessingException

Use of io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException in project mantis by Netflix.

The class JobMasterService, method handleMetricEvent.

private Measurements handleMetricEvent(final String ev) {
    try {
        final Measurements measurements = objectMapper.readValue(ev, Measurements.class);
        final String jobId = measurements.getTags().get(MetricStringConstants.MANTIS_JOB_ID);
        final int workerIdx = Integer.parseInt(measurements.getTags().get(MetricStringConstants.MANTIS_WORKER_INDEX));
        int stage = Integer.parseInt(measurements.getTags().get(MetricStringConstants.MANTIS_STAGE_NUM));
        final int workerNum = Integer.parseInt(measurements.getTags().get(MetricStringConstants.MANTIS_WORKER_NUM));
        List<GaugeMeasurement> gauges = (List<GaugeMeasurement>) measurements.getGauges();
        // Metric is not from the current job, so it is from the source job.
        // Compare string contents, not references; != on Strings is a reference check.
        if (!java.util.Objects.equals(jobId, this.jobId)) {
            // Funnel source job metric into the 1st stage
            stage = 1;
            if (gauges.isEmpty()) {
                gauges = measurements.getCounters().stream().map(counter -> new GaugeMeasurement(counter.getEvent(), counter.getCount())).collect(Collectors.toList());
            }
        }
        metricObserver.onNext(new MetricData(jobId, stage, workerIdx, workerNum, measurements.getName(), gauges));
        return measurements;
    } catch (JsonProcessingException e) {
        logger.error("failed to parse json", e);
    } catch (IOException e) {
        logger.error("failed to process json", e);
    } catch (Exception e) {
        logger.error("caught exception", e);
    }
    return null;
}
Also used : Measurements(io.mantisrx.common.metrics.measurement.Measurements) List(java.util.List) IOException(java.io.IOException) GaugeMeasurement(io.mantisrx.common.metrics.measurement.GaugeMeasurement) JsonProcessingException(io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException)
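
One caveat worth noting: tags.get(...) returns null for a missing tag, and Integer.parseInt(null) throws NumberFormatException, which falls through to the generic Exception handler above. A small defensive sketch, with a hypothetical helper name, assuming java.util.Map for the tags:

// Hypothetical helper: parse a required integer tag with a clear failure message.
private static int requiredIntTag(java.util.Map<String, String> tags, String key) {
    String value = tags.get(key);
    if (value == null) {
        throw new IllegalArgumentException("missing required metric tag: " + key);
    }
    return Integer.parseInt(value);
}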

Example 5 with JsonProcessingException

Use of io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException in project mantis by Netflix.

The class JobScaleUpDownTests, method testSchedulingInfo.

// TODO fix for timing issues
// @Test
public void testSchedulingInfo() throws Exception {
    CountDownLatch latch = new CountDownLatch(11);
    List<JobSchedulingInfo> schedulingChangesList = new CopyOnWriteArrayList<>();
    final TestKit probe = new TestKit(system);
    Map<ScalingReason, Strategy> smap = new HashMap<>();
    smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null));
    smap.put(ScalingReason.DataDrop, new Strategy(ScalingReason.DataDrop, 0.0, 2.0, null));
    SchedulingInfo sInfo = new SchedulingInfo.Builder()
            .numberOfStages(1)
            .multiWorkerScalableStageWithConstraints(
                    1,
                    new MachineDefinition(1.0, 1.0, 1.0, 3),
                    Lists.newArrayList(),
                    Lists.newArrayList(),
                    new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap))
            .build();
    String clusterName = "testSchedulingInfo";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    CountDownLatch worker1Started = new CountDownLatch(1);
    ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system, probe, clusterName, sInfo, schedulerMock, jobStoreMock, lifecycleEventPublisher);
    JobId jobId = new JobId(clusterName, 1);
    JobClusterManagerProto.GetJobSchedInfoRequest getJobSchedInfoRequest = new JobClusterManagerProto.GetJobSchedInfoRequest(jobId);
    jobActor.tell(getJobSchedInfoRequest, probe.getRef());
    JobClusterManagerProto.GetJobSchedInfoResponse resp = probe.expectMsgClass(JobClusterManagerProto.GetJobSchedInfoResponse.class);
    assertEquals(SUCCESS, resp.responseCode);
    assertTrue(resp.getJobSchedInfoSubject().isPresent());
    ObjectMapper mapper = new ObjectMapper();
    BehaviorSubject<JobSchedulingInfo> jobSchedulingInfoBehaviorSubject = resp.getJobSchedInfoSubject().get();
    jobSchedulingInfoBehaviorSubject.doOnNext((js) -> {
        System.out.println("Got --> " + js.toString());
    }).map((e) -> {
        try {
            return mapper.writeValueAsString(e);
        } catch (JsonProcessingException e1) {
            e1.printStackTrace();
            return "{\"error\":" + e1.getMessage() + "}";
        }
    }).map((js) -> {
        try {
            return mapper.readValue(js, JobSchedulingInfo.class);
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
    }).filter((j) -> j != null).doOnNext((js) -> {
    // Map<Integer, WorkerAssignments> workerAssignments = js.getWorkerAssignments();
    // WorkerAssignments workerAssignments1 = workerAssignments.get(1);
    // assertEquals(1, workerAssignments1.getNumWorkers());
    // Map<Integer, WorkerHost> hosts = workerAssignments1.getHosts();
    // // make sure worker number 1 exists
    // assertTrue(hosts.containsKey(1));
    }).doOnCompleted(() -> {
        System.out.println("SchedulingInfo completed");
        System.out.println(schedulingChangesList.size() + " Sched changes received");
    }).observeOn(Schedulers.io()).subscribe((js) -> {
        latch.countDown();
        schedulingChangesList.add(js);
    });
    // send scale up request
    jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(jobId.getId(), 1, 2, "", ""), probe.getRef());
    JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
    System.out.println("ScaleupResp " + scaleResp.message);
    assertEquals(SUCCESS, scaleResp.responseCode);
    assertEquals(2, scaleResp.getActualNumWorkers());
    JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId.getId(), 1, new WorkerId(jobId.getId(), 1, 3));
    // worker gets lost
    JobTestHelper.sendWorkerTerminatedEvent(probe, jobActor, jobId.getId(), new WorkerId(jobId.getId(), 1, 3));
    // Send replacement worker messages
    JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId.getId(), 1, new WorkerId(jobId.getId(), 1, 4));
    // scale down
    jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(jobId.getId(), 1, 1, "", ""), probe.getRef());
    JobClusterManagerProto.ScaleStageResponse scaleDownResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
    System.out.println("ScaleDownResp " + scaleDownResp.message);
    assertEquals(SUCCESS, scaleDownResp.responseCode);
    assertEquals(1, scaleDownResp.getActualNumWorkers());
    // kill job
    jobActor.tell(new JobClusterProto.KillJobRequest(jobId, "killed", JobCompletedReason.Killed, "test", probe.getRef()), probe.getRef());
    probe.expectMsgClass(JobClusterProto.KillJobResponse.class);
    for (JobSchedulingInfo jobSchedulingInfo : schedulingChangesList) {
        System.out.println(jobSchedulingInfo);
    }
    /*
    SchedulingChange [jobId=testSchedulingInfo-1,
workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=1, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
													   3=WorkerHost [state=Launched, workerIndex=1, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
													   3=WorkerHost [state=StartInitiated, workerIndex=1, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
													   3=WorkerHost [state=Started, workerIndex=1, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
													   4=WorkerHost [state=Launched, workerIndex=1, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
														4=WorkerHost [state=StartInitiated, workerIndex=1, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
														4=WorkerHost [state=Started, workerIndex=1, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=1, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}
	]}]
     */
    latch.await(1000, TimeUnit.SECONDS);
    System.out.println("---->Verifying scheduling changes " + schedulingChangesList.size());
    assertEquals(11, schedulingChangesList.size());
    for (int i = 0; i < schedulingChangesList.size(); i++) {
        JobSchedulingInfo js = schedulingChangesList.get(i);
        // jobid is correct
        assertEquals(jobId.getId(), js.getJobId());
        Map<Integer, WorkerAssignments> workerAssignments = js.getWorkerAssignments();
        // has info about stage 1
        System.out.println("WorkerAssignments -> " + workerAssignments);
        // assertTrue(workerAssignments.containsKey(1));
        switch(i) {
            case 0:
                WorkerAssignments wa0 = workerAssignments.get(1);
                assertEquals(1, wa0.getNumWorkers());
                Map<Integer, WorkerHost> hosts0 = wa0.getHosts();
                // make sure worker number 2 exists
                validateHost(hosts0, 0, 2, MantisJobState.Started);
                break;
            // scale up by 1
            case 1:
                WorkerAssignments wa1 = workerAssignments.get(1);
                assertEquals(2, wa1.getNumWorkers());
                Map<Integer, WorkerHost> hosts1 = wa1.getHosts();
                assertEquals(1, hosts1.size());
                // first update has only numWorkers updated but the new worker is still in Accepted state, so no host entry for it
                validateHost(hosts1, 0, 2, MantisJobState.Started);
                assertFalse(hosts1.containsKey(3));
                break;
            case 2:
                WorkerAssignments wa2 = workerAssignments.get(1);
                assertEquals(2, wa2.getNumWorkers());
                Map<Integer, WorkerHost> hosts2 = wa2.getHosts();
                assertEquals(2, hosts2.size());
                // next update should have both numWorkers and the new worker in Launched state
                validateHost(hosts2, 0, 2, MantisJobState.Started);
                validateHost(hosts2, 1, 3, MantisJobState.Launched);
                break;
            case 3:
                WorkerAssignments wa3 = workerAssignments.get(1);
                assertEquals(2, wa3.getNumWorkers());
                Map<Integer, WorkerHost> hosts3 = wa3.getHosts();
                assertEquals(2, hosts3.size());
                // this update is for new worker in StartInit state
                validateHost(hosts3, 0, 2, MantisJobState.Started);
                validateHost(hosts3, 1, 3, MantisJobState.StartInitiated);
                break;
            case 4:
                WorkerAssignments wa4 = workerAssignments.get(1);
                assertEquals(2, wa4.getNumWorkers());
                Map<Integer, WorkerHost> hosts4 = wa4.getHosts();
                assertEquals(2, hosts4.size());
                // this update is for new worker in Started state
                validateHost(hosts4, 0, 2, MantisJobState.Started);
                validateHost(hosts4, 1, 3, MantisJobState.Started);
                break;
            case 5:
                // worker 3 is lost and should be resubmitted
                WorkerAssignments wa5 = workerAssignments.get(1);
                assertEquals(2, wa5.getNumWorkers());
                Map<Integer, WorkerHost> hosts5 = wa5.getHosts();
                assertEquals(1, hosts5.size());
                validateHost(hosts5, 0, 2, MantisJobState.Started);
                assertFalse(hosts5.containsKey(3));
                break;
            case 6:
                // worker 3 is replaced by worker num 4
                WorkerAssignments wa6 = workerAssignments.get(1);
                assertEquals(2, wa6.getNumWorkers());
                Map<Integer, WorkerHost> hosts6 = wa6.getHosts();
                // this update should have both numWorkers and the new worker in Launched state
                assertEquals(2, hosts6.size());
                validateHost(hosts6, 0, 2, MantisJobState.Started);
                validateHost(hosts6, 1, 4, MantisJobState.Launched);
                break;
            case 7:
                WorkerAssignments wa7 = workerAssignments.get(1);
                assertEquals(2, wa7.getNumWorkers());
                Map<Integer, WorkerHost> hosts7 = wa7.getHosts();
                // update for new worker in StartInit state
                assertEquals(2, hosts7.size());
                validateHost(hosts7, 0, 2, MantisJobState.Started);
                validateHost(hosts7, 1, 4, MantisJobState.StartInitiated);
                break;
            case 8:
                WorkerAssignments wa8 = workerAssignments.get(1);
                assertEquals(2, wa8.getNumWorkers());
                Map<Integer, WorkerHost> hosts8 = wa8.getHosts();
                // update for new worker in Started state
                assertEquals(2, hosts8.size());
                validateHost(hosts8, 0, 2, MantisJobState.Started);
                validateHost(hosts8, 1, 4, MantisJobState.Started);
                break;
            case 9:
                // scale down, worker 4 should be gone now and numWorkers set to 1
                WorkerAssignments wa9 = workerAssignments.get(1);
                assertEquals(1, wa9.getNumWorkers());
                Map<Integer, WorkerHost> hosts9 = wa9.getHosts();
                assertTrue(hosts9.containsKey(2));
                assertEquals(1, hosts9.size());
                validateHost(hosts9, 0, 2, MantisJobState.Started);
                break;
            case 10:
                // job has been killed
                assertTrue(workerAssignments.isEmpty());
                break;
            default:
                fail();
        }
    }
// 
// verify(jobStoreMock, times(1)).storeNewJob(any());
// // initial worker
// verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
// 
// //scale up worker
// verify(jobStoreMock, times(1)).storeNewWorker(any());
// 
// // verify(jobStoreMock, times(17)).updateWorker(any());
// 
// verify(jobStoreMock, times(3)).updateJob(any());
// 
// // initial worker + job master and scale up worker + resubmit
// verify(schedulerMock, times(4)).scheduleWorker(any());
// 
// verify(schedulerMock, times(4)).unscheduleAndTerminateWorker(any(), any());
}
Also used : JobId(io.mantisrx.server.master.domain.JobId) TestHelpers(com.netflix.mantis.master.scheduler.TestHelpers) ObjectMapper(io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper) MantisJobState(io.mantisrx.runtime.MantisJobState) MantisJobStore(io.mantisrx.server.master.persistence.MantisJobStore) JobCompletedReason(io.mantisrx.server.core.JobCompletedReason) MantisScheduler(io.mantisrx.server.master.scheduler.MantisScheduler) CLIENT_ERROR(io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR) JobClusterProto(io.mantisrx.master.jobcluster.proto.JobClusterProto) ActorRef(akka.actor.ActorRef) Map(java.util.Map) Schedulers(rx.schedulers.Schedulers) Assert.fail(org.junit.Assert.fail) StatusEventSubscriberLoggingImpl(io.mantisrx.master.events.StatusEventSubscriberLoggingImpl) AfterClass(org.junit.AfterClass) WorkerEventSubscriberLoggingImpl(io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl) WorkerAssignments(io.mantisrx.server.core.WorkerAssignments) Matchers.any(org.mockito.Matchers.any) CountDownLatch(java.util.concurrent.CountDownLatch) WorkerId(io.mantisrx.server.core.domain.WorkerId) InvalidJobException(io.mantisrx.server.master.persistence.exceptions.InvalidJobException) List(java.util.List) Assert.assertFalse(org.junit.Assert.assertFalse) ActorSystem(akka.actor.ActorSystem) BehaviorSubject(rx.subjects.BehaviorSubject) StageScalingPolicy(io.mantisrx.runtime.descriptor.StageScalingPolicy) ScalingReason(io.mantisrx.runtime.descriptor.StageScalingPolicy.ScalingReason) Mockito.mock(org.mockito.Mockito.mock) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) BeforeClass(org.junit.BeforeClass) HashMap(java.util.HashMap) AuditEventSubscriberLoggingImpl(io.mantisrx.master.events.AuditEventSubscriberLoggingImpl) LifecycleEventPublisherImpl(io.mantisrx.master.events.LifecycleEventPublisherImpl) SchedulingInfo(io.mantisrx.runtime.descriptor.SchedulingInfo) MachineDefinition(io.mantisrx.runtime.MachineDefinition) Strategy(io.mantisrx.runtime.descriptor.StageScalingPolicy.Strategy) JobSchedulingInfo(io.mantisrx.server.core.JobSchedulingInfo) JsonProcessingException(io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException) Assert.assertTrue(org.junit.Assert.assertTrue) Mockito.times(org.mockito.Mockito.times) IOException(java.io.IOException) Test(org.junit.Test) TestKit(akka.testkit.javadsl.TestKit) SUCCESS(io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS) Mockito.verify(org.mockito.Mockito.verify) TimeUnit(java.util.concurrent.TimeUnit) JobClusterManagerProto(io.mantisrx.master.jobcluster.proto.JobClusterManagerProto) Lists(io.mantisrx.shaded.com.google.common.collect.Lists) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) LifecycleEventPublisher(io.mantisrx.master.events.LifecycleEventPublisher) WorkerHost(io.mantisrx.server.core.WorkerHost)
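
The validateHost helper is not part of this excerpt. A sketch consistent with its call sites (hosts map, expected worker index, worker number, and state) might look like the following; the WorkerHost getter names are assumptions inferred from its toString output above:

// Hypothetical reconstruction of the validateHost helper used in the test.
private void validateHost(Map<Integer, WorkerHost> hosts, int workerIndex, int workerNum, MantisJobState state) {
    // the worker number must be present as a key in the assignment map
    assertTrue(hosts.containsKey(workerNum));
    WorkerHost host = hosts.get(workerNum);
    // assumed getters, based on WorkerHost [state=..., workerIndex=...] above
    assertEquals(workerIndex, host.getWorkerIndex());
    assertEquals(state, host.getState());
}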

Aggregations

JsonProcessingException (io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException) 8
ObjectMapper (io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper) 3
IOException (java.io.IOException) 3
List (java.util.List) 3
Measurements (io.mantisrx.common.metrics.measurement.Measurements) 2
MachineDefinition (io.mantisrx.runtime.MachineDefinition) 2
Map (java.util.Map) 2
ActorRef (akka.actor.ActorRef) 1
ActorSystem (akka.actor.ActorSystem) 1
TestKit (akka.testkit.javadsl.TestKit) 1
ByteString (com.google.protobuf.ByteString) 1
TestHelpers (com.netflix.mantis.master.scheduler.TestHelpers) 1
Label (io.mantisrx.common.Label) 1
Counter (io.mantisrx.common.metrics.Counter) 1
Metrics (io.mantisrx.common.metrics.Metrics) 1
GaugeMeasurement (io.mantisrx.common.metrics.measurement.GaugeMeasurement) 1
AuditEventSubscriberLoggingImpl (io.mantisrx.master.events.AuditEventSubscriberLoggingImpl) 1
LifecycleEventPublisher (io.mantisrx.master.events.LifecycleEventPublisher) 1
LifecycleEventPublisherImpl (io.mantisrx.master.events.LifecycleEventPublisherImpl) 1
StatusEventSubscriberLoggingImpl (io.mantisrx.master.events.StatusEventSubscriberLoggingImpl) 1