Example 11 with ObjectMapper

Use of io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper in project mantis by Netflix.

From the class JobScaleUpDownTests, method testSchedulingInfo:

// TODO fix for timing issues
// @Test
public void testSchedulingInfo() throws Exception {
    CountDownLatch latch = new CountDownLatch(11);
    List<JobSchedulingInfo> schedulingChangesList = new CopyOnWriteArrayList<>();
    final TestKit probe = new TestKit(system);
    Map<ScalingReason, Strategy> smap = new HashMap<>();
    smap.put(ScalingReason.CPU, new Strategy(ScalingReason.CPU, 0.5, 0.75, null));
    smap.put(ScalingReason.DataDrop, new Strategy(ScalingReason.DataDrop, 0.0, 2.0, null));
    SchedulingInfo sInfo = new SchedulingInfo.Builder().numberOfStages(1).multiWorkerScalableStageWithConstraints(1, new MachineDefinition(1.0, 1.0, 1.0, 3), Lists.newArrayList(), Lists.newArrayList(), new StageScalingPolicy(1, 0, 10, 1, 1, 0, smap)).build();
    String clusterName = "testSchedulingInfo";
    MantisScheduler schedulerMock = mock(MantisScheduler.class);
    MantisJobStore jobStoreMock = mock(MantisJobStore.class);
    CountDownLatch worker1Started = new CountDownLatch(1);
    ActorRef jobActor = JobTestHelper.submitSingleStageScalableJob(system, probe, clusterName, sInfo, schedulerMock, jobStoreMock, lifecycleEventPublisher);
    JobId jobId = new JobId(clusterName, 1);
    JobClusterManagerProto.GetJobSchedInfoRequest getJobSchedInfoRequest = new JobClusterManagerProto.GetJobSchedInfoRequest(jobId);
    jobActor.tell(getJobSchedInfoRequest, probe.getRef());
    JobClusterManagerProto.GetJobSchedInfoResponse resp = probe.expectMsgClass(JobClusterManagerProto.GetJobSchedInfoResponse.class);
    assertEquals(SUCCESS, resp.responseCode);
    assertTrue(resp.getJobSchedInfoSubject().isPresent());
    ObjectMapper mapper = new ObjectMapper();
    BehaviorSubject<JobSchedulingInfo> jobSchedulingInfoBehaviorSubject = resp.getJobSchedInfoSubject().get();
    jobSchedulingInfoBehaviorSubject.doOnNext((js) -> {
        System.out.println("Got --> " + js.toString());
    }).map((e) -> {
        try {
            return mapper.writeValueAsString(e);
        } catch (JsonProcessingException e1) {
            e1.printStackTrace();
            return "{\"error\":" + e1.getMessage() + "}";
        }
    }).map((js) -> {
        try {
            return mapper.readValue(js, JobSchedulingInfo.class);
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
    }).filter((j) -> j != null).doOnNext((js) -> {
    // Map<Integer, WorkerAssignments> workerAssignments = js.getWorkerAssignments();
    // WorkerAssignments workerAssignments1 = workerAssignments.get(1);
    // assertEquals(1, workerAssignments1.getNumWorkers());
    // Map<Integer, WorkerHost> hosts = workerAssignments1.getHosts();
    // // make sure worker number 1 exists
    // assertTrue(hosts.containsKey(1));
    }).doOnCompleted(() -> {
        System.out.println("SchedulingInfo completed");
        System.out.println(schedulingChangesList.size() + " Sched changes received");
    }).observeOn(Schedulers.io()).subscribe((js) -> {
        latch.countDown();
        schedulingChangesList.add(js);
    });
    // send scale up request
    jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(jobId.getId(), 1, 2, "", ""), probe.getRef());
    JobClusterManagerProto.ScaleStageResponse scaleResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
    System.out.println("ScaleupResp " + scaleResp.message);
    assertEquals(SUCCESS, scaleResp.responseCode);
    assertEquals(2, scaleResp.getActualNumWorkers());
    JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId.getId(), 1, new WorkerId(jobId.getId(), 1, 3));
    // worker gets lost
    JobTestHelper.sendWorkerTerminatedEvent(probe, jobActor, jobId.getId(), new WorkerId(jobId.getId(), 1, 3));
    // Send replacement worker messages
    JobTestHelper.sendLaunchedInitiatedStartedEventsToWorker(probe, jobActor, jobId.getId(), 1, new WorkerId(jobId.getId(), 1, 4));
    // scale down
    jobActor.tell(new JobClusterManagerProto.ScaleStageRequest(jobId.getId(), 1, 1, "", ""), probe.getRef());
    JobClusterManagerProto.ScaleStageResponse scaleDownResp = probe.expectMsgClass(JobClusterManagerProto.ScaleStageResponse.class);
    System.out.println("ScaleDownResp " + scaleDownResp.message);
    assertEquals(SUCCESS, scaleDownResp.responseCode);
    assertEquals(1, scaleDownResp.getActualNumWorkers());
    // kill job
    jobActor.tell(new JobClusterProto.KillJobRequest(jobId, "killed", JobCompletedReason.Killed, "test", probe.getRef()), probe.getRef());
    probe.expectMsgClass(JobClusterProto.KillJobResponse.class);
    for (JobSchedulingInfo jobSchedulingInfo : schedulingChangesList) {
        System.out.println(jobSchedulingInfo);
    }
    /*
    SchedulingChange [jobId=testSchedulingInfo-1,
workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=1, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
													   3=WorkerHost [state=Launched, workerIndex=1, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
													   3=WorkerHost [state=StartInitiated, workerIndex=1, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
													   3=WorkerHost [state=Started, workerIndex=1, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
													   4=WorkerHost [state=Launched, workerIndex=1, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
														4=WorkerHost [state=StartInitiated, workerIndex=1, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=2, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]],
														4=WorkerHost [state=Started, workerIndex=1, host=host1, port=[9020]]}]}]
SchedulingChange [jobId=testSchedulingInfo-1, workerAssignments={
	0=WorkerAssignments [stage=0, numWorkers=1, hosts={1=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}],
	1=WorkerAssignments [stage=1, numWorkers=1, hosts={2=WorkerHost [state=Started, workerIndex=0, host=host1, port=[9020]]}
	]}]
     */
    latch.await(1000, TimeUnit.SECONDS);
    System.out.println("---->Verifying scheduling changes " + schedulingChangesList.size());
    assertEquals(11, schedulingChangesList.size());
    for (int i = 0; i < schedulingChangesList.size(); i++) {
        JobSchedulingInfo js = schedulingChangesList.get(i);
        // jobid is correct
        assertEquals(jobId.getId(), js.getJobId());
        Map<Integer, WorkerAssignments> workerAssignments = js.getWorkerAssignments();
        // has info about stage 1
        System.out.println("WorkerAssignments -> " + workerAssignments);
        // assertTrue(workerAssignments.containsKey(1));
        switch(i) {
            case 0:
                WorkerAssignments wa0 = workerAssignments.get(1);
                assertEquals(1, wa0.getNumWorkers());
                Map<Integer, WorkerHost> hosts0 = wa0.getHosts();
                // make sure worker number 2 exists
                validateHost(hosts0, 0, 2, MantisJobState.Started);
                break;
            // scale up by 1
            case 1:
                WorkerAssignments wa1 = workerAssignments.get(1);
                assertEquals(2, wa1.getNumWorkers());
                Map<Integer, WorkerHost> hosts1 = wa1.getHosts();
                assertEquals(1, hosts1.size());
                // first update has only numWorkers updated but the new worker is still in Accepted state, so no host entry for it
                validateHost(hosts1, 0, 2, MantisJobState.Started);
                assertFalse(hosts1.containsKey(3));
                break;
            case 2:
                WorkerAssignments wa2 = workerAssignments.get(1);
                assertEquals(2, wa2.getNumWorkers());
                Map<Integer, WorkerHost> hosts2 = wa2.getHosts();
                assertEquals(2, hosts2.size());
                // next update should have both numWorkers and the new worker in Launched state
                validateHost(hosts2, 0, 2, MantisJobState.Started);
                validateHost(hosts2, 1, 3, MantisJobState.Launched);
                break;
            case 3:
                WorkerAssignments wa3 = workerAssignments.get(1);
                assertEquals(2, wa3.getNumWorkers());
                Map<Integer, WorkerHost> hosts3 = wa3.getHosts();
                assertEquals(2, hosts3.size());
                // this update is for new worker in StartInit state
                validateHost(hosts3, 0, 2, MantisJobState.Started);
                validateHost(hosts3, 1, 3, MantisJobState.StartInitiated);
                break;
            case 4:
                WorkerAssignments wa4 = workerAssignments.get(1);
                assertEquals(2, wa4.getNumWorkers());
                Map<Integer, WorkerHost> hosts4 = wa4.getHosts();
                assertEquals(2, hosts4.size());
                // this update is for new worker in Started state
                validateHost(hosts4, 0, 2, MantisJobState.Started);
                validateHost(hosts4, 1, 3, MantisJobState.Started);
                break;
            case 5:
                // worker 3 is lost and should be resubmitted
                WorkerAssignments wa5 = workerAssignments.get(1);
                assertEquals(2, wa5.getNumWorkers());
                Map<Integer, WorkerHost> hosts5 = wa5.getHosts();
                assertEquals(1, hosts5.size());
                validateHost(hosts5, 0, 2, MantisJobState.Started);
                assertFalse(hosts5.containsKey(3));
                break;
            case 6:
                // worker 3 is replaced by worker num 4
                WorkerAssignments wa6 = workerAssignments.get(1);
                assertEquals(2, wa6.getNumWorkers());
                Map<Integer, WorkerHost> hosts6 = wa6.getHosts();
                // this update should have both numWorkers and the new worker in Launched state
                assertEquals(2, hosts6.size());
                validateHost(hosts6, 0, 2, MantisJobState.Started);
                validateHost(hosts6, 1, 4, MantisJobState.Launched);
                break;
            case 7:
                WorkerAssignments wa7 = workerAssignments.get(1);
                assertEquals(2, wa7.getNumWorkers());
                Map<Integer, WorkerHost> hosts7 = wa7.getHosts();
                // update for new worker in StartInit state
                assertEquals(2, hosts7.size());
                validateHost(hosts7, 0, 2, MantisJobState.Started);
                validateHost(hosts7, 1, 4, MantisJobState.StartInitiated);
                break;
            case 8:
                WorkerAssignments wa8 = workerAssignments.get(1);
                assertEquals(2, wa8.getNumWorkers());
                Map<Integer, WorkerHost> hosts8 = wa8.getHosts();
                // update for new worker in Started state
                assertEquals(2, hosts8.size());
                validateHost(hosts8, 0, 2, MantisJobState.Started);
                validateHost(hosts8, 1, 4, MantisJobState.Started);
                break;
            case 9:
                // scale down, worker 4 should be gone now and numWorkers set to 1
                WorkerAssignments wa9 = workerAssignments.get(1);
                assertEquals(1, wa9.getNumWorkers());
                Map<Integer, WorkerHost> hosts9 = wa9.getHosts();
                assertTrue(hosts9.containsKey(2));
                assertEquals(1, hosts9.size());
                validateHost(hosts9, 0, 2, MantisJobState.Started);
                break;
            case 10:
                // job has been killed
                assertTrue(workerAssignments.isEmpty());
                break;
            default:
                fail();
        }
    }
// 
// verify(jobStoreMock, times(1)).storeNewJob(any());
// // initial worker
// verify(jobStoreMock, times(1)).storeNewWorkers(any(),any());
// 
// //scale up worker
// verify(jobStoreMock, times(1)).storeNewWorker(any());
// 
// // verify(jobStoreMock, times(17)).updateWorker(any());
// 
// verify(jobStoreMock, times(3)).updateJob(any());
// 
// // initial worker + job master and scale up worker + resubmit
// verify(schedulerMock, times(4)).scheduleWorker(any());
// 
// verify(schedulerMock, times(4)).unscheduleAndTerminateWorker(any(), any());
}
Also used : JobId(io.mantisrx.server.master.domain.JobId) TestHelpers(com.netflix.mantis.master.scheduler.TestHelpers) ObjectMapper(io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper) MantisJobState(io.mantisrx.runtime.MantisJobState) MantisJobStore(io.mantisrx.server.master.persistence.MantisJobStore) JobCompletedReason(io.mantisrx.server.core.JobCompletedReason) MantisScheduler(io.mantisrx.server.master.scheduler.MantisScheduler) CLIENT_ERROR(io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.CLIENT_ERROR) JobClusterProto(io.mantisrx.master.jobcluster.proto.JobClusterProto) ActorRef(akka.actor.ActorRef) Map(java.util.Map) Schedulers(rx.schedulers.Schedulers) Assert.fail(org.junit.Assert.fail) StatusEventSubscriberLoggingImpl(io.mantisrx.master.events.StatusEventSubscriberLoggingImpl) AfterClass(org.junit.AfterClass) WorkerEventSubscriberLoggingImpl(io.mantisrx.master.events.WorkerEventSubscriberLoggingImpl) WorkerAssignments(io.mantisrx.server.core.WorkerAssignments) Matchers.any(org.mockito.Matchers.any) CountDownLatch(java.util.concurrent.CountDownLatch) WorkerId(io.mantisrx.server.core.domain.WorkerId) InvalidJobException(io.mantisrx.server.master.persistence.exceptions.InvalidJobException) List(java.util.List) Assert.assertFalse(org.junit.Assert.assertFalse) ActorSystem(akka.actor.ActorSystem) BehaviorSubject(rx.subjects.BehaviorSubject) StageScalingPolicy(io.mantisrx.runtime.descriptor.StageScalingPolicy) ScalingReason(io.mantisrx.runtime.descriptor.StageScalingPolicy.ScalingReason) Mockito.mock(org.mockito.Mockito.mock) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) BeforeClass(org.junit.BeforeClass) HashMap(java.util.HashMap) AuditEventSubscriberLoggingImpl(io.mantisrx.master.events.AuditEventSubscriberLoggingImpl) LifecycleEventPublisherImpl(io.mantisrx.master.events.LifecycleEventPublisherImpl) SchedulingInfo(io.mantisrx.runtime.descriptor.SchedulingInfo) MachineDefinition(io.mantisrx.runtime.MachineDefinition) Strategy(io.mantisrx.runtime.descriptor.StageScalingPolicy.Strategy) JobSchedulingInfo(io.mantisrx.server.core.JobSchedulingInfo) JsonProcessingException(io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException) Assert.assertTrue(org.junit.Assert.assertTrue) Mockito.times(org.mockito.Mockito.times) IOException(java.io.IOException) Test(org.junit.Test) TestKit(akka.testkit.javadsl.TestKit) SUCCESS(io.mantisrx.master.jobcluster.proto.BaseResponse.ResponseCode.SUCCESS) Mockito.verify(org.mockito.Mockito.verify) TimeUnit(java.util.concurrent.TimeUnit) JobClusterManagerProto(io.mantisrx.master.jobcluster.proto.JobClusterManagerProto) Lists(io.mantisrx.shaded.com.google.common.collect.Lists) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) LifecycleEventPublisher(io.mantisrx.master.events.LifecycleEventPublisher) WorkerHost(io.mantisrx.server.core.WorkerHost)
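
The ObjectMapper work in this test is the writeValueAsString/readValue round trip inside the Rx chain: each JobSchedulingInfo update is serialized to JSON and parsed back before the assertions run, which doubles as a check that the type survives JSON transport. A minimal standalone sketch of that round trip follows; the Update POJO is a hypothetical stand-in for JobSchedulingInfo, and it assumes only that the shaded Jackson artifact is on the classpath.

import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;

public class RoundTripSketch {

    // Hypothetical stand-in for JobSchedulingInfo; Jackson picks up public fields by default.
    public static class Update {
        public String jobId;
        public int numWorkers;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        Update in = new Update();
        in.jobId = "testSchedulingInfo-1";
        in.numWorkers = 2;
        String json = mapper.writeValueAsString(in);        // serialize the update
        Update out = mapper.readValue(json, Update.class);  // parse it back
        System.out.println(out.jobId + " -> " + out.numWorkers + " workers");
    }
}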

Example 12 with ObjectMapper

Use of io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper in project mantis by Netflix.

From the class FileBasedOffsetCheckpointStrategyTest, method testOffsetAndMetadataSerialization:

@Test
public void testOffsetAndMetadataSerialization() {
    OffsetAndMetadata expected = new OffsetAndMetadata(100, "tempmeta");
    final SimpleModule module = new SimpleModule().addSerializer(OffsetAndMetadata.class, new OffsetAndMetadataSerializer()).addDeserializer(OffsetAndMetadata.class, new OffsetAndMetadataDeserializer());
    final ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    mapper.registerModule(module);
    try {
        final String s = mapper.writeValueAsString(expected);
        final OffsetAndMetadata actual = mapper.readValue(s, OffsetAndMetadata.class);
        assertEquals(expected, actual);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
Also used : OffsetAndMetadataDeserializer(io.mantisrx.connector.kafka.source.serde.OffsetAndMetadataDeserializer) OffsetAndMetadataSerializer(io.mantisrx.connector.kafka.source.serde.OffsetAndMetadataSerializer) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) IOException(java.io.IOException) SimpleModule(io.mantisrx.shaded.com.fasterxml.jackson.databind.module.SimpleModule) ObjectMapper(io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test)
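
The OffsetAndMetadataSerializer and OffsetAndMetadataDeserializer registered above are Mantis connector classes whose bodies are not shown here. As a rough sketch of the shape such a pair usually takes, the following writes OffsetAndMetadata as a flat JSON object and reads it back; the field names "offset" and "metadata" are an assumption for illustration, not the Mantis wire format.

import java.io.IOException;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonGenerator;
import io.mantisrx.shaded.com.fasterxml.jackson.core.JsonParser;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.DeserializationContext;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.JsonDeserializer;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.JsonNode;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.JsonSerializer;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.SerializerProvider;

// Sketch only: writes OffsetAndMetadata as {"offset": <long>, "metadata": <string>}.
class OffsetAndMetadataSerializerSketch extends JsonSerializer<OffsetAndMetadata> {
    @Override
    public void serialize(OffsetAndMetadata value, JsonGenerator gen, SerializerProvider serializers) throws IOException {
        gen.writeStartObject();
        gen.writeNumberField("offset", value.offset());
        gen.writeStringField("metadata", value.metadata());
        gen.writeEndObject();
    }
}

// Sketch only: reads the same two fields back and rebuilds the Kafka type.
class OffsetAndMetadataDeserializerSketch extends JsonDeserializer<OffsetAndMetadata> {
    @Override
    public OffsetAndMetadata deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {
        JsonNode node = p.getCodec().readTree(p);
        return new OffsetAndMetadata(node.get("offset").asLong(), node.get("metadata").asText());
    }
}

Registration would then follow the test above: add both to a SimpleModule and call mapper.registerModule(module).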

Example 13 with ObjectMapper

Use of io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper in project mantis by Netflix.

From the class MantisEnvelopeTest, method deserTest:

@Test
public void deserTest() {
    String data = "{\"ts\":1571174446676,\"originServer\":\"origin\",\"eventList\":[{\"id\":1,\"data\":\"{\\\"mantisStream\\\":\\\"defaultStream\\\",\\\"matched-clients\\\":[\\\"MantisPushRequestEvents_PushRequestEventSourceJobLocal-1_nj3\\\"],\\\"id\\\":44,\\\"type\\\":\\\"EVENT\\\"}\"}]}";
    final ObjectMapper mapper = new ObjectMapper();
    ObjectReader mantisEventEnvelopeReader = mapper.readerFor(MantisEventEnvelope.class);
    try {
        MantisEventEnvelope envelope = mantisEventEnvelopeReader.readValue(data);
        System.out.println("Envelope=>" + envelope);
    } catch (JsonProcessingException e) {
        e.printStackTrace();
        fail();
    }
}
Also used : ObjectReader(io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectReader) JsonProcessingException(io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException) ObjectMapper(io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.jupiter.api.Test)
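
An ObjectReader obtained from readerFor is bound to a single target type and is immutable, so it can be built once and shared across threads instead of being recreated per message. A small sketch of that reuse pattern follows; the Event POJO is hypothetical and stands in for MantisEventEnvelope.

import java.io.IOException;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectReader;

public class ReaderReuseSketch {

    // Hypothetical payload type; MantisEventEnvelope plays this role in the test above.
    public static class Event {
        public long ts;
        public String originServer;
    }

    // Built once; the same ObjectReader instance is safe to share across calls and threads.
    private static final ObjectReader EVENT_READER = new ObjectMapper().readerFor(Event.class);

    public static Event parse(String json) throws IOException {
        return EVENT_READER.readValue(json);
    }
}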

Example 14 with ObjectMapper

Use of io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper in project mantis by Netflix.

From the class StageSchedulingInfo, method main:

public static void main(String[] args) {
    String json = "{\"numberOfInstances\":1,\"machineDefinition\":{\"cpuCores\":1.0,\"memoryMB\":2048.0,\"diskMB\":1.0,\"numPorts\":1},\"hardConstraints\":[\"UniqueHost\"],\"softConstraints\":[\"ExclusiveHost\"],\"scalable\":\"true\"}";
    ObjectMapper mapper = new ObjectMapper();
    try {
        StageSchedulingInfo info = mapper.readValue(json, StageSchedulingInfo.class);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
Also used : IOException(java.io.IOException) ObjectMapper(io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper)
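
The main method above drops both the parsed object and any parse failure, which is fine for a quick manual check but hides problems anywhere else. A minimal variation that surfaces both (a sketch, not the Mantis code; the import path for StageSchedulingInfo is assumed from the descriptor package used by the other examples):

import java.io.IOException;
import io.mantisrx.runtime.descriptor.StageSchedulingInfo;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;

public class ParseStageSketch {
    public static void main(String[] args) {
        String json = "{\"numberOfInstances\":1,\"machineDefinition\":{\"cpuCores\":1.0,\"memoryMB\":2048.0,\"diskMB\":1.0,\"numPorts\":1},\"hardConstraints\":[\"UniqueHost\"],\"softConstraints\":[\"ExclusiveHost\"],\"scalable\":\"true\"}";
        ObjectMapper mapper = new ObjectMapper();
        try {
            StageSchedulingInfo info = mapper.readValue(json, StageSchedulingInfo.class);
            System.out.println("Parsed: " + info);   // surface the result instead of discarding it
        } catch (IOException e) {
            throw new IllegalStateException("Failed to parse StageSchedulingInfo", e);  // fail fast instead of swallowing
        }
    }
}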

Example 15 with ObjectMapper

Use of io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper in project mantis by Netflix.

From the class JobClustersRouteTest, method compareClusterInstancePayload:

private void compareClusterInstancePayload(String clusterGetResponse) {
    try {
        ObjectMapper mapper = new ObjectMapper();
        JsonNode requestObj = mapper.readTree(JobClusterPayloads.JOB_CLUSTER_CREATE);
        JsonNode responseObj = mapper.readTree(clusterGetResponse);
        assertEquals(responseObj.get("name").toString(), requestObj.get("jobDefinition").get("name").toString());
        assertEquals(responseObj.get("jars").get(0).get("url").toString(), requestObj.get("jobDefinition").get("jobJarFileLocation").toString());
        assertEquals(responseObj.get("jars").get(0).get("version").toString(), requestObj.get("jobDefinition").get("version").toString());
    } catch (IOException ex) {
        assert ex == null;
    }
}
Also used : JsonNode(io.mantisrx.shaded.com.fasterxml.jackson.databind.JsonNode) IOException(java.io.IOException) ObjectMapper(io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper)
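
readTree is used here because the request and response payloads have different shapes, so individual nodes are compared instead of binding both documents to POJOs. One detail worth knowing: JsonNode.toString() on a text node keeps the JSON quoting, while asText() strips it; the assertions above call toString() on both sides, so the extra quotes cancel out. A tiny sketch of the difference, with made-up payload values:

import io.mantisrx.shaded.com.fasterxml.jackson.databind.JsonNode;
import io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper;

public class TreeCompareSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        JsonNode response = mapper.readTree("{\"name\":\"my-cluster\"}");
        JsonNode request = mapper.readTree("{\"jobDefinition\":{\"name\":\"my-cluster\"}}");
        String quoted = response.get("name").toString();                    // "\"my-cluster\"" (keeps quotes)
        String bare = request.get("jobDefinition").get("name").asText();    // my-cluster (no quotes)
        System.out.println(quoted.equals(bare));                            // false: quoting differs
        System.out.println(response.get("name").asText().equals(bare));     // true
    }
}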

Aggregations

ObjectMapper (io.mantisrx.shaded.com.fasterxml.jackson.databind.ObjectMapper): 17
IOException (java.io.IOException): 10
Test (org.junit.Test): 7
JsonNode (io.mantisrx.shaded.com.fasterxml.jackson.databind.JsonNode): 5
JsonProcessingException (io.mantisrx.shaded.com.fasterxml.jackson.core.JsonProcessingException): 3
MachineDefinition (io.mantisrx.runtime.MachineDefinition): 2
MantisWorkerMetadataWritable (io.mantisrx.server.master.store.MantisWorkerMetadataWritable): 2
HashMap (java.util.HashMap): 2
ActorRef (akka.actor.ActorRef): 1
ActorSystem (akka.actor.ActorSystem): 1
TestKit (akka.testkit.javadsl.TestKit): 1
TestHelpers (com.netflix.mantis.master.scheduler.TestHelpers): 1
Label (io.mantisrx.common.Label): 1
WorkerPorts (io.mantisrx.common.WorkerPorts): 1
OffsetAndMetadataDeserializer (io.mantisrx.connector.kafka.source.serde.OffsetAndMetadataDeserializer): 1
OffsetAndMetadataSerializer (io.mantisrx.connector.kafka.source.serde.OffsetAndMetadataSerializer): 1
AuditEventSubscriberLoggingImpl (io.mantisrx.master.events.AuditEventSubscriberLoggingImpl): 1
LifecycleEventPublisher (io.mantisrx.master.events.LifecycleEventPublisher): 1
LifecycleEventPublisherImpl (io.mantisrx.master.events.LifecycleEventPublisherImpl): 1
StatusEventSubscriberLoggingImpl (io.mantisrx.master.events.StatusEventSubscriberLoggingImpl): 1