Example 16 with NodeReport

Use of org.apache.hadoop.yarn.api.records.NodeReport in project apex-core by apache, in class HostLocalTest, method testUnavailableResources.

@Test
public void testUnavailableResources() {
    LogicalPlan dag = new LogicalPlan();
    dag.getAttributes().put(com.datatorrent.api.Context.DAGContext.APPLICATION_PATH, new File("target", HostLocalTest.class.getName()).getAbsolutePath());
    dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
    GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
    dag.getMeta(o1).getAttributes().put(OperatorContext.LOCALITY_HOST, "host2");
    GenericTestOperator partitioned = dag.addOperator("partitioned", GenericTestOperator.class);
    dag.addStream("o1_outport1", o1.outport1, partitioned.inport1).setLocality(Locality.CONTAINER_LOCAL);
    dag.setOperatorAttribute(o1, OperatorContext.MEMORY_MB, 256);
    dag.setOperatorAttribute(o1, OperatorContext.VCORES, 2);
    dag.setOperatorAttribute(partitioned, OperatorContext.VCORES, 1);
    StreamingContainerManager scm = new StreamingContainerManager(dag);
    ResourceRequestHandler rr = new ResourceRequestHandler();
    int containerMem = 1000;
    Map<String, NodeReport> nodeReports = Maps.newHashMap();
    NodeReport nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId("host1", 0), NodeState.RUNNING, "httpAddress", "rackName", BuilderUtils.newResource(0, 0), BuilderUtils.newResource(containerMem * 2, 2), 0, null, 0);
    nodeReports.put(nr.getNodeId().getHost(), nr);
    nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId("host2", 0), NodeState.RUNNING, "httpAddress", "rackName", BuilderUtils.newResource(0, 0), BuilderUtils.newResource(containerMem * 2, 2), 0, null, 0);
    nodeReports.put(nr.getNodeId().getHost(), nr);
    // make the simulated node resources known to the resource request handler
    rr.updateNodeReports(Lists.newArrayList(nodeReports.values()));
    Assert.assertEquals("number of containers is 1", 1, scm.containerStartRequests.size());
    for (ContainerStartRequest csr : scm.containerStartRequests) {
        String host = rr.getHost(csr, true);
        Assert.assertEquals("number of vcores", 3, csr.container.getRequiredVCores());
        Assert.assertNull("Host is null", host);
    }
}
Also used: ContainerStartRequest (com.datatorrent.stram.StreamingContainerAgent.ContainerStartRequest), GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator), MemoryStorageAgent (com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent), LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan), File (java.io.File), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport), Test (org.junit.Test)
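
Why this test expects a null host: with the CONTAINER_LOCAL stream, o1 (2 vcores) and partitioned (1 vcore) share one container, so the request asks for 3 vcores, while each simulated node advertises a capability of only 2. A minimal sketch of the headroom comparison involved, using only the public NodeReport/Resource getters (hasVCoreHeadroom is a hypothetical helper, not part of ResourceRequestHandler):

static boolean hasVCoreHeadroom(NodeReport nr, int requestedVCores) {
    // vcores still free on the node: advertised capability minus what is already in use
    int free = nr.getCapability().getVirtualCores() - nr.getUsed().getVirtualCores();
    return free >= requestedVCores;
}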

Example 17 with NodeReport

Use of org.apache.hadoop.yarn.api.records.NodeReport in project apex-core by apache, in class LocalityTest, method testNodeLocal.

@Test
public void testNodeLocal() {
    LogicalPlan dag = new LogicalPlan();
    dag.getAttributes().put(com.datatorrent.api.Context.DAGContext.APPLICATION_PATH, new File("target", LocalityTest.class.getName()).getAbsolutePath());
    dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
    GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
    GenericTestOperator partitioned = dag.addOperator("partitioned", GenericTestOperator.class);
    dag.getMeta(partitioned).getAttributes().put(OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
    GenericTestOperator partitionedParallel = dag.addOperator("partitionedParallel", GenericTestOperator.class);
    dag.addStream("o1_outport1", o1.outport1, partitioned.inport1).setLocality(null);
    dag.addStream("partitioned_outport1", partitioned.outport1, partitionedParallel.inport2).setLocality(Locality.NODE_LOCAL);
    dag.setInputPortAttribute(partitionedParallel.inport2, PortContext.PARTITION_PARALLEL, true);
    GenericTestOperator single = dag.addOperator("single", GenericTestOperator.class);
    dag.addStream("partitionedParallel_outport1", partitionedParallel.outport1, single.inport1);
    int maxContainers = 7;
    dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, maxContainers);
    StreamingContainerManager scm = new StreamingContainerManager(dag);
    Assert.assertEquals("number required containers", 6, scm.containerStartRequests.size());
    ResourceRequestHandler rr = new ResourceRequestHandler();
    int containerMem = 2000;
    Map<String, NodeReport> nodeReports = Maps.newHashMap();
    NodeReport nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId("host1", 0), NodeState.RUNNING, "httpAddress", "rackName", BuilderUtils.newResource(0, 0), BuilderUtils.newResource(containerMem * 2, 2), 0, null, 0);
    nodeReports.put(nr.getNodeId().getHost(), nr);
    nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId("host2", 0), NodeState.RUNNING, "httpAddress", "rackName", BuilderUtils.newResource(0, 0), BuilderUtils.newResource(containerMem * 2, 2), 0, null, 0);
    nodeReports.put(nr.getNodeId().getHost(), nr);
    // make the simulated node resources known to the resource request handler
    rr.updateNodeReports(Lists.newArrayList(nodeReports.values()));
    Map<PTContainer, String> requestedHosts = Maps.newHashMap();
    for (ContainerStartRequest csr : scm.containerStartRequests) {
        String host = rr.getHost(csr, true);
        csr.container.host = host;
        // update the node report
        if (host != null) {
            requestedHosts.put(csr.container, host);
            nr = nodeReports.get(host);
            nr.getUsed().setMemory(nr.getUsed().getMemory() + containerMem);
        }
    }
    Assert.assertEquals("" + requestedHosts, nodeReports.keySet(), Sets.newHashSet(requestedHosts.values()));
    for (Map.Entry<PTContainer, String> e : requestedHosts.entrySet()) {
        for (PTOperator oper : e.getKey().getOperators()) {
            if (oper.getNodeLocalOperators().getOperatorSet().size() > 1) {
                String expHost = null;
                for (PTOperator nodeLocalOper : oper.getNodeLocalOperators().getOperatorSet()) {
                    Assert.assertNotNull("host null " + nodeLocalOper.getContainer(), nodeLocalOper.getContainer().host);
                    if (expHost == null) {
                        expHost = nodeLocalOper.getContainer().host;
                    } else {
                        Assert.assertEquals("expected same host " + nodeLocalOper, expHost, nodeLocalOper.getContainer().host);
                    }
                }
            }
        }
    }
}
Also used: ContainerStartRequest (com.datatorrent.stram.StreamingContainerAgent.ContainerStartRequest), PTOperator (com.datatorrent.stram.plan.physical.PTOperator), GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator), MemoryStorageAgent (com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent), PTContainer (com.datatorrent.stram.plan.physical.PTContainer), LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan), File (java.io.File), Map (java.util.Map), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport), Test (org.junit.Test)
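
The nested loops at the end verify the NODE_LOCAL contract: every operator in a node-local set must have been placed in a container on the same host. The same invariant, as a sketch built from the accessors already used above (collectHosts is a hypothetical name, not part of apex-core):

static Set<String> collectHosts(PTOperator oper) {
    Set<String> hosts = new HashSet<>();
    for (PTOperator nodeLocal : oper.getNodeLocalOperators().getOperatorSet()) {
        hosts.add(nodeLocal.getContainer().host);
    }
    // for a correctly placed node-local set this holds exactly one non-null host
    return hosts;
}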

Example 18 with NodeReport

Use of org.apache.hadoop.yarn.api.records.NodeReport in project apex-core by apache, in class AffinityRulesTest, method testOperatorPartitionsAntiAffinity.

@Test
public void testOperatorPartitionsAntiAffinity() {
    LogicalPlan dag = new LogicalPlan();
    TestGeneratorInputOperator o1 = dag.addOperator("O1", new TestGeneratorInputOperator());
    GenericTestOperator o2 = dag.addOperator("O2", new GenericTestOperator());
    GenericTestOperator o3 = dag.addOperator("O3", new GenericTestOperator());
    dag.addStream("stream1", o1.outport, o2.inport1);
    dag.addStream("stream2", o2.outport1, o3.inport1);
    dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(5));
    AffinityRulesSet ruleSet = new AffinityRulesSet();
    // Valid case:
    List<AffinityRule> rules = new ArrayList<>();
    ruleSet.setAffinityRules(rules);
    AffinityRule rule1 = new AffinityRule(Type.ANTI_AFFINITY, Locality.NODE_LOCAL, false, "O2", "O2");
    rules.add(rule1);
    dag.setAttribute(DAGContext.AFFINITY_RULES_SET, ruleSet);
    dag.validate();
    dag.getAttributes().put(com.datatorrent.api.Context.DAGContext.APPLICATION_PATH, testMeta.getAbsolutePath());
    dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
    StreamingContainerManager scm = new StreamingContainerManager(dag);
    for (ContainerStartRequest csr : scm.containerStartRequests) {
        PTContainer container = csr.container;
        if (container.getOperators().get(0).getName().equals("O2")) {
            Assert.assertEquals("Anti-affinity containers set should have 4 containers for other partitions ", 4, container.getStrictAntiPrefs().size());
            for (PTContainer c : container.getStrictAntiPrefs()) {
                for (PTOperator operator : c.getOperators()) {
                    Assert.assertEquals("Partion for O2 should be Anti Prefs", "O2", operator.getName());
                }
            }
        }
    }
    // Check resource handler assigns different hosts for each partition
    ResourceRequestHandler rr = new ResourceRequestHandler();
    int containerMem = 1000;
    Map<String, NodeReport> nodeReports = Maps.newHashMap();
    for (int i = 0; i < 10; i++) {
        String hostName = "host" + i;
        NodeReport nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId(hostName, 0), NodeState.RUNNING, "httpAddress", "rackName", BuilderUtils.newResource(0, 0), BuilderUtils.newResource(containerMem * 2, 2), 0, null, 0);
        nodeReports.put(nr.getNodeId().getHost(), nr);
    }
    // make the simulated node resources known to the resource request handler
    rr.updateNodeReports(Lists.newArrayList(nodeReports.values()));
    Set<String> partitionHostNames = new HashSet<>();
    for (ContainerStartRequest csr : scm.containerStartRequests) {
        String host = rr.getHost(csr, true);
        csr.container.host = host;
        if (csr.container.getOperators().get(0).getName().equals("O2")) {
            Assert.assertNotNull("Host name should not be null", host);
            LOG.info("Partition {} for operator O2 has host = {} ", csr.container.getId(), host);
            Assert.assertTrue("Each Partition should have a different host", !partitionHostNames.contains(host));
            partitionHostNames.add(host);
        }
    }
}
Also used: ContainerStartRequest (com.datatorrent.stram.StreamingContainerAgent.ContainerStartRequest), AffinityRule (com.datatorrent.api.AffinityRule), PTOperator (com.datatorrent.stram.plan.physical.PTOperator), ArrayList (java.util.ArrayList), TestGeneratorInputOperator (com.datatorrent.stram.engine.TestGeneratorInputOperator), AffinityRulesSet (com.datatorrent.api.AffinityRulesSet), GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator), MemoryStorageAgent (com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent), PTContainer (com.datatorrent.stram.plan.physical.PTContainer), LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport), HashSet (java.util.HashSet), Test (org.junit.Test)
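
The second loop encodes the anti-affinity expectation: the five O2 partitions are mutually anti-affine at NODE_LOCAL scope, so each partition container must resolve to a distinct host among the ten simulated nodes. The same uniqueness check in one place, as a sketch (assertDistinctHosts is a hypothetical helper; it relies on the Assert and HashSet imports already used by the test):

static void assertDistinctHosts(List<String> hosts) {
    // duplicates collapse in the set, so equal sizes imply all hosts are distinct
    Assert.assertEquals("each O2 partition should land on its own host",
            hosts.size(), new HashSet<>(hosts).size());
}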

Example 19 with NodeReport

Use of org.apache.hadoop.yarn.api.records.NodeReport in project apex-core by apache, in class HostLocalTest, method testContainerLocal.

@Test
public void testContainerLocal() {
    LogicalPlan dag = new LogicalPlan();
    dag.getAttributes().put(com.datatorrent.api.Context.DAGContext.APPLICATION_PATH, new File("target", HostLocalTest.class.getName()).getAbsolutePath());
    dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
    GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
    dag.getMeta(o1).getAttributes().put(OperatorContext.LOCALITY_HOST, "host2");
    GenericTestOperator partitioned = dag.addOperator("partitioned", GenericTestOperator.class);
    dag.addStream("o1_outport1", o1.outport1, partitioned.inport1).setLocality(Locality.CONTAINER_LOCAL);
    dag.setOperatorAttribute(o1, OperatorContext.MEMORY_MB, 256);
    dag.setOperatorAttribute(partitioned, OperatorContext.MEMORY_MB, 256);
    StreamingContainerManager scm = new StreamingContainerManager(dag);
    ResourceRequestHandler rr = new ResourceRequestHandler();
    int containerMem = 1000;
    Map<String, NodeReport> nodeReports = Maps.newHashMap();
    NodeReport nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId("host1", 0), NodeState.RUNNING, "httpAddress", "rackName", BuilderUtils.newResource(0, 0), BuilderUtils.newResource(containerMem * 2, 2), 0, null, 0);
    nodeReports.put(nr.getNodeId().getHost(), nr);
    nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId("host2", 0), NodeState.RUNNING, "httpAddress", "rackName", BuilderUtils.newResource(0, 0), BuilderUtils.newResource(containerMem * 2, 2), 0, null, 0);
    nodeReports.put(nr.getNodeId().getHost(), nr);
    // make the simulated node resources known to the resource request handler
    rr.updateNodeReports(Lists.newArrayList(nodeReports.values()));
    Assert.assertEquals("number of containers is 1", 1, scm.containerStartRequests.size());
    for (ContainerStartRequest csr : scm.containerStartRequests) {
        String host = rr.getHost(csr, true);
        csr.container.host = host;
        Assert.assertEquals("Hosts set to host2", "host2", host);
    }
}
Also used: ContainerStartRequest (com.datatorrent.stram.StreamingContainerAgent.ContainerStartRequest), GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator), MemoryStorageAgent (com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent), LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan), File (java.io.File), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport), Test (org.junit.Test)
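
Here the LOCALITY_HOST attribute pins o1 to host2, and the CONTAINER_LOCAL stream pulls partitioned into the same container; host2 reports 2000 MB capability with nothing used, so the preference can be honored. The memory-side counterpart of the vcore check sketched under Example 16, again only a hedged sketch over the public Resource getters (hasMemoryHeadroom is hypothetical, not part of ResourceRequestHandler):

static boolean hasMemoryHeadroom(NodeReport nr, int requestedMB) {
    // memory still free on the node: advertised capability minus reported usage
    return nr.getCapability().getMemory() - nr.getUsed().getMemory() >= requestedMB;
}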

Example 20 with NodeReport

Use of org.apache.hadoop.yarn.api.records.NodeReport in project apex-core by apache, in class HostLocalTest, method testThreadLocal.

@Test
public void testThreadLocal() {
    LogicalPlan dag = new LogicalPlan();
    dag.getAttributes().put(com.datatorrent.api.Context.DAGContext.APPLICATION_PATH, new File("target", HostLocalTest.class.getName()).getAbsolutePath());
    dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
    GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
    dag.getMeta(o1).getAttributes().put(OperatorContext.LOCALITY_HOST, "host2");
    GenericTestOperator partitioned = dag.addOperator("partitioned", GenericTestOperator.class);
    dag.addStream("o1_outport1", o1.outport1, partitioned.inport1).setLocality(Locality.THREAD_LOCAL);
    dag.setOperatorAttribute(o1, OperatorContext.MEMORY_MB, 256);
    dag.setOperatorAttribute(partitioned, OperatorContext.MEMORY_MB, 256);
    StreamingContainerManager scm = new StreamingContainerManager(dag);
    ResourceRequestHandler rr = new ResourceRequestHandler();
    int containerMem = 1000;
    Map<String, NodeReport> nodeReports = Maps.newHashMap();
    NodeReport nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId("host1", 0), NodeState.RUNNING, "httpAddress", "rackName", BuilderUtils.newResource(0, 0), BuilderUtils.newResource(containerMem * 2, 2), 0, null, 0);
    nodeReports.put(nr.getNodeId().getHost(), nr);
    nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId("host2", 0), NodeState.RUNNING, "httpAddress", "rackName", BuilderUtils.newResource(0, 0), BuilderUtils.newResource(containerMem * 2, 2), 0, null, 0);
    nodeReports.put(nr.getNodeId().getHost(), nr);
    // make the simulated node resources known to the resource request handler
    rr.updateNodeReports(Lists.newArrayList(nodeReports.values()));
    Assert.assertEquals("number of containers is 1", 1, scm.containerStartRequests.size());
    for (ContainerStartRequest csr : scm.containerStartRequests) {
        String host = rr.getHost(csr, true);
        csr.container.host = host;
        Assert.assertEquals("Hosts set to host2", "host2", host);
    }
}
Also used: ContainerStartRequest (com.datatorrent.stram.StreamingContainerAgent.ContainerStartRequest), GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator), MemoryStorageAgent (com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent), LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan), File (java.io.File), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport), Test (org.junit.Test)
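
Every example in this section builds its simulated nodes with the same BuilderUtils call. A small factory could remove that repetition; this is only a sketch using the calls already shown above (runningNode is a hypothetical helper, not part of apex-core):

static NodeReport runningNode(String host, int memoryMB, int vcores) {
    // a healthy RUNNING node with nothing used and the given total capability
    return BuilderUtils.newNodeReport(BuilderUtils.newNodeId(host, 0), NodeState.RUNNING,
            "httpAddress", "rackName", BuilderUtils.newResource(0, 0),
            BuilderUtils.newResource(memoryMB, vcores), 0, null, 0);
}

With it, the two-node setup above reduces to nodeReports.put("host1", runningNode("host1", containerMem * 2, 2)) and the analogous line for host2.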

Aggregations (types appearing together with NodeReport across the indexed examples, with occurrence counts)

NodeReport (org.apache.hadoop.yarn.api.records.NodeReport): 49
Test (org.junit.Test): 18
ArrayList (java.util.ArrayList): 17
Resource (org.apache.hadoop.yarn.api.records.Resource): 14
GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator): 10
LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan): 10
ContainerStartRequest (com.datatorrent.stram.StreamingContainerAgent.ContainerStartRequest): 9
MemoryStorageAgent (com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent): 9
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 9
File (java.io.File): 8
NodeState (org.apache.hadoop.yarn.api.records.NodeState): 7
HashMap (java.util.HashMap): 6
HashSet (java.util.HashSet): 6
Priority (org.apache.hadoop.yarn.api.records.Priority): 6
IOException (java.io.IOException): 5
Map (java.util.Map): 5
Container (org.apache.hadoop.yarn.api.records.Container): 5
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 5
PrintWriter (java.io.PrintWriter): 4
Configuration (org.apache.hadoop.conf.Configuration): 4