
Example 1 with GetClusterNodesResponse

Use of org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse in project hadoop by apache.

The class ClientRMService, method getClusterNodes:

@Override
public GetClusterNodesResponse getClusterNodes(GetClusterNodesRequest request) throws YarnException {
    GetClusterNodesResponse response = recordFactory.newRecordInstance(GetClusterNodesResponse.class);
    EnumSet<NodeState> nodeStates = request.getNodeStates();
    if (nodeStates == null || nodeStates.isEmpty()) {
        nodeStates = EnumSet.allOf(NodeState.class);
    }
    Collection<RMNode> nodes = RMServerUtils.queryRMNodes(rmContext, nodeStates);
    List<NodeReport> nodeReports = new ArrayList<NodeReport>(nodes.size());
    for (RMNode nodeInfo : nodes) {
        nodeReports.add(createNodeReports(nodeInfo));
    }
    response.setNodeReports(nodeReports);
    return response;
}
Also used: RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode), NodeState (org.apache.hadoop.yarn.api.records.NodeState), GetClusterNodesResponse (org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse), ArrayList (java.util.ArrayList), SchedulerNodeReport (org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport)
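
Below is a minimal client-side sketch (not one of the indexed usages) of how the ClientRMService method above could be reached through ApplicationClientProtocol. The proxy setup via ClientRMProxy, the RunningNodesLister class name, and the RUNNING-only filter are assumptions for illustration.

// Hypothetical caller: asks the RM for RUNNING nodes only; a null or empty state set
// would make getClusterNodes above fall back to EnumSet.allOf(NodeState.class).
import java.util.EnumSet;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RunningNodesLister {
    public static void main(String[] args) throws Exception {
        Configuration conf = new YarnConfiguration();
        // Proxy to the ResourceManager; ClientRMService answers this protocol on the RM side.
        ApplicationClientProtocol rmClient =
            ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
        GetClusterNodesRequest request =
            GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.RUNNING));
        GetClusterNodesResponse response = rmClient.getClusterNodes(request);
        List<NodeReport> reports = response.getNodeReports();
        for (NodeReport report : reports) {
            System.out.println(report.getNodeId() + " capability=" + report.getCapability());
        }
    }
}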

Example 2 with GetClusterNodesResponse

Use of org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse in project apex-core by apache.

The class StramMiniClusterTest, method testSetupShutdown:

@Test
public void testSetupShutdown() throws Exception {
    GetClusterNodesRequest request = Records.newRecord(GetClusterNodesRequest.class);
    ClientRMService clientRMService = yarnCluster.getResourceManager().getClientRMService();
    GetClusterNodesResponse response = clientRMService.getClusterNodes(request);
    List<NodeReport> nodeReports = response.getNodeReports();
    LOG.info("{}", nodeReports);
    for (NodeReport nr : nodeReports) {
        LOG.info("Node: {}", nr.getNodeId());
        LOG.info("Total memory: {}", nr.getCapability());
        LOG.info("Used memory: {}", nr.getUsed());
        LOG.info("Number containers: {}", nr.getNumContainers());
    }
    JarHelper jarHelper = new JarHelper();
    LOG.info("engine jar: {}", jarHelper.getJar(StreamingAppMaster.class));
    LOG.info("engine test jar: {}", jarHelper.getJar(StramMiniClusterTest.class));
    // create test application
    Properties dagProps = new Properties();
    // input module (ensure shutdown works while windows are generated)
    dagProps.put(StreamingApplication.APEX_PREFIX + "operator.numGen.classname", TestGeneratorInputOperator.class.getName());
    dagProps.put(StreamingApplication.APEX_PREFIX + "operator.numGen.maxTuples", "1");
    dagProps.put(StreamingApplication.APEX_PREFIX + "operator.module1.classname", GenericTestOperator.class.getName());
    dagProps.put(StreamingApplication.APEX_PREFIX + "operator.module2.classname", GenericTestOperator.class.getName());
    dagProps.put(StreamingApplication.APEX_PREFIX + "stream.fromNumGen.source", "numGen.outport");
    dagProps.put(StreamingApplication.APEX_PREFIX + "stream.fromNumGen.sinks", "module1.inport1");
    dagProps.put(StreamingApplication.APEX_PREFIX + "stream.n1n2.source", "module1.outport1");
    dagProps.put(StreamingApplication.APEX_PREFIX + "stream.n1n2.sinks", "module2.inport1");
    dagProps.setProperty(StreamingApplication.APEX_PREFIX + LogicalPlan.MASTER_MEMORY_MB.getName(), "128");
    dagProps.setProperty(StreamingApplication.APEX_PREFIX + LogicalPlan.CONTAINER_JVM_OPTIONS.getName(), "-Dlog4j.properties=custom_log4j.properties");
    dagProps.setProperty(StreamingApplication.APEX_PREFIX + "operator.*." + OperatorContext.MEMORY_MB.getName(), "64");
    dagProps.setProperty(StreamingApplication.APEX_PREFIX + "operator.*." + OperatorContext.VCORES.getName(), "1");
    dagProps.setProperty(StreamingApplication.APEX_PREFIX + "operator.*.port.*." + Context.PortContext.BUFFER_MEMORY_MB.getName(), "32");
    dagProps.setProperty(StreamingApplication.APEX_PREFIX + LogicalPlan.DEBUG.getName(), "true");
    LOG.info("dag properties: {}", dagProps);
    LOG.info("Initializing Client");
    LogicalPlanConfiguration tb = new LogicalPlanConfiguration(conf);
    tb.addFromProperties(dagProps, null);
    LogicalPlan dag = createDAG(tb);
    Configuration yarnConf = new Configuration(yarnCluster.getConfig());
    StramClient client = new StramClient(yarnConf, dag);
    try {
        client.start();
        if (StringUtils.isBlank(System.getenv("JAVA_HOME"))) {
            // JAVA_HOME not set in the yarn mini cluster
            client.javaCmd = "java";
        }
        LOG.info("Running client");
        client.startApplication();
        boolean result = client.monitorApplication();
        LOG.info("Client run completed. Result=" + result);
        Assert.assertTrue(result);
    } finally {
        client.stop();
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), LogicalPlanConfiguration (com.datatorrent.stram.plan.logical.LogicalPlanConfiguration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), GetClusterNodesResponse (org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse), TestGeneratorInputOperator (com.datatorrent.stram.engine.TestGeneratorInputOperator), Properties (java.util.Properties), ClientRMService (org.apache.hadoop.yarn.server.resourcemanager.ClientRMService), JarHelper (org.apache.apex.common.util.JarHelper), GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator), LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan), GetClusterNodesRequest (org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest), NodeReport (org.apache.hadoop.yarn.api.records.NodeReport), Test (org.junit.Test)
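
For context, here is a short sketch of how the yarnCluster field used in the test above could be brought up with Hadoop's MiniYARNCluster so that getResourceManager().getClientRMService() is available. The test name, the single-NodeManager sizing, and the scheduler minimum allocation are assumptions for illustration, not values taken from StramMiniClusterTest.

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;

public class MiniClusterBootstrap {
    // Starts a single-NodeManager in-process cluster whose ResourceManager exposes
    // the ClientRMService queried at the top of testSetupShutdown().
    public static MiniYARNCluster startCluster() {
        YarnConfiguration conf = new YarnConfiguration();
        // Small allocations keep the mini cluster lightweight for unit tests (assumed value).
        conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
        // One NodeManager, one local dir, one log dir is enough to exercise submit/shutdown.
        MiniYARNCluster cluster = new MiniYARNCluster("StramMiniClusterTest", 1, 1, 1);
        cluster.init(conf);
        cluster.start();
        return cluster;
    }
}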

Example 3 with GetClusterNodesResponse

Use of org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse in project hadoop by apache.

The class YarnClientImpl, method getNodeReports:

@Override
public List<NodeReport> getNodeReports(NodeState... states) throws YarnException, IOException {
    EnumSet<NodeState> statesSet = (states.length == 0) ? EnumSet.allOf(NodeState.class) : EnumSet.noneOf(NodeState.class);
    for (NodeState state : states) {
        statesSet.add(state);
    }
    GetClusterNodesRequest request = GetClusterNodesRequest.newInstance(statesSet);
    GetClusterNodesResponse response = rmClient.getClusterNodes(request);
    return response.getNodeReports();
}
Also used: NodeState (org.apache.hadoop.yarn.api.records.NodeState), GetClusterNodesResponse (org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse), GetClusterNodesRequest (org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)
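
A brief usage sketch, assuming a reachable ResourceManager: the high-level YarnClient API wraps the rmClient call shown above, so callers work only with NodeReport and NodeState rather than the protocol records. The NodeReportPrinter class name is illustrative.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class NodeReportPrinter {
    public static void main(String[] args) throws Exception {
        Configuration conf = new YarnConfiguration();
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(conf);
        yarnClient.start();
        try {
            // No varargs -> getNodeReports above expands to EnumSet.allOf(NodeState.class).
            List<NodeReport> allNodes = yarnClient.getNodeReports();
            // Explicit filter -> only RUNNING nodes are reported.
            List<NodeReport> running = yarnClient.getNodeReports(NodeState.RUNNING);
            System.out.println("total=" + allNodes.size() + " running=" + running.size());
        } finally {
            yarnClient.stop();
        }
    }
}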

Aggregations

GetClusterNodesResponse (org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse): 3
GetClusterNodesRequest (org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest): 2
NodeReport (org.apache.hadoop.yarn.api.records.NodeReport): 2
NodeState (org.apache.hadoop.yarn.api.records.NodeState): 2
GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator): 1
TestGeneratorInputOperator (com.datatorrent.stram.engine.TestGeneratorInputOperator): 1
LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan): 1
LogicalPlanConfiguration (com.datatorrent.stram.plan.logical.LogicalPlanConfiguration): 1
ArrayList (java.util.ArrayList): 1
Properties (java.util.Properties): 1
JarHelper (org.apache.apex.common.util.JarHelper): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 1
ClientRMService (org.apache.hadoop.yarn.server.resourcemanager.ClientRMService): 1
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 1
SchedulerNodeReport (org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport): 1
Test (org.junit.Test): 1