Use of org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest in project hadoop by apache:
from the class TestClientRMService, method testRMStartWithDecommissionedNode.
@Test
public void testRMStartWithDecommissionedNode() throws Exception {
  String excludeFile = "excludeFile";
  createExcludeFile(excludeFile);
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, excludeFile);
  MockRM rm = new MockRM(conf) {
    @Override
    protected ClientRMService createClientRMService() {
      return new ClientRMService(this.rmContext, scheduler, this.rmAppManager,
          this.applicationACLsManager, this.queueACLsManager,
          this.getRMContext().getRMDelegationTokenSecretManager());
    }
  };
  rm.start();
  YarnRPC rpc = YarnRPC.create(conf);
  InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress();
  LOG.info("Connecting to ResourceManager at " + rmAddress);
  ApplicationClientProtocol client = (ApplicationClientProtocol) rpc.getProxy(
      ApplicationClientProtocol.class, rmAddress, conf);
  // Make call
  GetClusterNodesRequest request =
      GetClusterNodesRequest.newInstance(EnumSet.allOf(NodeState.class));
  List<NodeReport> nodeReports = client.getClusterNodes(request).getNodeReports();
  Assert.assertEquals(1, nodeReports.size());
  rm.stop();
  rpc.stopProxy(client, conf);
  new File(excludeFile).delete();
}
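The helper createExcludeFile is part of the test class but is not shown in this excerpt. A minimal sketch of what such a helper could look like, assuming it only needs to write a plain-text hosts file with one excluded hostname per line; this is a hypothetical reconstruction, not the actual Hadoop helper, and the hostname is illustrative. It uses java.io.File, java.io.FileWriter, and java.io.IOException.

// Hypothetical sketch, not the real Hadoop helper: writes a simple
// exclude list that RM_NODES_EXCLUDE_FILE_PATH can point at.
private void createExcludeFile(String filename) throws IOException {
  File file = new File(filename);
  if (file.exists()) {
    file.delete();
  }
  try (FileWriter writer = new FileWriter(file)) {
    writer.write("decommissioned-host.example.com"); // illustrative hostname
  }
}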
Use of org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest in project apex-core by apache:
from the class StramMiniClusterTest, method testSetupShutdown.
@Test
public void testSetupShutdown() throws Exception {
  GetClusterNodesRequest request = Records.newRecord(GetClusterNodesRequest.class);
  ClientRMService clientRMService = yarnCluster.getResourceManager().getClientRMService();
  GetClusterNodesResponse response = clientRMService.getClusterNodes(request);
  List<NodeReport> nodeReports = response.getNodeReports();
  LOG.info("{}", nodeReports);
  for (NodeReport nr : nodeReports) {
    LOG.info("Node: {}", nr.getNodeId());
    LOG.info("Total memory: {}", nr.getCapability());
    LOG.info("Used memory: {}", nr.getUsed());
    LOG.info("Number containers: {}", nr.getNumContainers());
  }
  JarHelper jarHelper = new JarHelper();
  LOG.info("engine jar: {}", jarHelper.getJar(StreamingAppMaster.class));
  LOG.info("engine test jar: {}", jarHelper.getJar(StramMiniClusterTest.class));
  // create test application
  Properties dagProps = new Properties();
  // input module (ensure shutdown works while windows are generated)
  dagProps.put(StreamingApplication.APEX_PREFIX + "operator.numGen.classname",
      TestGeneratorInputOperator.class.getName());
  dagProps.put(StreamingApplication.APEX_PREFIX + "operator.numGen.maxTuples", "1");
  dagProps.put(StreamingApplication.APEX_PREFIX + "operator.module1.classname",
      GenericTestOperator.class.getName());
  dagProps.put(StreamingApplication.APEX_PREFIX + "operator.module2.classname",
      GenericTestOperator.class.getName());
  dagProps.put(StreamingApplication.APEX_PREFIX + "stream.fromNumGen.source", "numGen.outport");
  dagProps.put(StreamingApplication.APEX_PREFIX + "stream.fromNumGen.sinks", "module1.inport1");
  dagProps.put(StreamingApplication.APEX_PREFIX + "stream.n1n2.source", "module1.outport1");
  dagProps.put(StreamingApplication.APEX_PREFIX + "stream.n1n2.sinks", "module2.inport1");
  dagProps.setProperty(StreamingApplication.APEX_PREFIX + LogicalPlan.MASTER_MEMORY_MB.getName(), "128");
  dagProps.setProperty(StreamingApplication.APEX_PREFIX + LogicalPlan.CONTAINER_JVM_OPTIONS.getName(),
      "-Dlog4j.properties=custom_log4j.properties");
  dagProps.setProperty(StreamingApplication.APEX_PREFIX + "operator.*."
      + OperatorContext.MEMORY_MB.getName(), "64");
  dagProps.setProperty(StreamingApplication.APEX_PREFIX + "operator.*."
      + OperatorContext.VCORES.getName(), "1");
  dagProps.setProperty(StreamingApplication.APEX_PREFIX + "operator.*.port.*."
      + Context.PortContext.BUFFER_MEMORY_MB.getName(), "32");
  dagProps.setProperty(StreamingApplication.APEX_PREFIX + LogicalPlan.DEBUG.getName(), "true");
  LOG.info("dag properties: {}", dagProps);
  LOG.info("Initializing Client");
  LogicalPlanConfiguration tb = new LogicalPlanConfiguration(conf);
  tb.addFromProperties(dagProps, null);
  LogicalPlan dag = createDAG(tb);
  Configuration yarnConf = new Configuration(yarnCluster.getConfig());
  StramClient client = new StramClient(yarnConf, dag);
  try {
    client.start();
    if (StringUtils.isBlank(System.getenv("JAVA_HOME"))) {
      // JAVA_HOME not set in the yarn mini cluster
      client.javaCmd = "java";
    }
    LOG.info("Running client");
    client.startApplication();
    boolean result = client.monitorApplication();
    LOG.info("Client run completed. Result=" + result);
    Assert.assertTrue(result);
  } finally {
    client.stop();
  }
}
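Note that the apex-core test builds the request with Records.newRecord and leaves the node-state filter unset, while the Hadoop tests use the static newInstance factory. A small sketch of the two construction styles side by side; setNodeStates is part of the GetClusterNodesRequest record, so the Records style can also filter by state (fragment assumes the same imports as the snippets above):

// Factory style, as in the Hadoop tests in this listing.
GetClusterNodesRequest viaFactory =
    GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.RUNNING));

// Records style, as in the apex-core test, with an explicit filter added.
GetClusterNodesRequest viaRecords = Records.newRecord(GetClusterNodesRequest.class);
viaRecords.setNodeStates(EnumSet.of(NodeState.RUNNING));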
Use of org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest in project hadoop by apache:
from the class TestClientRMService, method testGetClusterNodes.
@Test
public void testGetClusterNodes() throws Exception {
  MockRM rm = new MockRM() {
    @Override
    protected ClientRMService createClientRMService() {
      return new ClientRMService(this.rmContext, scheduler, this.rmAppManager,
          this.applicationACLsManager, this.queueACLsManager,
          this.getRMContext().getRMDelegationTokenSecretManager());
    }
  };
  rm.start();
  RMNodeLabelsManager labelsMgr = rm.getRMContext().getNodeLabelManager();
  labelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y"));
  // Add a healthy node with label = x
  MockNM node = rm.registerNode("host1:1234", 1024);
  Map<NodeId, Set<String>> map = new HashMap<NodeId, Set<String>>();
  map.put(node.getNodeId(), ImmutableSet.of("x"));
  labelsMgr.replaceLabelsOnNode(map);
  rm.sendNodeStarted(node);
  node.nodeHeartbeat(true);
  // Add and lose a node with label = y
  MockNM lostNode = rm.registerNode("host2:1235", 1024);
  rm.sendNodeStarted(lostNode);
  lostNode.nodeHeartbeat(true);
  rm.waitForState(lostNode.getNodeId(), NodeState.RUNNING);
  rm.sendNodeLost(lostNode);
  // Create a client.
  Configuration conf = new Configuration();
  YarnRPC rpc = YarnRPC.create(conf);
  InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress();
  LOG.info("Connecting to ResourceManager at " + rmAddress);
  ApplicationClientProtocol client = (ApplicationClientProtocol) rpc.getProxy(
      ApplicationClientProtocol.class, rmAddress, conf);
  // Make call
  GetClusterNodesRequest request =
      GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.RUNNING));
  List<NodeReport> nodeReports = client.getClusterNodes(request).getNodeReports();
  Assert.assertEquals(1, nodeReports.size());
  Assert.assertNotSame("Node is expected to be healthy!", NodeState.UNHEALTHY,
      nodeReports.get(0).getNodeState());
  // Check node's label = x
  Assert.assertTrue(nodeReports.get(0).getNodeLabels().contains("x"));
  // Now make the node unhealthy.
  node.nodeHeartbeat(false);
  rm.waitForState(node.getNodeId(), NodeState.UNHEALTHY);
  // Call again
  nodeReports = client.getClusterNodes(request).getNodeReports();
  Assert.assertEquals("Unhealthy nodes should not show up by default", 0,
      nodeReports.size());
  // Change label of host1 to y
  map = new HashMap<NodeId, Set<String>>();
  map.put(node.getNodeId(), ImmutableSet.of("y"));
  labelsMgr.replaceLabelsOnNode(map);
  // Now query for UNHEALTHY nodes
  request = GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.UNHEALTHY));
  nodeReports = client.getClusterNodes(request).getNodeReports();
  Assert.assertEquals(1, nodeReports.size());
  Assert.assertEquals("Node is expected to be unhealthy!", NodeState.UNHEALTHY,
      nodeReports.get(0).getNodeState());
  Assert.assertTrue(nodeReports.get(0).getNodeLabels().contains("y"));
  // Remove labels of host1
  map = new HashMap<NodeId, Set<String>>();
  map.put(node.getNodeId(), ImmutableSet.of("y"));
  labelsMgr.removeLabelsFromNode(map);
  // Querying all states should return all nodes
  rm.registerNode("host3:1236", 1024);
  request = GetClusterNodesRequest.newInstance(EnumSet.allOf(NodeState.class));
  nodeReports = client.getClusterNodes(request).getNodeReports();
  Assert.assertEquals(3, nodeReports.size());
  // The labels of host1-3 should all be empty (instead of null)
  for (NodeReport report : nodeReports) {
    Assert.assertTrue(report.getNodeLabels() != null && report.getNodeLabels().isEmpty());
  }
  rpc.stopProxy(client, conf);
  rm.close();
}
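Both Hadoop tests talk to the ResourceManager through a hand-built RPC proxy, which suits tests but is rarely what application code does. Outside of tests, the same query normally goes through the high-level YarnClient API, which wraps the GetClusterNodesRequest call shown in YarnClientImpl.getNodeReports below. A minimal sketch, assuming a YarnConfiguration that can reach a running ResourceManager:

import java.util.List;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ListRunningNodes {
  public static void main(String[] args) throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    try {
      // Delegates to GetClusterNodesRequest internally; see
      // YarnClientImpl.getNodeReports below.
      List<NodeReport> running = yarnClient.getNodeReports(NodeState.RUNNING);
      for (NodeReport report : running) {
        System.out.println(report.getNodeId() + " labels=" + report.getNodeLabels());
      }
    } finally {
      yarnClient.stop();
    }
  }
}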
Use of org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest in project hadoop by apache:
from the class YarnClientImpl, method getNodeReports.
@Override
public List<NodeReport> getNodeReports(NodeState... states) throws YarnException, IOException {
  EnumSet<NodeState> statesSet = (states.length == 0)
      ? EnumSet.allOf(NodeState.class)
      : EnumSet.noneOf(NodeState.class);
  for (NodeState state : states) {
    statesSet.add(state);
  }
  GetClusterNodesRequest request = GetClusterNodesRequest.newInstance(statesSet);
  GetClusterNodesResponse response = rmClient.getClusterNodes(request);
  return response.getNodeReports();
}
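The empty-varargs branch matters here: an empty states argument means "return nodes in all states", while a non-empty one becomes an explicit filter. A sketch of an equivalent one-expression conversion for the non-empty case; note that EnumSet.copyOf(Collection) throws IllegalArgumentException for an empty plain Collection, so the length check cannot be dropped (assumes java.util.Arrays and java.util.EnumSet are imported):

// Equivalent conversion in one expression. EnumSet.copyOf(Collection)
// rejects an empty non-EnumSet collection, so the length check stays.
EnumSet<NodeState> statesSet = (states.length == 0)
    ? EnumSet.allOf(NodeState.class)
    : EnumSet.copyOf(Arrays.asList(states));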