Use of org.apache.hadoop.yarn.api.records.NodeReport in project hadoop by apache: class MockResourceManagerFacade, method allocate.
@SuppressWarnings("deprecation")
@Override
public AllocateResponse allocate(AllocateRequest request)
    throws YarnException, IOException {
  if (request.getAskList() != null && request.getAskList().size() > 0
      && request.getReleaseList() != null && request.getReleaseList().size() > 0) {
    Assert.fail("The mock RM implementation does not support receiving "
        + "askList and releaseList in the same heartbeat");
  }
  String amrmToken = getAppIdentifier();
  ArrayList<Container> containerList = new ArrayList<Container>();
  if (request.getAskList() != null) {
    for (ResourceRequest rr : request.getAskList()) {
      for (int i = 0; i < rr.getNumContainers(); i++) {
        ContainerId containerId = ContainerId.newInstance(
            getApplicationAttemptId(1), containerIndex.incrementAndGet());
        Container container = Records.newRecord(Container.class);
        container.setId(containerId);
        container.setPriority(rr.getPriority());
        // We don't use the node for running containers in the test cases,
        // so it is OK to hard-code it to a dummy value.
        NodeId nodeId = NodeId.newInstance(
            !Strings.isNullOrEmpty(rr.getResourceName())
                ? rr.getResourceName() : "dummy", 1000);
        container.setNodeId(nodeId);
        container.setResource(rr.getCapability());
        containerList.add(container);
        synchronized (applicationContainerIdMap) {
          // Keep track of the containers returned to this application;
          // we will need them later.
          Assert.assertTrue(
              "The application id is not registered before allocate(): " + amrmToken,
              applicationContainerIdMap.containsKey(amrmToken));
          List<ContainerId> ids = applicationContainerIdMap.get(amrmToken);
          ids.add(containerId);
          this.allocatedContainerMap.put(containerId, container);
        }
      }
    }
  }
  if (request.getReleaseList() != null && request.getReleaseList().size() > 0) {
    Log.getLog().info("Releasing containers: " + request.getReleaseList().size());
    synchronized (applicationContainerIdMap) {
      Assert.assertTrue(
          "The application id is not registered before allocate(): " + amrmToken,
          applicationContainerIdMap.containsKey(amrmToken));
      List<ContainerId> ids = applicationContainerIdMap.get(amrmToken);
      for (ContainerId id : request.getReleaseList()) {
        boolean found = false;
        for (ContainerId c : ids) {
          if (c.equals(id)) {
            found = true;
            break;
          }
        }
        Assert.assertTrue("ContainerId " + id
            + " being released is not valid for application: "
            + conf.get("AMRMTOKEN"), found);
        ids.remove(id);
        // Return the released container back to the AM with a new fake id.
        // The test case does not care about the id; it is faked because
        // otherwise the LRM would throw a duplicate-identifier exception.
        // Returning fake containers is done ONLY so the test code can
        // confirm that the sub-cluster resource managers received the
        // release request.
        ContainerId fakeContainerId = ContainerId.newInstance(
            getApplicationAttemptId(1), containerIndex.incrementAndGet());
        Container fakeContainer = allocatedContainerMap.get(id);
        fakeContainer.setId(fakeContainerId);
        containerList.add(fakeContainer);
      }
    }
  }
  Log.getLog().info("Allocating containers: " + containerList.size()
      + " for application attempt: " + conf.get("AMRMTOKEN"));
  // Always issue a new AMRMToken, as if the RM rolled its master key.
  Token newAMRMToken = Token.newInstance(new byte[0], "", new byte[0], "");
  return AllocateResponse.newInstance(0, new ArrayList<ContainerStatus>(),
      containerList, new ArrayList<NodeReport>(), null, AMCommand.AM_RESYNC, 1,
      null, new ArrayList<NMToken>(), newAMRMToken,
      new ArrayList<UpdatedContainer>());
}
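For context, a caller drives this mock the same way an AM heartbeats a real RM. A minimal usage sketch follows, assuming the mock's constructor takes a Configuration and a starting container index and that the application was registered first; both are assumptions, since only allocate() is shown above.

// Sketch only: constructor signature and prior registration are assumptions.
MockResourceManagerFacade mockRM = new MockResourceManagerFacade(conf, 0);
mockRM.registerApplicationMaster(RegisterApplicationMasterRequest
    .newInstance("localhost", 1234, "http://localhost:8080"));
// Ask for two containers anywhere in the cluster.
ResourceRequest ask = ResourceRequest.newInstance(Priority.newInstance(1),
    ResourceRequest.ANY, Resource.newInstance(1024, 1), 2);
AllocateResponse response = mockRM.allocate(
    AllocateRequest.newInstance(0, 0F, Collections.singletonList(ask),
        null, null));
// The mock fabricates one container per requested container.
Assert.assertEquals(2, response.getAllocatedContainers().size());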
Use of org.apache.hadoop.yarn.api.records.NodeReport in project hadoop by apache: class TestAMRMRPCNodeUpdates, method testAMRMUnusableNodes.
@Test
public void testAMRMUnusableNodes() throws Exception {
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 10000);
  MockNM nm2 = rm.registerNode("127.0.0.2:1234", 10000);
  MockNM nm3 = rm.registerNode("127.0.0.3:1234", 10000);
  MockNM nm4 = rm.registerNode("127.0.0.4:1234", 10000);
  dispatcher.await();
  RMApp app1 = rm.submitApp(2000);
  // Trigger the scheduling so the AM gets 'launched' on nm1.
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  // Registering the AM returns no unusable nodes.
  am1.registerAppAttempt();
  // The first allocate request returns no updated nodes.
  AllocateRequest allocateRequest1 =
      AllocateRequest.newInstance(0, 0F, null, null, null);
  AllocateResponse response1 =
      allocate(attempt1.getAppAttemptId(), allocateRequest1);
  List<NodeReport> updatedNodes = response1.getUpdatedNodes();
  Assert.assertEquals(0, updatedNodes.size());
  syncNodeHeartbeat(nm4, false);
  // The next allocate request returns the updated node.
  allocateRequest1 = AllocateRequest.newInstance(
      response1.getResponseId(), 0F, null, null, null);
  response1 = allocate(attempt1.getAppAttemptId(), allocateRequest1);
  updatedNodes = response1.getUpdatedNodes();
  Assert.assertEquals(1, updatedNodes.size());
  NodeReport nr = updatedNodes.iterator().next();
  Assert.assertEquals(nm4.getNodeId(), nr.getNodeId());
  Assert.assertEquals(NodeState.UNHEALTHY, nr.getNodeState());
  // Resending the allocate request returns the same result.
  response1 = allocate(attempt1.getAppAttemptId(), allocateRequest1);
  updatedNodes = response1.getUpdatedNodes();
  Assert.assertEquals(1, updatedNodes.size());
  nr = updatedNodes.iterator().next();
  Assert.assertEquals(nm4.getNodeId(), nr.getNodeId());
  Assert.assertEquals(NodeState.UNHEALTHY, nr.getNodeState());
  syncNodeLost(nm3);
  // A subsequent allocate request returns only the delta.
  allocateRequest1 = AllocateRequest.newInstance(
      response1.getResponseId(), 0F, null, null, null);
  response1 = allocate(attempt1.getAppAttemptId(), allocateRequest1);
  updatedNodes = response1.getUpdatedNodes();
  Assert.assertEquals(1, updatedNodes.size());
  nr = updatedNodes.iterator().next();
  Assert.assertEquals(nm3.getNodeId(), nr.getNodeId());
  Assert.assertEquals(NodeState.LOST, nr.getNodeState());
  // Registering another AM gives it the complete failed list.
  RMApp app2 = rm.submitApp(2000);
  // Trigger an nm2 heartbeat so that the AM gets launched on it.
  nm2.nodeHeartbeat(true);
  RMAppAttempt attempt2 = app2.getCurrentAppAttempt();
  MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
  // Registering the AM returns all unusable nodes.
  am2.registerAppAttempt();
  // The first allocate request returns no updated nodes.
  AllocateRequest allocateRequest2 =
      AllocateRequest.newInstance(0, 0F, null, null, null);
  AllocateResponse response2 =
      allocate(attempt2.getAppAttemptId(), allocateRequest2);
  updatedNodes = response2.getUpdatedNodes();
  Assert.assertEquals(0, updatedNodes.size());
  syncNodeHeartbeat(nm4, true);
  // Both AMs should get the delta of updated nodes.
  allocateRequest1 = AllocateRequest.newInstance(
      response1.getResponseId(), 0F, null, null, null);
  response1 = allocate(attempt1.getAppAttemptId(), allocateRequest1);
  updatedNodes = response1.getUpdatedNodes();
  Assert.assertEquals(1, updatedNodes.size());
  nr = updatedNodes.iterator().next();
  Assert.assertEquals(nm4.getNodeId(), nr.getNodeId());
  Assert.assertEquals(NodeState.RUNNING, nr.getNodeState());
  allocateRequest2 = AllocateRequest.newInstance(
      response2.getResponseId(), 0F, null, null, null);
  response2 = allocate(attempt2.getAppAttemptId(), allocateRequest2);
  updatedNodes = response2.getUpdatedNodes();
  Assert.assertEquals(1, updatedNodes.size());
  nr = updatedNodes.iterator().next();
  Assert.assertEquals(nm4.getNodeId(), nr.getNodeId());
  Assert.assertEquals(NodeState.RUNNING, nr.getNodeState());
  // Subsequent allocate calls should return no updated nodes.
  allocateRequest2 = AllocateRequest.newInstance(
      response2.getResponseId(), 0F, null, null, null);
  response2 = allocate(attempt2.getAppAttemptId(), allocateRequest2);
  updatedNodes = response2.getUpdatedNodes();
  Assert.assertEquals(0, updatedNodes.size());
  // TODO: how to do the above for a LOST node
}
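The syncNodeHeartbeat and syncNodeLost helpers are not shown above. A plausible sketch of what they must do, given the assertions (the exact bodies, and rm.sendNodeLost in particular, are assumptions):

private void syncNodeHeartbeat(MockNM nm, boolean health) throws Exception {
  // Report the node's health and drain the dispatcher so the RM processes
  // the node-update event before the next allocate call.
  nm.nodeHeartbeat(health);
  dispatcher.await();
}

private void syncNodeLost(MockNM nm) throws Exception {
  // Heartbeat once so the node is RUNNING, then expire it to LOST
  // (sendNodeLost is an assumed MockRM helper).
  nm.nodeHeartbeat(true);
  dispatcher.await();
  rm.sendNodeLost(nm);
  dispatcher.await();
}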
Use of org.apache.hadoop.yarn.api.records.NodeReport in project hadoop by apache: class TestJobImpl, method testUnusableNodeTransition.
@Test(timeout = 20000)
public void testUnusableNodeTransition() throws Exception {
  Configuration conf = new Configuration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  conf.setInt(MRJobConfig.NUM_REDUCES, 1);
  DrainDispatcher dispatcher = new DrainDispatcher();
  dispatcher.init(conf);
  dispatcher.start();
  CyclicBarrier syncBarrier = new CyclicBarrier(2);
  OutputCommitter committer = new TestingOutputCommitter(syncBarrier, true);
  CommitterEventHandler commitHandler =
      createCommitterEventHandler(dispatcher, committer);
  commitHandler.init(conf);
  commitHandler.start();
  final JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
  // Add a special task attempt event handler that rolls back the
  // succeeded-mapper count when a task attempt is killed for rescheduling.
  EventHandler<TaskAttemptEvent> taskAttemptEventHandler =
      new EventHandler<TaskAttemptEvent>() {
        @Override
        public void handle(TaskAttemptEvent event) {
          if (event.getType() == TaskAttemptEventType.TA_KILL) {
            job.decrementSucceededMapperCount();
          }
        }
      };
  dispatcher.register(TaskAttemptEventType.class, taskAttemptEventHandler);
  // Replace the tasks with spied versions that return the right attempts.
  Map<TaskId, Task> spiedTasks = new HashMap<TaskId, Task>();
  List<NodeReport> nodeReports = new ArrayList<NodeReport>();
  Map<NodeReport, TaskId> nodeReportsToTaskIds = new HashMap<NodeReport, TaskId>();
  for (Map.Entry<TaskId, Task> e : job.tasks.entrySet()) {
    TaskId taskId = e.getKey();
    Task task = e.getValue();
    if (taskId.getTaskType() == TaskType.MAP) {
      // Add an attempt to the task to simulate nodes.
      NodeId nodeId = mock(NodeId.class);
      TaskAttempt attempt = mock(TaskAttempt.class);
      when(attempt.getNodeId()).thenReturn(nodeId);
      TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
      when(attempt.getID()).thenReturn(attemptId);
      // Create a spied task.
      Task spied = spy(task);
      doReturn(attempt).when(spied).getAttempt(any(TaskAttemptId.class));
      spiedTasks.put(taskId, spied);
      // Create a NodeReport based on the node id.
      NodeReport report = mock(NodeReport.class);
      when(report.getNodeState()).thenReturn(NodeState.UNHEALTHY);
      when(report.getNodeId()).thenReturn(nodeId);
      nodeReports.add(report);
      nodeReportsToTaskIds.put(report, taskId);
    }
  }
  // Replace the tasks with the spied tasks.
  job.tasks.putAll(spiedTasks);
  // Complete all mappers first.
  for (TaskId taskId : job.tasks.keySet()) {
    if (taskId.getTaskType() == TaskType.MAP) {
      // Generate a task attempt completion event first to populate the
      // nodes-to-succeeded-attempts map.
      TaskAttemptCompletionEvent tce =
          Records.newRecord(TaskAttemptCompletionEvent.class);
      TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
      tce.setAttemptId(attemptId);
      tce.setStatus(TaskAttemptCompletionEventStatus.SUCCEEDED);
      job.handle(new JobTaskAttemptCompletedEvent(tce));
      // Complete the task itself.
      job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
      Assert.assertEquals(JobState.RUNNING, job.getState());
    }
  }
  // Send an event for a node transition.
  NodeReport firstMapperNodeReport = nodeReports.get(0);
  NodeReport secondMapperNodeReport = nodeReports.get(1);
  job.handle(new JobUpdatedNodesEvent(job.getID(),
      Collections.singletonList(firstMapperNodeReport)));
  dispatcher.await();
  // Complete the reducer.
  for (TaskId taskId : job.tasks.keySet()) {
    if (taskId.getTaskType() == TaskType.REDUCE) {
      job.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
    }
  }
  // Send another node-transition event for the other mapper;
  // this should not trigger rescheduling.
  job.handle(new JobUpdatedNodesEvent(job.getID(),
      Collections.singletonList(secondMapperNodeReport)));
  // Complete the first mapper, which was rescheduled.
  TaskId firstMapper = nodeReportsToTaskIds.get(firstMapperNodeReport);
  job.handle(new JobTaskEvent(firstMapper, TaskState.SUCCEEDED));
  // Verify the state moves to COMMITTING.
  assertJobState(job, JobStateInternal.COMMITTING);
  // Let the committer complete and verify the job succeeds.
  syncBarrier.await();
  assertJobState(job, JobStateInternal.SUCCEEDED);
  dispatcher.stop();
  commitHandler.stop();
}
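The assertJobState helper is also not shown; given that transitions happen asynchronously on the dispatcher thread, it presumably polls until the job reaches the expected internal state. A hedged sketch (the polling loop and timeout are assumptions):

private static void assertJobState(JobImpl job, JobStateInternal expected)
    throws InterruptedException {
  // Poll because state transitions happen on the dispatcher thread.
  int remainingMs = 5000;
  while (job.getInternalState() != expected && remainingMs > 0) {
    Thread.sleep(10);
    remainingMs -= 10;
  }
  Assert.assertEquals(expected, job.getInternalState());
}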
Use of org.apache.hadoop.yarn.api.records.NodeReport in project alluxio by Alluxio: class ContainerAllocatorTest, method setup.
/**
 * Creates a container allocator for allocating the specified numContainers with
 * the specified maxContainersPerHost.
 *
 * The yarn client is mocked to make it look like there are numHosts different
 * hosts in the system, and the resource manager client is mocked to allocate
 * containers when they are requested.
 */
private ContainerAllocator setup(int numHosts, int maxContainersPerHost,
    int numContainers) throws Exception {
  ContainerAllocator containerAllocator = new ContainerAllocator(CONTAINER_NAME,
      numContainers, maxContainersPerHost, mResource, mYarnClient, mRMClient);
  List<NodeReport> nodeReports = new ArrayList<>();
  for (int i = 0; i < numHosts; i++) {
    NodeReport nodeReport = Records.newRecord(NodeReport.class);
    nodeReport.setNodeId(NodeId.newInstance("host" + i, 0));
    nodeReports.add(nodeReport);
  }
  when(mYarnClient.getNodeReports(Matchers.<NodeState[]>anyVararg()))
      .thenReturn(nodeReports);
  doAnswer(allocateFirstHostAnswer(containerAllocator))
      .when(mRMClient).addContainerRequest(any(ContainerRequest.class));
  return containerAllocator;
}
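The allocateFirstHostAnswer factory referenced above is not shown. A hedged sketch of the stubbing it likely performs (the allocateContainer callback name is an assumption): every addContainerRequest is answered immediately with a container on the first host named in the request.

private Answer<Void> allocateFirstHostAnswer(final ContainerAllocator allocator) {
  return new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      ContainerRequest request = (ContainerRequest) invocation.getArguments()[0];
      // Grant a container on the first host the allocator asked for.
      Container container = Records.newRecord(Container.class);
      container.setNodeId(NodeId.newInstance(request.getNodes().get(0), 0));
      allocator.allocateContainer(container);  // assumed callback
      return null;
    }
  };
}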
Use of org.apache.hadoop.yarn.api.records.NodeReport in project apex-core by apache: class ResourceRequestHandler, method updateNodeReports.
/**
 * Tracks updates to available resources. Resource availability is used to make
 * decisions about where to request new containers.
 *
 * @param nodeReports the node reports received from the resource manager
 */
public void updateNodeReports(List<NodeReport> nodeReports) {
  for (NodeReport nr : nodeReports) {
    LOG.debug("Node report: rackName={}, nodeid={}, numContainers={}, "
        + "capability={}, used={}, state={}", nr.getRackName(), nr.getNodeId(),
        nr.getNumContainers(), nr.getCapability(), nr.getUsed(), nr.getNodeState());
    nodeReportMap.put(nr.getNodeId().getHost(), nr);
    nodeToRack.put(nr.getNodeId().getHost(), nr.getRackName());
  }
}
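On the application-master side this method would typically be fed from each heartbeat's updated-node list. A minimal hedged sketch (the amRmClient and resourceRequestHandler names are illustrative, not from the source):

// Inside the AM heartbeat loop:
AllocateResponse heartbeat = amRmClient.allocate(progress);
List<NodeReport> updated = heartbeat.getUpdatedNodes();
if (updated != null && !updated.isEmpty()) {
  resourceRequestHandler.updateNodeReports(updated);
}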