Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent in project hadoop by apache.
Class TestRMContainerAllocator, method testUnsupportedReduceContainerRequirement.
@Test
public void testUnsupportedReduceContainerRequirement() throws Exception {
  final Resource maxContainerSupported = Resource.newInstance(1, 1);
  final ApplicationId appId = ApplicationId.newInstance(1, 1);
  final ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 1);
  final JobId jobId =
      MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  final MockScheduler mockScheduler = new MockScheduler(appAttemptId);
  final Configuration conf = new Configuration();
  final MyContainerAllocator allocator = new MyContainerAllocator(null, conf,
      appAttemptId, mock(Job.class), SystemClock.getInstance()) {

    @Override
    protected void register() {
    }

    @Override
    protected ApplicationMasterProtocol createSchedulerProxy() {
      return mockScheduler;
    }

    @Override
    protected Resource getMaxContainerCapability() {
      return maxContainerSupported;
    }
  };
  ContainerRequestEvent reduceRequestEvt = createReq(jobId, 0,
      (int) (maxContainerSupported.getMemorySize() + 10),
      maxContainerSupported.getVirtualCores(), new String[0], false, true);
  allocator.sendRequests(Arrays.asList(reduceRequestEvt));
  // Reducer container requests are added to the pending queue upon request,
  // schedule all reducers here so that we can observe if reducer requests
  // are accepted by RMContainerAllocator on RM side.
  allocator.scheduleAllReduces();
  allocator.schedule();
  Assert.assertEquals(0, mockScheduler.lastAnyAskReduce);
}
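As a reading aid, here is a minimal sketch of the event that the createReq call above expands to, based on the createReq helper shown at the end of this page; it reuses the test's local names, and the oversized memory value is what the allocator is expected to drop.

// Sketch only, mirroring the createReq helper shown at the end of this page:
// a reduce request whose memory exceeds the advertised max container capability.
TaskId reduceTaskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
TaskAttemptId reduceAttemptId = MRBuilderUtils.newTaskAttemptId(reduceTaskId, 0);
Resource oversized = Resource.newInstance(
    (int) (maxContainerSupported.getMemorySize() + 10),
    maxContainerSupported.getVirtualCores());
ContainerRequestEvent oversizedReduceReq = new ContainerRequestEvent(
    reduceAttemptId, oversized, new String[0],
    new String[] { NetworkTopology.DEFAULT_RACK });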
Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent in project hadoop by apache.
Class TestRMContainerAllocator, method testUpdatedNodes.
@Test
public void testUpdatedNodes() throws Exception {
  Configuration conf = new Configuration();
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher =
      (DrainDispatcher) rm.getRMContext().getDispatcher();
  // Submit the application
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId =
      app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  MyContainerAllocator allocator =
      new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  // add resources to scheduler
  MockNM nm1 = rm.registerNode("h1:1234", 10240);
  MockNM nm2 = rm.registerNode("h2:1234", 10240);
  dispatcher.await();
  // create the map container request
  ContainerRequestEvent event = createReq(jobId, 1, 1024, new String[] { "h1" });
  allocator.sendRequest(event);
  TaskAttemptId attemptId = event.getAttemptID();
  TaskAttempt mockTaskAttempt = mock(TaskAttempt.class);
  when(mockTaskAttempt.getNodeId()).thenReturn(nm1.getNodeId());
  Task mockTask = mock(Task.class);
  when(mockTask.getAttempt(attemptId)).thenReturn(mockTaskAttempt);
  when(mockJob.getTask(attemptId.getTaskId())).thenReturn(mockTask);
  // this tells the scheduler about the requests
  List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
  dispatcher.await();
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  Assert.assertEquals(1, allocator.getJobUpdatedNodeEvents().size());
  Assert.assertEquals(3,
      allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
  allocator.getJobUpdatedNodeEvents().clear();
  // get the assignment
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals(1, assigned.size());
  Assert.assertEquals(nm1.getNodeId(),
      assigned.get(0).getContainer().getNodeId());
  // no updated nodes reported
  Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
  Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
  // mark nodes bad
  nm1.nodeHeartbeat(false);
  nm2.nodeHeartbeat(false);
  dispatcher.await();
  // schedule response returns updated nodes
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals(0, assigned.size());
  // updated nodes are reported
  Assert.assertEquals(1, allocator.getJobUpdatedNodeEvents().size());
  Assert.assertEquals(1, allocator.getTaskAttemptKillEvents().size());
  Assert.assertEquals(2,
      allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
  Assert.assertEquals(attemptId,
      allocator.getTaskAttemptKillEvents().get(0).getTaskAttemptID());
  allocator.getJobUpdatedNodeEvents().clear();
  allocator.getTaskAttemptKillEvents().clear();
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals(0, assigned.size());
  // no updated nodes reported
  Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
  Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
}
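For orientation, a small sketch of what the map request built by createReq(jobId, 1, 1024, new String[] { "h1" }) is assumed to carry. The four-argument overload is not shown on this page; judging from the full helper at the end, it should yield an event whose attempt id and host hints back the assertions above.

// Sketch only, assuming the four-argument createReq overload delegates to the
// full helper shown at the end of this page with default vcores and flags:
Assert.assertEquals(attemptId, event.getAttemptID());
Assert.assertArrayEquals(new String[] { "h1" }, event.getHosts());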
Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent in project hadoop by apache.
Class TestTaskAttempt, method testHostResolveAttempt.
@Test
public void testHostResolveAttempt() throws Exception {
  TaskAttemptImpl.RequestContainerTransition rct =
      new TaskAttemptImpl.RequestContainerTransition(false);
  EventHandler eventHandler = mock(EventHandler.class);
  String[] hosts = new String[3];
  hosts[0] = "192.168.1.1";
  hosts[1] = "host2";
  hosts[2] = "host3";
  TaskSplitMetaInfo splitInfo =
      new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024L);
  TaskAttemptImpl mockTaskAttempt =
      createMapTaskAttemptImplForTest(eventHandler, splitInfo);
  TaskAttemptImpl spyTa = spy(mockTaskAttempt);
  when(spyTa.resolveHost(hosts[0])).thenReturn("host1");
  spyTa.dataLocalHosts = spyTa.resolveHosts(splitInfo.getLocations());
  TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);
  rct.transition(spyTa, mockTAEvent);
  verify(spyTa).resolveHost(hosts[0]);
  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(2)).handle(arg.capture());
  if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
    Assert.fail("Second Event not of type ContainerRequestEvent");
  }
  Map<String, Boolean> expected = new HashMap<String, Boolean>();
  expected.put("host1", true);
  expected.put("host2", true);
  expected.put("host3", true);
  ContainerRequestEvent cre =
      (ContainerRequestEvent) arg.getAllValues().get(1);
  String[] requestedHosts = cre.getHosts();
  for (String h : requestedHosts) {
    expected.remove(h);
  }
  assertEquals(0, expected.size());
}
Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent in project hadoop by apache.
Class TestTaskAttempt, method testSingleRackRequest.
@Test
public void testSingleRackRequest() throws Exception {
  TaskAttemptImpl.RequestContainerTransition rct =
      new TaskAttemptImpl.RequestContainerTransition(false);
  EventHandler eventHandler = mock(EventHandler.class);
  String[] hosts = new String[3];
  hosts[0] = "host1";
  hosts[1] = "host2";
  hosts[2] = "host3";
  TaskSplitMetaInfo splitInfo =
      new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024L);
  TaskAttemptImpl mockTaskAttempt =
      createMapTaskAttemptImplForTest(eventHandler, splitInfo);
  TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);
  rct.transition(mockTaskAttempt, mockTAEvent);
  ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
  verify(eventHandler, times(2)).handle(arg.capture());
  if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
    Assert.fail("Second Event not of type ContainerRequestEvent");
  }
  ContainerRequestEvent cre =
      (ContainerRequestEvent) arg.getAllValues().get(1);
  String[] requestedRacks = cre.getRacks();
  // Only a single occurrence of /DefaultRack
  assertEquals(1, requestedRacks.length);
}
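Since all three hosts resolve to the same rack under the default topology mapping, the single entry asserted above is expected to be the default rack. A hedged follow-up check, assuming default rack resolution, could look like this:

// Sketch only, assuming the default RackResolver mapping places every host
// on NetworkTopology.DEFAULT_RACK ("/default-rack"):
assertEquals(NetworkTopology.DEFAULT_RACK, requestedRacks[0]);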
Use of org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent in project hadoop by apache.
Class TestRMContainerAllocator, method createReq.
private ContainerRequestEvent createReq(JobId jobId, int taskAttemptId,
    int memory, int vcore, String[] hosts, boolean earlierFailedAttempt,
    boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId =
      MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
  Resource containerNeed = Resource.newInstance(memory, vcore);
  if (earlierFailedAttempt) {
    return ContainerRequestEvent.createContainerRequestEventForFailedContainer(
        attemptId, containerNeed);
  }
  return new ContainerRequestEvent(attemptId, containerNeed, hosts,
      new String[] { NetworkTopology.DEFAULT_RACK });
}
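A brief usage sketch of the helper above, mirroring the calls made in the tests on this page; the memory and vcore values are illustrative.

// Sketch only: a data-local map request on host "h1" (1024 MB, 1 vcore) ...
ContainerRequestEvent mapReq =
    createReq(jobId, 1, 1024, 1, new String[] { "h1" }, false, false);
// ... and a request for an earlier failed attempt, which the helper routes
// through createContainerRequestEventForFailedContainer (hosts are ignored).
ContainerRequestEvent failedReq =
    createReq(jobId, 1, 1024, 1, new String[] { "h1" }, true, false);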