Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.
The class RMContainerAllocator, method handleReduceContainerRequest.
@SuppressWarnings({ "unchecked" })
private void handleReduceContainerRequest(ContainerRequestEvent reqEvent) {
  assert (reqEvent.getAttemptID().getTaskId().getTaskType()
      .equals(TaskType.REDUCE));
  Resource supportedMaxContainerCapability = getMaxContainerCapability();
  JobId jobId = getJob().getID();
  if (reduceResourceRequest.equals(Resources.none())) {
    reduceResourceRequest = reqEvent.getCapability();
    eventHandler.handle(new JobHistoryEvent(jobId,
        new NormalizedResourceEvent(
            org.apache.hadoop.mapreduce.TaskType.REDUCE,
            reduceResourceRequest.getMemorySize())));
    LOG.info("reduceResourceRequest:" + reduceResourceRequest);
  }
  boolean reduceContainerRequestAccepted = true;
  if (reduceResourceRequest.getMemorySize()
      > supportedMaxContainerCapability.getMemorySize()
      || reduceResourceRequest.getVirtualCores()
      > supportedMaxContainerCapability.getVirtualCores()) {
    reduceContainerRequestAccepted = false;
  }
  if (reduceContainerRequestAccepted) {
    // Normalize the event's capability to the job-wide reduce resource request.
    reqEvent.getCapability().setVirtualCores(
        reduceResourceRequest.getVirtualCores());
    reqEvent.getCapability().setMemorySize(
        reduceResourceRequest.getMemorySize());
    if (reqEvent.getEarlierAttemptFailed()) {
      // Previously failed reduces go to the front of the queue to fail fast.
      pendingReduces.addFirst(new ContainerRequest(reqEvent, PRIORITY_REDUCE,
          reduceNodeLabelExpression));
    } else {
      // New reduces are added to the pending queue and ramped up slowly.
      pendingReduces.add(new ContainerRequest(reqEvent, PRIORITY_REDUCE,
          reduceNodeLabelExpression));
    }
  } else {
    String diagMsg = "REDUCE capability required is more than the "
        + "supported max container capability in the cluster. Killing"
        + " the Job. reduceResourceRequest: " + reduceResourceRequest
        + " maxContainerCapability:" + supportedMaxContainerCapability;
    LOG.info(diagMsg);
    eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, diagMsg));
    eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
  }
}
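The heart of this method is the two-dimensional capability check: a reduce request is rejected if either its memory or its vcores exceed the cluster's maximum container capability. Below is a minimal, self-contained sketch of that comparison, assuming Hadoop 2.8+ (where Resource#getMemorySize() returns a long); the exceedsCapability helper is a hypothetical name, not part of RMContainerAllocator.

import org.apache.hadoop.yarn.api.records.Resource;

public class CapabilityCheck {

  // Mirrors the accept/reject test in handleReduceContainerRequest: a request
  // does not fit if it exceeds the max capability in either dimension.
  static boolean exceedsCapability(Resource requested, Resource max) {
    return requested.getMemorySize() > max.getMemorySize()
        || requested.getVirtualCores() > max.getVirtualCores();
  }

  public static void main(String[] args) {
    Resource max = Resource.newInstance(4096, 4);       // 4096 MB, 4 vcores
    Resource ok = Resource.newInstance(2048, 2);
    Resource tooBig = Resource.newInstance(8192, 2);    // memory over the max
    System.out.println(exceedsCapability(ok, max));     // false
    System.out.println(exceedsCapability(tooBig, max)); // true
  }
}

Note that the check is per-dimension, which is why a request with acceptable memory can still be rejected for asking too many vcores.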
Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.
The class TestLocalContainerAllocator, method testAllocatedContainerResourceIsNotNull.
@Test
public void testAllocatedContainerResourceIsNotNull() {
  ArgumentCaptor<TaskAttemptContainerAssignedEvent> containerAssignedCaptor =
      ArgumentCaptor.forClass(TaskAttemptContainerAssignedEvent.class);
  @SuppressWarnings("unchecked")
  EventHandler<Event> eventHandler = mock(EventHandler.class);
  AppContext context = mock(AppContext.class);
  when(context.getEventHandler()).thenReturn(eventHandler);
  ContainerId containerId =
      ContainerId.fromString("container_1427562107907_0002_01_000001");
  LocalContainerAllocator containerAllocator = new LocalContainerAllocator(
      mock(ClientService.class), context, "localhost", -1, -1, containerId);
  ContainerAllocatorEvent containerAllocatorEvent = createContainerRequestEvent();
  containerAllocator.handle(containerAllocatorEvent);
  verify(eventHandler, times(1)).handle(containerAssignedCaptor.capture());
  Container container = containerAssignedCaptor.getValue().getContainer();
  Resource containerResource = container.getResource();
  Assert.assertNotNull(containerResource);
  // JUnit's assertEquals takes the expected value first.
  Assert.assertEquals(0, containerResource.getMemorySize());
  Assert.assertEquals(0, containerResource.getVirtualCores());
}
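The test above relies on Mockito's ArgumentCaptor to grab the event the allocator hands to the event handler, then asserts on the Resource carried inside it. Here is a minimal sketch of that capture-and-assert pattern, assuming JUnit 4 and Mockito on the classpath; the Listener and Event types are hypothetical stand-ins, not Hadoop classes.

import static org.mockito.Mockito.*;

import org.junit.Assert;
import org.junit.Test;
import org.mockito.ArgumentCaptor;

public class CaptorSketchTest {

  interface Event { int payload(); }

  interface Listener { void handle(Event e); }

  @Test
  public void capturesHandledEvent() {
    Listener listener = mock(Listener.class);
    Event event = mock(Event.class);
    when(event.payload()).thenReturn(42);

    // In the real test this call happens inside the code under test.
    listener.handle(event);

    // Capture the argument that was passed to handle() and assert on it.
    ArgumentCaptor<Event> captor = ArgumentCaptor.forClass(Event.class);
    verify(listener, times(1)).handle(captor.capture());
    Assert.assertEquals(42, captor.getValue().payload());
  }
}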
Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.
The class TestRMContainerAllocator, method testUnsupportedReduceContainerRequirement.
@Test
public void testUnsupportedReduceContainerRequirement() throws Exception {
  final Resource maxContainerSupported = Resource.newInstance(1, 1);
  final ApplicationId appId = ApplicationId.newInstance(1, 1);
  final ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 1);
  final JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  final MockScheduler mockScheduler = new MockScheduler(appAttemptId);
  final Configuration conf = new Configuration();
  final MyContainerAllocator allocator = new MyContainerAllocator(null, conf,
      appAttemptId, mock(Job.class), SystemClock.getInstance()) {

    @Override
    protected void register() {
    }

    @Override
    protected ApplicationMasterProtocol createSchedulerProxy() {
      return mockScheduler;
    }

    @Override
    protected Resource getMaxContainerCapability() {
      return maxContainerSupported;
    }
  };
  ContainerRequestEvent reduceRequestEvt = createReq(jobId, 0,
      (int) (maxContainerSupported.getMemorySize() + 10),
      maxContainerSupported.getVirtualCores(), new String[0], false, true);
  allocator.sendRequests(Arrays.asList(reduceRequestEvt));
  // Reduce container requests are added to the pending queue upon request;
  // schedule all reduces here so that we can observe whether the requests
  // are accepted by RMContainerAllocator on the RM side.
  allocator.scheduleAllReduces();
  allocator.schedule();
  Assert.assertEquals(0, mockScheduler.lastAnyAskReduce);
}
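The key testing trick here is the anonymous subclass: by overriding the protected getMaxContainerCapability() hook, the test pins the cluster's maximum container size to 1 MB and 1 vcore without standing up a ResourceManager. A minimal sketch of that injection pattern follows, assuming hadoop-yarn-api on the classpath; the Allocator class below is a hypothetical simplification of RMContainerAllocator.

import org.apache.hadoop.yarn.api.records.Resource;

public class OverrideSketch {

  static class Allocator {
    // In the real allocator this value comes from the RM at registration time.
    protected Resource getMaxContainerCapability() {
      return Resource.newInstance(8192, 8);
    }

    boolean accepts(Resource requested) {
      Resource max = getMaxContainerCapability();
      return requested.getMemorySize() <= max.getMemorySize()
          && requested.getVirtualCores() <= max.getVirtualCores();
    }
  }

  public static void main(String[] args) {
    // Pin the max capability to 1 MB / 1 vcore, as the test does.
    Allocator allocator = new Allocator() {
      @Override
      protected Resource getMaxContainerCapability() {
        return Resource.newInstance(1, 1);
      }
    };
    // Requesting 11 MB against a 1 MB max is rejected, so this prints false.
    System.out.println(allocator.accepts(Resource.newInstance(11, 1)));
  }
}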
Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.
The class TestTaskAttempt, method testAppDiognosticEventOnNewTask.
@Test
public void testAppDiognosticEventOnNewTask() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress())
      .thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemorySize()).thenReturn(1024L);
  setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(),
      SystemClock.getInstance(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.1", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptId,
      "Task got killed"));
  assertFalse(
      "InternalError occurred trying to handle TA_DIAGNOSTICS_UPDATE on assigned task",
      eventHandler.internalError);
}
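Note how the test never builds a real Resource: it mocks the abstract Resource class and stubs getMemorySize(). A minimal sketch of that stubbing, assuming Mockito and hadoop-yarn-api on the classpath; since getMemorySize() returns a long, the stubbed value needs the L suffix.

import static org.mockito.Mockito.*;

import org.apache.hadoop.yarn.api.records.Resource;

public class MockResourceSketch {
  public static void main(String[] args) {
    // Mockito can mock the abstract Resource class directly.
    Resource resource = mock(Resource.class);
    when(resource.getMemorySize()).thenReturn(1024L); // long, hence 1024L
    when(resource.getVirtualCores()).thenReturn(2);

    System.out.println(resource.getMemorySize());   // 1024
    System.out.println(resource.getVirtualCores()); // 2
  }
}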
Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.
The class TestTaskAttempt, method testContainerKillWhileRunning.
@Test
public void testContainerKillWhileRunning() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1, 2);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
  Path jobFile = mock(Path.class);
  MockEventHandler eventHandler = new MockEventHandler();
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress())
      .thenReturn(new InetSocketAddress("localhost", 0));
  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
  TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
  when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
  AppContext appCtx = mock(AppContext.class);
  ClusterInfo clusterInfo = mock(ClusterInfo.class);
  Resource resource = mock(Resource.class);
  when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
  when(resource.getMemorySize()).thenReturn(1024L);
  TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
      jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(),
      SystemClock.getInstance(), appCtx);
  NodeId nid = NodeId.newInstance("127.0.0.2", 0);
  ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
  Container container = mock(Container.class);
  when(container.getId()).thenReturn(contId);
  when(container.getNodeId()).thenReturn(nid);
  when(container.getNodeHttpAddress()).thenReturn("localhost:0");
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
  taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container,
      mock(Map.class)));
  taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
  // JUnit's assertEquals takes the expected value before the actual one.
  assertEquals("Task attempt is not in running state",
      TaskAttemptState.RUNNING, taImpl.getState());
  taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
  assertFalse("InternalError occurred trying to handle TA_KILL",
      eventHandler.internalError);
  assertEquals("Task should be in KILL_CONTAINER_CLEANUP state",
      TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, taImpl.getInternalState());
}
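The test drives TaskAttemptImpl through its lifecycle by feeding events in order (TA_SCHEDULE, container assigned, container launched, TA_KILL) and then asserting on the resulting state. A minimal sketch of that event-driven state-machine pattern follows; the Attempt class and its transitions are a hypothetical simplification, not the real TaskAttemptImpl state machine.

public class StateSketch {

  enum State { NEW, UNASSIGNED, ASSIGNED, RUNNING, KILL_CONTAINER_CLEANUP }

  enum EventType { SCHEDULE, ASSIGN, LAUNCH, KILL }

  static class Attempt {
    private State state = State.NEW;

    // Each event advances the attempt to its next lifecycle state.
    void handle(EventType e) {
      switch (e) {
        case SCHEDULE: state = State.UNASSIGNED; break;
        case ASSIGN:   state = State.ASSIGNED; break;
        case LAUNCH:   state = State.RUNNING; break;
        case KILL:     state = State.KILL_CONTAINER_CLEANUP; break;
      }
    }

    State getState() { return state; }
  }

  public static void main(String[] args) {
    Attempt attempt = new Attempt();
    attempt.handle(EventType.SCHEDULE);
    attempt.handle(EventType.ASSIGN);
    attempt.handle(EventType.LAUNCH);
    attempt.handle(EventType.KILL);
    // Mirrors the test's final assertion on the internal state.
    System.out.println(attempt.getState()); // KILL_CONTAINER_CLEANUP
  }
}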