
Example 16 with TaskAttemptContainerAssignedEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent in project hadoop by apache.

From the class TestTaskAttempt, method testDoubleTooManyFetchFailure.

@Test
public void testDoubleTooManyFetchFailure() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(1, 2);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    TaskId reduceTaskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.REDUCE);
    TaskAttemptId reduceTAId = MRBuilderUtils.newTaskAttemptId(reduceTaskId, 0);
    Path jobFile = mock(Path.class);
    MockEventHandler eventHandler = new MockEventHandler();
    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
    JobConf jobConf = new JobConf();
    jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    jobConf.setBoolean("fs.file.impl.disable.cache", true);
    jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
    jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
    TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
    when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
    AppContext appCtx = mock(AppContext.class);
    ClusterInfo clusterInfo = mock(ClusterInfo.class);
    Resource resource = mock(Resource.class);
    when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
    when(resource.getMemorySize()).thenReturn(1024L);
    setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
    TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(), SystemClock.getInstance(), appCtx);
    NodeId nid = NodeId.newInstance("127.0.0.1", 0);
    ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
    Container container = mock(Container.class);
    when(container.getId()).thenReturn(contId);
    when(container.getNodeId()).thenReturn(nid);
    when(container.getNodeHttpAddress()).thenReturn("localhost:0");
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
    taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
    taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_COMPLETED));
    assertEquals("Task attempt is not in succeeded state", taImpl.getState(), TaskAttemptState.SUCCEEDED);
    taImpl.handle(new TaskAttemptTooManyFetchFailureEvent(attemptId, reduceTAId, "Host"));
    assertEquals("Task attempt is not in FAILED state", taImpl.getState(), TaskAttemptState.FAILED);
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
    assertEquals("Task attempt is not in FAILED state, still", taImpl.getState(), TaskAttemptState.FAILED);
    assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED", eventHandler.internalError);
}
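
MockEventHandler is referenced throughout these tests but not shown in this listing. Below is a minimal sketch of the helper, assuming (as the assertions above suggest) that its only job is to record whether the attempt escalated an internal error to the job; the real class is defined elsewhere in TestTaskAttempt and may differ in detail.

import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;

// Hedged sketch: flag any JobEventType.INTERNAL_ERROR event the attempt sends.
public static class MockEventHandler implements EventHandler<Event> {
    public volatile boolean internalError;

    @Override
    public void handle(Event event) {
        if (event instanceof JobEvent
                && ((JobEvent) event).getType() == JobEventType.INTERNAL_ERROR) {
            internalError = true;
        }
    }
}
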
Also used: TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskAttemptListener(org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener) InetSocketAddress(java.net.InetSocketAddress) Token(org.apache.hadoop.security.token.Token) TaskAttemptContainerAssignedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent) Container(org.apache.hadoop.yarn.api.records.Container) TaskAttemptTooManyFetchFailureEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptTooManyFetchFailureEvent) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) JobConf(org.apache.hadoop.mapred.JobConf) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Path(org.apache.hadoop.fs.Path) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) Resource(org.apache.hadoop.yarn.api.records.Resource) MapTaskAttemptImpl(org.apache.hadoop.mapred.MapTaskAttemptImpl) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) TaskAttemptContainerLaunchedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent) ClusterInfo(org.apache.hadoop.mapreduce.v2.app.ClusterInfo) NodeId(org.apache.hadoop.yarn.api.records.NodeId) TaskSplitMetaInfo(org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Credentials(org.apache.hadoop.security.Credentials) Test(org.junit.Test)

Example 17 with TaskAttemptContainerAssignedEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent in project hadoop by apache.

From the class TestTaskAttempt, method testContainerKillAfterAssigned.

@Test
public void testContainerKillAfterAssigned() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(1, 2);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    Path jobFile = mock(Path.class);
    MockEventHandler eventHandler = new MockEventHandler();
    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
    JobConf jobConf = new JobConf();
    jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    jobConf.setBoolean("fs.file.impl.disable.cache", true);
    jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
    jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
    TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
    when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
    AppContext appCtx = mock(AppContext.class);
    ClusterInfo clusterInfo = mock(ClusterInfo.class);
    Resource resource = mock(Resource.class);
    when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
    when(resource.getMemorySize()).thenReturn(1024L);
    TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(), SystemClock.getInstance(), appCtx);
    NodeId nid = NodeId.newInstance("127.0.0.2", 0);
    ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
    Container container = mock(Container.class);
    when(container.getId()).thenReturn(contId);
    when(container.getNodeId()).thenReturn(nid);
    when(container.getNodeHttpAddress()).thenReturn("localhost:0");
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
    taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
    assertEquals("Task attempt is not in assinged state", taImpl.getInternalState(), TaskAttemptStateInternal.ASSIGNED);
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
    assertEquals("Task should be in KILL_CONTAINER_CLEANUP state", TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, taImpl.getInternalState());
}
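
StubbedFS, wired in through fs.file.impl in each of these tests, is likewise not shown in this excerpt. A plausible sketch follows, assuming it only needs to hand back a canned FileStatus so the attempt can resolve its job file without touching the real local disk; treat it as an illustration rather than the definitive implementation.

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

// Hedged sketch: a local filesystem whose getFileStatus never fails,
// so the mocked job file path above always "exists".
public static class StubbedFS extends RawLocalFileSystem {
    @Override
    public FileStatus getFileStatus(Path f) throws IOException {
        return new FileStatus(1, false, 1, 1, 1, f);
    }
}
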
Also used: Path(org.apache.hadoop.fs.Path) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttemptListener(org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener) InetSocketAddress(java.net.InetSocketAddress) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) Resource(org.apache.hadoop.yarn.api.records.Resource) MapTaskAttemptImpl(org.apache.hadoop.mapred.MapTaskAttemptImpl) Token(org.apache.hadoop.security.token.Token) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) TaskAttemptContainerAssignedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent) ClusterInfo(org.apache.hadoop.mapreduce.v2.app.ClusterInfo) Container(org.apache.hadoop.yarn.api.records.Container) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) NodeId(org.apache.hadoop.yarn.api.records.NodeId) TaskSplitMetaInfo(org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) JobConf(org.apache.hadoop.mapred.JobConf) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Credentials(org.apache.hadoop.security.Credentials) Test(org.junit.Test)

Example 18 with TaskAttemptContainerAssignedEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent in project hadoop by apache.

From the class TestTaskAttempt, method createTaskAttemptImpl.

private TaskAttemptImpl createTaskAttemptImpl(MockEventHandler eventHandler) {
    ApplicationId appId = ApplicationId.newInstance(1, 2);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    Path jobFile = mock(Path.class);
    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
    JobConf jobConf = new JobConf();
    jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    jobConf.setBoolean("fs.file.impl.disable.cache", true);
    jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
    jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
    TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
    when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
    AppContext appCtx = mock(AppContext.class);
    ClusterInfo clusterInfo = mock(ClusterInfo.class);
    when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
    setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
    TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, mock(Token.class), new Credentials(), SystemClock.getInstance(), appCtx);
    NodeId nid = NodeId.newInstance("127.0.0.1", 0);
    ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
    Container container = mock(Container.class);
    when(container.getId()).thenReturn(contId);
    when(container.getNodeId()).thenReturn(nid);
    when(container.getNodeHttpAddress()).thenReturn("localhost:0");
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
    taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
    taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
    return taImpl;
}
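
Since createTaskAttemptImpl returns an attempt that has already been scheduled, assigned, and launched, a caller only has to send the scenario-specific events. A hypothetical usage sketch in the style of the tests above (not taken verbatim from TestTaskAttempt):

// Drive the running attempt to completion and check the external state,
// mirroring the TA_DONE / TA_CONTAINER_COMPLETED sequence from Example 16.
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(), TaskAttemptEventType.TA_DONE));
taImpl.handle(new TaskAttemptEvent(taImpl.getID(), TaskAttemptEventType.TA_CONTAINER_COMPLETED));
assertEquals("Task attempt is not in SUCCEEDED state", TaskAttemptState.SUCCEEDED, taImpl.getState());
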
Also used: Path(org.apache.hadoop.fs.Path) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttemptListener(org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener) InetSocketAddress(java.net.InetSocketAddress) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) MapTaskAttemptImpl(org.apache.hadoop.mapred.MapTaskAttemptImpl) Token(org.apache.hadoop.security.token.Token) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) TaskAttemptContainerAssignedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent) TaskAttemptContainerLaunchedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent) ClusterInfo(org.apache.hadoop.mapreduce.v2.app.ClusterInfo) Container(org.apache.hadoop.yarn.api.records.Container) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) NodeId(org.apache.hadoop.yarn.api.records.NodeId) TaskSplitMetaInfo(org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) JobConf(org.apache.hadoop.mapred.JobConf) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Credentials(org.apache.hadoop.security.Credentials)

Example 19 with TaskAttemptContainerAssignedEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent in project hadoop by apache.

From the class TestTaskAttempt, method testContainerCleanedWhileCommitting.

@Test
public void testContainerCleanedWhileCommitting() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(1, 2);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    Path jobFile = mock(Path.class);
    MockEventHandler eventHandler = new MockEventHandler();
    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
    JobConf jobConf = new JobConf();
    jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    jobConf.setBoolean("fs.file.impl.disable.cache", true);
    jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
    jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
    TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
    when(splits.getLocations()).thenReturn(new String[] {});
    AppContext appCtx = mock(AppContext.class);
    ClusterInfo clusterInfo = mock(ClusterInfo.class);
    Resource resource = mock(Resource.class);
    when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
    when(resource.getMemorySize()).thenReturn(1024L);
    setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
    TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(), SystemClock.getInstance(), appCtx);
    NodeId nid = NodeId.newInstance("127.0.0.1", 0);
    ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
    Container container = mock(Container.class);
    when(container.getId()).thenReturn(contId);
    when(container.getNodeId()).thenReturn(nid);
    when(container.getNodeHttpAddress()).thenReturn("localhost:0");
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
    taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
    taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_COMMIT_PENDING));
    assertEquals("Task attempt is not in commit pending state", taImpl.getState(), TaskAttemptState.COMMIT_PENDING);
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
    assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED", eventHandler.internalError);
    assertEquals("Task attempt is assigned locally", Locality.OFF_SWITCH, taImpl.getLocality());
}
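
The locality assertion passes because splits.getLocations() was stubbed to return an empty array, so the assigned container can match neither a data-local host nor a data-local rack. The following self-contained sketch illustrates that classification; the class and method names here are hypothetical, and the real decision is made inside TaskAttemptImpl when the container is assigned.

import java.util.Set;

// Illustrative-only locality classification (not TaskAttemptImpl code).
final class LocalitySketch {
    enum Locality { NODE_LOCAL, RACK_LOCAL, OFF_SWITCH }

    static Locality classify(String host, String rack,
                             Set<String> dataLocalHosts, Set<String> dataLocalRacks) {
        if (dataLocalHosts.contains(host)) {
            return Locality.NODE_LOCAL;
        }
        if (dataLocalRacks.contains(rack)) {
            return Locality.RACK_LOCAL;
        }
        // Empty split locations, as in the test above, always land here.
        return Locality.OFF_SWITCH;
    }

    public static void main(String[] args) {
        // splits.getLocations() returned new String[] {}, so nothing matches:
        System.out.println(classify("127.0.0.1", "/default-rack", Set.of(), Set.of()));
        // prints OFF_SWITCH
    }
}
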
Also used: Path(org.apache.hadoop.fs.Path) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttemptListener(org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener) InetSocketAddress(java.net.InetSocketAddress) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) Resource(org.apache.hadoop.yarn.api.records.Resource) MapTaskAttemptImpl(org.apache.hadoop.mapred.MapTaskAttemptImpl) Token(org.apache.hadoop.security.token.Token) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) TaskAttemptContainerAssignedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent) TaskAttemptContainerLaunchedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent) ClusterInfo(org.apache.hadoop.mapreduce.v2.app.ClusterInfo) Container(org.apache.hadoop.yarn.api.records.Container) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) NodeId(org.apache.hadoop.yarn.api.records.NodeId) TaskSplitMetaInfo(org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) JobConf(org.apache.hadoop.mapred.JobConf) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Credentials(org.apache.hadoop.security.Credentials) Test(org.junit.Test)

Example 20 with TaskAttemptContainerAssignedEvent

Use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent in project hadoop by apache.

From the class TestRMContainerAllocator, method testMapNodeLocality.

@Test
public void testMapNodeLocality() throws Exception {
    // test checks that ordering of allocated containers list from the RM does 
    // not affect the map->container assignment done by the AM. If there is a 
    // node local container available for a map then it should be assigned to 
    // that container and not a rack-local container that happened to be seen 
    // earlier in the allocated containers list from the RM.
    // Regression test for MAPREDUCE-4893
    LOG.info("Running testMapNodeLocality");
    Configuration conf = new Configuration();
    MyResourceManager rm = new MyResourceManager(conf);
    rm.start();
    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
    // Submit the application
    RMApp app = rm.submitApp(1024);
    dispatcher.await();
    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
    amNodeManager.nodeHeartbeat(true);
    dispatcher.await();
    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
    rm.sendAMLaunched(appAttemptId);
    dispatcher.await();
    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
    Job mockJob = mock(Job.class);
    when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
    // add resources to scheduler
    // can assign 2 maps 
    MockNM nodeManager1 = rm.registerNode("h1:1234", 3072);
    // won't heartbeat on the node-local node
    rm.registerNode("h2:1234", 10240);
    // assign 1 map
    MockNM nodeManager3 = rm.registerNode("h3:1234", 1536);
    dispatcher.await();
    // create the container requests for maps
    ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[] { "h1" });
    allocator.sendRequest(event1);
    ContainerRequestEvent event2 = createReq(jobId, 2, 1024, new String[] { "h1" });
    allocator.sendRequest(event2);
    ContainerRequestEvent event3 = createReq(jobId, 3, 1024, new String[] { "h2" });
    allocator.sendRequest(event3);
    // this tells the scheduler about the requests
    // as nodes are not added, no allocations
    List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
    dispatcher.await();
    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
    // update resources in scheduler
    // Node heartbeat from rack-local first. This makes node h3 the first in the
    // list of allocated containers but it should not be assigned to task1.
    nodeManager3.nodeHeartbeat(true);
    // Node heartbeat from node-local next. This allocates 2 node local 
    // containers for task1 and task2. These should be matched with those tasks.
    nodeManager1.nodeHeartbeat(true);
    dispatcher.await();
    assigned = allocator.schedule();
    dispatcher.await();
    checkAssignments(new ContainerRequestEvent[] { event1, event2, event3 }, assigned, false);
    // remove the rack-local assignment that should have happened for task3
    for (TaskAttemptContainerAssignedEvent event : assigned) {
        if (event.getTaskAttemptID().equals(event3.getAttemptID())) {
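            // Removing inside a for-each is only safe here because the loop
            // breaks immediately afterwards, before the iterator advances again.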
            assigned.remove(event);
            Assert.assertTrue(event.getContainer().getNodeId().getHost().equals("h3"));
            break;
        }
    }
    checkAssignments(new ContainerRequestEvent[] { event1, event2 }, assigned, true);
}
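
createReq is a TestRMContainerAllocator helper that this excerpt does not show. Below is a hedged sketch of what it plausibly does, assuming a ContainerRequestEvent constructor that takes the attempt id, a resource capability, preferred hosts, and racks; the real helper may take extra flags (e.g. for reduces or failed attempts).

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.yarn.api.records.Resource;

// Hedged sketch: build a map-task attempt id and request a container of the
// given memory size on the preferred hosts, defaulting to the default rack.
private ContainerRequestEvent createReq(JobId jobId, int taskAttemptId,
        int memory, String[] hosts) {
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
    Resource containerNeed = Resource.newInstance(memory, 1);
    return new ContainerRequestEvent(attemptId, containerNeed, hosts,
        new String[] { NetworkTopology.DEFAULT_RACK });
}
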
Also used: DrainDispatcher(org.apache.hadoop.yarn.event.DrainDispatcher) RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) MockNM(org.apache.hadoop.yarn.server.resourcemanager.MockNM) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) TaskAttemptContainerAssignedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent) Test(org.junit.Test)

Aggregations

TaskAttemptContainerAssignedEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent): 22
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 19
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 19
Test (org.junit.Test): 19
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 13
Container (org.apache.hadoop.yarn.api.records.Container): 12
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 11
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 11
InetSocketAddress (java.net.InetSocketAddress): 10
Path (org.apache.hadoop.fs.Path): 10
JobConf (org.apache.hadoop.mapred.JobConf): 10
MapTaskAttemptImpl (org.apache.hadoop.mapred.MapTaskAttemptImpl): 10
TaskSplitMetaInfo (org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo): 10
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 10
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 10
TaskAttemptListener (org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener): 10
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 10
Credentials (org.apache.hadoop.security.Credentials): 10
Token (org.apache.hadoop.security.token.Token): 10
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 10