
Example 11 with Container

Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.

From the class TestTaskAttempt, method testFetchFailureAttemptFinishTime.

@Test
public void testFetchFailureAttemptFinishTime() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(1, 2);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    TaskId reducetaskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.REDUCE);
    TaskAttemptId reduceTAId = MRBuilderUtils.newTaskAttemptId(reducetaskId, 0);
    Path jobFile = mock(Path.class);
    MockEventHandler eventHandler = new MockEventHandler();
    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
    JobConf jobConf = new JobConf();
    jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    jobConf.setBoolean("fs.file.impl.disable.cache", true);
    jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
    jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
    TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
    when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
    AppContext appCtx = mock(AppContext.class);
    ClusterInfo clusterInfo = mock(ClusterInfo.class);
    when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
    setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
    TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, mock(Token.class), new Credentials(), SystemClock.getInstance(), appCtx);
    NodeId nid = NodeId.newInstance("127.0.0.1", 0);
    ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
    Container container = mock(Container.class);
    when(container.getId()).thenReturn(contId);
    when(container.getNodeId()).thenReturn(nid);
    when(container.getNodeHttpAddress()).thenReturn("localhost:0");
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
    taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
    taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_COMPLETED));
    assertEquals("Task attempt is not in succeeded state", taImpl.getState(), TaskAttemptState.SUCCEEDED);
    assertTrue("Task Attempt finish time is not greater than 0", taImpl.getFinishTime() > 0);
    Long finishTime = taImpl.getFinishTime();
    Thread.sleep(5);
    taImpl.handle(new TaskAttemptTooManyFetchFailureEvent(attemptId, reduceTAId, "Host"));
    assertEquals("Task attempt is not in Too Many Fetch Failure state", taImpl.getState(), TaskAttemptState.FAILED);
    assertEquals("After TA_TOO_MANY_FETCH_FAILURE," + " Task attempt finish time is not the same ", finishTime, Long.valueOf(taImpl.getFinishTime()));
}
Also used : TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskAttemptListener(org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener) InetSocketAddress(java.net.InetSocketAddress) Token(org.apache.hadoop.security.token.Token) TaskAttemptContainerAssignedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent) Container(org.apache.hadoop.yarn.api.records.Container) TaskAttemptTooManyFetchFailureEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptTooManyFetchFailureEvent) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) JobConf(org.apache.hadoop.mapred.JobConf) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Path(org.apache.hadoop.fs.Path) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) MapTaskAttemptImpl(org.apache.hadoop.mapred.MapTaskAttemptImpl) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) TaskAttemptContainerLaunchedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent) ClusterInfo(org.apache.hadoop.mapreduce.v2.app.ClusterInfo) NodeId(org.apache.hadoop.yarn.api.records.NodeId) TaskSplitMetaInfo(org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) MapTaskAttemptImpl(org.apache.hadoop.mapred.MapTaskAttemptImpl) Credentials(org.apache.hadoop.security.Credentials) Test(org.junit.Test)
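Examples 11 through 13 share the same scaffolding: the allocated Container is a Mockito mock that only needs to answer getId(), getNodeId() and getNodeHttpAddress(), and the TaskAttemptImpl is then pushed through its state machine by handing it events directly. As a minimal sketch (assuming taImpl, attemptId and container are set up as in the example above), the sequence that takes a fresh attempt to RUNNING before a specific transition is exercised looks like this:

// Minimal sketch: drive a freshly assigned attempt to RUNNING
// (identifiers assumed to be created as in the example above).
taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
assertEquals("Task attempt is not in running state", TaskAttemptState.RUNNING, taImpl.getState());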

Example 12 with Container

Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.

From the class TestTaskAttempt, method testContainerKillWhileCommitPending.

@Test
public void testContainerKillWhileCommitPending() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(1, 2);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    Path jobFile = mock(Path.class);
    MockEventHandler eventHandler = new MockEventHandler();
    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
    JobConf jobConf = new JobConf();
    jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    jobConf.setBoolean("fs.file.impl.disable.cache", true);
    jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
    jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
    TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
    when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
    AppContext appCtx = mock(AppContext.class);
    ClusterInfo clusterInfo = mock(ClusterInfo.class);
    Resource resource = mock(Resource.class);
    when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
    when(resource.getMemorySize()).thenReturn(1024L);
    TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(), SystemClock.getInstance(), appCtx);
    NodeId nid = NodeId.newInstance("127.0.0.2", 0);
    ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
    Container container = mock(Container.class);
    when(container.getId()).thenReturn(contId);
    when(container.getNodeId()).thenReturn(nid);
    when(container.getNodeHttpAddress()).thenReturn("localhost:0");
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
    taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
    taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
    assertEquals("Task attempt is not in running state", taImpl.getState(), TaskAttemptState.RUNNING);
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_COMMIT_PENDING));
    assertEquals("Task should be in COMMIT_PENDING state", TaskAttemptStateInternal.COMMIT_PENDING, taImpl.getInternalState());
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
    assertFalse("InternalError occurred trying to handle TA_KILL", eventHandler.internalError);
    assertEquals("Task should be in KILL_CONTAINER_CLEANUP state", TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, taImpl.getInternalState());
}
Also used : Path(org.apache.hadoop.fs.Path) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttemptListener(org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener) InetSocketAddress(java.net.InetSocketAddress) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) Resource(org.apache.hadoop.yarn.api.records.Resource) MapTaskAttemptImpl(org.apache.hadoop.mapred.MapTaskAttemptImpl) Token(org.apache.hadoop.security.token.Token) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) TaskAttemptContainerAssignedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent) TaskAttemptContainerLaunchedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent) ClusterInfo(org.apache.hadoop.mapreduce.v2.app.ClusterInfo) Container(org.apache.hadoop.yarn.api.records.Container) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) NodeId(org.apache.hadoop.yarn.api.records.NodeId) TaskSplitMetaInfo(org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) MapTaskAttemptImpl(org.apache.hadoop.mapred.MapTaskAttemptImpl) JobConf(org.apache.hadoop.mapred.JobConf) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Credentials(org.apache.hadoop.security.Credentials) Test(org.junit.Test)

Example 13 with Container

Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.

From the class TestTaskAttempt, method testLaunchFailedWhileKilling.

@Test
public void testLaunchFailedWhileKilling() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(1, 2);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    Path jobFile = mock(Path.class);
    MockEventHandler eventHandler = new MockEventHandler();
    TaskAttemptListener taListener = mock(TaskAttemptListener.class);
    when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
    JobConf jobConf = new JobConf();
    jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
    jobConf.setBoolean("fs.file.impl.disable.cache", true);
    jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
    jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
    TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
    when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
    TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, new Token(), new Credentials(), SystemClock.getInstance(), null);
    NodeId nid = NodeId.newInstance("127.0.0.1", 0);
    ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
    Container container = mock(Container.class);
    when(container.getId()).thenReturn(contId);
    when(container.getNodeId()).thenReturn(nid);
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
    taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
    taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
    assertFalse(eventHandler.internalError);
    assertEquals("Task attempt is not assigned on the local node", Locality.NODE_LOCAL, taImpl.getLocality());
}
Also used : Path(org.apache.hadoop.fs.Path) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) TaskAttemptListener(org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener) InetSocketAddress(java.net.InetSocketAddress) MapTaskAttemptImpl(org.apache.hadoop.mapred.MapTaskAttemptImpl) Token(org.apache.hadoop.security.token.Token) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) TaskAttemptContainerAssignedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent) Container(org.apache.hadoop.yarn.api.records.Container) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) NodeId(org.apache.hadoop.yarn.api.records.NodeId) TaskSplitMetaInfo(org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) MapTaskAttemptImpl(org.apache.hadoop.mapred.MapTaskAttemptImpl) JobConf(org.apache.hadoop.mapred.JobConf) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Credentials(org.apache.hadoop.security.Credentials) Test(org.junit.Test)

Example 14 with Container

Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.

From the class TestCheckpointPreemptionPolicy, method testStrictPreemptionContract.

@Test
public void testStrictPreemptionContract() {
    final Map<ContainerId, TaskAttemptId> containers = assignedContainers;
    AMPreemptionPolicy.Context mPctxt = new AMPreemptionPolicy.Context() {

        @Override
        public TaskAttemptId getTaskAttempt(ContainerId cId) {
            return containers.get(cId);
        }

        @Override
        public List<Container> getContainers(TaskType t) {
            List<Container> p = new ArrayList<Container>();
            for (Map.Entry<ContainerId, TaskAttemptId> ent : assignedContainers.entrySet()) {
                if (ent.getValue().getTaskId().getTaskType().equals(t)) {
                    p.add(Container.newInstance(ent.getKey(), null, null, contToResourceMap.get(ent.getKey()), Priority.newInstance(0), null));
                }
            }
            return p;
        }
    };
    PreemptionMessage pM = generatePreemptionMessage(preemptedContainers, contToResourceMap, Resource.newInstance(1024, 1), true);
    CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
    policy.init(mActxt);
    policy.preempt(mPctxt, pM);
    for (ContainerId c : preemptedContainers) {
        TaskAttemptId t = assignedContainers.get(c);
        if (TaskType.MAP.equals(t.getTaskId().getTaskType())) {
            assert policy.isPreempted(t) == false;
        } else {
            assert policy.isPreempted(t);
        }
    }
}
Also used : RunningAppContext(org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext) PreemptionMessage(org.apache.hadoop.yarn.api.records.PreemptionMessage) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) ArrayList(java.util.ArrayList) PreemptionContainer(org.apache.hadoop.yarn.api.records.PreemptionContainer) Container(org.apache.hadoop.yarn.api.records.Container) CheckpointAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) TaskType(org.apache.hadoop.mapreduce.v2.api.records.TaskType) AMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy) CheckpointAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)
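The getContainers(TaskType) override above builds lightweight Container records with Container.newInstance, passing null for the node, HTTP address and token because the preemption policy only inspects the container id and resource. A minimal sketch of the fully populated call, with illustrative values standing in for the fields the test leaves null:

// Illustrative only: a fully populated Container record built with the same factory method
// the test uses; the concrete values below are placeholders, not part of the test.
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
Container container = Container.newInstance(
        contId,                        // container id
        nid,                           // node the container was allocated on
        "localhost:0",                 // node HTTP address
        Resource.newInstance(1024, 1), // memory (MB) and vcores
        Priority.newInstance(0),       // allocation priority
        null);                         // container token (not needed in tests)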

Example 15 with Container

Use of org.apache.hadoop.yarn.api.records.Container in project hadoop by apache.

From the class TestCheckpointPreemptionPolicy, method testPreemptionContract.

@Test
public void testPreemptionContract() {
    final Map<ContainerId, TaskAttemptId> containers = assignedContainers;
    AMPreemptionPolicy.Context mPctxt = new AMPreemptionPolicy.Context() {

        @Override
        public TaskAttemptId getTaskAttempt(ContainerId cId) {
            return containers.get(cId);
        }

        @Override
        public List<Container> getContainers(TaskType t) {
            List<Container> p = new ArrayList<Container>();
            for (Map.Entry<ContainerId, TaskAttemptId> ent : assignedContainers.entrySet()) {
                if (ent.getValue().getTaskId().getTaskType().equals(t)) {
                    p.add(Container.newInstance(ent.getKey(), null, null, contToResourceMap.get(ent.getKey()), Priority.newInstance(0), null));
                }
            }
            return p;
        }
    };
    PreemptionMessage pM = generatePreemptionMessage(preemptedContainers, contToResourceMap, Resource.newInstance(minAlloc, 1), false);
    CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
    policy.init(mActxt);
    int supposedMemPreemption = (int) pM.getContract().getResourceRequest().get(0)
            .getResourceRequest().getCapability().getMemorySize()
            * pM.getContract().getResourceRequest().get(0).getResourceRequest().getNumContainers();
    // first round of preemption
    policy.preempt(mPctxt, pM);
    List<TaskAttemptId> preempting = validatePreemption(pM, policy, supposedMemPreemption);
    // redundant message
    policy.preempt(mPctxt, pM);
    List<TaskAttemptId> preempting2 = validatePreemption(pM, policy, supposedMemPreemption);
    // check that nothing got added
    assert preempting2.equals(preempting);
    // simulate 2 task completions/successful preemption
    policy.handleCompletedContainer(preempting.get(0));
    policy.handleCompletedContainer(preempting.get(1));
    // remove from assignedContainers
    Iterator<Map.Entry<ContainerId, TaskAttemptId>> it = assignedContainers.entrySet().iterator();
    while (it.hasNext()) {
        Map.Entry<ContainerId, TaskAttemptId> ent = it.next();
        if (ent.getValue().equals(preempting.get(0)) || ent.getValue().equals(preempting.get(1)))
            it.remove();
    }
    // one more message asking for preemption
    policy.preempt(mPctxt, pM);
    // triggers preemption of 2 more containers (i.e., the preemption set changes)
    List<TaskAttemptId> preempting3 = validatePreemption(pM, policy, supposedMemPreemption);
    assert preempting3.equals(preempting2) == false;
}
Also used : RunningAppContext(org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext) PreemptionMessage(org.apache.hadoop.yarn.api.records.PreemptionMessage) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) ArrayList(java.util.ArrayList) PreemptionContainer(org.apache.hadoop.yarn.api.records.PreemptionContainer) Container(org.apache.hadoop.yarn.api.records.Container) CheckpointAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) TaskType(org.apache.hadoop.mapreduce.v2.api.records.TaskType) AMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy) CheckpointAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)
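The supposedMemPreemption computation above chains through PreemptionMessage, PreemptionContract and PreemptionResourceRequest to reach the underlying ResourceRequest. Pulling that ResourceRequest into a local variable expresses the same arithmetic more readably; a minimal sketch, assuming a single resource request in the contract as in this test:

// Extract the requested preemption from a PreemptionMessage
// (assumes exactly one PreemptionResourceRequest, as in the test above).
ResourceRequest rr = pM.getContract().getResourceRequest().get(0).getResourceRequest();
long requestedMem = rr.getCapability().getMemorySize() * rr.getNumContainers();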

Aggregations

Container (org.apache.hadoop.yarn.api.records.Container): 242
Test (org.junit.Test): 128
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 109
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 105
ArrayList (java.util.ArrayList): 55
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 55
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 48
AllocateResponse (org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse): 44
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 44
Resource (org.apache.hadoop.yarn.api.records.Resource): 41
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 38
ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest): 38
ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus): 36
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 35
MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM): 31
RMAppAttempt (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt): 31
HashMap (java.util.HashMap): 26
MockAM (org.apache.hadoop.yarn.server.resourcemanager.MockAM): 26
MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM): 26
Priority (org.apache.hadoop.yarn.api.records.Priority): 25