Example 6 with TaskAttemptCompletionEvent

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent in project hadoop by apache.

From the class TestJobHistoryEntities, the method testGetTaskAttemptCompletionEvent.

/**
   * Simple test of some methods of CompletedJob
   * @throws Exception
   */
@Test(timeout = 30000)
public void testGetTaskAttemptCompletionEvent() throws Exception {
    HistoryFileInfo info = mock(HistoryFileInfo.class);
    when(info.getConfFile()).thenReturn(fullConfPath);
    completedJob = new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user", info, jobAclsManager);
    TaskCompletionEvent[] events = completedJob.getMapAttemptCompletionEvents(0, 1000);
    assertEquals(10, completedJob.getMapAttemptCompletionEvents(0, 10).length);
    int currentEventId = 0;
    for (TaskCompletionEvent taskAttemptCompletionEvent : events) {
        int eventId = taskAttemptCompletionEvent.getEventId();
        assertTrue(eventId >= currentEventId);
        currentEventId = eventId;
    }
    assertNull(completedJob.loadConfFile());
    // job name
    assertEquals("Sleep job", completedJob.getName());
    // queue name
    assertEquals("default", completedJob.getQueueName());
    // progress
    assertEquals(1.0, completedJob.getProgress(), 0.001);
    // 12 completion events in total
    assertEquals(12, completedJob.getTaskAttemptCompletionEvents(0, 1000).length);
    // the first 10 events
    assertEquals(10, completedJob.getTaskAttemptCompletionEvents(0, 10).length);
    // 10 events requested starting at event id 5; only the remaining 7 are returned
    assertEquals(7, completedJob.getTaskAttemptCompletionEvents(5, 10).length);
    // diagnostics hold a single empty string, i.e. no errors
    assertEquals(1, completedJob.getDiagnostics().size());
    assertEquals("", completedJob.getDiagnostics().get(0));
    assertEquals(0, completedJob.getJobACLs().size());
}
Also used : HistoryFileInfo(org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo) TaskCompletionEvent(org.apache.hadoop.mapred.TaskCompletionEvent) Test(org.junit.Test)
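
The assertions above treat the (fromEventId, maxEvents) pair as a simple window over the ordered event list, which is why requesting 10 events starting at id 5 returns only the 7 of the 12 that remain. A minimal sketch of that windowing semantics, illustrative only and not the actual CompletedJob implementation:

// Illustrative only: return at most maxEvents events, starting at fromEventId,
// assuming event ids coincide with list positions as in the test above.
static TaskAttemptCompletionEvent[] window(List<TaskAttemptCompletionEvent> all, int fromEventId, int maxEvents) {
    if (fromEventId >= all.size()) {
        return new TaskAttemptCompletionEvent[0];
    }
    int to = Math.min(all.size(), fromEventId + maxEvents);
    return all.subList(fromEventId, to).toArray(new TaskAttemptCompletionEvent[0]);
}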

Example 7 with TaskAttemptCompletionEvent

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent in project hadoop by apache.

From the class TaskImpl, the method handleTaskAttemptCompletion.

// always called inside a transition, in turn inside the Write Lock
private void handleTaskAttemptCompletion(TaskAttemptId attemptId, TaskAttemptCompletionEventStatus status) {
    TaskAttempt attempt = attempts.get(attemptId);
    // only generate a completion event if the attempt actually launched on a node
    if (attempt.getNodeHttpAddress() != null) {
        TaskAttemptCompletionEvent tce = recordFactory.newRecordInstance(TaskAttemptCompletionEvent.class);
        // placeholder id; the job assigns the real event id when it records the event
        tce.setEventId(-1);
        String scheme = (encryptedShuffle) ? "https://" : "http://";
        tce.setMapOutputServerAddress(StringInterner.weakIntern(scheme + attempt.getNodeHttpAddress().split(":")[0] + ":" + attempt.getShufflePort()));
        tce.setStatus(status);
        tce.setAttemptId(attempt.getID());
        int runTime = 0;
        if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0)
            runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
        tce.setAttemptRunTime(runTime);
        //raise the event to job so that it adds the completion event to its
        //data structures
        eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
    }
}
Also used : JobTaskAttemptCompletedEvent(org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptCompletedEvent) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) TaskAttemptCompletionEvent(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent)
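
For context, a consumer of the record built above can read the same fields back through the matching getters. A hypothetical sketch, for illustration only (logShuffleEndpoint is not a Hadoop method):

// Hypothetical consumer: log where a successful attempt's map output can be fetched.
// The getters mirror the setters used in handleTaskAttemptCompletion above.
void logShuffleEndpoint(TaskAttemptCompletionEvent tce) {
    if (tce.getStatus() == TaskAttemptCompletionEventStatus.SUCCEEDED) {
        System.out.println("Fetch output for " + tce.getAttemptId()
            + " from " + tce.getMapOutputServerAddress()
            + " (ran " + tce.getAttemptRunTime() + " ms)");
    }
}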

Example 8 with TaskAttemptCompletionEvent

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent in project hadoop by apache.

From the class MockJobs, the method newJob.

public static Job newJob(ApplicationId appID, int i, int n, int m, Path confFile, boolean hasFailedTasks) {
    final JobId id = newJobID(appID, i);
    final String name = newJobName();
    final JobReport report = newJobReport(id);
    final Map<TaskId, Task> tasks = newTasks(id, n, m, hasFailedTasks);
    final TaskCount taskCount = getTaskCount(tasks.values());
    final Counters counters = getCounters(tasks.values());
    final Path configFile = confFile;
    Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
    final Configuration conf = new Configuration();
    conf.set(JobACL.VIEW_JOB.getAclName(), "testuser");
    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
    JobACLsManager aclsManager = new JobACLsManager(conf);
    tmpJobACLs = aclsManager.constructJobACLs(conf);
    final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
    return new Job() {

        @Override
        public JobId getID() {
            return id;
        }

        @Override
        public String getName() {
            return name;
        }

        @Override
        public JobState getState() {
            return report.getJobState();
        }

        @Override
        public JobReport getReport() {
            return report;
        }

        @Override
        public float getProgress() {
            return 0;
        }

        @Override
        public Counters getAllCounters() {
            return counters;
        }

        @Override
        public Map<TaskId, Task> getTasks() {
            return tasks;
        }

        @Override
        public Task getTask(TaskId taskID) {
            return tasks.get(taskID);
        }

        @Override
        public int getTotalMaps() {
            return taskCount.maps;
        }

        @Override
        public int getTotalReduces() {
            return taskCount.reduces;
        }

        @Override
        public int getCompletedMaps() {
            return taskCount.completedMaps;
        }

        @Override
        public int getCompletedReduces() {
            return taskCount.completedReduces;
        }

        @Override
        public boolean isUber() {
            return false;
        }

        @Override
        public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(int fromEventId, int maxEvents) {
            return null;
        }

        @Override
        public TaskCompletionEvent[] getMapAttemptCompletionEvents(int startIndex, int maxEvents) {
            return null;
        }

        @Override
        public Map<TaskId, Task> getTasks(TaskType taskType) {
            throw new UnsupportedOperationException("Not supported yet.");
        }

        @Override
        public List<String> getDiagnostics() {
            return Collections.<String>emptyList();
        }

        @Override
        public boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) {
            return true;
        }

        @Override
        public String getUserName() {
            return "mock";
        }

        @Override
        public String getQueueName() {
            return "mockqueue";
        }

        @Override
        public Path getConfFile() {
            return configFile;
        }

        @Override
        public Map<JobACL, AccessControlList> getJobACLs() {
            return jobACLs;
        }

        @Override
        public List<AMInfo> getAMInfos() {
            List<AMInfo> amInfoList = new LinkedList<AMInfo>();
            amInfoList.add(createAMInfo(1));
            amInfoList.add(createAMInfo(2));
            return amInfoList;
        }

        @Override
        public Configuration loadConfFile() throws IOException {
            FileContext fc = FileContext.getFileContext(configFile.toUri(), conf);
            Configuration jobConf = new Configuration(false);
            jobConf.addResource(fc.open(configFile), configFile.toString());
            return jobConf;
        }

        @Override
        public void setQueueName(String queueName) {
        // do nothing
        }

        @Override
        public void setJobPriority(Priority priority) {
        // do nothing
        }
    };
}
Also used :
AccessControlList (org.apache.hadoop.security.authorize.AccessControlList)
Task (org.apache.hadoop.mapreduce.v2.app.job.Task)
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId)
Configuration (org.apache.hadoop.conf.Configuration)
HashMap (java.util.HashMap)
TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent)
JobReport (org.apache.hadoop.mapreduce.v2.api.records.JobReport)
TaskCompletionEvent (org.apache.hadoop.mapred.TaskCompletionEvent)
JobACLsManager (org.apache.hadoop.mapred.JobACLsManager)
TaskType (org.apache.hadoop.mapreduce.v2.api.records.TaskType)
Job (org.apache.hadoop.mapreduce.v2.app.job.Job)
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId)
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
Path (org.apache.hadoop.fs.Path)
Priority (org.apache.hadoop.yarn.api.records.Priority)
LinkedList (java.util.LinkedList)
AMInfo (org.apache.hadoop.mapreduce.v2.api.records.AMInfo)
Counters (org.apache.hadoop.mapreduce.Counters)
JobACL (org.apache.hadoop.mapreduce.JobACL)
FileContext (org.apache.hadoop.fs.FileContext)
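
A hypothetical call site, to show how the factory might be used in a test; the ApplicationId construction and the assertions are illustrative, not taken from MockJobs:

// Illustrative usage: a mock job with 4 map and 2 reduce tasks, no failed tasks.
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
Job job = MockJobs.newJob(appId, 1, 4, 2, null, false);
assertEquals("mockqueue", job.getQueueName());
assertEquals("mock", job.getUserName());
// The completion-event methods are stubbed to return null in this mock.
assertNull(job.getTaskAttemptCompletionEvents(0, 100));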

Example 9 with TaskAttemptCompletionEvent

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent in project hadoop by apache.

From the class TestTaskAttemptListenerImpl, the method testGetMapCompletionEvents.

@Test(timeout = 10000)
public void testGetMapCompletionEvents() throws IOException {
    TaskAttemptCompletionEvent[] empty = {};
    TaskAttemptCompletionEvent[] taskEvents = {
        createTce(0, true, TaskAttemptCompletionEventStatus.OBSOLETE),
        createTce(1, false, TaskAttemptCompletionEventStatus.FAILED),
        createTce(2, true, TaskAttemptCompletionEventStatus.SUCCEEDED),
        createTce(3, false, TaskAttemptCompletionEventStatus.FAILED) };
    TaskAttemptCompletionEvent[] mapEvents = { taskEvents[0], taskEvents[2] };
    Job mockJob = mock(Job.class);
    when(mockJob.getTaskAttemptCompletionEvents(0, 100)).thenReturn(taskEvents);
    when(mockJob.getTaskAttemptCompletionEvents(0, 2)).thenReturn(Arrays.copyOfRange(taskEvents, 0, 2));
    when(mockJob.getTaskAttemptCompletionEvents(2, 100)).thenReturn(Arrays.copyOfRange(taskEvents, 2, 4));
    when(mockJob.getMapAttemptCompletionEvents(0, 100)).thenReturn(TypeConverter.fromYarn(mapEvents));
    when(mockJob.getMapAttemptCompletionEvents(0, 2)).thenReturn(TypeConverter.fromYarn(mapEvents));
    when(mockJob.getMapAttemptCompletionEvents(2, 100)).thenReturn(TypeConverter.fromYarn(empty));
    AppContext appCtx = mock(AppContext.class);
    when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);
    JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
    RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
    final TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
    Dispatcher dispatcher = mock(Dispatcher.class);
    @SuppressWarnings("unchecked") EventHandler<Event> ea = mock(EventHandler.class);
    when(dispatcher.getEventHandler()).thenReturn(ea);
    when(appCtx.getEventHandler()).thenReturn(ea);
    CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
    policy.init(appCtx);
    TaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, policy) {

        @Override
        protected void registerHeartbeatHandler(Configuration conf) {
            taskHeartbeatHandler = hbHandler;
        }
    };
    Configuration conf = new Configuration();
    listener.init(conf);
    listener.start();
    JobID jid = new JobID("12345", 1);
    TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 0);
    MapTaskCompletionEventsUpdate update = listener.getMapCompletionEvents(jid, 0, 100, tid);
    assertEquals(2, update.events.length);
    update = listener.getMapCompletionEvents(jid, 0, 2, tid);
    assertEquals(2, update.events.length);
    update = listener.getMapCompletionEvents(jid, 2, 100, tid);
    assertEquals(0, update.events.length);
}
Also used :
RMHeartbeatHandler (org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler)
Configuration (org.apache.hadoop.conf.Configuration)
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext)
Dispatcher (org.apache.hadoop.yarn.event.Dispatcher)
TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent)
CheckpointAMPreemptionPolicy (org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy)
JobTokenSecretManager (org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager)
TaskHeartbeatHandler (org.apache.hadoop.mapreduce.v2.app.TaskHeartbeatHandler)
Event (org.apache.hadoop.yarn.event.Event)
Job (org.apache.hadoop.mapreduce.v2.app.job.Job)
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId)
Test (org.junit.Test)
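
The createTce helper is defined elsewhere in TestTaskAttemptListenerImpl. A plausible sketch of such a helper, assuming MRBuilderUtils and the default YARN record factory; the real implementation may differ in its id values:

private static TaskAttemptCompletionEvent createTce(int eventId, boolean isMap, TaskAttemptCompletionEventStatus status) {
    // Assumed: fixed ids suffice here, since the test only checks event order and status.
    // TaskType below is the v2 records enum (org.apache.hadoop.mapreduce.v2.api.records.TaskType).
    JobId jobId = MRBuilderUtils.newJobId(12345L, 1, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, isMap ? TaskType.MAP : TaskType.REDUCE);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    TaskAttemptCompletionEvent tce = RecordFactoryProvider.getRecordFactory(null)
        .newRecordInstance(TaskAttemptCompletionEvent.class);
    tce.setEventId(eventId);
    tce.setAttemptId(attemptId);
    tce.setStatus(status);
    return tce;
}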

Example 10 with TaskAttemptCompletionEvent

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent in project hadoop by apache.

From the class TestFetchFailure, the method testFetchFailureMultipleReduces.

@Test
public void testFetchFailureMultipleReduces() throws Exception {
    MRApp app = new MRApp(1, 3, false, this.getClass().getName(), true);
    Configuration conf = new Configuration();
    // map -> reduce -> fetch-failure -> map retry is incompatible with
    // sequential, single-task-attempt approach in uber-AM, so disable:
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    //all maps would be running
    Assert.assertEquals("Num tasks not correct", 4, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask = it.next();
    Task reduceTask = it.next();
    Task reduceTask2 = it.next();
    Task reduceTask3 = it.next();
    //wait for Task state move to RUNNING
    app.waitForState(mapTask, TaskState.RUNNING);
    TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
    app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
    //send the done signal to the map attempt
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(), TaskAttemptEventType.TA_DONE));
    // wait for map success
    app.waitForState(mapTask, TaskState.SUCCEEDED);
    TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Num completion events not correct", 1, events.length);
    Assert.assertEquals("Event status not correct", TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
    // wait for reduce to start running
    app.waitForState(reduceTask, TaskState.RUNNING);
    app.waitForState(reduceTask2, TaskState.RUNNING);
    app.waitForState(reduceTask3, TaskState.RUNNING);
    TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
    updateStatus(app, reduceAttempt, Phase.SHUFFLE);
    TaskAttempt reduceAttempt2 = reduceTask2.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt2, TaskAttemptState.RUNNING);
    updateStatus(app, reduceAttempt2, Phase.SHUFFLE);
    TaskAttempt reduceAttempt3 = reduceTask3.getAttempts().values().iterator().next();
    app.waitForState(reduceAttempt3, TaskAttemptState.RUNNING);
    updateStatus(app, reduceAttempt3, Phase.SHUFFLE);
    //send 2 fetch failures from reducers to prepare for map re-execution
    sendFetchFailure(app, reduceAttempt, mapAttempt1, "host1");
    sendFetchFailure(app, reduceAttempt2, mapAttempt1, "host2");
    //We should not re-launch the map task yet
    assertEquals(TaskState.SUCCEEDED, mapTask.getState());
    updateStatus(app, reduceAttempt2, Phase.REDUCE);
    updateStatus(app, reduceAttempt3, Phase.REDUCE);
    //send the 3rd fetch failure from a reducer to trigger map re-execution
    sendFetchFailure(app, reduceAttempt3, mapAttempt1, "host3");
    //wait for map Task state move back to RUNNING
    app.waitForState(mapTask, TaskState.RUNNING);
    //map attempt must have become FAILED
    Assert.assertEquals("Map TaskAttempt state not correct", TaskAttemptState.FAILED, mapAttempt1.getState());
    Assert.assertEquals("Too many fetch failures. Failing the attempt. " + "Last failure reported by " + reduceAttempt3.getID().toString() + " from host host3", mapAttempt1.getDiagnostics().get(0));
    Assert.assertEquals("Num attempts in Map Task not correct", 2, mapTask.getAttempts().size());
    Iterator<TaskAttempt> atIt = mapTask.getAttempts().values().iterator();
    atIt.next();
    TaskAttempt mapAttempt2 = atIt.next();
    app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
    //send the done signal to the second map attempt
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt2.getID(), TaskAttemptEventType.TA_DONE));
    // wait for map success
    app.waitForState(mapTask, TaskState.SUCCEEDED);
    //send done to reduce
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(), TaskAttemptEventType.TA_DONE));
    //send done to reduce
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt2.getID(), TaskAttemptEventType.TA_DONE));
    //send done to reduce
    app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt3.getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(job, JobState.SUCCEEDED);
    //previous completion event now becomes obsolete
    Assert.assertEquals("Event status not correct", TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
    events = job.getTaskAttemptCompletionEvents(0, 100);
    Assert.assertEquals("Num completion events not correct", 6, events.length);
    Assert.assertEquals("Event map attempt id not correct", mapAttempt1.getID(), events[0].getAttemptId());
    Assert.assertEquals("Event map attempt id not correct", mapAttempt1.getID(), events[1].getAttemptId());
    Assert.assertEquals("Event map attempt id not correct", mapAttempt2.getID(), events[2].getAttemptId());
    Assert.assertEquals("Event reduce attempt id not correct", reduceAttempt.getID(), events[3].getAttemptId());
    Assert.assertEquals("Event status not correct for map attempt1", TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
    Assert.assertEquals("Event status not correct for map attempt1", TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus());
    Assert.assertEquals("Event status not correct for map attempt2", TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus());
    Assert.assertEquals("Event status not correct for reduce attempt1", TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
    TaskCompletionEvent[] mapEvents = job.getMapAttemptCompletionEvents(0, 2);
    TaskCompletionEvent[] convertedEvents = TypeConverter.fromYarn(events);
    Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
    Assert.assertArrayEquals("Unexpected map events", Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
    mapEvents = job.getMapAttemptCompletionEvents(2, 200);
    Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
    Assert.assertEquals("Unexpected map event", convertedEvents[2], mapEvents[0]);
}
Also used :
Task (org.apache.hadoop.mapreduce.v2.app.job.Task)
Configuration (org.apache.hadoop.conf.Configuration)
TaskCompletionEvent (org.apache.hadoop.mapred.TaskCompletionEvent)
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent)
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt)
Job (org.apache.hadoop.mapreduce.v2.app.job.Job)
TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent)
Test (org.junit.Test)
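
The sendFetchFailure and updateStatus helpers live elsewhere in TestFetchFailure. A plausible sketch of sendFetchFailure, assuming JobTaskAttemptFetchFailureEvent carries the failing reducer, the map attempts whose output could not be fetched, and the reporting host; the real helper may differ:

private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt, TaskAttempt mapAttempt, String hostname) {
    // Tell the job that this reducer failed to fetch the map attempt's output;
    // the host name feeds the "from host ..." diagnostic asserted above.
    app.getContext().getEventHandler().handle(
        new JobTaskAttemptFetchFailureEvent(reduceAttempt.getID(),
            Arrays.asList(mapAttempt.getID()), hostname));
}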

Aggregations

TaskAttemptCompletionEvent (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent): 13 uses
Configuration (org.apache.hadoop.conf.Configuration): 7 uses
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 7 uses
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 7 uses
Test (org.junit.Test): 7 uses
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 6 uses
TaskCompletionEvent (org.apache.hadoop.mapred.TaskCompletionEvent): 5 uses
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 5 uses
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 4 uses
HashMap (java.util.HashMap): 3 uses
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 3 uses
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 3 uses
ArrayList (java.util.ArrayList): 2 uses
LinkedList (java.util.LinkedList): 2 uses
Map (java.util.Map): 2 uses
JobTaskAttemptCompletedEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptCompletedEvent): 2 uses
JobUpdatedNodesEvent (org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent): 2 uses
Dispatcher (org.apache.hadoop.yarn.event.Dispatcher): 2 uses
FileNotFoundException (java.io.FileNotFoundException): 1 use
IOException (java.io.IOException): 1 use