
Example 1 with CompletedTask

Use of org.apache.hadoop.mapreduce.v2.hs.CompletedTask in the apache/hadoop project.

From the class TestCompletedTask, method testTaskStartTimes:

@Test(timeout = 5000)
public void testTaskStartTimes() {
    TaskId taskId = mock(TaskId.class);
    TaskInfo taskInfo = mock(TaskInfo.class);
    Map<TaskAttemptID, TaskAttemptInfo> taskAttempts = new TreeMap<TaskAttemptID, TaskAttemptInfo>();
    TaskAttemptID id = new TaskAttemptID("0", 0, TaskType.MAP, 0, 0);
    TaskAttemptInfo info = mock(TaskAttemptInfo.class);
    when(info.getAttemptId()).thenReturn(id);
    when(info.getStartTime()).thenReturn(10L);
    taskAttempts.put(id, info);
    id = new TaskAttemptID("1", 0, TaskType.MAP, 1, 1);
    info = mock(TaskAttemptInfo.class);
    when(info.getAttemptId()).thenReturn(id);
    when(info.getStartTime()).thenReturn(20L);
    taskAttempts.put(id, info);
    when(taskInfo.getAllTaskAttempts()).thenReturn(taskAttempts);
    CompletedTask task = new CompletedTask(taskId, taskInfo);
    TaskReport report = task.getReport();
    // Make sure the startTime returned by the report is the lesser of the
    // attempt launch times.
    assertEquals(10, report.getStartTime());
}
Also used: TaskInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), TaskReport (org.apache.hadoop.mapreduce.v2.api.records.TaskReport), TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID), TaskAttemptInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo), TreeMap (java.util.TreeMap), CompletedTask (org.apache.hadoop.mapreduce.v2.hs.CompletedTask), Test (org.junit.Test)
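The test above pins down one specific behavior: CompletedTask.getReport() must report the task start time as the earliest launch time across all attempts. A minimal standalone sketch of that selection logic, assuming an attempt map shaped like the one built in the test (the class and method names here are illustrative, not part of Hadoop):

import java.util.Map;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;

// Illustrative helper: the reported start time is the minimum
// getStartTime() over all attempts, which is what the assertion
// assertEquals(10, report.getStartTime()) verifies.
final class EarliestStart {
    static long of(Map<TaskAttemptID, TaskAttemptInfo> attempts) {
        long earliest = Long.MAX_VALUE;
        for (TaskAttemptInfo info : attempts.values()) {
            earliest = Math.min(earliest, info.getStartTime());
        }
        return earliest;
    }
}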

Example 2 with CompletedTask

Use of org.apache.hadoop.mapreduce.v2.hs.CompletedTask in the apache/hadoop project.

From the class CompletedJob, method loadAllTasks:

private void loadAllTasks() {
    if (tasksLoaded.get()) {
        return;
    }
    tasksLock.lock();
    try {
        if (tasksLoaded.get()) {
            return;
        }
        for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
            TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
            TaskInfo taskInfo = entry.getValue();
            Task task = new CompletedTask(yarnTaskID, taskInfo);
            tasks.put(yarnTaskID, task);
            if (task.getType() == TaskType.MAP) {
                mapTasks.put(task.getID(), task);
            } else if (task.getType() == TaskType.REDUCE) {
                reduceTasks.put(task.getID(), task);
            }
        }
        tasksLoaded.set(true);
    } finally {
        tasksLock.unlock();
    }
}
Also used: TaskInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo), Task (org.apache.hadoop.mapreduce.v2.app.job.Task), TaskID (org.apache.hadoop.mapreduce.TaskID), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), HashMap (java.util.HashMap), Map (java.util.Map)
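loadAllTasks() is a double-checked lazy initializer: the AtomicBoolean check before the lock keeps the common already-loaded case lock-free, and the second check inside the lock ensures only one thread performs the load. A self-contained sketch of the same idiom, with hypothetical names (LazyLoader, ensureLoaded, and doLoad are not Hadoop APIs):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;

class LazyLoader {
    private final AtomicBoolean loaded = new AtomicBoolean(false);
    private final ReentrantLock lock = new ReentrantLock();

    void ensureLoaded() {
        if (loaded.get()) {
            return; // fast path: already loaded, no locking needed
        }
        lock.lock();
        try {
            if (loaded.get()) {
                return; // another thread loaded while we waited on the lock
            }
            doLoad();
            loaded.set(true); // publish only after the load succeeds
        } finally {
            lock.unlock();
        }
    }

    private void doLoad() {
        // expensive one-time initialization goes here
    }
}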

Example 3 with CompletedTask

Use of org.apache.hadoop.mapreduce.v2.hs.CompletedTask in the apache/hadoop project.

From the class TestJobHistoryParsing, method testCountersForFailedTask:

@Test(timeout = 60000)
public void testCountersForFailedTask() throws Exception {
    LOG.info("STARTING testCountersForFailedTask");
    try {
        Configuration conf = new Configuration();
        conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class);
        RackResolver.init(conf);
        MRApp app = new MRAppWithHistoryWithFailedTask(2, 1, true, this.getClass().getName(), true);
        app.submit(conf);
        Job job = app.getContext().getAllJobs().values().iterator().next();
        JobId jobId = job.getID();
        app.waitForState(job, JobState.FAILED);
        // make sure all events are flushed
        app.waitForState(Service.STATE.STOPPED);
        JobHistory jobHistory = new JobHistory();
        jobHistory.init(conf);
        HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
        JobHistoryParser parser;
        JobInfo jobInfo;
        synchronized (fileInfo) {
            Path historyFilePath = fileInfo.getHistoryFile();
            FSDataInputStream in = null;
            FileContext fc = null;
            try {
                fc = FileContext.getFileContext(conf);
                in = fc.open(fc.makeQualified(historyFilePath));
            } catch (IOException ioe) {
                LOG.info("Can not open history file: " + historyFilePath, ioe);
                throw (new Exception("Can not open History File"));
            }
            parser = new JobHistoryParser(in);
            jobInfo = parser.parse();
        }
        Exception parseException = parser.getParseException();
        Assert.assertNull("Caught an expected exception " + parseException, parseException);
        for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
            TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
            CompletedTask ct = new CompletedTask(yarnTaskID, entry.getValue());
            Assert.assertNotNull("completed task report has null counters", ct.getReport().getCounters());
        }
        final List<String> originalDiagnostics = job.getDiagnostics();
        final String historyError = jobInfo.getErrorInfo();
        assertTrue("No original diagnostics for a failed job", originalDiagnostics != null && !originalDiagnostics.isEmpty());
        assertNotNull("No history error info for a failed job ", historyError);
        for (String diagString : originalDiagnostics) {
            assertTrue(historyError.contains(diagString));
        }
    } finally {
        LOG.info("FINISHED testCountersForFailedTask");
    }
}
Also used: Path (org.apache.hadoop.fs.Path), HistoryFileInfo (org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo), TaskID (org.apache.hadoop.mapreduce.TaskID), TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), Configuration (org.apache.hadoop.conf.Configuration), IOException (java.io.IOException), TaskInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo), JobHistoryParser (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser), JobInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), Map (java.util.Map), HashMap (java.util.HashMap), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), FileContext (org.apache.hadoop.fs.FileContext), MRApp (org.apache.hadoop.mapreduce.v2.app.MRApp), Test (org.junit.Test)
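The heart of the test is the open-and-parse step. A trimmed-down sketch of just that step, assuming a Configuration and the Path of a job history file (parseHistory and HistoryParseSketch are illustrative names, not Hadoop APIs; JobHistoryParser.getParseException() returns any IOException recorded during parsing):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;

class HistoryParseSketch {
    static JobInfo parseHistory(Configuration conf, Path historyFile) throws IOException {
        FileContext fc = FileContext.getFileContext(conf);
        try (FSDataInputStream in = fc.open(fc.makeQualified(historyFile))) {
            JobHistoryParser parser = new JobHistoryParser(in);
            JobInfo jobInfo = parser.parse();
            // parse() records some errors instead of throwing; surface them.
            IOException parseError = parser.getParseException();
            if (parseError != null) {
                throw parseError;
            }
            return jobInfo;
        }
    }
}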

Aggregations (types used across the examples, with use counts)

TaskInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo): 3
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 3
HashMap (java.util.HashMap): 2
Map (java.util.Map): 2
TaskID (org.apache.hadoop.mapreduce.TaskID): 2
Test (org.junit.Test): 2
IOException (java.io.IOException): 1
TreeMap (java.util.TreeMap): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 1
FileContext (org.apache.hadoop.fs.FileContext): 1
Path (org.apache.hadoop.fs.Path): 1
TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID): 1
JobHistoryParser (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser): 1
JobInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo): 1
TaskAttemptInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo): 1
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 1
TaskReport (org.apache.hadoop.mapreduce.v2.api.records.TaskReport): 1
MRApp (org.apache.hadoop.mapreduce.v2.app.MRApp): 1
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 1