Example 11 with JobInfo

Use of org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo in project hadoop by apache.

The class HsJobsBlock, method render.

/*
   * (non-Javadoc)
   * @see org.apache.hadoop.yarn.webapp.view.HtmlBlock#render(org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block)
   */
@Override
protected void render(Block html) {
    TBODY<TABLE<Hamlet>> tbody = html.
        h2("Retired Jobs").
        table("#jobs").
        thead().
        tr().
            th("Submit Time").
            th("Start Time").
            th("Finish Time").
            th(".id", "Job ID").
            th(".name", "Name").
            th("User").
            th("Queue").
            th(".state", "State").
            th("Maps Total").
            th("Maps Completed").
            th("Reduces Total").
            th("Reduces Completed").
            th("Elapsed Time")._()._().
        tbody();
    LOG.info("Getting list of all Jobs.");
    // Write all the data into a JavaScript array of arrays for JQuery
    // DataTables to display
    StringBuilder jobsTableData = new StringBuilder("[\n");
    for (Job j : appContext.getAllJobs().values()) {
        JobInfo job = new JobInfo(j);
        jobsTableData.append("[\"")
            .append(dateFormat.format(new Date(job.getSubmitTime()))).append("\",\"")
            .append(job.getFormattedStartTimeStr(dateFormat)).append("\",\"")
            .append(dateFormat.format(new Date(job.getFinishTime()))).append("\",\"")
            .append("<a href='").append(url("job", job.getId())).append("'>")
            .append(job.getId()).append("</a>\",\"")
            .append(StringEscapeUtils.escapeJavaScript(
                StringEscapeUtils.escapeHtml(job.getName()))).append("\",\"")
            .append(StringEscapeUtils.escapeJavaScript(
                StringEscapeUtils.escapeHtml(job.getUserName()))).append("\",\"")
            .append(StringEscapeUtils.escapeJavaScript(
                StringEscapeUtils.escapeHtml(job.getQueueName()))).append("\",\"")
            .append(job.getState()).append("\",\"")
            .append(String.valueOf(job.getMapsTotal())).append("\",\"")
            .append(String.valueOf(job.getMapsCompleted())).append("\",\"")
            .append(String.valueOf(job.getReducesTotal())).append("\",\"")
            .append(String.valueOf(job.getReducesCompleted())).append("\",\"")
            .append(StringUtils.formatTimeSortable(
                Times.elapsed(job.getStartTime(), job.getFinishTime(), false)))
            .append("\"],\n");
    }
    //Remove the last comma and close off the array of arrays
    if (jobsTableData.charAt(jobsTableData.length() - 2) == ',') {
        jobsTableData.delete(jobsTableData.length() - 2, jobsTableData.length() - 1);
    }
    jobsTableData.append("]");
    html.script().$type("text/javascript")._("var jobsTableData=" + jobsTableData)._();
    tbody._().
    tfoot().
        tr().
            th().input("search_init").$type(InputType.text)
                .$name("submit_time").$value("Submit Time")._()._().
            th().input("search_init").$type(InputType.text)
                .$name("start_time").$value("Start Time")._()._().
            th().input("search_init").$type(InputType.text)
                .$name("finish_time").$value("Finish Time")._()._().
            th().input("search_init").$type(InputType.text)
                .$name("job_id").$value("Job ID")._()._().
            th().input("search_init").$type(InputType.text)
                .$name("name").$value("Name")._()._().
            th().input("search_init").$type(InputType.text)
                .$name("user").$value("User")._()._().
            th().input("search_init").$type(InputType.text)
                .$name("queue").$value("Queue")._()._().
            th().input("search_init").$type(InputType.text)
                .$name("state").$value("State")._()._().
            th().input("search_init").$type(InputType.text)
                .$name("maps_total").$value("Maps Total")._()._().
            th().input("search_init").$type(InputType.text)
                .$name("maps_completed").$value("Maps Completed")._()._().
            th().input("search_init").$type(InputType.text)
                .$name("reduces_total").$value("Reduces Total")._()._().
            th().input("search_init").$type(InputType.text)
                .$name("reduces_completed").$value("Reduces Completed")._()._().
            th().input("search_init").$type(InputType.text)
                .$name("elapsed_time").$value("Elapsed Time")._()._().
        _().
    _().
    _();
}
Also used : TABLE(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE) JobInfo(org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) Date(java.util.Date)
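Note the nested escaping in the name, user, and queue cells above: each value is HTML-escaped first, then JavaScript-escaped, because the string ends up inside a JavaScript array literal that DataTables later injects into the DOM. A minimal sketch of the same pattern with the commons-lang 2.x StringEscapeUtils used here; the job name is a made-up hostile value for illustration:

import org.apache.commons.lang.StringEscapeUtils;

public class EscapeSketch {
    public static void main(String[] args) {
        // Hypothetical hostile job name: unescaped, it would break out of
        // both the HTML table cell and the surrounding JS string literal.
        String jobName = "word\"count <script>alert('x')</script>";
        // HTML-escape first so the value renders inert in the cell, then
        // JS-escape so it survives embedding in the jobsTableData literal.
        String cell = StringEscapeUtils.escapeJavaScript(
            StringEscapeUtils.escapeHtml(jobName));
        System.out.println("[\"" + cell + "\"]");
    }
}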

Example 12 with JobInfo

Use of org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo in project hadoop by apache.

The class HsWebServices, method getJob.

@GET
@Path("/mapreduce/jobs/{jobid}")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public JobInfo getJob(@Context HttpServletRequest hsr, @PathParam("jobid") String jid) {
    init();
    Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
    checkAccess(job, hsr);
    return new JobInfo(job);
}
Also used : JobInfo(org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) Path(javax.ws.rs.Path) Produces(javax.ws.rs.Produces) GET(javax.ws.rs.GET)
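Because getJob is an ordinary JAX-RS resource method, it can be exercised over HTTP once the JobHistory server is running. A hedged sketch using java.net.HttpURLConnection; the hostname, the default 19888 web port, the /ws/v1/history class-level prefix, and the job id are all assumptions for illustration:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class GetJobSketch {
    public static void main(String[] args) throws Exception {
        // Assumed endpoint: JobHistory server web app on its usual port,
        // with the class-level @Path prefix; the job id is a placeholder.
        URL url = new URL("http://historyserver.example.com:19888"
            + "/ws/v1/history/mapreduce/jobs/job_1465421234567_0001");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        // Request the JSON variant declared in @Produces.
        conn.setRequestProperty("Accept", "application/json");
        BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), "UTF-8"));
        try {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line);
            }
        } finally {
            in.close();
            conn.disconnect();
        }
    }
}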

Example 13 with JobInfo

Use of org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo in project hadoop by apache.

The class CachedHistoryStorage, method getPartialJobs.

public static JobsInfo getPartialJobs(Collection<Job> jobs, Long offset, Long count, String user, String queue, Long sBegin, Long sEnd, Long fBegin, Long fEnd, JobState jobState) {
    JobsInfo allJobs = new JobsInfo();
    if (sBegin == null || sBegin < 0) {
        sBegin = 0L;
    }
    if (sEnd == null) {
        sEnd = Long.MAX_VALUE;
    }
    if (fBegin == null || fBegin < 0) {
        fBegin = 0L;
    }
    if (fEnd == null) {
        fEnd = Long.MAX_VALUE;
    }
    if (offset == null || offset < 0) {
        offset = 0L;
    }
    if (count == null) {
        count = Long.MAX_VALUE;
    }
    if (offset > jobs.size()) {
        return allJobs;
    }
    long at = 0;
    long end = offset + count - 1;
    if (end < 0) {
        // due to overflow
        end = Long.MAX_VALUE;
    }
    for (Job job : jobs) {
        if (at > end) {
            break;
        }
        // can't really validate queue is a valid one since queues could change
        if (queue != null && !queue.isEmpty()) {
            if (!job.getQueueName().equals(queue)) {
                continue;
            }
        }
        if (user != null && !user.isEmpty()) {
            if (!job.getUserName().equals(user)) {
                continue;
            }
        }
        JobReport report = job.getReport();
        if (report.getStartTime() < sBegin || report.getStartTime() > sEnd) {
            continue;
        }
        if (report.getFinishTime() < fBegin || report.getFinishTime() > fEnd) {
            continue;
        }
        if (jobState != null && jobState != report.getJobState()) {
            continue;
        }
        at++;
        if ((at - 1) < offset) {
            continue;
        }
        JobInfo jobInfo = new JobInfo(job);
        allJobs.add(jobInfo);
    }
    return allJobs;
}
Also used : JobInfo(org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo) JobsInfo(org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobReport(org.apache.hadoop.mapreduce.v2.api.records.JobReport)
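Two details worth noting: the null checks normalize absent query parameters to open ranges, and since end = offset + count - 1 overflows when offset + count exceeds Long.MAX_VALUE, the end < 0 branch clamps it back. A minimal caller sketch; firstPageForUser is a hypothetical helper, and the jobs collection is assumed to come from an initialized history context such as ctx.getAllJobs().values():

import java.util.Collection;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.hs.CachedHistoryStorage;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;

public class PartialJobsSketch {
    // Hypothetical helper: first page of up to 20 jobs for one user.
    static JobsInfo firstPageForUser(Collection<Job> jobs, String user) {
        return CachedHistoryStorage.getPartialJobs(
            jobs,       // candidate jobs, iterated in storage order
            0L,         // offset: skip nothing
            20L,        // count: page size
            user,       // user filter, exact match on job.getUserName()
            null,       // queue: no filter
            null, null, // start-time window: open on both ends
            null, null, // finish-time window: open on both ends
            null);      // job state: any
    }
}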

Example 14 with JobInfo

Use of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo in project hadoop by apache.

The class TestJobHistoryParsing, method testDiagnosticsForKilledJob.

@Test(timeout = 60000)
public void testDiagnosticsForKilledJob() throws Exception {
    LOG.info("STARTING testDiagnosticsForKilledJob");
    try {
        final Configuration conf = new Configuration();
        conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class);
        RackResolver.init(conf);
        MRApp app = new MRAppWithHistoryWithJobKilled(2, 1, true, this.getClass().getName(), true);
        app.submit(conf);
        Job job = app.getContext().getAllJobs().values().iterator().next();
        JobId jobId = job.getID();
        app.waitForState(job, JobState.KILLED);
        // make sure all events are flushed
        app.waitForState(Service.STATE.STOPPED);
        JobHistory jobHistory = new JobHistory();
        jobHistory.init(conf);
        HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
        JobHistoryParser parser;
        JobInfo jobInfo;
        synchronized (fileInfo) {
            Path historyFilePath = fileInfo.getHistoryFile();
            FSDataInputStream in = null;
            FileContext fc = null;
            try {
                fc = FileContext.getFileContext(conf);
                in = fc.open(fc.makeQualified(historyFilePath));
            } catch (IOException ioe) {
                LOG.info("Can not open history file: " + historyFilePath, ioe);
                throw new Exception("Can not open History File");
            }
            parser = new JobHistoryParser(in);
            jobInfo = parser.parse();
        }
        Exception parseException = parser.getParseException();
        assertNull("Caught an expected exception " + parseException, parseException);
        final List<String> originalDiagnostics = job.getDiagnostics();
        final String historyError = jobInfo.getErrorInfo();
        assertTrue("No original diagnostics for a failed job", originalDiagnostics != null && !originalDiagnostics.isEmpty());
        assertNotNull("No history error info for a failed job ", historyError);
        for (String diagString : originalDiagnostics) {
            assertTrue(historyError.contains(diagString));
        }
        assertTrue("No killed message in diagnostics", historyError.contains(JobImpl.JOB_KILLED_DIAG));
    } finally {
        LOG.info("FINISHED testDiagnosticsForKilledJob");
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HistoryFileInfo(org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo) Configuration(org.apache.hadoop.conf.Configuration) IOException(java.io.IOException) JobHistoryParser(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser) JobInfo(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) FileContext(org.apache.hadoop.fs.FileContext) MRApp(org.apache.hadoop.mapreduce.v2.app.MRApp) Test(org.junit.Test)
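The synchronized (fileInfo) block is test-harness caution against the history file being moved mid-read; the parsing itself boils down to a few calls. A hedged standalone sketch of the same sequence; the .jhist path is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;

public class ParseHistorySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder path: point this at a real .jhist file.
        Path historyFilePath = new Path("/tmp/job_1465421234567_0001.jhist");
        FileContext fc = FileContext.getFileContext(conf);
        try (FSDataInputStream in = fc.open(fc.makeQualified(historyFilePath))) {
            JobHistoryParser parser = new JobHistoryParser(in);
            JobInfo jobInfo = parser.parse();
            // parse() records trailing errors rather than throwing; check
            // afterwards, as both tests above do.
            if (parser.getParseException() != null) {
                throw parser.getParseException();
            }
            System.out.println("job id:     " + jobInfo.getJobId());
            System.out.println("error info: " + jobInfo.getErrorInfo());
            System.out.println("tasks:      " + jobInfo.getAllTasks().size());
        }
    }
}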

Example 15 with JobInfo

Use of org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo in project hadoop by apache.

The class TestJobHistoryParsing, method testCountersForFailedTask.

@Test(timeout = 60000)
public void testCountersForFailedTask() throws Exception {
    LOG.info("STARTING testCountersForFailedTask");
    try {
        Configuration conf = new Configuration();
        conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class);
        RackResolver.init(conf);
        MRApp app = new MRAppWithHistoryWithFailedTask(2, 1, true, this.getClass().getName(), true);
        app.submit(conf);
        Job job = app.getContext().getAllJobs().values().iterator().next();
        JobId jobId = job.getID();
        app.waitForState(job, JobState.FAILED);
        // make sure all events are flushed
        app.waitForState(Service.STATE.STOPPED);
        JobHistory jobHistory = new JobHistory();
        jobHistory.init(conf);
        HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
        JobHistoryParser parser;
        JobInfo jobInfo;
        synchronized (fileInfo) {
            Path historyFilePath = fileInfo.getHistoryFile();
            FSDataInputStream in = null;
            FileContext fc = null;
            try {
                fc = FileContext.getFileContext(conf);
                in = fc.open(fc.makeQualified(historyFilePath));
            } catch (IOException ioe) {
                LOG.info("Can not open history file: " + historyFilePath, ioe);
                throw new Exception("Can not open History File");
            }
            parser = new JobHistoryParser(in);
            jobInfo = parser.parse();
        }
        Exception parseException = parser.getParseException();
        Assert.assertNull("Caught an expected exception " + parseException, parseException);
        for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
            TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
            CompletedTask ct = new CompletedTask(yarnTaskID, entry.getValue());
            Assert.assertNotNull("completed task report has null counters", ct.getReport().getCounters());
        }
        final List<String> originalDiagnostics = job.getDiagnostics();
        final String historyError = jobInfo.getErrorInfo();
        assertTrue("No original diagnostics for a failed job", originalDiagnostics != null && !originalDiagnostics.isEmpty());
        assertNotNull("No history error info for a failed job ", historyError);
        for (String diagString : originalDiagnostics) {
            assertTrue(historyError.contains(diagString));
        }
    } finally {
        LOG.info("FINISHED testCountersForFailedTask");
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HistoryFileInfo(org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo) TaskID(org.apache.hadoop.mapreduce.TaskID) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) Configuration(org.apache.hadoop.conf.Configuration) IOException(java.io.IOException) TaskInfo(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo) JobHistoryParser(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser) JobInfo(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) Map(java.util.Map) HashMap(java.util.HashMap) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) FileContext(org.apache.hadoop.fs.FileContext) MRApp(org.apache.hadoop.mapreduce.v2.app.MRApp) Test(org.junit.Test)
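The counter check above goes through CompletedTask, but JobHistoryParser.TaskInfo also exposes the parsed counters directly. A minimal sketch of dumping them; jobInfo is assumed to come from parser.parse() as in the tests above:

import java.util.Map;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;

public class DumpTaskCounters {
    static void dump(JobInfo jobInfo) {
        for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
            System.out.println("task " + entry.getKey());
            // Counters recorded in the history file for this task.
            Counters counters = entry.getValue().getCounters();
            if (counters == null) {
                continue; // may be absent for tasks that never finished
            }
            for (CounterGroup group : counters) {
                for (Counter counter : group) {
                    System.out.println("  " + group.getName() + "."
                        + counter.getName() + " = " + counter.getValue());
                }
            }
        }
    }
}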

Aggregations

Job (org.apache.hadoop.mapreduce.v2.app.job.Job) - 15
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId) - 12
JobInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo) - 8
Test (org.junit.Test) - 8
IOException (java.io.IOException) - 7
Configuration (org.apache.hadoop.conf.Configuration) - 7
Path (org.apache.hadoop.fs.Path) - 7
JobHistoryParser (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser) - 6
HistoryFileInfo (org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo) - 6
Date (java.util.Date) - 5
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream) - 5
FileContext (org.apache.hadoop.fs.FileContext) - 4
TaskInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo) - 4
JobReport (org.apache.hadoop.mapreduce.v2.api.records.JobReport) - 4
MRApp (org.apache.hadoop.mapreduce.v2.app.MRApp) - 4
JobInfo (org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo) - 4
HashMap (java.util.HashMap) - 3
GET (javax.ws.rs.GET) - 3
Path (javax.ws.rs.Path) - 3
Produces (javax.ws.rs.Produces) - 3