Example 16 with JobIndexInfo

use of org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo in project hadoop by apache.

the class TestHistoryFileManager method testHistoryFileInfoLoadNormalSizedJobShouldReturnCompletedJob.

@Test
public void testHistoryFileInfoLoadNormalSizedJobShouldReturnCompletedJob() throws Exception {
    HistoryFileManagerTest hmTest = new HistoryFileManagerTest();
    final int numOfTasks = 100;
    Configuration conf = dfsCluster.getConfiguration(0);
    conf.setInt(JHAdminConfig.MR_HS_LOADED_JOBS_TASKS_MAX, numOfTasks + numOfTasks + 1);
    hmTest.init(conf);
    // set up a job of which the number of tasks is smaller than the maximum
    // allowed, and therefore will be fully loaded.
    final String jobId = "job_1416424547277_0002";
    JobIndexInfo jobIndexInfo = new JobIndexInfo();
    jobIndexInfo.setJobId(TypeConverter.toYarn(JobID.forName(jobId)));
    jobIndexInfo.setNumMaps(numOfTasks);
    jobIndexInfo.setNumReduces(numOfTasks);
    final String historyFile = getClass().getClassLoader().getResource("job_2.0.3-alpha-FAILED.jhist").getFile();
    final Path historyFilePath = FileSystem.getLocal(conf).makeQualified(new Path(historyFile));
    HistoryFileInfo info = hmTest.getHistoryFileInfo(historyFilePath, null, null, jobIndexInfo, false);
    Job job = info.loadJob();
    Assert.assertTrue("Should return an instance of CompletedJob as " + "a result of parsing the job history file of the job", job instanceof CompletedJob);
}
Also used : Path(org.apache.hadoop.fs.Path) HistoryFileInfo(org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobIndexInfo(org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo) Test(org.junit.Test)
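
Both this test and Example 17 drive the manager through hmTest.getHistoryFileInfo(...), a convenience method that is not part of HistoryFileManager's public surface. Below is a minimal sketch of what such a test subclass could look like, assuming createHistoryFileInfo (the factory method seen in Example 18) is visible to subclasses; treat it as an illustration, not the actual test helper.

// Sketch only: a test-side subclass that exposes HistoryFileInfo construction.
// It reuses createHistoryFileInfo, which HistoryFileManager itself calls in
// Example 18; its visibility to subclasses is assumed here.
static class HistoryFileManagerTest extends HistoryFileManager {
    public HistoryFileInfo getHistoryFileInfo(Path historyFile, Path confFile,
            Path summaryFile, JobIndexInfo jobIndexInfo, boolean isInDone) {
        return createHistoryFileInfo(historyFile, confFile, summaryFile,
                jobIndexInfo, isInDone);
    }
}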

Example 17 with JobIndexInfo

use of org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo in project hadoop by apache.

the class TestHistoryFileManager method testHistoryFileInfoLoadOversizedJobShouldReturnUnParsedJob.

@Test
public void testHistoryFileInfoLoadOversizedJobShouldReturnUnParsedJob() throws Exception {
    HistoryFileManagerTest hmTest = new HistoryFileManagerTest();
    int allowedMaximumTasks = 5;
    Configuration conf = dfsCluster.getConfiguration(0);
    conf.setInt(JHAdminConfig.MR_HS_LOADED_JOBS_TASKS_MAX, allowedMaximumTasks);
    hmTest.init(conf);
    // set up a job of which the number of tasks is greater than maximum allowed
    String jobId = "job_1410889000000_123456";
    JobIndexInfo jobIndexInfo = new JobIndexInfo();
    jobIndexInfo.setJobId(TypeConverter.toYarn(JobID.forName(jobId)));
    jobIndexInfo.setNumMaps(allowedMaximumTasks);
    jobIndexInfo.setNumReduces(allowedMaximumTasks);
    HistoryFileInfo info = hmTest.getHistoryFileInfo(null, null, null, jobIndexInfo, false);
    Job job = info.loadJob();
    Assert.assertTrue("Should return an instance of UnparsedJob to indicate" + " the job history file is not parsed", job instanceof UnparsedJob);
}
Also used : HistoryFileInfo(org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobIndexInfo(org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo) Test(org.junit.Test)
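
Read together, Examples 16 and 17 pin down when HistoryFileInfo.loadJob() returns a fully parsed CompletedJob versus a lightweight UnparsedJob: the deciding quantity is the job's map count plus reduce count compared against JHAdminConfig.MR_HS_LOADED_JOBS_TASKS_MAX. The helper below only illustrates that observable rule; it is not the actual HistoryFileManager code, and its name and the -1 "no limit" default are assumptions.

// Illustration only: how the loaded-tasks cap appears to be applied, based on
// the two tests above. The method name and default value are hypothetical.
private static boolean canLoadFully(JobIndexInfo indexInfo, Configuration conf) {
    int maxTasks = conf.getInt(JHAdminConfig.MR_HS_LOADED_JOBS_TASKS_MAX, -1);
    int totalTasks = indexInfo.getNumMaps() + indexInfo.getNumReduces();
    // A non-positive max is treated here as "no limit"; otherwise the job is
    // fully parsed (CompletedJob) only when its task count fits under the cap.
    return maxTasks <= 0 || totalTasks <= maxTasks;
}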

Example 18 with JobIndexInfo

use of org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo in project hadoop by apache.

the class HistoryFileManager method getJobFileInfo.

/**
   * Searches the job history file FileStatus list for the specified JobId.
   * 
   * @param fileStatusList
   *          fileStatus list of Job History Files.
   * @param jobId
   *          The JobId to find.
   * @return A FileInfo object for the jobId, null if not found.
   * @throws IOException
   */
private HistoryFileInfo getJobFileInfo(List<FileStatus> fileStatusList, JobId jobId) throws IOException {
    for (FileStatus fs : fileStatusList) {
        JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath().getName());
        if (jobIndexInfo.getJobId().equals(jobId)) {
            String confFileName = JobHistoryUtils.getIntermediateConfFileName(jobIndexInfo.getJobId());
            String summaryFileName = JobHistoryUtils.getIntermediateSummaryFileName(jobIndexInfo.getJobId());
            HistoryFileInfo fileInfo = createHistoryFileInfo(fs.getPath(), new Path(fs.getPath().getParent(), confFileName), new Path(fs.getPath().getParent(), summaryFileName), jobIndexInfo, true);
            return fileInfo;
        }
    }
    return null;
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) JobIndexInfo(org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo)
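
Note that getJobFileInfo never opens the files it scans: FileNameIndexUtils.getIndexInfo recovers a JobIndexInfo from the .jhist file name alone, and only the job id is compared. The per-file check from the loop above, pulled out as a hypothetical helper (the method name is made up):

// Sketch only: the matching step getJobFileInfo performs for each FileStatus.
// FileNameIndexUtils.getIndexInfo parses the index fields that were encoded
// into the history file name when it was written.
private static boolean isHistoryFileForJob(String historyFileName, JobId jobId)
        throws IOException {
    JobIndexInfo parsed = FileNameIndexUtils.getIndexInfo(historyFileName);
    return parsed.getJobId().equals(jobId);
}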

Example 19 with JobIndexInfo

use of org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo in project hadoop by apache.

the class TestJobHistoryParsing method testPartialJob.

/**
   * Simple test PartialJob
   */
@Test(timeout = 3000)
public void testPartialJob() throws Exception {
    JobId jobId = new JobIdPBImpl();
    jobId.setId(0);
    JobIndexInfo jii = new JobIndexInfo(0L, System.currentTimeMillis(), "user", "jobName", jobId, 3, 2, "JobStatus");
    PartialJob test = new PartialJob(jii, jobId);
    Assert.assertEquals(1.0f, test.getProgress(), 0.001f);
    assertNull(test.getAllCounters());
    assertNull(test.getTasks());
    assertNull(test.getTasks(TaskType.MAP));
    assertNull(test.getTask(new TaskIdPBImpl()));
    assertNull(test.getTaskAttemptCompletionEvents(0, 100));
    assertNull(test.getMapAttemptCompletionEvents(0, 100));
    assertTrue(test.checkAccess(UserGroupInformation.getCurrentUser(), null));
    assertNull(test.getAMInfos());
}
Also used : TaskIdPBImpl(org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskIdPBImpl) JobIdPBImpl(org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) JobIndexInfo(org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo) Test(org.junit.Test)
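
The eight-argument constructor used here populates the same fields that Examples 16 and 17 set individually; reading the call, the parameter order is submit time, finish time, user, job name, job id, map count, reduce count and job status. As a cross-check, the sketch below builds an equivalent record through setters; the setter names beyond setJobId, setNumMaps and setNumReduces follow the usual bean pattern and are assumptions here.

// Sketch: an equivalent JobIndexInfo built via setters. Setters other than
// setJobId/setNumMaps/setNumReduces are assumed to exist by analogy.
JobId jobId = new JobIdPBImpl();
jobId.setId(0);
JobIndexInfo jii = new JobIndexInfo();
jii.setSubmitTime(0L);
jii.setFinishTime(System.currentTimeMillis());
jii.setUser("user");
jii.setJobName("jobName");
jii.setJobId(jobId);
jii.setNumMaps(3);
jii.setNumReduces(2);
jii.setJobStatus("JobStatus");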

Aggregations

Test (org.junit.Test) 13
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId) 11
JobIndexInfo (org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo) 10
JobID (org.apache.hadoop.mapreduce.JobID) 9
Path (org.apache.hadoop.fs.Path) 7
Configuration (org.apache.hadoop.conf.Configuration) 4
FileStatus (org.apache.hadoop.fs.FileStatus) 4
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 4
Job (org.apache.hadoop.mapreduce.v2.app.job.Job) 4
HistoryFileInfo (org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo) 4
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 4
IOException (java.io.IOException) 1
Map (java.util.Map) 1
JobReport (org.apache.hadoop.mapreduce.v2.api.records.JobReport) 1
JobIdPBImpl (org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl) 1
TaskIdPBImpl (org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskIdPBImpl) 1