Search in sources:

Example 1 with JobIndexInfo

Use of org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo in the Apache Hadoop project.

From the class TestFileNameIndexUtils, method testTrimJobName.

@Test
public void testTrimJobName() throws IOException {
    int jobNameTrimLength = 5;
    JobIndexInfo info = new JobIndexInfo();
    JobID oldJobId = JobID.forName(JOB_ID);
    JobId jobId = TypeConverter.toYarn(oldJobId);
    info.setJobId(jobId);
    info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
    info.setUser(USER_NAME);
    info.setJobName(JOB_NAME);
    info.setFinishTime(Long.parseLong(FINISH_TIME));
    info.setNumMaps(Integer.parseInt(NUM_MAPS));
    info.setNumReduces(Integer.parseInt(NUM_REDUCES));
    info.setJobStatus(JOB_STATUS);
    info.setQueueName(QUEUE_NAME);
    info.setJobStartTime(Long.parseLong(JOB_START_TIME));
    String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info, jobNameTrimLength);
    JobIndexInfo parsedInfo = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
    Assert.assertEquals("Job name did not get trimmed correctly", info.getJobName().substring(0, jobNameTrimLength), parsedInfo.getJobName());
}
Also used: JobID (org.apache.hadoop.mapreduce.JobID), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)
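A minimal standalone sketch of the same trim round trip, for readers who want to run it outside the test harness. The fixture values below (job id, timestamps, user name, job name, task counts, status, queue) are hypothetical stand-ins for the constants such as JOB_ID and SUBMIT_TIME that TestFileNameIndexUtils defines elsewhere; only the JobIndexInfo and FileNameIndexUtils calls mirror the example above.

import java.io.IOException;

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;

public class TrimJobNameSketch {
    public static void main(String[] args) throws IOException {
        // Hypothetical fixture values; the real test reads them from class-level constants.
        JobId jobId = TypeConverter.toYarn(JobID.forName("job_1317928501754_0001"));
        JobIndexInfo info = new JobIndexInfo(1317928742025L, 1317928754958L,
                "testuser", "averyverylongjobname", jobId, 10, 1, "SUCCEEDED");
        info.setQueueName("default");
        info.setJobStartTime(1317928742100L);
        // Encode with a job-name limit of 5 characters, then decode the name back.
        String fileName = FileNameIndexUtils.getDoneFileName(info, 5);
        JobIndexInfo parsed = FileNameIndexUtils.getIndexInfo(fileName);
        System.out.println(fileName);              // the job name appears truncated in the file name
        System.out.println(parsed.getJobName());   // expected: "avery", the first 5 characters
    }
}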

Example 2 with JobIndexInfo

Use of org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo in the Apache Hadoop project.

From the class TestFileNameIndexUtils, method testJobHistoryFileNameBackwardsCompatible.

@Test
public void testJobHistoryFileNameBackwardsCompatible() throws IOException {
    JobID oldJobId = JobID.forName(JOB_ID);
    JobId jobId = TypeConverter.toYarn(oldJobId);
    long submitTime = Long.parseLong(SUBMIT_TIME);
    long finishTime = Long.parseLong(FINISH_TIME);
    int numMaps = Integer.parseInt(NUM_MAPS);
    int numReduces = Integer.parseInt(NUM_REDUCES);
    String jobHistoryFile = String.format(OLD_JOB_HISTORY_FILE_FORMATTER, JOB_ID, SUBMIT_TIME, USER_NAME, JOB_NAME, FINISH_TIME, NUM_MAPS, NUM_REDUCES, JOB_STATUS);
    JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
    Assert.assertEquals("Job id incorrect after decoding old history file", jobId, info.getJobId());
    Assert.assertEquals("Submit time incorrect after decoding old history file", submitTime, info.getSubmitTime());
    Assert.assertEquals("User incorrect after decoding old history file", USER_NAME, info.getUser());
    Assert.assertEquals("Job name incorrect after decoding old history file", JOB_NAME, info.getJobName());
    Assert.assertEquals("Finish time incorrect after decoding old history file", finishTime, info.getFinishTime());
    Assert.assertEquals("Num maps incorrect after decoding old history file", numMaps, info.getNumMaps());
    Assert.assertEquals("Num reduces incorrect after decoding old history file", numReduces, info.getNumReduces());
    Assert.assertEquals("Job status incorrect after decoding old history file", JOB_STATUS, info.getJobStatus());
    Assert.assertNull("Queue name incorrect after decoding old history file", info.getQueueName());
}
Also used: JobID (org.apache.hadoop.mapreduce.JobID), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)
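This test documents backwards compatibility: history file names written in the older layout, which carried no queue name, still decode into a JobIndexInfo, and the missing field comes back as null, which is exactly what the final assertion checks. Below is a hedged sketch that decodes such a legacy-style name; the exact legacy layout lives in the OLD_JOB_HISTORY_FILE_FORMATTER constant defined elsewhere in the test class, so the literal here only assumes the field order used in the String.format call above, a "-" delimiter, and a ".jhist" extension.

import java.io.IOException;

import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;

public class OldHistoryFileNameSketch {
    public static void main(String[] args) throws IOException {
        // Hypothetical legacy-style name: jobId-submitTime-user-jobName-finishTime-numMaps-numReduces-status
        String oldStyleName = "job_1317928501754_0001-1317928742025-testuser-wordcount"
                + "-1317928754958-10-1-SUCCEEDED.jhist";
        JobIndexInfo info = FileNameIndexUtils.getIndexInfo(oldStyleName);
        System.out.println(info.getJobName());    // expected: "wordcount"
        System.out.println(info.getQueueName());  // expected: null, as in the assertion above
    }
}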

Example 3 with JobIndexInfo

Use of org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo in the Apache Hadoop project.

From the class TestFileNameIndexUtils, method testQueueNamePercentEncoding.

@Test
public void testQueueNamePercentEncoding() throws IOException {
    JobIndexInfo info = new JobIndexInfo();
    JobID oldJobId = JobID.forName(JOB_ID);
    JobId jobId = TypeConverter.toYarn(oldJobId);
    info.setJobId(jobId);
    info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
    info.setUser(USER_NAME);
    info.setJobName(JOB_NAME);
    info.setFinishTime(Long.parseLong(FINISH_TIME));
    info.setNumMaps(Integer.parseInt(NUM_MAPS));
    info.setNumReduces(Integer.parseInt(NUM_REDUCES));
    info.setJobStatus(JOB_STATUS);
    info.setQueueName(QUEUE_NAME_WITH_DELIMITER);
    info.setJobStartTime(Long.parseLong(JOB_START_TIME));
    String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
    Assert.assertTrue("Queue name not encoded correctly into job history file", jobHistoryFile.contains(QUEUE_NAME_WITH_DELIMITER_ESCAPE));
}
Also used: JobID (org.apache.hadoop.mapreduce.JobID), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)
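A hedged sketch of the same escaping behaviour with concrete literals. The constants QUEUE_NAME_WITH_DELIMITER and QUEUE_NAME_WITH_DELIMITER_ESCAPE are defined elsewhere in TestFileNameIndexUtils; the sketch assumes they stand for a queue name containing the "-" file-name delimiter and its percent-encoded form, for example "test-queue" and "test%2Dqueue". That pairing is an assumption, not a quote from the test class.

import java.io.IOException;

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;

public class QueueNameEscapingSketch {
    public static void main(String[] args) throws IOException {
        JobId jobId = TypeConverter.toYarn(JobID.forName("job_1317928501754_0001"));
        JobIndexInfo info = new JobIndexInfo(1317928742025L, 1317928754958L,
                "testuser", "wordcount", jobId, 10, 1, "SUCCEEDED");
        // Hypothetical queue name that contains the delimiter used between file-name fields.
        info.setQueueName("test-queue");
        info.setJobStartTime(1317928742100L);
        String fileName = FileNameIndexUtils.getDoneFileName(info);
        // The delimiter inside the queue name should appear percent-encoded in the
        // generated name and be restored on decode.
        System.out.println(fileName.contains("test%2Dqueue"));                        // expected: true
        System.out.println(FileNameIndexUtils.getIndexInfo(fileName).getQueueName()); // expected: "test-queue"
    }
}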

Example 4 with JobIndexInfo

Use of org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo in the Apache Hadoop project.

From the class TestFileNameIndexUtils, method testEncodingDecodingEquivalence.

@Test
public void testEncodingDecodingEquivalence() throws IOException {
    JobIndexInfo info = new JobIndexInfo();
    JobID oldJobId = JobID.forName(JOB_ID);
    JobId jobId = TypeConverter.toYarn(oldJobId);
    info.setJobId(jobId);
    info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
    info.setUser(USER_NAME);
    info.setJobName(JOB_NAME);
    info.setFinishTime(Long.parseLong(FINISH_TIME));
    info.setNumMaps(Integer.parseInt(NUM_MAPS));
    info.setNumReduces(Integer.parseInt(NUM_REDUCES));
    info.setJobStatus(JOB_STATUS);
    info.setQueueName(QUEUE_NAME);
    info.setJobStartTime(Long.parseLong(JOB_START_TIME));
    String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
    JobIndexInfo parsedInfo = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
    Assert.assertEquals("Job id different after encoding and decoding", info.getJobId(), parsedInfo.getJobId());
    Assert.assertEquals("Submit time different after encoding and decoding", info.getSubmitTime(), parsedInfo.getSubmitTime());
    Assert.assertEquals("User different after encoding and decoding", info.getUser(), parsedInfo.getUser());
    Assert.assertEquals("Job name different after encoding and decoding", info.getJobName(), parsedInfo.getJobName());
    Assert.assertEquals("Finish time different after encoding and decoding", info.getFinishTime(), parsedInfo.getFinishTime());
    Assert.assertEquals("Num maps different after encoding and decoding", info.getNumMaps(), parsedInfo.getNumMaps());
    Assert.assertEquals("Num reduces different after encoding and decoding", info.getNumReduces(), parsedInfo.getNumReduces());
    Assert.assertEquals("Job status different after encoding and decoding", info.getJobStatus(), parsedInfo.getJobStatus());
    Assert.assertEquals("Queue name different after encoding and decoding", info.getQueueName(), parsedInfo.getQueueName());
    Assert.assertEquals("Job start time different after encoding and decoding", info.getJobStartTime(), parsedInfo.getJobStartTime());
}
Also used: JobID (org.apache.hadoop.mapreduce.JobID), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)
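Read together with the trimming and escaping examples, this test pins down the round-trip contract: getDoneFileName(JobIndexInfo) packs all ten index fields (job id, submit time, user, job name, finish time, map and reduce counts, status, queue name, and job start time) into a single file name, and getIndexInfo(String) restores every one of them unchanged when no trim limit is applied.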

Example 5 with JobIndexInfo

Use of org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo in the Apache Hadoop project.

From the class MockHistoryJobs, method split.

private static JobsPair split(Map<JobId, Job> mocked) throws IOException {
    JobsPair ret = new JobsPair();
    ret.full = Maps.newHashMap();
    ret.partial = Maps.newHashMap();
    for (Map.Entry<JobId, Job> entry : mocked.entrySet()) {
        JobId id = entry.getKey();
        Job j = entry.getValue();
        MockCompletedJob mockJob = new MockCompletedJob(j);
        // use MockCompletedJob to set everything below to make sure it stays
        // consistent with what the history server would do
        ret.full.put(id, mockJob);
        JobReport report = mockJob.getReport();
        JobIndexInfo info = new JobIndexInfo(report.getStartTime(), report.getFinishTime(), mockJob.getUserName(), mockJob.getName(), id, mockJob.getCompletedMaps(), mockJob.getCompletedReduces(), String.valueOf(mockJob.getState()));
        info.setJobStartTime(report.getStartTime());
        info.setQueueName(mockJob.getQueueName());
        ret.partial.put(id, new PartialJob(info, id));
    }
    return ret;
}
Also used: Job (org.apache.hadoop.mapreduce.v2.app.job.Job), Map (java.util.Map), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), JobReport (org.apache.hadoop.mapreduce.v2.api.records.JobReport), JobIndexInfo (org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo)
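Unlike the test methods above, this helper shows the eight-argument JobIndexInfo constructor being fed from a mocked job's JobReport, with the queue name and job start time filled in through setters afterwards. As the inline comment notes, copying every field through MockCompletedJob keeps the PartialJob view, which can only expose what a JobIndexInfo carries, consistent with the full job the history server would report.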

Aggregations

Test (org.junit.Test): 13
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 11
JobIndexInfo (org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo): 10
JobID (org.apache.hadoop.mapreduce.JobID): 9
Path (org.apache.hadoop.fs.Path): 7
Configuration (org.apache.hadoop.conf.Configuration): 4
FileStatus (org.apache.hadoop.fs.FileStatus): 4
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 4
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 4
HistoryFileInfo (org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo): 4
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 4
IOException (java.io.IOException): 1
Map (java.util.Map): 1
JobReport (org.apache.hadoop.mapreduce.v2.api.records.JobReport): 1
JobIdPBImpl (org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl): 1
TaskIdPBImpl (org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskIdPBImpl): 1