Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class MockHistoryJobs, method split.
private static JobsPair split(Map<JobId, Job> mocked) throws IOException {
    JobsPair ret = new JobsPair();
    ret.full = Maps.newHashMap();
    ret.partial = Maps.newHashMap();
    for (Map.Entry<JobId, Job> entry : mocked.entrySet()) {
        JobId id = entry.getKey();
        Job j = entry.getValue();
        MockCompletedJob mockJob = new MockCompletedJob(j);
        // Use MockCompletedJob to set everything below, to make sure it is
        // consistent with what the history server would do.
        ret.full.put(id, mockJob);
        JobReport report = mockJob.getReport();
        JobIndexInfo info = new JobIndexInfo(report.getStartTime(),
            report.getFinishTime(), mockJob.getUserName(), mockJob.getName(),
            id, mockJob.getCompletedMaps(), mockJob.getCompletedReduces(),
            String.valueOf(mockJob.getState()));
        info.setJobStartTime(report.getStartTime());
        info.setQueueName(mockJob.getQueueName());
        ret.partial.put(id, new PartialJob(info, id));
    }
    return ret;
}
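For context, a hedged sketch of how such a mocked map could be assembled before being handed to split. MRBuilderUtils, ApplicationId, and the Mockito stubs are real APIs; the actual setup inside MockHistoryJobs differs and stubs many more fields.

    // Hypothetical setup, not the actual MockHistoryJobs code. Every field
    // that MockCompletedJob reads (report, name, user, counters, ...) must
    // be stubbed for split to run end to end.
    ApplicationId appId = ApplicationId.newInstance(1416424547277L, 2);
    JobId jobId = MRBuilderUtils.newJobId(appId, 2);
    Job job = mock(Job.class);
    when(job.getID()).thenReturn(jobId);
    when(job.getReport()).thenReturn(mock(JobReport.class));
    Map<JobId, Job> mocked = Maps.newHashMap();
    mocked.put(jobId, job);
    JobsPair pair = split(mocked);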
Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class TestBlocks, method testHsJobsBlock.
/**
 * Test HsJobsBlock's rendering.
 */
@Test
public void testHsJobsBlock() {
    AppContext ctx = mock(AppContext.class);
    Map<JobId, Job> jobs = new HashMap<JobId, Job>();
    Job job = getJob();
    jobs.put(job.getID(), job);
    when(ctx.getAllJobs()).thenReturn(jobs);
    HsJobsBlock block = new HsJobsBlockForTest(ctx);
    PrintWriter pWriter = new PrintWriter(data);
    Block html = new BlockForTest(new HtmlBlockForTest(), pWriter, 0, false);
    block.render(html);
    pWriter.flush();
    assertTrue(data.toString().contains("JobName"));
    assertTrue(data.toString().contains("UserName"));
    assertTrue(data.toString().contains("QueueName"));
    assertTrue(data.toString().contains("SUCCEEDED"));
}
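The getJob() helper and the data buffer live elsewhere in TestBlocks; below is a hedged sketch of the stubbing getJob() would need for the four assertions above to pass. The real helper stubs additional fields, and the concrete values here are hypothetical.

    private Job getJob() {
        Job job = mock(Job.class);
        JobId jobId = MRBuilderUtils.newJobId(
            ApplicationId.newInstance(System.currentTimeMillis(), 4), 1);
        when(job.getID()).thenReturn(jobId);
        JobReport report = mock(JobReport.class);
        when(report.getStartTime()).thenReturn(100010L);
        when(report.getFinishTime()).thenReturn(100015L);
        when(job.getReport()).thenReturn(report);
        when(job.getName()).thenReturn("JobName");        // asserted above
        when(job.getUserName()).thenReturn("UserName");   // asserted above
        when(job.getQueueName()).thenReturn("QueueName"); // asserted above
        when(job.getState()).thenReturn(JobState.SUCCEEDED); // renders "SUCCEEDED"
        return job;
    }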
Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class TestHsWebServicesAttempts, method testTaskAttemptIdSlash.
@Test
public void testTaskAttemptIdSlash() throws JSONException, Exception {
    WebResource r = resource();
    Map<JobId, Job> jobsMap = appContext.getAllJobs();
    for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
            String tid = MRApps.toString(task.getID());
            for (TaskAttempt att : task.getAttempts().values()) {
                TaskAttemptId attemptid = att.getID();
                String attid = MRApps.toString(attemptid);
                ClientResponse response = r.path("ws").path("v1").path("history")
                    .path("mapreduce").path("jobs").path(jobId).path("tasks")
                    .path(tid).path("attempts").path(attid + "/")
                    .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
                assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
                    response.getType().toString());
                JSONObject json = response.getEntity(JSONObject.class);
                assertEquals("incorrect number of elements", 1, json.length());
                JSONObject info = json.getJSONObject("taskAttempt");
                verifyHsTaskAttempt(info, att, task.getType());
            }
        }
    }
}
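The path segments handed to r.path(...) are the canonical string renderings of the v2 ID records. A small illustration with hypothetical IDs; MRBuilderUtils and MRApps.toString are the real factory and formatter:

    // Hypothetical IDs; MRApps.toString produces the forms used in the URL.
    ApplicationId appId = ApplicationId.newInstance(1416424547277L, 2);
    JobId jobId = MRBuilderUtils.newJobId(appId, 2);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
    TaskAttemptId attId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    MRApps.toString(jobId);  // "job_1416424547277_0002"
    MRApps.toString(taskId); // "task_1416424547277_0002_m_000000"
    MRApps.toString(attId);  // "attempt_1416424547277_0002_m_000000_0"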
Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class TestHsWebServicesAttempts, method testTaskAttemptIdDefault.
@Test
public void testTaskAttemptIdDefault() throws JSONException, Exception {
    WebResource r = resource();
    Map<JobId, Job> jobsMap = appContext.getAllJobs();
    for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
            String tid = MRApps.toString(task.getID());
            for (TaskAttempt att : task.getAttempts().values()) {
                TaskAttemptId attemptid = att.getID();
                String attid = MRApps.toString(attemptid);
                ClientResponse response = r.path("ws").path("v1").path("history")
                    .path("mapreduce").path("jobs").path(jobId).path("tasks")
                    .path(tid).path("attempts").path(attid)
                    .get(ClientResponse.class);
                assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
                    response.getType().toString());
                JSONObject json = response.getEntity(JSONObject.class);
                assertEquals("incorrect number of elements", 1, json.length());
                JSONObject info = json.getJSONObject("taskAttempt");
                verifyHsTaskAttempt(info, att, task.getType());
            }
        }
    }
}
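On the server side, the history web services parse those same path segments back into v2 records. The MRApps parsers below are real; the ID strings are hypothetical:

    // Reverse conversion: string path segment back to the v2 ID record.
    JobId parsedJob = MRApps.toJobID("job_1416424547277_0002");
    TaskId parsedTask = MRApps.toTaskID("task_1416424547277_0002_m_000000");
    TaskAttemptId parsedAtt =
        MRApps.toTaskAttemptID("attempt_1416424547277_0002_m_000000_0");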
Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class TestHistoryFileManager, method testHistoryFileInfoShouldReturnCompletedJobIfMaxNotConfiged.
@Test
public void testHistoryFileInfoShouldReturnCompletedJobIfMaxNotConfiged() throws Exception {
    HistoryFileManagerTest hmTest = new HistoryFileManagerTest();
    Configuration conf = dfsCluster.getConfiguration(0);
    conf.setInt(JHAdminConfig.MR_HS_LOADED_JOBS_TASKS_MAX, -1);
    hmTest.init(conf);
    final String jobId = "job_1416424547277_0002";
    JobIndexInfo jobIndexInfo = new JobIndexInfo();
    jobIndexInfo.setJobId(TypeConverter.toYarn(JobID.forName(jobId)));
    jobIndexInfo.setNumMaps(100);
    jobIndexInfo.setNumReduces(100);
    final String historyFile = getClass().getClassLoader()
        .getResource("job_2.0.3-alpha-FAILED.jhist").getFile();
    final Path historyFilePath = FileSystem.getLocal(conf)
        .makeQualified(new Path(historyFile));
    HistoryFileInfo info = hmTest.getHistoryFileInfo(historyFilePath, null, null,
        jobIndexInfo, false);
    Job job = info.loadJob();
    Assert.assertTrue("Should return an instance of CompletedJob as "
        + "a result of parsing the job history file of the job",
        job instanceof CompletedJob);
}
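The conversion used above, JobID.forName followed by TypeConverter.toYarn, is the standard way to turn a job-id string into the v2 JobId record. A minimal round trip for illustration:

    // Classic JobID string -> v2 JobId record; accessors confirm the parts.
    org.apache.hadoop.mapreduce.JobID classic =
        JobID.forName("job_1416424547277_0002");
    JobId yarnId = TypeConverter.toYarn(classic);
    yarnId.getAppId().getClusterTimestamp(); // 1416424547277L
    yarnId.getId();                          // 2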