Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class HsWebServices, method getJobTask:
@GET
@Path("/mapreduce/jobs/{jobid}/tasks/{taskid}")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public TaskInfo getJobTask(@Context HttpServletRequest hsr,
    @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
  init();
  // Resolve the job from its id string, then enforce ACLs before exposing task data.
  Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
  checkAccess(job, hsr);
  Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
  return new TaskInfo(task);
}
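On a running JobHistory Server this resource lives under the web services root, typically http://<jhs-host>:19888/ws/v1/history. A minimal client sketch, assuming that default address and hypothetical job and task ids (java.net.http requires JDK 11+):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class TaskInfoClient {
  public static void main(String[] args) throws Exception {
    String base = "http://localhost:19888/ws/v1/history"; // assumed default JHS address
    String jobId = "job_1408862439940_0001";              // hypothetical id
    String taskId = "task_1408862439940_0001_m_000000";   // hypothetical id
    HttpRequest request = HttpRequest.newBuilder(
            URI.create(base + "/mapreduce/jobs/" + jobId + "/tasks/" + taskId))
        .header("Accept", "application/json") // XML is also available per @Produces
        .GET()
        .build();
    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.body()); // the TaskInfo bean serialized as JSON
  }
}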
Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class HsWebServices, method getJobTaskAttemptId:
@GET
@Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public TaskAttemptInfo getJobTaskAttemptId(@Context HttpServletRequest hsr,
    @PathParam("jobid") String jid, @PathParam("taskid") String tid,
    @PathParam("attemptid") String attId) {
  init();
  Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
  checkAccess(job, hsr);
  Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
  TaskAttempt ta = AMWebServices.getTaskAttemptFromTaskAttemptString(attId, task);
  // Reduce attempts carry extra shuffle/merge timing, so they get a richer bean.
  if (task.getType() == TaskType.REDUCE) {
    return new ReduceTaskAttemptInfo(ta, task.getType());
  } else {
    return new TaskAttemptInfo(ta, task.getType(), false);
  }
}
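Clients can tell the two payload shapes apart because ReduceTaskAttemptInfo adds reduce-only timing fields (shuffle and merge times) on top of the common TaskAttemptInfo ones. A rough client-side sketch, assuming the elapsedShuffleTime field name appears in the serialized reduce attempt (an assumption worth verifying against your Hadoop version):

public class AttemptKind {
  // Assumption: reduce attempts serialize reduce-only timing fields such as
  // "elapsedShuffleTime"; map attempts do not.
  public static boolean looksLikeReduceAttempt(String attemptJson) {
    return attemptJson.contains("\"elapsedShuffleTime\"");
  }

  public static void main(String[] args) {
    System.out.println(looksLikeReduceAttempt("{\"elapsedShuffleTime\":1200}")); // true
  }
}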
Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class TestJobIdHistoryFileInfoMap, method testWithSingleElement:
/**
 * Trivial test case that verifies basic functionality of
 * {@link JobIdHistoryFileInfoMap}.
 */
@Test(timeout = 2000)
public void testWithSingleElement() throws InterruptedException {
  JobIdHistoryFileInfoMap mapWithSize = new JobIdHistoryFileInfoMap();
  JobId jobId = MRBuilderUtils.newJobId(1, 1, 1);
  HistoryFileInfo fileInfo1 = Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo1.getJobId()).thenReturn(jobId);
  // add it twice
  assertEquals("Incorrect return on putIfAbsent()",
      null, mapWithSize.putIfAbsent(jobId, fileInfo1));
  assertEquals("Incorrect return on putIfAbsent()",
      fileInfo1, mapWithSize.putIfAbsent(jobId, fileInfo1));
  // check get()
  assertEquals("Incorrect get()", fileInfo1, mapWithSize.get(jobId));
  assertTrue("Incorrect size()", checkSize(mapWithSize, 1));
  // check navigableKeySet()
  NavigableSet<JobId> set = mapWithSize.navigableKeySet();
  assertEquals("Incorrect navigableKeySet()", 1, set.size());
  assertTrue("Incorrect navigableKeySet()", set.contains(jobId));
  // check values()
  Collection<HistoryFileInfo> values = mapWithSize.values();
  assertEquals("Incorrect values()", 1, values.size());
  assertTrue("Incorrect values()", values.contains(fileInfo1));
}
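The first two assertions are exactly the java.util.concurrent.ConcurrentMap.putIfAbsent contract, and the navigableKeySet() accessor suggests the map is backed by a sorted concurrent map. A standalone illustration of that contract, with plain strings standing in for JobId and HistoryFileInfo:

import java.util.concurrent.ConcurrentSkipListMap;

public class PutIfAbsentDemo {
  public static void main(String[] args) {
    ConcurrentSkipListMap<String, String> map = new ConcurrentSkipListMap<>();
    // First insert: no previous mapping, so putIfAbsent returns null.
    System.out.println(map.putIfAbsent("job_1_1", "info1")); // null
    // Second insert with the same key: the existing value wins and is returned.
    System.out.println(map.putIfAbsent("job_1_1", "info2")); // info1
    System.out.println(map.navigableKeySet()); // [job_1_1]
  }
}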
Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class TestJobListCache, method testEviction:
@Test(timeout = 5000)
public void testEviction() throws InterruptedException {
  int maxSize = 2;
  JobListCache cache = new JobListCache(maxSize, 1000);

  JobId jobId1 = MRBuilderUtils.newJobId(1, 1, 1);
  HistoryFileInfo fileInfo1 = Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo1.getJobId()).thenReturn(jobId1);

  JobId jobId2 = MRBuilderUtils.newJobId(2, 2, 2);
  HistoryFileInfo fileInfo2 = Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo2.getJobId()).thenReturn(jobId2);

  JobId jobId3 = MRBuilderUtils.newJobId(3, 3, 3);
  HistoryFileInfo fileInfo3 = Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo3.getJobId()).thenReturn(jobId3);

  // Adding a third entry overflows the cache (maxSize = 2) and should
  // eventually evict the oldest one.
  cache.addIfAbsent(fileInfo1);
  cache.addIfAbsent(fileInfo2);
  cache.addIfAbsent(fileInfo3);

  // Eviction may lag, so poll up to nine times at 100 ms intervals.
  Collection<HistoryFileInfo> values;
  for (int i = 0; i < 9; i++) {
    values = cache.values();
    if (values.size() > maxSize) {
      Thread.sleep(100);
    } else {
      assertFalse("fileInfo1 should have been evicted",
          values.contains(fileInfo1));
      return;
    }
  }
  fail("JobListCache didn't delete the extra entry");
}
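The loop polls instead of asserting immediately because eviction may complete after addIfAbsent returns; nine 100 ms waits keep the test well inside its 5000 ms timeout. The same bounded-poll idiom, extracted as a generic helper (a sketch, not part of the Hadoop test utilities):

import java.util.function.BooleanSupplier;

public final class WaitFor {
  // Polls the condition every intervalMs until it holds or attempts run out.
  // Returns true if the condition became true within the budget.
  public static boolean waitFor(BooleanSupplier condition, int attempts, long intervalMs)
      throws InterruptedException {
    for (int i = 0; i < attempts; i++) {
      if (condition.getAsBoolean()) {
        return true;
      }
      Thread.sleep(intervalMs);
    }
    return condition.getAsBoolean();
  }
}

With such a helper, the body above reduces to assertTrue(waitFor(() -> cache.values().size() <= maxSize, 9, 100)) followed by the containment check.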
Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class HsWebServices, method getJobTaskAttemptIdCounters:
@GET
@Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/counters")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public JobTaskAttemptCounterInfo getJobTaskAttemptIdCounters(
    @Context HttpServletRequest hsr, @PathParam("jobid") String jid,
    @PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
  init();
  // Same resolve-then-authorize sequence as the other task resources.
  Job job = AMWebServices.getJobFromJobIdString(jid, ctx);
  checkAccess(job, hsr);
  Task task = AMWebServices.getTaskFromTaskIdString(tid, job);
  TaskAttempt ta = AMWebServices.getTaskAttemptFromTaskAttemptString(attId, task);
  return new JobTaskAttemptCounterInfo(ta);
}
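The counters resource nests one level deeper than the attempt resource, and each id on the path embeds the one before it. A tiny sketch of how the full URL composes, again with the assumed default address and hypothetical ids:

public class AttemptCountersUrl {
  public static void main(String[] args) {
    String base = "http://localhost:19888/ws/v1/history"; // assumed JHS address
    // Hypothetical ids; note the attempt id embeds the task id, which embeds the job id.
    String url = base + "/mapreduce/jobs/job_1408862439940_0001"
        + "/tasks/task_1408862439940_0001_r_000000"
        + "/attempts/attempt_1408862439940_0001_r_000000_0/counters";
    System.out.println(url);
  }
}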