use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
the class MockJobs method newTask.
public static Task newTask(JobId jid, int i, int m, final boolean hasFailedTasks) {
  final TaskId tid = Records.newRecord(TaskId.class);
  tid.setJobId(jid);
  tid.setId(i);
  tid.setTaskType(TASK_TYPES.next());
  final TaskReport report = newTaskReport(tid);
  final Map<TaskAttemptId, TaskAttempt> attempts = newTaskAttempts(tid, m);
  return new Task() {

    @Override
    public TaskId getID() {
      return tid;
    }

    @Override
    public TaskReport getReport() {
      return report;
    }

    @Override
    public Counters getCounters() {
      if (hasFailedTasks) {
        return null;
      }
      return new Counters(TypeConverter.fromYarn(report.getCounters()));
    }

    @Override
    public float getProgress() {
      return report.getProgress();
    }

    @Override
    public TaskType getType() {
      return tid.getTaskType();
    }

    @Override
    public Map<TaskAttemptId, TaskAttempt> getAttempts() {
      return attempts;
    }

    @Override
    public TaskAttempt getAttempt(TaskAttemptId attemptID) {
      return attempts.get(attemptID);
    }

    @Override
    public boolean isFinished() {
      switch (report.getTaskState()) {
      case SUCCEEDED:
      case KILLED:
      case FAILED:
        return true;
      }
      return false;
    }

    @Override
    public boolean canCommit(TaskAttemptId taskAttemptID) {
      return false;
    }

    @Override
    public TaskState getState() {
      return report.getTaskState();
    }
  };
}
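The anonymous Task above is driven entirely by the pre-built report and attempt map, which makes it cheap to use in app and web-service tests. A minimal sketch of how it might be exercised; the MRBuilderUtils job id and the parameter values are illustrative assumptions, not taken from the snippet:

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.MockJobs;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;

public class MockTaskSketch {
  public static void main(String[] args) {
    // Illustrative cluster timestamp / app id / job id.
    JobId jobId = MRBuilderUtils.newJobId(1234567890L, 1, 1);
    // Task index 0 with two attempts and no simulated failures.
    Task task = MockJobs.newTask(jobId, 0, 2, false);
    System.out.println("attempts: " + task.getAttempts().size());  // 2
    // hasFailedTasks == false, so counters are backed by the report.
    System.out.println("has counters: " + (task.getCounters() != null));
  }
}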
use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
the class TestAMInfos method testAMInfosWithoutRecoveryEnabled.
@Test
public void testAMInfosWithoutRecoveryEnabled() throws Exception {
  int runCount = 0;
  MRApp app = new MRAppWithHistory(1, 0, false, this.getClass().getName(),
      true, ++runCount);
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  long am1StartTime = app.getAllAMInfos().get(0).getStartTime();
  Assert.assertEquals("No of tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task mapTask = it.next();
  app.waitForState(mapTask, TaskState.RUNNING);
  TaskAttempt taskAttempt = mapTask.getAttempts().values().iterator().next();
  app.waitForState(taskAttempt, TaskAttemptState.RUNNING);
  // stop the app
  app.stop();
  // rerun
  app = new MRAppWithHistory(1, 0, false, this.getClass().getName(), false,
      ++runCount);
  conf = new Configuration();
  // in the rerun, the AMInfo will be recovered from the previous run even if
  // recovery is not enabled.
  conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, false);
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("No of tasks not correct", 1, job.getTasks().size());
  it = job.getTasks().values().iterator();
  mapTask = it.next();
  // There should be two AMInfos
  List<AMInfo> amInfos = app.getAllAMInfos();
  Assert.assertEquals(2, amInfos.size());
  AMInfo amInfoOne = amInfos.get(0);
  Assert.assertEquals(am1StartTime, amInfoOne.getStartTime());
  app.stop();
}
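The second half of the test checks that getAllAMInfos() keeps one AMInfo per application attempt even with recovery disabled, and that the first attempt's record survives the restart intact. A hedged sketch of a reusable assertion helper a similar test might factor out (the helper and its name are ours, not from MRApp):

import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.junit.Assert;

final class AMInfoAssertions {
  // Hypothetical helper: verify one AMInfo per AM run and that the first
  // run's record is preserved across restarts.
  static void assertAMInfoChain(List<AMInfo> amInfos, int expectedRuns,
      long firstStartTime) {
    Assert.assertEquals("No of AM attempts not correct",
        expectedRuns, amInfos.size());
    Assert.assertEquals(firstStartTime, amInfos.get(0).getStartTime());
    // Start times should be non-decreasing across successive attempts.
    for (int i = 1; i < amInfos.size(); i++) {
      Assert.assertTrue(amInfos.get(i).getStartTime()
          >= amInfos.get(i - 1).getStartTime());
    }
  }
}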
use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
the class StartEndTimesBase method updateAttempt.
@Override
public void updateAttempt(TaskAttemptStatus status, long timestamp) {
  TaskAttemptId attemptID = status.id;
  TaskId taskID = attemptID.getTaskId();
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);
  if (job == null) {
    return;
  }
  Task task = job.getTask(taskID);
  if (task == null) {
    return;
  }
  Long boxedStart = startTimes.get(attemptID);
  long start = boxedStart == null ? Long.MIN_VALUE : boxedStart;
  TaskAttempt taskAttempt = task.getAttempt(attemptID);
  if (taskAttempt.getState() == TaskAttemptState.SUCCEEDED) {
    boolean isNew = false;
    // is this a new success?
    synchronized (doneTasks) {
      if (!doneTasks.contains(task)) {
        doneTasks.add(task);
        isNew = true;
      }
    }
    // Note that if a task completes twice [because of a previous speculation
    // and a race, or a success followed by loss of the machine with the
    // local data] we only count the first one.
    if (isNew) {
      long finish = timestamp;
      if (start > 1L && finish > 1L && start <= finish) {
        long duration = finish - start;
        DataStatistics statistics = dataStatisticsForTask(taskID);
        if (statistics != null) {
          statistics.add(duration);
        }
      }
    }
  }
}
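Only the first success per task contributes a duration, which feeds the estimator's running statistics for that task type. A minimal sketch of that accumulation, assuming org.apache.hadoop.mapreduce.v2.app.speculate.DataStatistics; the durations are illustrative:

import org.apache.hadoop.mapreduce.v2.app.speculate.DataStatistics;

public class DurationStatsSketch {
  public static void main(String[] args) {
    DataStatistics stats = new DataStatistics();
    // Durations (finish - start) of first successful attempts, in ms;
    // the values here are made up for illustration.
    stats.add(42000.0);
    stats.add(45000.0);
    stats.add(44000.0);
    System.out.println("mean (ms): " + stats.mean());
    System.out.println("std  (ms): " + stats.std());
  }
}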
use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
the class AMWebServices method getJobTaskAttemptIdCounters.
@GET
@Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/counters")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public JobTaskAttemptCounterInfo getJobTaskAttemptIdCounters(
    @Context HttpServletRequest hsr, @PathParam("jobid") String jid,
    @PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
  init();
  Job job = getJobFromJobIdString(jid, appCtx);
  checkAccess(job, hsr);
  Task task = getTaskFromTaskIdString(tid, job);
  TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task);
  return new JobTaskAttemptCounterInfo(ta);
}
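This resource is reachable over the AM web service while the job runs. A hedged client sketch; the host, proxy path, and all ids are illustrative, and we assume the /ws/v1/mapreduce prefix that AMWebServices resources conventionally hang off:

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;

public class AttemptCountersClientSketch {
  public static void main(String[] args) throws IOException {
    // All identifiers below are made up for illustration.
    URL url = new URL("http://rmhost:8088/proxy/application_1234567890_0001"
        + "/ws/v1/mapreduce/jobs/job_1234567890_0001"
        + "/tasks/task_1234567890_0001_m_000000"
        + "/attempts/attempt_1234567890_0001_m_000000_0/counters");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    System.out.println("HTTP " + conn.getResponseCode());
  }
}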
use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
the class AMWebServices method killJobTaskAttempt.
protected Response killJobTaskAttempt(TaskAttempt ta,
    UserGroupInformation callerUGI, HttpServletRequest hsr)
    throws IOException, InterruptedException {
  Preconditions.checkNotNull(ta, "ta cannot be null");
  String userName = callerUGI.getUserName();
  final TaskAttemptId attemptId = ta.getID();
  try {
    callerUGI.doAs(new PrivilegedExceptionAction<KillTaskAttemptResponse>() {

      @Override
      public KillTaskAttemptResponse run() throws IOException, YarnException {
        KillTaskAttemptRequest req = new KillTaskAttemptRequestPBImpl();
        req.setTaskAttemptId(attemptId);
        return service.forceKillTaskAttempt(req);
      }
    });
  } catch (UndeclaredThrowableException ue) {
    // if the root cause is a permissions issue, bubble that up to the user
    if (ue.getCause() instanceof YarnException) {
      YarnException ye = (YarnException) ue.getCause();
      if (ye.getCause() instanceof AccessControlException) {
        String taId = attemptId.toString();
        String msg = "Unauthorized attempt to kill task attempt " + taId
            + " by remote user " + userName;
        return Response.status(Status.FORBIDDEN).entity(msg).build();
      } else {
        throw ue;
      }
    } else {
      throw ue;
    }
  }
  JobTaskAttemptState ret = new JobTaskAttemptState();
  ret.setState(TaskAttemptState.KILLED.toString());
  return Response.status(Status.OK).entity(ret).build();
}
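killJobTaskAttempt issues the kill as the remote caller via UserGroupInformation.doAs; checked exceptions other than IOException and InterruptedException come back wrapped in UndeclaredThrowableException, which is why the catch block walks the cause chain. A self-contained sketch of that unwrap pattern; the user name and action body are illustrative:

import java.lang.reflect.UndeclaredThrowableException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsUnwrapSketch {
  public static void main(String[] args) throws Exception {
    UserGroupInformation ugi =
        UserGroupInformation.createRemoteUser("alice");  // illustrative user
    try {
      ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          // Privileged work goes here; a checked exception that is neither
          // IOException nor InterruptedException gets wrapped by doAs.
          return null;
        }
      });
    } catch (UndeclaredThrowableException ue) {
      // Inspect the cause chain, as killJobTaskAttempt does, to decide
      // whether to translate (e.g., to HTTP 403 FORBIDDEN) or rethrow.
      System.err.println("wrapped cause: " + ue.getCause());
    }
  }
}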