use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent in project hadoop by apache.
the class KillAMPreemptionPolicy method killContainer.
@SuppressWarnings("unchecked")
private void killContainer(Context ctxt, PreemptionContainer c) {
  ContainerId reqCont = c.getId();
  TaskAttemptId reqTask = ctxt.getTaskAttempt(reqCont);
  LOG.info("Evicting " + reqTask);
  dispatcher.handle(new TaskAttemptEvent(reqTask, TaskAttemptEventType.TA_KILL));
  // add preemption to counters
  JobCounterUpdateEvent jce =
      new JobCounterUpdateEvent(reqTask.getTaskId().getJobId());
  jce.addCounterUpdate(JobCounter.TASKS_REQ_PREEMPT, 1);
  dispatcher.handle(jce);
}
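For context, killContainer() is normally driven by the policy's preempt() callback, which the allocator invokes when the ResourceManager attaches a PreemptionMessage to an allocate response. Below is a minimal, hedged sketch of that driving loop, assuming the standard org.apache.hadoop.yarn.api.records accessors (PreemptionMessage.getStrictContract(), StrictPreemptionContract.getContainers()); the real KillAMPreemptionPolicy may walk the contracts differently.

// Illustrative sketch only, written as another method of the same policy class.
public void preempt(Context ctxt, PreemptionMessage preemptionRequests) {
  if (preemptionRequests == null
      || preemptionRequests.getStrictContract() == null) {
    return;
  }
  // Containers in the strict contract will be reclaimed unconditionally,
  // so kill the task attempt behind each one right away.
  for (PreemptionContainer c :
      preemptionRequests.getStrictContract().getContainers()) {
    killContainer(ctxt, c);
  }
}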
use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent in project hadoop by apache.
the class TaskAttemptListenerImpl method preempted.
@Override
public void preempted(TaskAttemptID taskAttemptID, TaskStatus taskStatus)
    throws IOException, InterruptedException {
  LOG.info("Preempted state update from " + taskAttemptID.toString());
  // An attempt is telling us that it got preempted.
  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  preemptionPolicy.reportSuccessfulPreemption(attemptID);
  taskHeartbeatHandler.progressing(attemptID);
  context.getEventHandler().handle(
      new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_PREEMPTED));
}
use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent in project hadoop by apache.
the class TaskAttemptListenerImpl method fsError.
@Override
public void fsError(TaskAttemptID taskAttemptID, String message)
    throws IOException {
  // This happens only in Child.
  LOG.fatal("Task: " + taskAttemptID + " - failed due to FSError: " + message);
  reportDiagnosticInfo(taskAttemptID, "FSError: " + message);
  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  // handling checkpoints
  preemptionPolicy.handleFailedContainer(attemptID);
  context.getEventHandler().handle(
      new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
}
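The comment above notes that this path is only exercised from the child task JVM. The following is a hedged sketch of that caller side over TaskUmbilicalProtocol (which TaskAttemptListenerImpl implements); the wrapper class, method name, and Runnable are illustrative, not Hadoop's actual Task/YarnChild code.

import java.io.IOException;
import org.apache.hadoop.fs.FSError;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
import org.apache.hadoop.util.StringUtils;

class FsErrorReportingSketch {
  static void runReportingFsErrors(Runnable work, TaskUmbilicalProtocol umbilical,
      TaskAttemptID taskAttemptID) throws IOException {
    try {
      work.run();
    } catch (FSError e) {
      // Tell the AM why this attempt is about to die; the AM side then
      // dispatches TA_FAILMSG for the attempt, as shown above.
      umbilical.fsError(taskAttemptID, StringUtils.stringifyException(e));
      throw e;
    }
  }
}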
use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent in project hadoop by apache.
the class TaskAttemptListenerImpl method commitPending.
/**
 * TaskAttempt is reporting that it is in commit_pending and is waiting for
 * the commit response.
 *
 * <br>
 * Commit is a two-phase protocol. First the attempt informs the
 * ApplicationMaster that it is
 * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
 * the ApplicationMaster whether it {@link #canCommit(TaskAttemptID)}. This is
 * a legacy of the centralized commit protocol handling by the JobTracker.
 */
@Override
public void commitPending(TaskAttemptID taskAttemptID, TaskStatus taskStatus)
    throws IOException, InterruptedException {
  LOG.info("Commit-pending state update from " + taskAttemptID.toString());
  // An attempt is asking if it can commit its output. This can be decided
  // only by the task which is managing the multiple attempts. So redirect the
  // request there.
  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  taskHeartbeatHandler.progressing(attemptID);
  // Ignorable TaskStatus? - since a task will send a LastStatusUpdate
  context.getEventHandler().handle(
      new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_COMMIT_PENDING));
}
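Read from the task side, the two-phase protocol in the javadoc is: report commit-pending once (landing in the handler above), then poll canCommit() until the Task that manages the attempts grants the commit. A hedged sketch of that loop over a TaskUmbilicalProtocol proxy follows; the helper class and one-second polling interval are illustrative, not Hadoop's actual Task.commit() code.

import java.io.IOException;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;

class CommitProtocolSketch {
  static void awaitCommitApproval(TaskUmbilicalProtocol umbilical,
      TaskAttemptID taskAttemptID, TaskStatus status)
      throws IOException, InterruptedException {
    // Phase 1: tell the AM this attempt has output ready to commit
    // (handled by commitPending() above, which dispatches TA_COMMIT_PENDING).
    umbilical.commitPending(taskAttemptID, status);
    // Phase 2: poll until the Task managing the attempts picks this attempt.
    while (!umbilical.canCommit(taskAttemptID)) {
      Thread.sleep(1000);
    }
    // The attempt then commits its output and finally reports done(),
    // as shown in the next snippet.
  }
}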
use of org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent in project hadoop by apache.
the class TaskAttemptListenerImpl method done.
@Override
public void done(TaskAttemptID taskAttemptID) throws IOException {
  LOG.info("Done acknowledgment from " + taskAttemptID.toString());
  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
      TypeConverter.toYarn(taskAttemptID);
  taskHeartbeatHandler.progressing(attemptID);
  context.getEventHandler().handle(
      new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
}