use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
the class TaskImpl method getLaunchTime.
// this is always called while holding the read/write lock
private long getLaunchTime() {
  long taskLaunchTime = 0;
  boolean launchTimeSet = false;
  for (TaskAttempt at : attempts.values()) {
    // select the smallest non-zero launch time across all attempts
    long attemptLaunchTime = at.getLaunchTime();
    if (attemptLaunchTime != 0 && !launchTimeSet) {
      // remember the first non-zero launch time
      launchTimeSet = true;
      taskLaunchTime = attemptLaunchTime;
    } else if (attemptLaunchTime != 0 && taskLaunchTime > attemptLaunchTime) {
      // keep the smaller of the two non-zero launch times
      taskLaunchTime = attemptLaunchTime;
    }
  }
  if (!launchTimeSet) {
    // no attempt has launched yet; report the task's scheduled time instead
    return this.scheduledTime;
  }
  return taskLaunchTime;
}
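The loop above reduces to: take the smallest non-zero launch time across all attempts, or fall back to the scheduled time if no attempt has launched. A minimal standalone sketch of that reduction, with a hypothetical list of longs standing in for the TaskAttempt map and the scheduledTime field:

import java.util.Arrays;
import java.util.List;

// Sketch of the selection in TaskImpl.getLaunchTime(): pick the smallest
// non-zero attempt launch time, else fall back to the scheduled time.
// minLaunchTime and its arguments are hypothetical stand-ins, not Hadoop API.
public class LaunchTimeSketch {
  static long minLaunchTime(List<Long> attemptLaunchTimes, long scheduledTime) {
    long best = 0; // 0 means "not set yet", as in TaskImpl
    for (long t : attemptLaunchTimes) {
      if (t != 0 && (best == 0 || t < best)) {
        best = t;
      }
    }
    return best != 0 ? best : scheduledTime;
  }

  public static void main(String[] args) {
    // Second attempt launched earliest; third attempt never launched (0).
    System.out.println(minLaunchTime(Arrays.asList(50L, 30L, 0L), 10L)); // 30
    // No attempt launched: fall back to the scheduled time.
    System.out.println(minLaunchTime(Arrays.asList(0L, 0L), 10L));       // 10
  }
}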
use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
the class JobImpl method actOnUnusableNode.
private void actOnUnusableNode(NodeId nodeId, NodeState nodeState) {
  // running reducers
  if (getInternalState() == JobStateInternal.RUNNING &&
      !allReducersComplete()) {
    List<TaskAttemptId> taskAttemptIdList =
        nodesToSucceededTaskAttempts.get(nodeId);
    if (taskAttemptIdList != null) {
      String mesg = "TaskAttempt killed because it ran on unusable node "
          + nodeId;
      for (TaskAttemptId id : taskAttemptIdList) {
        if (TaskType.MAP == id.getTaskId().getTaskType()) {
          // reschedule only map tasks because their outputs may be unusable
          LOG.info(mesg + ". AttemptId:" + id);
          // Kill the attempt and indicate that the next map attempt should be
          // rescheduled (i.e. considered as a fast-fail map).
          eventHandler.handle(new TaskAttemptKillEvent(id, mesg, true));
        }
      }
    }
  }
  // currently running task attempts on unusable nodes are handled in
  // RMContainerAllocator
}
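The branch above kills only succeeded MAP attempts on the lost node: map output lives on the node's local disk and becomes unreachable, while completed reduce output has already been committed to the final filesystem. A minimal sketch of that filter, using simplified stand-ins (Attempt and attemptsToReschedule are hypothetical, not the real Hadoop event types):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Sketch of the filtering in JobImpl.actOnUnusableNode(): of the succeeded
// attempts that ran on a lost node, only MAP attempts need to be re-run.
public class UnusableNodeSketch {
  enum TaskType { MAP, REDUCE }

  static class Attempt {
    final String id;
    final TaskType type;
    Attempt(String id, TaskType type) { this.id = id; this.type = type; }
    @Override public String toString() { return id; }
  }

  static List<Attempt> attemptsToReschedule(List<Attempt> succeededOnNode) {
    List<Attempt> toKill = new ArrayList<>();
    for (Attempt a : succeededOnNode) {
      if (a.type == TaskType.MAP) { // reschedule maps only
        toKill.add(a);
      }
    }
    return toKill;
  }

  public static void main(String[] args) {
    List<Attempt> onNode = Arrays.asList(
        new Attempt("attempt_m_000000_0", TaskType.MAP),
        new Attempt("attempt_r_000000_0", TaskType.REDUCE));
    System.out.println(attemptsToReschedule(onNode)); // [attempt_m_000000_0]
  }
}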
use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
the class TaskAttemptImpl method createJobCounterUpdateEventTAFailed.
private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  JobCounterUpdateEvent jce =
      new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
  } else {
    jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
  }
  // only charge the millis counters if the task had not already completed
  if (!taskAlreadyCompleted) {
    updateMillisCounters(jce, taskAttempt);
  }
  return jce;
}
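The method above picks a failure counter by task type and charges the millis counters only when the task had not already completed. A hedged sketch of the same bookkeeping, with a plain Map standing in for JobCounterUpdateEvent and a hypothetical runMillis argument standing in for what updateMillisCounters computes:

import java.util.HashMap;
import java.util.Map;

// Sketch of the counter bookkeeping in createJobCounterUpdateEventTAFailed():
// failures are counted per task type; millis are skipped for tasks that had
// already completed. The Map is a stand-in, not the Hadoop counter API.
public class FailedCounterSketch {
  enum TaskType { MAP, REDUCE }

  static void recordFailure(Map<String, Long> counters, TaskType type,
                            boolean taskAlreadyCompleted, long runMillis) {
    String key = (type == TaskType.MAP)
        ? "NUM_FAILED_MAPS" : "NUM_FAILED_REDUCES";
    counters.merge(key, 1L, Long::sum);
    if (!taskAlreadyCompleted) {
      // stand-in for updateMillisCounters(jce, taskAttempt)
      counters.merge("MILLIS_" + type, runMillis, Long::sum);
    }
  }

  public static void main(String[] args) {
    Map<String, Long> counters = new HashMap<>();
    recordFailure(counters, TaskType.MAP, false, 1200L);
    recordFailure(counters, TaskType.REDUCE, true, 900L); // no millis charged
    System.out.println(counters);
  }
}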
use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
the class TaskAttemptImpl method createJobCounterUpdateEventTASucceeded.
private static JobCounterUpdateEvent createJobCounterUpdateEventTASucceeded(
    TaskAttemptImpl taskAttempt) {
  TaskId taskId = taskAttempt.attemptId.getTaskId();
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskId.getJobId());
  updateMillisCounters(jce, taskAttempt);
  return jce;
}
use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
the class SingleCounterBlock method populateMembers.
private void populateMembers(AppContext ctx) {
  JobId jobID = null;
  TaskId taskID = null;
  String tid = $(TASK_ID);
  // derive the counter type from the page title
  if ($(TITLE).contains("MAPS")) {
    counterType = TaskType.MAP;
  } else if ($(TITLE).contains("REDUCES")) {
    counterType = TaskType.REDUCE;
  } else {
    counterType = null;
  }
  if (!tid.isEmpty()) {
    taskID = MRApps.toTaskID(tid);
    jobID = taskID.getJobId();
  } else {
    String jid = $(JOB_ID);
    if (!jid.isEmpty()) {
      jobID = MRApps.toJobID(jid);
    }
  }
  if (jobID == null) {
    return;
  }
  job = ctx.getJob(jobID);
  if (job == null) {
    return;
  }
  if (taskID != null) {
    task = job.getTask(taskID);
    if (task == null) {
      return;
    }
    // single-task view: one value per task attempt
    for (Map.Entry<TaskAttemptId, TaskAttempt> entry
        : task.getAttempts().entrySet()) {
      long value = 0;
      Counters counters = entry.getValue().getCounters();
      CounterGroup group =
          (counters != null) ? counters.getGroup($(COUNTER_GROUP)) : null;
      if (group != null) {
        Counter c = group.findCounter($(COUNTER_NAME));
        if (c != null) {
          value = c.getValue();
        }
      }
      values.put(MRApps.toString(entry.getKey()), value);
    }
    return;
  }
  // job-wide view: one value per task, filtered by counter type
  Map<TaskId, Task> tasks = job.getTasks();
  for (Map.Entry<TaskId, Task> entry : tasks.entrySet()) {
    long value = 0;
    Counters counters = entry.getValue().getCounters();
    CounterGroup group =
        (counters != null) ? counters.getGroup($(COUNTER_GROUP)) : null;
    if (group != null) {
      Counter c = group.findCounter($(COUNTER_NAME));
      if (c != null) {
        value = c.getValue();
      }
    }
    if (counterType == null || counterType == entry.getValue().getType()) {
      values.put(MRApps.toString(entry.getKey()), value);
    }
  }
}
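Both loops above use the same defensive lookup: the counters object, the group, and the counter may each be missing, and a missing value reads as 0. A standalone sketch of that null-guarded descent, with nested maps standing in for the Counters/CounterGroup/Counter hierarchy:

import java.util.HashMap;
import java.util.Map;

// Sketch of the defensive counter lookup used twice in
// SingleCounterBlock.populateMembers(): each level may be null/absent,
// and a missing counter reads as 0. Nested maps are hypothetical stand-ins.
public class CounterLookupSketch {
  static long lookup(Map<String, Map<String, Long>> counters,
                     String groupName, String counterName) {
    if (counters == null) {
      return 0L; // task attempt reported no counters at all
    }
    Map<String, Long> group = counters.get(groupName);
    if (group == null) {
      return 0L; // requested group is absent
    }
    return group.getOrDefault(counterName, 0L); // absent counter reads as 0
  }

  public static void main(String[] args) {
    Map<String, Long> fsGroup = new HashMap<>();
    fsGroup.put("HDFS_BYTES_READ", 4096L);
    Map<String, Map<String, Long>> counters = new HashMap<>();
    counters.put("FileSystemCounters", fsGroup);
    System.out.println(
        lookup(counters, "FileSystemCounters", "HDFS_BYTES_READ")); // 4096
    System.out.println(lookup(counters, "MissingGroup", "X"));      // 0
    System.out.println(lookup(null, "G", "C"));                     // 0
  }
}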