Use of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent in the Apache Hadoop project: the killContainer method of the KillAMPreemptionPolicy class.
// Kills the task attempt running in a container that the scheduler selected
// for preemption, and records the preemption request in the job counters.
@SuppressWarnings("unchecked")
private void killContainer(Context ctxt, PreemptionContainer c) {
    ContainerId containerId = c.getId();
    TaskAttemptId attemptId = ctxt.getTaskAttempt(containerId);
    LOG.info("Evicting " + attemptId);
    // Route a TA_KILL event so the attempt's state machine tears itself down.
    dispatcher.handle(
        new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
    // Count this preemption request against the owning job.
    JobCounterUpdateEvent counterEvent =
        new JobCounterUpdateEvent(attemptId.getTaskId().getJobId());
    counterEvent.addCounterUpdate(JobCounter.TASKS_REQ_PREEMPT, 1);
    dispatcher.handle(counterEvent);
}
Use of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent in the Apache Hadoop project: the createJobCounterUpdateEventTAFailed method of the TaskAttemptImpl class.
/**
 * Builds the counter update recording a failed task attempt: increments the
 * map/reduce failure counter and, unless the task had already completed,
 * adds the attempt's allocated resource-milliseconds to the event.
 */
private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
    TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
    TaskId taskId = taskAttempt.getID().getTaskId();
    JobCounterUpdateEvent event = new JobCounterUpdateEvent(taskId.getJobId());
    JobCounter failedCounter = taskId.getTaskType() == TaskType.MAP
        ? JobCounter.NUM_FAILED_MAPS
        : JobCounter.NUM_FAILED_REDUCES;
    event.addCounterUpdate(failedCounter, 1);
    // Skip the millis accounting for tasks that already completed.
    if (!taskAlreadyCompleted) {
        updateMillisCounters(event, taskAttempt);
    }
    return event;
}
Use of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent in the Apache Hadoop project: the createJobCounterUpdateEventTASucceeded method of the TaskAttemptImpl class.
/**
 * Builds the counter update for a successful task attempt, adding the
 * attempt's allocated resource-milliseconds to the owning job's counters.
 */
private static JobCounterUpdateEvent createJobCounterUpdateEventTASucceeded(
    TaskAttemptImpl taskAttempt) {
    TaskId taskId = taskAttempt.attemptId.getTaskId();
    JobCounterUpdateEvent event = new JobCounterUpdateEvent(taskId.getJobId());
    updateMillisCounters(event, taskAttempt);
    return event;
}
Use of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent in the Apache Hadoop project: the updateMillisCounters method of the TaskAttemptImpl class.
/**
 * Adds the resource usage of a finished task attempt (simulated-slot-, MB-,
 * vcore-, and wall-clock milliseconds) to the supplied job counter update
 * event. Does nothing when the attempt never had a container or resource
 * allocated.
 *
 * @param jce the counter update event to accumulate into
 * @param taskAttempt the attempt whose launch/finish times and allocated
 *        container resources are charged
 */
private static void updateMillisCounters(JobCounterUpdateEvent jce,
    TaskAttemptImpl taskAttempt) {
  // If the container/resource is not allocated, do not update.
  if (null == taskAttempt.container
      || null == taskAttempt.container.getResource()) {
    return;
  }
  long duration = taskAttempt.getFinishTime() - taskAttempt.getLaunchTime();
  Resource allocatedResource = taskAttempt.container.getResource();
  int mbAllocated = (int) allocatedResource.getMemorySize();
  int vcoresAllocated = allocatedResource.getVirtualCores();
  int minSlotMemSize = taskAttempt.conf.getInt(
      YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
  // Exact integer ceiling division. The previous
  // (int) Math.ceil((float) mbAllocated / minSlotMemSize) form could round
  // incorrectly once the operands exceed float's 24-bit mantissa.
  int simSlotsAllocated = minSlotMemSize == 0
      ? 0
      : (mbAllocated + minSlotMemSize - 1) / minSlotMemSize;
  TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
  if (taskType == TaskType.MAP) {
    jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS,
        simSlotsAllocated * duration);
    jce.addCounterUpdate(JobCounter.MB_MILLIS_MAPS, duration * mbAllocated);
    jce.addCounterUpdate(JobCounter.VCORES_MILLIS_MAPS,
        duration * vcoresAllocated);
    jce.addCounterUpdate(JobCounter.MILLIS_MAPS, duration);
  } else {
    jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES,
        simSlotsAllocated * duration);
    jce.addCounterUpdate(JobCounter.MB_MILLIS_REDUCES, duration * mbAllocated);
    jce.addCounterUpdate(JobCounter.VCORES_MILLIS_REDUCES,
        duration * vcoresAllocated);
    jce.addCounterUpdate(JobCounter.MILLIS_REDUCES, duration);
  }
}
Use of org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent in the Apache Hadoop project: the sendLaunchedEvents method of the TaskAttemptImpl class.
/**
 * Publishes the events marking this attempt as launched: increments the
 * TOTAL_LAUNCHED_MAPS/TOTAL_LAUNCHED_REDUCES job counter and emits a
 * TaskAttemptStartedEvent to the job history.
 */
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
  JobCounterUpdateEvent jce =
      new JobCounterUpdateEvent(attemptId.getTaskId().getJobId());
  jce.addCounterUpdate(
      attemptId.getTaskId().getTaskType() == TaskType.MAP
          ? JobCounter.TOTAL_LAUNCHED_MAPS
          : JobCounter.TOTAL_LAUNCHED_REDUCES,
      1);
  eventHandler.handle(jce);
  // Fixed: the log message was missing the closing "]" after the container
  // id, producing unbalanced brackets in the output.
  LOG.info("TaskAttempt: [" + attemptId + "] using containerId: ["
      + container.getId() + "] on NM: ["
      + StringInterner.weakIntern(container.getNodeId().toString()) + "]");
  TaskAttemptStartedEvent tase = new TaskAttemptStartedEvent(
      TypeConverter.fromYarn(attemptId),
      TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
      launchTime, trackerName, httpPort, shufflePort, container.getId(),
      locality.toString(), avataar.toString());
  eventHandler.handle(
      new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
Aggregations