use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.
the class TasksBlock method render.
@Override
protected void render(Block html) {
    if (app.getJob() == null) {
        html.h2($(TITLE));
        return;
    }
    TaskType type = null;
    String symbol = $(TASK_TYPE);
    if (!symbol.isEmpty()) {
        type = MRApps.taskType(symbol);
    }
    TBODY<TABLE<Hamlet>> tbody = html.table("#tasks").thead().tr()
        .th("Task")
        .th("Progress")
        .th("Status")
        .th("State")
        .th("Start Time")
        .th("Finish Time")
        .th("Elapsed Time")._()._().tbody();
    StringBuilder tasksTableData = new StringBuilder("[\n");
    for (Task task : app.getJob().getTasks().values()) {
        if (type != null && task.getType() != type) {
            continue;
        }
        String taskStateStr = $(TASK_STATE);
        if (taskStateStr == null || taskStateStr.trim().equals("")) {
            taskStateStr = "ALL";
        }
        if (!taskStateStr.equalsIgnoreCase("ALL")) {
            try {
                // get stateUI enum
                MRApps.TaskStateUI stateUI = MRApps.taskState(taskStateStr);
                if (!stateUI.correspondsTo(task.getState())) {
                    continue;
                }
            } catch (IllegalArgumentException e) {
                // not supported state, ignore
                continue;
            }
        }
        TaskInfo info = new TaskInfo(task);
        String tid = info.getId();
        String pct = StringUtils.format("%.2f", info.getProgress());
        tasksTableData.append("[\"<a href='").append(url("task", tid)).append("'>")
            .append(tid).append("</a>\",\"")
            .append("<br title='").append(pct).append("'> <div class='")
            .append(C_PROGRESSBAR).append("' title='").append(join(pct, '%')).append("'> ")
            .append("<div class='").append(C_PROGRESSBAR_VALUE).append("' style='")
            .append(join("width:", pct, '%')).append("'> </div> </div>\",\"")
            .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(info.getStatus())))
            .append("\",\"")
            .append(info.getState()).append("\",\"")
            .append(info.getStartTime()).append("\",\"")
            .append(info.getFinishTime()).append("\",\"")
            .append(info.getElapsedTime()).append("\"],\n");
    }
    // Remove the last comma and close off the array of arrays
    if (tasksTableData.charAt(tasksTableData.length() - 2) == ',') {
        tasksTableData.delete(tasksTableData.length() - 2, tasksTableData.length() - 1);
    }
    tasksTableData.append("]");
    html.script().$type("text/javascript")._("var tasksTableData=" + tasksTableData)._();
    tbody._()._();
}
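The type filter above goes through MRApps.taskType, which turns the query symbol into a TaskType. A minimal sketch of that mapping, assuming the conventional "m"/"r" symbols (an assumption inferred from the "tasktype must be either m or r" message in the AMWebServices example further down, not from this snippet):

import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.util.MRApps;

// Illustrative sketch only; the "m" and "r" symbols are assumed from the
// web service error message in the getJobTasks example below.
public class TaskTypeFilterSketch {
    public static void main(String[] args) {
        TaskType mapType = MRApps.taskType("m");      // expected: TaskType.MAP
        TaskType reduceType = MRApps.taskType("r");   // expected: TaskType.REDUCE
        System.out.println(mapType + " / " + reduceType);
    }
}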
use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.
the class JobImpl method constructFinalFullcounters.
@Private
public void constructFinalFullcounters() {
    this.fullCounters = new Counters();
    this.finalMapCounters = new Counters();
    this.finalReduceCounters = new Counters();
    this.fullCounters.incrAllCounters(jobCounters);
    for (Task t : this.tasks.values()) {
        Counters counters = t.getCounters();
        switch (t.getType()) {
            case MAP:
                this.finalMapCounters.incrAllCounters(counters);
                break;
            case REDUCE:
                this.finalReduceCounters.incrAllCounters(counters);
                break;
            default:
                throw new IllegalStateException("Task type neither map nor reduce: " + t.getType());
        }
        this.fullCounters.incrAllCounters(counters);
    }
}
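The method builds three views of the same data: fullCounters starts from the job-level counters and then absorbs every task's counters, while finalMapCounters and finalReduceCounters each absorb only their task type. A minimal sketch of how incrAllCounters accumulates rather than overwrites, assuming org.apache.hadoop.mapreduce.Counters and using made-up values:

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;

// Sketch with invented values: merging two tasks' counters sums the shared counter.
public class CounterMergeSketch {
    public static void main(String[] args) {
        Counters task1 = new Counters();
        task1.findCounter(TaskCounter.MAP_INPUT_RECORDS).increment(100);
        Counters task2 = new Counters();
        task2.findCounter(TaskCounter.MAP_INPUT_RECORDS).increment(50);

        Counters full = new Counters();
        full.incrAllCounters(task1);
        full.incrAllCounters(task2);
        // Expected to print 150: counters are incremented, not replaced.
        System.out.println(full.findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue());
    }
}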
use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.
the class JobImpl method computeProgress.
private void computeProgress() {
    this.readLock.lock();
    try {
        float mapProgress = 0f;
        float reduceProgress = 0f;
        for (Task task : this.tasks.values()) {
            if (task.getType() == TaskType.MAP) {
                mapProgress += (task.isFinished() ? 1f : task.getProgress());
            } else {
                reduceProgress += (task.isFinished() ? 1f : task.getProgress());
            }
        }
        if (this.numMapTasks != 0) {
            mapProgress = mapProgress / this.numMapTasks;
        }
        if (this.numReduceTasks != 0) {
            reduceProgress = reduceProgress / this.numReduceTasks;
        }
        this.mapProgress = mapProgress;
        this.reduceProgress = reduceProgress;
    } finally {
        this.readLock.unlock();
    }
}
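As a concrete example of the arithmetic: with three map tasks where one is finished and the other two report 0.5 and 0.0, mapProgress accumulates 1.0 + 0.5 + 0.0 and is then divided by numMapTasks to give 0.5. The numMapTasks != 0 and numReduceTasks != 0 guards keep map-only or reduce-only jobs from dividing by zero, leaving the corresponding progress at 0.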
use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.
the class AMWebServices method getJobTasks.
@GET
@Path("/jobs/{jobid}/tasks")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public TasksInfo getJobTasks(@Context HttpServletRequest hsr,
    @PathParam("jobid") String jid, @QueryParam("type") String type) {
    init();
    Job job = getJobFromJobIdString(jid, appCtx);
    checkAccess(job, hsr);
    TasksInfo allTasks = new TasksInfo();
    for (Task task : job.getTasks().values()) {
        TaskType ttype = null;
        if (type != null && !type.isEmpty()) {
            try {
                ttype = MRApps.taskType(type);
            } catch (YarnRuntimeException e) {
                throw new BadRequestException("tasktype must be either m or r");
            }
        }
        if (ttype != null && task.getType() != ttype) {
            continue;
        }
        allTasks.add(new TaskInfo(task));
    }
    return allTasks;
}
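For a sense of how this resource is called: assuming the MR application master mounts its web services under /ws/v1/mapreduce (an assumption about the surrounding class, not shown in this snippet), and with a made-up host, port, and job id, a plain HTTP client could list only the map tasks like this:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

// Hypothetical client sketch: host, port, job id, and the /ws/v1/mapreduce prefix
// are placeholders/assumptions, not values taken from the snippet above.
public class ListMapTasksSketch {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://am-host:8080/ws/v1/mapreduce/jobs/job_1234567890123_0001/tasks?type=m");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");
        try (BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line);  // JSON body describing the matching tasks
            }
        } finally {
            conn.disconnect();
        }
    }
}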
use of org.apache.hadoop.mapreduce.v2.app.job.Task in project hadoop by apache.
the class AMWebServices method getJobTask.
@GET
@Path("/jobs/{jobid}/tasks/{taskid}")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public TaskInfo getJobTask(@Context HttpServletRequest hsr,
    @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
    init();
    Job job = getJobFromJobIdString(jid, appCtx);
    checkAccess(job, hsr);
    Task task = getTaskFromTaskIdString(tid, job);
    return new TaskInfo(task);
}
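The taskid path parameter is the full MapReduce task id string (of the form task_<cluster timestamp>_<job sequence>_m|r_<task number>). getTaskFromTaskIdString resolves it against the job; ids that do not parse or do not belong to the job are expected to surface as web service errors rather than a null TaskInfo.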