Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
The class TestMRApp, method testCommitPending.
@Test
public void testCommitPending() throws Exception {
  MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task task = it.next();
  app.waitForState(task, TaskState.RUNNING);
  TaskAttempt attempt = task.getAttempts().values().iterator().next();
  app.waitForState(attempt, TaskAttemptState.RUNNING);
  //send the commit pending signal to the task
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(attempt.getID(),
          TaskAttemptEventType.TA_COMMIT_PENDING));
  //wait for the first attempt to reach COMMIT_PENDING
  app.waitForState(attempt, TaskAttemptState.COMMIT_PENDING);
  //re-send the commit pending signal to the task
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(attempt.getID(),
          TaskAttemptEventType.TA_COMMIT_PENDING));
  //the task attempt should still be at COMMIT_PENDING
  app.waitForState(attempt, TaskAttemptState.COMMIT_PENDING);
  //send the done signal to the task
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(
          task.getAttempts().values().iterator().next().getID(),
          TaskAttemptEventType.TA_DONE));
  app.waitForState(job, JobState.SUCCEEDED);
}
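The signal-then-wait pattern above recurs throughout TestMRApp; as a sketch, it could be factored into a hypothetical helper (not part of the Hadoop source):

//hypothetical helper: fire an event at an attempt and block until it
//reaches the expected state
private void signalAndWait(MRApp app, TaskAttempt attempt,
    TaskAttemptEventType type, TaskAttemptState expected) throws Exception {
  app.getContext().getEventHandler()
      .handle(new TaskAttemptEvent(attempt.getID(), type));
  app.waitForState(attempt, expected);
}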
Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
The class TestMRApp, method testCompletedMapsForReduceSlowstart.
@Test
public void testCompletedMapsForReduceSlowstart() throws Exception {
  MRApp app = new MRApp(2, 1, false, this.getClass().getName(), true);
  Configuration conf = new Configuration();
  //reduces will start after half of the maps have completed
  conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.5f);
  //uberization forces full slowstart (1.0), so disable it
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  //all maps would be running
  Assert.assertEquals("Num tasks not correct", 3, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task mapTask1 = it.next();
  Task mapTask2 = it.next();
  Task reduceTask = it.next();
  //all maps must be running
  app.waitForState(mapTask1, TaskState.RUNNING);
  app.waitForState(mapTask2, TaskState.RUNNING);
  TaskAttempt task1Attempt = mapTask1.getAttempts().values().iterator().next();
  TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
  //before sending the TA_DONE event, make sure the attempts have reached
  //the RUNNING state
  app.waitForState(task1Attempt, TaskAttemptState.RUNNING);
  app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
  //reduces must be in NEW state
  Assert.assertEquals("Reduce Task state not correct", TaskState.NEW,
      reduceTask.getReport().getTaskState());
  //send the done signal to the 1st map task
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(
          mapTask1.getAttempts().values().iterator().next().getID(),
          TaskAttemptEventType.TA_DONE));
  //wait for the first map task to complete
  app.waitForState(mapTask1, TaskState.SUCCEEDED);
  //once the first map completes, it will schedule the reduces;
  //now the reduce must be running
  app.waitForState(reduceTask, TaskState.RUNNING);
  //send the done signal to the 2nd map and the reduce to complete the job
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(
          mapTask2.getAttempts().values().iterator().next().getID(),
          TaskAttemptEventType.TA_DONE));
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(
          reduceTask.getAttempts().values().iterator().next().getID(),
          TaskAttemptEventType.TA_DONE));
  app.waitForState(job, JobState.SUCCEEDED);
}
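The repeated task.getAttempts().values().iterator().next().getID() chain fetches the id of a task's only attempt; a hypothetical helper would make that intent clearer:

//hypothetical helper: the id of a task's first (here, only) attempt
private static TaskAttemptId firstAttemptId(Task task) {
  return task.getAttempts().values().iterator().next().getID();
}

With it, each done signal above reduces to new TaskAttemptEvent(firstAttemptId(mapTask2), TaskAttemptEventType.TA_DONE).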
Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
The class MockJobs, method newTaskAttempt.
public static TaskAttempt newTaskAttempt(TaskId tid, int i) {
  final TaskAttemptId taid = Records.newRecord(TaskAttemptId.class);
  taid.setTaskId(tid);
  taid.setId(i);
  final TaskAttemptReport report = newTaskAttemptReport(taid);
  //a stub TaskAttempt: most answers are derived from the generated report
  return new TaskAttempt() {
    @Override
    public NodeId getNodeId() throws UnsupportedOperationException {
      throw new UnsupportedOperationException();
    }
    @Override
    public TaskAttemptId getID() {
      return taid;
    }
    @Override
    public TaskAttemptReport getReport() {
      return report;
    }
    @Override
    public long getLaunchTime() {
      return report.getStartTime();
    }
    @Override
    public long getFinishTime() {
      return report.getFinishTime();
    }
    @Override
    public int getShufflePort() {
      return ShuffleHandler.DEFAULT_SHUFFLE_PORT;
    }
    @Override
    public Counters getCounters() {
      if (report != null && report.getCounters() != null) {
        return new Counters(TypeConverter.fromYarn(report.getCounters()));
      }
      return null;
    }
    @Override
    public float getProgress() {
      return report.getProgress();
    }
    @Override
    public Phase getPhase() {
      return report.getPhase();
    }
    @Override
    public TaskAttemptState getState() {
      return report.getTaskAttemptState();
    }
    @Override
    public boolean isFinished() {
      switch (report.getTaskAttemptState()) {
        case SUCCEEDED:
        case FAILED:
        case KILLED:
          return true;
      }
      //all other states count as still in progress
      return false;
    }
    @Override
    public ContainerId getAssignedContainerID() {
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
          taid.getTaskId().getJobId().getAppId(), 0);
      ContainerId id = ContainerId.newContainerId(appAttemptId, 0);
      return id;
    }
    @Override
    public String getNodeHttpAddress() {
      return "localhost:8042";
    }
    @Override
    public List<String> getDiagnostics() {
      return Lists.newArrayList(report.getDiagnosticInfo());
    }
    @Override
    public String getAssignedContainerMgrAddress() {
      return "localhost:9998";
    }
    @Override
    public long getShuffleFinishTime() {
      return report.getShuffleFinishTime();
    }
    @Override
    public long getSortFinishTime() {
      return report.getSortFinishTime();
    }
    @Override
    public String getNodeRackName() {
      return "/default-rack";
    }
  };
}
Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
The class MockJobs, method newTaskAttempts.
public static Map<TaskAttemptId, TaskAttempt> newTaskAttempts(TaskId tid, int m) {
  Map<TaskAttemptId, TaskAttempt> map = Maps.newHashMap();
  for (int i = 0; i < m; ++i) {
    TaskAttempt ta = newTaskAttempt(tid, i);
    map.put(ta.getID(), ta);
  }
  return map;
}
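A minimal usage sketch, assuming a TaskId value named tid is already in scope (the names here are illustrative only):

//build four mock attempts keyed by their generated ids and inspect them
Map<TaskAttemptId, TaskAttempt> attempts = newTaskAttempts(tid, 4);
for (TaskAttempt ta : attempts.values()) {
  System.out.println(ta.getID() + " state=" + ta.getState());
}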
Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
The class AMWebServices, method updateJobTaskAttemptState.
@PUT
@Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/state")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public Response updateJobTaskAttemptState(JobTaskAttemptState targetState,
    @Context HttpServletRequest hsr, @PathParam("jobid") String jid,
    @PathParam("taskid") String tid, @PathParam("attemptid") String attId)
    throws IOException, InterruptedException {
  init();
  Job job = getJobFromJobIdString(jid, appCtx);
  checkAccess(job, hsr);
  String remoteUser = hsr.getRemoteUser();
  UserGroupInformation callerUGI = null;
  if (remoteUser != null) {
    callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
  }
  Task task = getTaskFromTaskIdString(tid, job);
  TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task);
  if (!ta.getState().toString().equals(targetState.getState())) {
    //allow users to kill the job task attempt
    if (targetState.getState().equals(TaskAttemptState.KILLED.toString())) {
      return killJobTaskAttempt(ta, callerUGI, hsr);
    }
    throw new BadRequestException("Only '"
        + TaskAttemptState.KILLED.toString()
        + "' is allowed as a target state.");
  }
  JobTaskAttemptState ret = new JobTaskAttemptState();
  ret.setState(ta.getState().toString());
  return Response.status(Status.OK).entity(ret).build();
}
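For context, a client could drive this endpoint with a plain HTTP PUT. The sketch below assumes the class-level @Path of AMWebServices is the usual /ws/v1/mapreduce prefix; the host, port, and id strings are placeholders, and KILLED is the only target state the handler accepts.

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class KillAttemptClient {
  public static void main(String[] args) throws Exception {
    //placeholders: the AM web address and the job/task/attempt id strings
    String am = "http://localhost:8080";
    String jobId = "job_1234567890123_0001";
    String taskId = "task_1234567890123_0001_m_000000";
    String attemptId = "attempt_1234567890123_0001_m_000000_0";
    URL url = new URL(am + "/ws/v1/mapreduce/jobs/" + jobId + "/tasks/"
        + taskId + "/attempts/" + attemptId + "/state");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setRequestProperty("Content-Type", "application/json");
    conn.setDoOutput(true);
    try (OutputStream os = conn.getOutputStream()) {
      //KILLED is the only target state updateJobTaskAttemptState accepts
      os.write("{\"state\":\"KILLED\"}".getBytes(StandardCharsets.UTF_8));
    }
    System.out.println("HTTP " + conn.getResponseCode());
  }
}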