Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
The class TestSpeculativeExecutionWithMRApp, method testSepculateSuccessfulWithUpdateEvents.
@Test
public void testSepculateSuccessfulWithUpdateEvents() throws Exception {
  Clock actualClock = SystemClock.getInstance();
  final ControlledClock clock = new ControlledClock(actualClock);
  clock.setTime(System.currentTimeMillis());
  MRApp app = new MRApp(NUM_MAPPERS, NUM_REDUCERS, false, "test", true, clock);
  Job job = app.submit(new Configuration(), true, true);
  app.waitForState(job, JobState.RUNNING);
  Map<TaskId, Task> tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", NUM_MAPPERS + NUM_REDUCERS, tasks.size());
  Iterator<Task> taskIter = tasks.values().iterator();
  while (taskIter.hasNext()) {
    app.waitForState(taskIter.next(), TaskState.RUNNING);
  }
  // Report 50% progress for every attempt so the speculator has baseline data
  clock.setTime(System.currentTimeMillis() + 1000);
  EventHandler appEventHandler = app.getContext().getEventHandler();
  for (Map.Entry<TaskId, Task> mapTask : tasks.entrySet()) {
    for (Map.Entry<TaskAttemptId, TaskAttempt> taskAttempt : mapTask.getValue().getAttempts().entrySet()) {
      TaskAttemptStatus status = createTaskAttemptStatus(taskAttempt.getKey(), (float) 0.5, TaskAttemptState.RUNNING);
      TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(), status);
      appEventHandler.handle(event);
    }
  }
  Task speculatedTask = null;
  int numTasksToFinish = NUM_MAPPERS + NUM_REDUCERS - 1;
  clock.setTime(System.currentTimeMillis() + 1000);
  for (Map.Entry<TaskId, Task> task : tasks.entrySet()) {
    for (Map.Entry<TaskAttemptId, TaskAttempt> taskAttempt : task.getValue().getAttempts().entrySet()) {
      if (numTasksToFinish > 0) {
        appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(), TaskAttemptEventType.TA_DONE));
        appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(), TaskAttemptEventType.TA_CONTAINER_COMPLETED));
        numTasksToFinish--;
        app.waitForState(taskAttempt.getValue(), TaskAttemptState.SUCCEEDED);
      } else {
        // The last task is left running and becomes the speculation candidate
        TaskAttemptStatus status = createTaskAttemptStatus(taskAttempt.getKey(), (float) 0.75, TaskAttemptState.RUNNING);
        speculatedTask = task.getValue();
        TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(), status);
        appEventHandler.handle(event);
      }
    }
  }
  clock.setTime(System.currentTimeMillis() + 15000);
  for (Map.Entry<TaskId, Task> task : tasks.entrySet()) {
    for (Map.Entry<TaskAttemptId, TaskAttempt> taskAttempt : task.getValue().getAttempts().entrySet()) {
      if (taskAttempt.getValue().getState() != TaskAttemptState.SUCCEEDED) {
        TaskAttemptStatus status = createTaskAttemptStatus(taskAttempt.getKey(), (float) 0.75, TaskAttemptState.RUNNING);
        TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(), status);
        appEventHandler.handle(event);
      }
    }
  }
  // Wait for the speculator to launch a second attempt for the straggler,
  // nudging the controlled clock forward until it does
  final Task speculatedTaskConst = speculatedTask;
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      if (speculatedTaskConst.getAttempts().size() != 2) {
        clock.setTime(System.currentTimeMillis() + 1000);
        return false;
      } else {
        return true;
      }
    }
  }, 1000, 60000);
  TaskAttempt[] ta = makeFirstAttemptWin(appEventHandler, speculatedTask);
  verifySpeculationMessage(app, ta);
  app.waitForState(Service.STATE.STOPPED);
}
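The helper createTaskAttemptStatus is not shown on this page. A minimal sketch of what it plausibly looks like, assuming TaskAttemptStatus is the struct-like reporting class from TaskAttemptImpl with public id, progress, and taskState fields (an assumption; only these three fields are exercised by the test above):

private TaskAttemptStatus createTaskAttemptStatus(TaskAttemptId id, float progress, TaskAttemptState state) {
  // Populate only the fields the speculator reads: the attempt id,
  // its reported progress, and its current state.
  TaskAttemptStatus status = new TaskAttemptStatus();
  status.id = id;
  status.progress = progress;
  status.taskState = state;
  return status;
}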
Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
The class TestSpeculativeExecutionWithMRApp, method makeFirstAttemptWin.
private static TaskAttempt[] makeFirstAttemptWin(EventHandler appEventHandler, Task speculatedTask) {
  // Finish the first attempt; the second (speculative) attempt will be killed
  Collection<TaskAttempt> attempts = speculatedTask.getAttempts().values();
  TaskAttempt[] ta = new TaskAttempt[attempts.size()];
  attempts.toArray(ta);
  appEventHandler.handle(new TaskAttemptEvent(ta[0].getID(), TaskAttemptEventType.TA_DONE));
  appEventHandler.handle(new TaskAttemptEvent(ta[0].getID(), TaskAttemptEventType.TA_CONTAINER_COMPLETED));
  return ta;
}
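verifySpeculationMessage, called at the end of the test, is also elided here. A plausible sketch, assuming the check only needs to confirm that the original attempt won the race (the exact assertions in the real test may differ):

private static void verifySpeculationMessage(MRApp app, TaskAttempt[] ta) throws Exception {
  // makeFirstAttemptWin drove the first attempt to completion, so it must
  // end up SUCCEEDED; the speculative attempt may not yet be killed by the
  // time the job finishes, so its state is not asserted.
  app.waitForState(ta[0], TaskAttemptState.SUCCEEDED);
}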
Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
The class TestHsWebServicesAttempts, method testTaskAttemptId.
@Test
public void testTaskAttemptId() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      for (TaskAttempt att : task.getAttempts().values()) {
        TaskAttemptId attemptid = att.getID();
        String attid = MRApps.toString(attemptid);
        ClientResponse response = r.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
            .path("attempts").path(attid).accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
            response.getType().toString());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("taskAttempt");
        verifyHsTaskAttempt(info, att, task.getType());
      }
    }
  }
}
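The chained .path() calls build the history-server REST endpoint /ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}. Outside the Jersey test harness, the same resource can be fetched with plain JDK classes. A sketch, assuming a JobHistoryServer on localhost:19888 (the default web port) and hypothetical id values:

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class FetchAttempt {
  public static void main(String[] args) throws Exception {
    // Hypothetical ids for illustration; substitute values from a real finished job
    String url = "http://localhost:19888/ws/v1/history/mapreduce/jobs/"
        + "job_1403597868022_0001/tasks/task_1403597868022_0001_m_000000"
        + "/attempts/attempt_1403597868022_0001_m_000000_0";
    HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
    // Ask for the same JSON representation the test verifies
    conn.setRequestProperty("Accept", "application/json");
    try (InputStream in = conn.getInputStream()) {
      System.out.println(new String(in.readAllBytes(), StandardCharsets.UTF_8));
    }
  }
}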
Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
The class TestHsWebServicesAttempts, method verifyTaskAttemptGeneric.
public void verifyTaskAttemptGeneric(TaskAttempt ta, TaskType ttype, String id,
    String state, String type, String rack, String nodeHttpAddress,
    String diagnostics, String assignedContainerId, long startTime,
    long finishTime, long elapsedTime, float progress) {
  TaskAttemptId attid = ta.getID();
  String attemptId = MRApps.toString(attid);
  WebServicesTestUtils.checkStringMatch("id", attemptId, id);
  WebServicesTestUtils.checkStringMatch("type", ttype.toString(), type);
  WebServicesTestUtils.checkStringMatch("state", ta.getState().toString(), state);
  WebServicesTestUtils.checkStringMatch("rack", ta.getNodeRackName(), rack);
  WebServicesTestUtils.checkStringMatch("nodeHttpAddress", ta.getNodeHttpAddress(), nodeHttpAddress);
  String expectDiag = "";
  List<String> diagnosticsList = ta.getDiagnostics();
  if (diagnosticsList != null && !diagnostics.isEmpty()) {
    StringBuffer b = new StringBuffer();
    for (String diag : diagnosticsList) {
      b.append(diag);
    }
    expectDiag = b.toString();
  }
  WebServicesTestUtils.checkStringMatch("diagnostics", expectDiag, diagnostics);
  WebServicesTestUtils.checkStringMatch("assignedContainerId", ta.getAssignedContainerID().toString(), assignedContainerId);
  assertEquals("startTime wrong", ta.getLaunchTime(), startTime);
  assertEquals("finishTime wrong", ta.getFinishTime(), finishTime);
  assertEquals("elapsedTime wrong", finishTime - startTime, elapsedTime);
  assertEquals("progress wrong", ta.getProgress() * 100, progress, 1e-3f);
}
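verifyHsTaskAttempt, which testTaskAttemptId above calls, is essentially a JSON-unpacking shim over verifyTaskAttemptGeneric. A sketch of the common (map-attempt) path, assuming the JSON field names match the parameters checked above; the real helper also verifies reduce-specific fields such as shuffle and merge times, which are omitted here:

public void verifyHsTaskAttempt(JSONObject info, TaskAttempt att, TaskType ttype) throws JSONException {
  // Pull each field out of the "taskAttempt" JSON object and compare it
  // against the live TaskAttempt via the generic verifier.
  verifyTaskAttemptGeneric(att, ttype,
      info.getString("id"), info.getString("state"), info.getString("type"),
      info.getString("rack"), info.getString("nodeHttpAddress"),
      info.getString("diagnostics"), info.getString("assignedContainerId"),
      info.getLong("startTime"), info.getLong("finishTime"),
      info.getLong("elapsedTime"), (float) info.getDouble("progress"));
}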
Use of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt in project hadoop by apache.
The class TestJobHistoryEvents, method verifyTask.
private void verifyTask(Task task) {
  Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED, task.getState());
  Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
  Assert.assertEquals("No of attempts not correct", 1, attempts.size());
  for (TaskAttempt attempt : attempts.values()) {
    verifyAttempt(attempt);
  }
}
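verifyAttempt is elided from this listing. A minimal sketch of the per-attempt checks, assuming a successful attempt only needs its state and container assignment verified (the real method may assert more):

private void verifyAttempt(TaskAttempt attempt) {
  Assert.assertEquals("TaskAttempt state not correct",
      TaskAttemptState.SUCCEEDED, attempt.getState());
  // A finished attempt must have been bound to a container
  Assert.assertNotNull("Container id not set", attempt.getAssignedContainerID());
}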