Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent in project hadoop by apache.
The class TestJobHistoryEntities, method testGetTaskAttemptCompletionEvent.
/**
 * Simple test of some methods of CompletedJob.
 * @throws Exception
 */
@Test(timeout = 30000)
public void testGetTaskAttemptCompletionEvent() throws Exception {
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob = new CompletedJob(conf, jobId, fullHistoryPath, loadTasks,
      "user", info, jobAclsManager);
  TaskCompletionEvent[] events =
      completedJob.getMapAttemptCompletionEvents(0, 1000);
  assertEquals(10, completedJob.getMapAttemptCompletionEvents(0, 10).length);
  // event ids must be non-decreasing
  int currentEventId = 0;
  for (TaskCompletionEvent taskAttemptCompletionEvent : events) {
    int eventId = taskAttemptCompletionEvent.getEventId();
    assertTrue(eventId >= currentEventId);
    currentEventId = eventId;
  }
  assertNull(completedJob.loadConfFile());
  // job name
  assertEquals("Sleep job", completedJob.getName());
  // queue name
  assertEquals("default", completedJob.getQueueName());
  // progress
  assertEquals(1.0, completedJob.getProgress(), 0.001);
  // 12 completion events in total
  assertEquals(12, completedJob.getTaskAttemptCompletionEvents(0, 1000).length);
  // the first 10 events
  assertEquals(10, completedJob.getTaskAttemptCompletionEvents(0, 10).length);
  // up to 10 events starting at event 5; only events 5..11 (7 of them) remain
  assertEquals(7, completedJob.getTaskAttemptCompletionEvents(5, 10).length);
  // a single empty diagnostic entry means the job ran without errors
  assertEquals(1, completedJob.getDiagnostics().size());
  assertEquals("", completedJob.getDiagnostics().get(0));
  assertEquals(0, completedJob.getJobACLs().size());
}
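The three windowed assertions follow one rule: getTaskAttemptCompletionEvents(fromEventId, maxEvents) returns at most maxEvents events starting at fromEventId. A minimal sketch of that arithmetic, assuming the events sit in an in-order List; the helper below is hypothetical, not CompletedJob's actual implementation:

// Hypothetical helper illustrating the (fromEventId, maxEvents) window
// exercised by the assertions above; not CompletedJob's actual code.
static TaskAttemptCompletionEvent[] window(
    List<TaskAttemptCompletionEvent> all, int fromEventId, int maxEvents) {
  if (fromEventId >= all.size()) {
    return new TaskAttemptCompletionEvent[0];
  }
  int to = Math.min(all.size(), fromEventId + maxEvents);
  // with 12 events: (0,1000) -> 12, (0,10) -> 10, (5,10) -> events 5..11 = 7
  return all.subList(fromEventId, to)
      .toArray(new TaskAttemptCompletionEvent[0]);
}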
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent in project hadoop by apache.
The class TaskImpl, method handleTaskAttemptCompletion.
// always called inside a transition, in turn inside the Write Lock
private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
    TaskAttemptCompletionEventStatus status) {
  TaskAttempt attempt = attempts.get(attemptId);
  // raise the completion event only if the container is assigned
  // to nextAttemptNumber
  if (attempt.getNodeHttpAddress() != null) {
    TaskAttemptCompletionEvent tce =
        recordFactory.newRecordInstance(TaskAttemptCompletionEvent.class);
    tce.setEventId(-1);
    String scheme = (encryptedShuffle) ? "https://" : "http://";
    tce.setMapOutputServerAddress(StringInterner.weakIntern(
        scheme + attempt.getNodeHttpAddress().split(":")[0] + ":"
            + attempt.getShufflePort()));
    tce.setStatus(status);
    tce.setAttemptId(attempt.getID());
    int runTime = 0;
    if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0) {
      runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
    }
    tce.setAttemptRunTime(runTime);
    // raise the event to the job so that it adds the completion event to its
    // data structures
    eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
  }
}
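The map output server address is just the scheme, the host part of the attempt's node HTTP address, and the shuffle port. An illustration of the string logic above, with made-up values:

// Illustration only; the host and ports are hypothetical values.
String nodeHttpAddress = "node1.example.com:8042"; // attempt.getNodeHttpAddress()
int shufflePort = 13562;                           // attempt.getShufflePort()
boolean encryptedShuffle = false;
String scheme = encryptedShuffle ? "https://" : "http://";
String mapOutputServerAddress =
    scheme + nodeHttpAddress.split(":")[0] + ":" + shufflePort;
// mapOutputServerAddress == "http://node1.example.com:13562"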
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent in project hadoop by apache.
The class MockJobs, method newJob.
public static Job newJob(ApplicationId appID, int i, int n, int m,
    Path confFile, boolean hasFailedTasks) {
  final JobId id = newJobID(appID, i);
  final String name = newJobName();
  final JobReport report = newJobReport(id);
  final Map<TaskId, Task> tasks = newTasks(id, n, m, hasFailedTasks);
  final TaskCount taskCount = getTaskCount(tasks.values());
  final Counters counters = getCounters(tasks.values());
  final Path configFile = confFile;
  Map<JobACL, AccessControlList> tmpJobACLs =
      new HashMap<JobACL, AccessControlList>();
  final Configuration conf = new Configuration();
  conf.set(JobACL.VIEW_JOB.getAclName(), "testuser");
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  JobACLsManager aclsManager = new JobACLsManager(conf);
  tmpJobACLs = aclsManager.constructJobACLs(conf);
  final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
  return new Job() {

    @Override
    public JobId getID() {
      return id;
    }

    @Override
    public String getName() {
      return name;
    }

    @Override
    public JobState getState() {
      return report.getJobState();
    }

    @Override
    public JobReport getReport() {
      return report;
    }

    @Override
    public float getProgress() {
      return 0;
    }

    @Override
    public Counters getAllCounters() {
      return counters;
    }

    @Override
    public Map<TaskId, Task> getTasks() {
      return tasks;
    }

    @Override
    public Task getTask(TaskId taskID) {
      return tasks.get(taskID);
    }

    @Override
    public int getTotalMaps() {
      return taskCount.maps;
    }

    @Override
    public int getTotalReduces() {
      return taskCount.reduces;
    }

    @Override
    public int getCompletedMaps() {
      return taskCount.completedMaps;
    }

    @Override
    public int getCompletedReduces() {
      return taskCount.completedReduces;
    }

    @Override
    public boolean isUber() {
      return false;
    }

    @Override
    public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
        int fromEventId, int maxEvents) {
      return null;
    }

    @Override
    public TaskCompletionEvent[] getMapAttemptCompletionEvents(
        int startIndex, int maxEvents) {
      return null;
    }

    @Override
    public Map<TaskId, Task> getTasks(TaskType taskType) {
      throw new UnsupportedOperationException("Not supported yet.");
    }

    @Override
    public List<String> getDiagnostics() {
      return Collections.<String>emptyList();
    }

    @Override
    public boolean checkAccess(UserGroupInformation callerUGI,
        JobACL jobOperation) {
      return true;
    }

    @Override
    public String getUserName() {
      return "mock";
    }

    @Override
    public String getQueueName() {
      return "mockqueue";
    }

    @Override
    public Path getConfFile() {
      return configFile;
    }

    @Override
    public Map<JobACL, AccessControlList> getJobACLs() {
      return jobACLs;
    }

    @Override
    public List<AMInfo> getAMInfos() {
      List<AMInfo> amInfoList = new LinkedList<AMInfo>();
      amInfoList.add(createAMInfo(1));
      amInfoList.add(createAMInfo(2));
      return amInfoList;
    }

    @Override
    public Configuration loadConfFile() throws IOException {
      FileContext fc = FileContext.getFileContext(configFile.toUri(), conf);
      Configuration jobConf = new Configuration(false);
      jobConf.addResource(fc.open(configFile), configFile.toString());
      return jobConf;
    }

    @Override
    public void setQueueName(String queueName) {
      // do nothing
    }

    @Override
    public void setJobPriority(Priority priority) {
      // do nothing
    }
  };
}
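A minimal usage sketch for this factory, assuming the caller has a YARN ApplicationId and a config path handy; the values are hypothetical, and n and m are presumably the map and reduce task counts passed through to newTasks:

// Hypothetical usage; the appId, path, and counts are made-up test values.
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
Path confFile = new Path("target/test-dir/job.xml");
Job mockJob = MockJobs.newJob(appId, 0, 4, 2, confFile, false);
// the anonymous Job returns canned values for most accessors:
assertEquals("mockqueue", mockJob.getQueueName());
assertNull(mockJob.getTaskAttemptCompletionEvents(0, 100));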
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent in project hadoop by apache.
The class TestTaskAttemptListenerImpl, method testGetMapCompletionEvents.
@Test(timeout = 10000)
public void testGetMapCompletionEvents() throws IOException {
  TaskAttemptCompletionEvent[] empty = {};
  TaskAttemptCompletionEvent[] taskEvents = {
      createTce(0, true, TaskAttemptCompletionEventStatus.OBSOLETE),
      createTce(1, false, TaskAttemptCompletionEventStatus.FAILED),
      createTce(2, true, TaskAttemptCompletionEventStatus.SUCCEEDED),
      createTce(3, false, TaskAttemptCompletionEventStatus.FAILED) };
  TaskAttemptCompletionEvent[] mapEvents = { taskEvents[0], taskEvents[2] };
  Job mockJob = mock(Job.class);
  when(mockJob.getTaskAttemptCompletionEvents(0, 100))
      .thenReturn(taskEvents);
  when(mockJob.getTaskAttemptCompletionEvents(0, 2))
      .thenReturn(Arrays.copyOfRange(taskEvents, 0, 2));
  when(mockJob.getTaskAttemptCompletionEvents(2, 100))
      .thenReturn(Arrays.copyOfRange(taskEvents, 2, 4));
  when(mockJob.getMapAttemptCompletionEvents(0, 100))
      .thenReturn(TypeConverter.fromYarn(mapEvents));
  when(mockJob.getMapAttemptCompletionEvents(0, 2))
      .thenReturn(TypeConverter.fromYarn(mapEvents));
  when(mockJob.getMapAttemptCompletionEvents(2, 100))
      .thenReturn(TypeConverter.fromYarn(empty));
  AppContext appCtx = mock(AppContext.class);
  when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);
  JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
  RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
  final TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
  Dispatcher dispatcher = mock(Dispatcher.class);
  @SuppressWarnings("unchecked")
  EventHandler<Event> ea = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(ea);
  when(appCtx.getEventHandler()).thenReturn(ea);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(appCtx);
  TaskAttemptListenerImpl listener =
      new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, policy) {

        @Override
        protected void registerHeartbeatHandler(Configuration conf) {
          taskHeartbeatHandler = hbHandler;
        }
      };
  Configuration conf = new Configuration();
  listener.init(conf);
  listener.start();
  JobID jid = new JobID("12345", 1);
  TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 0);
  MapTaskCompletionEventsUpdate update =
      listener.getMapCompletionEvents(jid, 0, 100, tid);
  assertEquals(2, update.events.length);
  update = listener.getMapCompletionEvents(jid, 0, 2, tid);
  assertEquals(2, update.events.length);
  update = listener.getMapCompletionEvents(jid, 2, 100, tid);
  assertEquals(0, update.events.length);
}
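The createTce helper is not shown on this page. A plausible reconstruction, built from the record setters that appear in the other examples here; the id-building calls and field values are assumptions about the real helper in TestTaskAttemptListenerImpl:

// Plausible sketch of createTce; the real helper may fabricate ids differently.
private static TaskAttemptCompletionEvent createTce(int eventId,
    boolean isMap, TaskAttemptCompletionEventStatus status) {
  JobId jid = MRBuilderUtils.newJobId(12345, 1, 1);
  // v2 TaskType here, not the org.apache.hadoop.mapreduce.TaskType used above
  TaskId tid = MRBuilderUtils.newTaskId(jid, 0,
      isMap ? org.apache.hadoop.mapreduce.v2.api.records.TaskType.MAP
            : org.apache.hadoop.mapreduce.v2.api.records.TaskType.REDUCE);
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(tid, 0);
  TaskAttemptCompletionEvent tce = RecordFactoryProvider.getRecordFactory(null)
      .newRecordInstance(TaskAttemptCompletionEvent.class);
  tce.setEventId(eventId);
  tce.setAttemptId(attemptId);
  tce.setStatus(status);
  return tce;
}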
Use of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent in project hadoop by apache.
The class TestFetchFailure, method testFetchFailureMultipleReduces.
@Test
public void testFetchFailureMultipleReduces() throws Exception {
  MRApp app = new MRApp(1, 3, false, this.getClass().getName(), true);
  Configuration conf = new Configuration();
  // map -> reduce -> fetch-failure -> map retry is incompatible with the
  // sequential, single-task-attempt approach in the uber-AM, so disable it:
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  // all maps would be running
  Assert.assertEquals("Num tasks not correct", 4, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task mapTask = it.next();
  Task reduceTask = it.next();
  Task reduceTask2 = it.next();
  Task reduceTask3 = it.next();
  // wait for the map task to move to RUNNING
  app.waitForState(mapTask, TaskState.RUNNING);
  TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
  app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
  // send the done signal to the map attempt
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(mapAttempt1.getID(), TaskAttemptEventType.TA_DONE));
  // wait for map success
  app.waitForState(mapTask, TaskState.SUCCEEDED);
  TaskAttemptCompletionEvent[] events =
      job.getTaskAttemptCompletionEvents(0, 100);
  Assert.assertEquals("Num completion events not correct", 1, events.length);
  Assert.assertEquals("Event status not correct",
      TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
  // wait for the reduces to start running
  app.waitForState(reduceTask, TaskState.RUNNING);
  app.waitForState(reduceTask2, TaskState.RUNNING);
  app.waitForState(reduceTask3, TaskState.RUNNING);
  TaskAttempt reduceAttempt =
      reduceTask.getAttempts().values().iterator().next();
  app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
  updateStatus(app, reduceAttempt, Phase.SHUFFLE);
  TaskAttempt reduceAttempt2 =
      reduceTask2.getAttempts().values().iterator().next();
  app.waitForState(reduceAttempt2, TaskAttemptState.RUNNING);
  updateStatus(app, reduceAttempt2, Phase.SHUFFLE);
  TaskAttempt reduceAttempt3 =
      reduceTask3.getAttempts().values().iterator().next();
  app.waitForState(reduceAttempt3, TaskAttemptState.RUNNING);
  updateStatus(app, reduceAttempt3, Phase.SHUFFLE);
  // send 2 fetch failures from reducers to prepare for map re-execution
  sendFetchFailure(app, reduceAttempt, mapAttempt1, "host1");
  sendFetchFailure(app, reduceAttempt2, mapAttempt1, "host2");
  // we should not re-launch the map task yet
  assertEquals(TaskState.SUCCEEDED, mapTask.getState());
  updateStatus(app, reduceAttempt2, Phase.REDUCE);
  updateStatus(app, reduceAttempt3, Phase.REDUCE);
  // send the 3rd fetch failure from a reducer to trigger map re-execution
  sendFetchFailure(app, reduceAttempt3, mapAttempt1, "host3");
  // wait for the map task state to move back to RUNNING
  app.waitForState(mapTask, TaskState.RUNNING);
  // the first map attempt must have become FAILED
  Assert.assertEquals("Map TaskAttempt state not correct",
      TaskAttemptState.FAILED, mapAttempt1.getState());
  Assert.assertEquals(mapAttempt1.getDiagnostics().get(0),
      "Too many fetch failures. Failing the attempt. "
          + "Last failure reported by "
          + reduceAttempt3.getID().toString() + " from host host3");
  Assert.assertEquals("Num attempts in Map Task not correct", 2,
      mapTask.getAttempts().size());
  Iterator<TaskAttempt> atIt = mapTask.getAttempts().values().iterator();
  atIt.next();
  TaskAttempt mapAttempt2 = atIt.next();
  app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
  // send the done signal to the second map attempt
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(mapAttempt2.getID(), TaskAttemptEventType.TA_DONE));
  // wait for map success
  app.waitForState(mapTask, TaskState.SUCCEEDED);
  // send done to each reduce attempt
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(reduceAttempt.getID(), TaskAttemptEventType.TA_DONE));
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(reduceAttempt2.getID(), TaskAttemptEventType.TA_DONE));
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(reduceAttempt3.getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(job, JobState.SUCCEEDED);
  // the previous completion event for mapAttempt1 now becomes obsolete
  Assert.assertEquals("Event status not correct",
      TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
  events = job.getTaskAttemptCompletionEvents(0, 100);
  Assert.assertEquals("Num completion events not correct", 6, events.length);
  Assert.assertEquals("Event map attempt id not correct",
      mapAttempt1.getID(), events[0].getAttemptId());
  Assert.assertEquals("Event map attempt id not correct",
      mapAttempt1.getID(), events[1].getAttemptId());
  Assert.assertEquals("Event map attempt id not correct",
      mapAttempt2.getID(), events[2].getAttemptId());
  Assert.assertEquals("Event reduce attempt id not correct",
      reduceAttempt.getID(), events[3].getAttemptId());
  Assert.assertEquals("Event status not correct for map attempt1",
      TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
  Assert.assertEquals("Event status not correct for map attempt1",
      TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus());
  Assert.assertEquals("Event status not correct for map attempt2",
      TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus());
  Assert.assertEquals("Event status not correct for reduce attempt1",
      TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
  TaskCompletionEvent[] mapEvents = job.getMapAttemptCompletionEvents(0, 2);
  TaskCompletionEvent[] convertedEvents = TypeConverter.fromYarn(events);
  Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
  Assert.assertArrayEquals("Unexpected map events",
      Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
  mapEvents = job.getMapAttemptCompletionEvents(2, 200);
  Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
  Assert.assertEquals("Unexpected map event", convertedEvents[2], mapEvents[0]);
}
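The test drives the AM entirely through events: TA_DONE finishes an attempt, and three fetch-failure reports against mapAttempt1 from three distinct reducer attempts cross the failure threshold, fail the attempt, and obsolete its completion event. The repeated done-signal pattern could be factored into a small helper like this (hypothetical; the test above inlines it):

// Hypothetical helper; equivalent to the inlined TA_DONE sends above.
private static void sendDone(MRApp app, TaskAttempt attempt) {
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_DONE));
}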