Use of org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory in project hadoop by apache.
The class TestRecovery, method testCrashed.
/**
 * AM with 2 maps and 1 reduce. For the 1st map, one attempt fails, one
 * attempt completely disappears because of a failed launch, one attempt gets
 * killed and one attempt succeeds. The AM crashes after the first task
 * finishes, then recovers completely and succeeds in the second generation.
 *
 * @throws Exception
 */
@Test
public void testCrashed() throws Exception {
  int runCount = 0;
  long am1StartTimeEst = System.currentTimeMillis();
  MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), true, ++runCount);
  Configuration conf = new Configuration();
  conf.setBoolean("mapred.mapper.new-api", true);
  conf.setBoolean("mapred.reducer.new-api", true);
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  long jobStartTime = job.getReport().getStartTime();
  // all maps would be running
  Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task mapTask1 = it.next();
  Task mapTask2 = it.next();
  Task reduceTask = it.next();
  // all maps must be running
  app.waitForState(mapTask1, TaskState.RUNNING);
  app.waitForState(mapTask2, TaskState.RUNNING);
  TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
  TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
  // before sending the TA_DONE event, make sure the attempt has come to the
  // RUNNING state
  app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
  app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
  app.waitForState(reduceTask, TaskState.RUNNING);
  /////////// Play some games with the TaskAttempts of the first task //////
  // send the fail signal to the 1st map task attempt
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(), TaskAttemptEventType.TA_FAILMSG));
  app.waitForState(task1Attempt1, TaskAttemptState.FAILED);
  int timeOut = 0;
  while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
    Thread.sleep(2000);
    LOG.info("Waiting for next attempt to start");
  }
  Assert.assertEquals(2, mapTask1.getAttempts().size());
  Iterator<TaskAttempt> itr = mapTask1.getAttempts().values().iterator();
  itr.next();
  TaskAttempt task1Attempt2 = itr.next();
  // wait for the second task attempt to be assigned
  waitForContainerAssignment(task1Attempt2);
  // This attempt will automatically fail because of the way ContainerLauncher
  // is set up.
  // This attempt 'disappears' from JobHistory and so causes MAPREDUCE-3846.
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt2.getID(), TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
  app.waitForState(task1Attempt2, TaskAttemptState.FAILED);
  timeOut = 0;
  while (mapTask1.getAttempts().size() != 3 && timeOut++ < 10) {
    Thread.sleep(2000);
    LOG.info("Waiting for next attempt to start");
  }
  Assert.assertEquals(3, mapTask1.getAttempts().size());
  itr = mapTask1.getAttempts().values().iterator();
  itr.next();
  itr.next();
  TaskAttempt task1Attempt3 = itr.next();
  app.waitForState(task1Attempt3, TaskAttemptState.RUNNING);
  // send the kill signal to the 1st map's 3rd attempt
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt3.getID(), TaskAttemptEventType.TA_KILL));
  app.waitForState(task1Attempt3, TaskAttemptState.KILLED);
  timeOut = 0;
  while (mapTask1.getAttempts().size() != 4 && timeOut++ < 10) {
    Thread.sleep(2000);
    LOG.info("Waiting for next attempt to start");
  }
  Assert.assertEquals(4, mapTask1.getAttempts().size());
  itr = mapTask1.getAttempts().values().iterator();
  itr.next();
  itr.next();
  itr.next();
  TaskAttempt task1Attempt4 = itr.next();
  app.waitForState(task1Attempt4, TaskAttemptState.RUNNING);
  // send the done signal to the 1st map's 4th attempt
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt4.getID(), TaskAttemptEventType.TA_DONE));
  /////////// End of games with the TaskAttempts of the first task //////
  // wait for the first map task to complete
  app.waitForState(mapTask1, TaskState.SUCCEEDED);
  long task1StartTime = mapTask1.getReport().getStartTime();
  long task1FinishTime = mapTask1.getReport().getFinishTime();
  // stop the app
  app.stop();
  // rerun
  // in the rerun, the 1st map will be recovered from the previous run
  long am2StartTimeEst = System.currentTimeMillis();
  app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
  conf = new Configuration();
  conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
  conf.setBoolean("mapred.mapper.new-api", true);
  conf.setBoolean("mapred.reducer.new-api", true);
  conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  // all maps would be running
  Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
  it = job.getTasks().values().iterator();
  mapTask1 = it.next();
  mapTask2 = it.next();
  reduceTask = it.next();
  // the first map will be recovered, no need to send done
  app.waitForState(mapTask1, TaskState.SUCCEEDED);
  app.waitForState(mapTask2, TaskState.RUNNING);
  task2Attempt = mapTask2.getAttempts().values().iterator().next();
  // before sending the TA_DONE event, make sure the attempt has come to the
  // RUNNING state
  app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
  // send the done signal to the 2nd map task
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(), TaskAttemptEventType.TA_DONE));
  // wait for it to complete
  app.waitForState(mapTask2, TaskState.SUCCEEDED);
  // wait for the reduce to be running before sending done
  app.waitForState(reduceTask, TaskState.RUNNING);
  // send the done signal to the reduce
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(job, JobState.SUCCEEDED);
  app.verifyCompleted();
  Assert.assertEquals("Job Start time not correct", jobStartTime, job.getReport().getStartTime());
  Assert.assertEquals("Task Start time not correct", task1StartTime, mapTask1.getReport().getStartTime());
  Assert.assertEquals("Task Finish time not correct", task1FinishTime, mapTask1.getReport().getFinishTime());
  Assert.assertEquals(2, job.getAMInfos().size());
  int attemptNum = 1;
  // Verify AMInfo
  for (AMInfo amInfo : job.getAMInfos()) {
    Assert.assertEquals(attemptNum++, amInfo.getAppAttemptId().getAttemptId());
    Assert.assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId().getApplicationAttemptId());
    Assert.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost());
    Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
    Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
  }
  long am1StartTimeReal = job.getAMInfos().get(0).getStartTime();
  long am2StartTimeReal = job.getAMInfos().get(1).getStartTime();
  Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst);
  Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst && am2StartTimeReal <= System.currentTimeMillis());
  // TODO Add verification of additional data from jobHistory - whatever was
  // available in the failed attempt should be available here
}
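The waitForContainerAssignment helper called above is defined elsewhere in TestRecovery and is not shown on this page. A minimal polling sketch of what such a helper could look like, assuming only the public TaskAttempt.getAssignedContainerID() accessor; the timeout and structure below are illustrative, not the upstream implementation:

private static void waitForContainerAssignment(TaskAttempt taskAttempt)
    throws Exception {
  // Poll until the attempt has a container, giving up after ~10 seconds.
  int msecToSleep = 10 * 1000;
  while (taskAttempt.getAssignedContainerID() == null && msecToSleep > 0) {
    Thread.sleep(10);
    msecToSleep -= 10;
  }
  Assert.assertNotNull("Container never assigned to attempt",
      taskAttempt.getAssignedContainerID());
}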
Use of org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory in project hadoop by apache.
The class TestRecovery, method testOutputRecovery.
@Test
public void testOutputRecovery() throws Exception {
  int runCount = 0;
  MRApp app = new MRAppWithHistory(1, 2, false, this.getClass().getName(), true, ++runCount);
  Configuration conf = new Configuration();
  conf.setBoolean("mapred.mapper.new-api", true);
  conf.setBoolean("mapred.reducer.new-api", true);
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task mapTask1 = it.next();
  Task reduceTask1 = it.next();
  // the map must be running
  app.waitForState(mapTask1, TaskState.RUNNING);
  TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
  // before sending the TA_DONE event, make sure the attempt has come to the
  // RUNNING state
  app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
  // send the done signal to the map
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(), TaskAttemptEventType.TA_DONE));
  // wait for the map task to complete
  app.waitForState(mapTask1, TaskState.SUCCEEDED);
  // Verify the shuffle-port
  Assert.assertEquals(5467, task1Attempt1.getShufflePort());
  app.waitForState(reduceTask1, TaskState.RUNNING);
  TaskAttempt reduce1Attempt1 = reduceTask1.getAttempts().values().iterator().next();
  // write output corresponding to reduce1
  writeOutput(reduce1Attempt1, conf);
  // send the done signal to the 1st reduce
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce1Attempt1.getID(), TaskAttemptEventType.TA_DONE));
  // wait for the first reduce task to complete
  app.waitForState(reduceTask1, TaskState.SUCCEEDED);
  // stop the app before the job completes
  app.stop();
  // rerun
  // in the rerun, the map will be recovered from the previous run
  app = new MRAppWithHistory(1, 2, false, this.getClass().getName(), false, ++runCount);
  conf = new Configuration();
  conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
  conf.setBoolean("mapred.mapper.new-api", true);
  conf.setBoolean("mapred.reducer.new-api", true);
  conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
  it = job.getTasks().values().iterator();
  mapTask1 = it.next();
  reduceTask1 = it.next();
  Task reduceTask2 = it.next();
  // the map will be recovered, no need to send done
  app.waitForState(mapTask1, TaskState.SUCCEEDED);
  // Verify the shuffle-port after recovery
  task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
  Assert.assertEquals(5467, task1Attempt1.getShufflePort());
  // the first reduce will be recovered, no need to send done
  app.waitForState(reduceTask1, TaskState.SUCCEEDED);
  app.waitForState(reduceTask2, TaskState.RUNNING);
  TaskAttempt reduce2Attempt = reduceTask2.getAttempts().values().iterator().next();
  // before sending the TA_DONE event, make sure the attempt has come to the
  // RUNNING state
  app.waitForState(reduce2Attempt, TaskAttemptState.RUNNING);
  // send the done signal to the 2nd reduce task
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce2Attempt.getID(), TaskAttemptEventType.TA_DONE));
  // wait for it to complete
  app.waitForState(reduceTask2, TaskState.SUCCEEDED);
  app.waitForState(job, JobState.SUCCEEDED);
  app.verifyCompleted();
  validateOutput();
}
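The writeOutput and validateOutput helpers are part of TestRecovery but not shown here. A hedged sketch of what writeOutput does, assuming the new-API TextOutputFormat and a TaskAttemptContextImpl built from the attempt's ID (the record keys and values are illustrative): it writes a few records and commits the task, so the committed files are exactly what recovery must preserve across the AM restart.

private void writeOutput(TaskAttempt attempt, Configuration conf) throws Exception {
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf,
      TypeConverter.fromYarn(attempt.getID()));
  TextOutputFormat<Text, Text> outputFormat = new TextOutputFormat<Text, Text>();
  RecordWriter<Text, Text> writer = outputFormat.getRecordWriter(tContext);
  try {
    // illustrative records; the real helper's payload may differ
    writer.write(new Text("key1"), new Text("val1"));
    writer.write(new Text("key2"), new Text("val2"));
  } finally {
    writer.close(tContext);
  }
  // commit the task output so the recovered AM can find it
  org.apache.hadoop.mapreduce.OutputCommitter committer =
      outputFormat.getOutputCommitter(tContext);
  if (committer.needsTaskCommit(tContext)) {
    committer.commitTask(tContext);
  }
}

validateOutput() then asserts after the rerun that the committed output matches what was written before the crash.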
Use of org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory in project hadoop by apache.
The class TestRecovery, method testRecoveryFailsUsingCustomOutputCommitter.
/**
 * This test case primarily verifies that recovery is controlled through a
 * config property. In this case, recovery is turned OFF. AM with 3 maps and
 * 0 reduces. The AM crashes after the first two tasks finish; recovery fails,
 * so the job has to rerun fully in the second generation, where it succeeds.
 *
 * @throws Exception
 */
@Test
public void testRecoveryFailsUsingCustomOutputCommitter() throws Exception {
  int runCount = 0;
  MRApp app = new MRAppWithHistory(3, 0, false, this.getClass().getName(), true, ++runCount);
  Configuration conf = new Configuration();
  conf.setClass("mapred.output.committer.class", TestFileOutputCommitter.class, org.apache.hadoop.mapred.OutputCommitter.class);
  conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
  conf.setBoolean("want.am.recovery", false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  // all maps would be running
  Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task mapTask1 = it.next();
  Task mapTask2 = it.next();
  Task mapTask3 = it.next();
  // all maps must be running
  app.waitForState(mapTask1, TaskState.RUNNING);
  app.waitForState(mapTask2, TaskState.RUNNING);
  app.waitForState(mapTask3, TaskState.RUNNING);
  TaskAttempt task1Attempt = mapTask1.getAttempts().values().iterator().next();
  TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
  TaskAttempt task3Attempt = mapTask3.getAttempts().values().iterator().next();
  // before sending the TA_DONE event, make sure the attempts have come to the
  // RUNNING state
  app.waitForState(task1Attempt, TaskAttemptState.RUNNING);
  app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
  app.waitForState(task3Attempt, TaskAttemptState.RUNNING);
  // send the done signal to the first two maps
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(), TaskAttemptEventType.TA_DONE));
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(), TaskAttemptEventType.TA_DONE));
  // wait for the first two map tasks to complete
  app.waitForState(mapTask1, TaskState.SUCCEEDED);
  app.waitForState(mapTask2, TaskState.SUCCEEDED);
  // stop the app
  app.stop();
  // rerun
  // since recovery is disabled, in the rerun the first two maps will NOT be
  // recovered from the previous run
  app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
  conf = new Configuration();
  conf.setClass("mapred.output.committer.class", TestFileOutputCommitter.class, org.apache.hadoop.mapred.OutputCommitter.class);
  conf.setBoolean("want.am.recovery", false);
  conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
  // Set num-reduces explicitly in conf as recovery logic depends on it.
  conf.setInt(MRJobConfig.NUM_REDUCES, 0);
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
  it = job.getTasks().values().iterator();
  mapTask1 = it.next();
  mapTask2 = it.next();
  mapTask3 = it.next();
  // the first two maps will NOT be recovered, need to send done from them
  app.waitForState(mapTask1, TaskState.RUNNING);
  app.waitForState(mapTask2, TaskState.RUNNING);
  app.waitForState(mapTask3, TaskState.RUNNING);
  task3Attempt = mapTask3.getAttempts().values().iterator().next();
  // before sending the TA_DONE event, make sure the attempt has come to the
  // RUNNING state
  app.waitForState(task3Attempt, TaskAttemptState.RUNNING);
  // send the done signal to all 3 map tasks
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask1.getAttempts().values().iterator().next().getID(), TaskAttemptEventType.TA_DONE));
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(), TaskAttemptEventType.TA_DONE));
  app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask3.getAttempts().values().iterator().next().getID(), TaskAttemptEventType.TA_DONE));
  // wait for completion
  app.waitForState(mapTask3, TaskState.SUCCEEDED);
  app.waitForState(job, JobState.SUCCEEDED);
  app.verifyCompleted();
}
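TestFileOutputCommitter, plugged in via mapred.output.committer.class above, is an inner class of TestRecovery that is not shown on this page. The essential idea, sketched under the assumption that the Hadoop 2.x isRecoverySupported(JobContext) overload is available on the old-API committer, is a FileOutputCommitter whose recovery support is toggled by the "want.am.recovery" flag the test sets:

public static class TestFileOutputCommitter
    extends org.apache.hadoop.mapred.FileOutputCommitter {
  @Override
  public boolean isRecoverySupported(org.apache.hadoop.mapred.JobContext jobContext) {
    // Recovery is supported only when the test asks for it via conf.
    return jobContext != null && jobContext.getConfiguration() != null
        && jobContext.getConfiguration().getBoolean("want.am.recovery", false);
  }
}

With "want.am.recovery" set to false, the restarted AM finds a committer that refuses recovery, forcing the full rerun that the second half of the test asserts.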
Use of org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory in project hadoop by apache.
The class TestJobHistoryParsing, method testScanningOldDirs.
@Test(timeout = 50000)
public void testScanningOldDirs() throws Exception {
  LOG.info("STARTING testScanningOldDirs");
  try {
    Configuration conf = new Configuration();
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app = new MRAppWithHistory(1, 1, true, this.getClass().getName(), true);
    app.submit(conf);
    Job job = app.getContext().getAllJobs().values().iterator().next();
    JobId jobId = job.getID();
    LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
    app.waitForState(job, JobState.SUCCEEDED);
    // make sure all events are flushed
    app.waitForState(Service.STATE.STOPPED);
    HistoryFileManagerForTest hfm = new HistoryFileManagerForTest();
    hfm.init(conf);
    HistoryFileInfo fileInfo = hfm.getFileInfo(jobId);
    Assert.assertNotNull("Unable to locate job history", fileInfo);
    // force the manager to "forget" the job
    hfm.deleteJobFromJobListCache(fileInfo);
    final int msecPerSleep = 10;
    int msecToSleep = 10 * 1000;
    while (fileInfo.isMovePending() && msecToSleep > 0) {
      Assert.assertTrue(!fileInfo.didMoveFail());
      msecToSleep -= msecPerSleep;
      Thread.sleep(msecPerSleep);
    }
    Assert.assertTrue("Timeout waiting for history move", msecToSleep > 0);
    fileInfo = hfm.getFileInfo(jobId);
    hfm.stop();
    Assert.assertNotNull("Unable to locate old job history", fileInfo);
    Assert.assertTrue("HistoryFileManager not shutdown properly", hfm.moveToDoneExecutor.isTerminated());
  } finally {
    LOG.info("FINISHED testScanningOldDirs");
  }
}
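HistoryFileManagerForTest is a small test subclass whose only job is to expose cache eviction to the test. A sketch, assuming HistoryFileManager's protected jobListCache field and its delete(HistoryFileInfo) method:

static class HistoryFileManagerForTest extends HistoryFileManager {
  // Evict the job from the in-memory job list cache so the next
  // getFileInfo() call has to rescan the history directories on disk.
  void deleteJobFromJobListCache(HistoryFileInfo fileInfo) {
    jobListCache.delete(fileInfo);
  }
}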
Use of org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory in project hadoop by apache.
The class TestJobHistoryParsing, method checkHistoryParsing.
private void checkHistoryParsing(final int numMaps, final int numReduces, final int numSuccessfulMaps) throws Exception {
  Configuration conf = new Configuration();
  conf.set(MRJobConfig.USER_NAME, System.getProperty("user.name"));
  long amStartTimeEst = System.currentTimeMillis();
  conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class);
  RackResolver.init(conf);
  MRApp app = new MRAppWithHistory(numMaps, numReduces, true, this.getClass().getName(), true);
  app.submit(conf);
  Job job = app.getContext().getAllJobs().values().iterator().next();
  JobId jobId = job.getID();
  LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
  app.waitForState(job, JobState.SUCCEEDED);
  // make sure all events are flushed
  app.waitForState(Service.STATE.STOPPED);
  String jobhistoryDir = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
  FileContext fc = null;
  try {
    fc = FileContext.getFileContext(conf);
  } catch (IOException ioe) {
    LOG.info("Can not get FileContext", ioe);
    throw (new Exception("Can not get File Context"));
  }
  if (numMaps == numSuccessfulMaps) {
    String summaryFileName = JobHistoryUtils.getIntermediateSummaryFileName(jobId);
    Path summaryFile = new Path(jobhistoryDir, summaryFileName);
    String jobSummaryString = getJobSummary(fc, summaryFile);
    Assert.assertNotNull(jobSummaryString);
    Assert.assertTrue(jobSummaryString.contains("resourcesPerMap=100"));
    Assert.assertTrue(jobSummaryString.contains("resourcesPerReduce=100"));
    Map<String, String> jobSummaryElements = new HashMap<String, String>();
    StringTokenizer strToken = new StringTokenizer(jobSummaryString, ",");
    while (strToken.hasMoreTokens()) {
      String keypair = strToken.nextToken();
      jobSummaryElements.put(keypair.split("=")[0], keypair.split("=")[1]);
    }
    Assert.assertEquals("JobId does not match", jobId.toString(), jobSummaryElements.get("jobId"));
    Assert.assertEquals("JobName does not match", "test", jobSummaryElements.get("jobName"));
    Assert.assertTrue("submitTime should not be 0", Long.parseLong(jobSummaryElements.get("submitTime")) != 0);
    Assert.assertTrue("launchTime should not be 0", Long.parseLong(jobSummaryElements.get("launchTime")) != 0);
    Assert.assertTrue("firstMapTaskLaunchTime should not be 0", Long.parseLong(jobSummaryElements.get("firstMapTaskLaunchTime")) != 0);
    Assert.assertTrue("firstReduceTaskLaunchTime should not be 0", Long.parseLong(jobSummaryElements.get("firstReduceTaskLaunchTime")) != 0);
    Assert.assertTrue("finishTime should not be 0", Long.parseLong(jobSummaryElements.get("finishTime")) != 0);
    Assert.assertEquals("Mismatch in num map slots", numSuccessfulMaps, Integer.parseInt(jobSummaryElements.get("numMaps")));
    Assert.assertEquals("Mismatch in num reduce slots", numReduces, Integer.parseInt(jobSummaryElements.get("numReduces")));
    Assert.assertEquals("User does not match", System.getProperty("user.name"), jobSummaryElements.get("user"));
    Assert.assertEquals("Queue does not match", "default", jobSummaryElements.get("queue"));
    Assert.assertEquals("Status does not match", "SUCCEEDED", jobSummaryElements.get("status"));
  }
  JobHistory jobHistory = new JobHistory();
  jobHistory.init(conf);
  HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
  JobInfo jobInfo;
  long numFinishedMaps;
  synchronized (fileInfo) {
    Path historyFilePath = fileInfo.getHistoryFile();
    FSDataInputStream in = null;
    LOG.info("JobHistoryFile is: " + historyFilePath);
    try {
      in = fc.open(fc.makeQualified(historyFilePath));
    } catch (IOException ioe) {
      LOG.info("Can not open history file: " + historyFilePath, ioe);
      throw (new Exception("Can not open History File"));
    }
    JobHistoryParser parser = new JobHistoryParser(in);
    final EventReader realReader = new EventReader(in);
    EventReader reader = Mockito.mock(EventReader.class);
    if (numMaps == numSuccessfulMaps) {
      reader = realReader;
    } else {
      // Hack! Simulate a truncated history file by failing the reader once
      // the expected number of TaskFinishedEvents has been delivered.
      final AtomicInteger numFinishedEvents = new AtomicInteger(0);
      Mockito.when(reader.getNextEvent()).thenAnswer(new Answer<HistoryEvent>() {
        public HistoryEvent answer(InvocationOnMock invocation) throws IOException {
          HistoryEvent event = realReader.getNextEvent();
          if (event instanceof TaskFinishedEvent) {
            numFinishedEvents.incrementAndGet();
          }
          if (numFinishedEvents.get() <= numSuccessfulMaps) {
            return event;
          } else {
            throw new IOException("test");
          }
        }
      });
    }
    jobInfo = parser.parse(reader);
    numFinishedMaps = computeFinishedMaps(jobInfo, numMaps, numSuccessfulMaps);
    if (numFinishedMaps != numMaps) {
      Exception parseException = parser.getParseException();
      Assert.assertNotNull("Didn't get expected parse exception", parseException);
    }
  }
  Assert.assertEquals("Incorrect username ", System.getProperty("user.name"), jobInfo.getUsername());
  Assert.assertEquals("Incorrect jobName ", "test", jobInfo.getJobname());
  Assert.assertEquals("Incorrect queuename ", "default", jobInfo.getJobQueueName());
  Assert.assertEquals("incorrect conf path", "test", jobInfo.getJobConfPath());
  Assert.assertEquals("incorrect finishedMap ", numSuccessfulMaps, numFinishedMaps);
  Assert.assertEquals("incorrect finishedReduces ", numReduces, jobInfo.getFinishedReduces());
  Assert.assertEquals("incorrect uberized ", job.isUber(), jobInfo.getUberized());
  Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
  int totalTasks = allTasks.size();
  Assert.assertEquals("total number of tasks is incorrect ", (numMaps + numReduces), totalTasks);
  // Verify AMInfo
  Assert.assertEquals(1, jobInfo.getAMInfos().size());
  Assert.assertEquals(MRApp.NM_HOST, jobInfo.getAMInfos().get(0).getNodeManagerHost());
  AMInfo amInfo = jobInfo.getAMInfos().get(0);
  Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
  Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
  Assert.assertEquals(1, amInfo.getAppAttemptId().getAttemptId());
  Assert.assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId().getApplicationAttemptId());
  Assert.assertTrue(amInfo.getStartTime() <= System.currentTimeMillis() && amInfo.getStartTime() >= amStartTimeEst);
  ContainerId fakeCid = MRApp.newContainerId(-1, -1, -1, -1);
  // Assert at taskAttempt level
  for (TaskInfo taskInfo : allTasks.values()) {
    int taskAttemptCount = taskInfo.getAllTaskAttempts().size();
    Assert.assertEquals("total number of task attempts ", 1, taskAttemptCount);
    TaskAttemptInfo taInfo = taskInfo.getAllTaskAttempts().values().iterator().next();
    Assert.assertNotNull(taInfo.getContainerId());
    // Verify the wrong ctor is not being used. Remove after mrv1 is removed.
    Assert.assertFalse(taInfo.getContainerId().equals(fakeCid));
  }
  // Deep compare Job and JobInfo
  for (Task task : job.getTasks().values()) {
    TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
    Assert.assertNotNull("TaskInfo not found", taskInfo);
    for (TaskAttempt taskAttempt : task.getAttempts().values()) {
      TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(TypeConverter.fromYarn((taskAttempt.getID())));
      Assert.assertNotNull("TaskAttemptInfo not found", taskAttemptInfo);
      Assert.assertEquals("Incorrect shuffle port for task attempt", taskAttempt.getShufflePort(), taskAttemptInfo.getShufflePort());
      if (numMaps == numSuccessfulMaps) {
        Assert.assertEquals(MRApp.NM_HOST, taskAttemptInfo.getHostname());
        Assert.assertEquals(MRApp.NM_PORT, taskAttemptInfo.getPort());
        // Verify rack-name
        Assert.assertEquals("rack-name is incorrect", taskAttemptInfo.getRackname(), RACK_NAME);
      }
    }
  }
  // test output for HistoryViewer
  PrintStream stdps = System.out;
  try {
    System.setOut(new PrintStream(outContent));
    HistoryViewer viewer;
    synchronized (fileInfo) {
      viewer = new HistoryViewer(fc.makeQualified(fileInfo.getHistoryFile()).toString(), conf, true);
    }
    viewer.print();
    for (TaskInfo taskInfo : allTasks.values()) {
      String test = (taskInfo.getTaskStatus() == null ? "" : taskInfo.getTaskStatus()) + " " + taskInfo.getTaskType() + " task list for " + taskInfo.getTaskId().getJobID();
      Assert.assertTrue(outContent.toString().indexOf(test) > 0);
      Assert.assertTrue(outContent.toString().indexOf(taskInfo.getTaskId().toString()) > 0);
    }
  } finally {
    System.setOut(stdps);
  }
}
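The computeFinishedMaps helper used above is defined alongside checkHistoryParsing. A hedged sketch of its logic: when every map succeeded, trust the parsed job-level counter; otherwise (the truncated-history case) count SUCCEEDED tasks directly, since the job-level counters are unreliable for a partially parsed file.

private static long computeFinishedMaps(JobInfo jobInfo, int numMaps, int numSuccessfulMaps) {
  if (numMaps == numSuccessfulMaps) {
    return jobInfo.getFinishedMaps();
  }
  // The history file was cut short mid-parse; count finished tasks explicitly.
  long numFinishedMaps = 0;
  for (TaskInfo taskInfo : jobInfo.getAllTasks().values()) {
    if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
      ++numFinishedMaps;
    }
  }
  return numFinishedMaps;
}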