Use of org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo in project hadoop by apache.
The class TestJobHistoryParsing, method testHistoryParsingForFailedAttempts.
@Test(timeout = 30000)
public void testHistoryParsingForFailedAttempts() throws Exception {
  LOG.info("STARTING testHistoryParsingForFailedAttempts");
  try {
    Configuration conf = new Configuration();
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class,
        DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app = new MRAppWithHistoryWithFailedAttempt(2, 1, true,
        this.getClass().getName(), true);
    app.submit(conf);
    Job job = app.getContext().getAllJobs().values().iterator().next();
    JobId jobId = job.getID();
    app.waitForState(job, JobState.SUCCEEDED);
    // make sure all events are flushed
    app.waitForState(Service.STATE.STOPPED);
    JobHistory jobHistory = new JobHistory();
    jobHistory.init(conf);
    HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
    JobHistoryParser parser;
    JobInfo jobInfo;
    synchronized (fileInfo) {
      Path historyFilePath = fileInfo.getHistoryFile();
      FSDataInputStream in = null;
      FileContext fc = null;
      try {
        fc = FileContext.getFileContext(conf);
        in = fc.open(fc.makeQualified(historyFilePath));
      } catch (IOException ioe) {
        LOG.info("Can not open history file: " + historyFilePath, ioe);
        throw new Exception("Can not open History File");
      }
      parser = new JobHistoryParser(in);
      jobInfo = parser.parse();
    }
    Exception parseException = parser.getParseException();
    Assert.assertNull("Caught an unexpected exception " + parseException,
        parseException);
    int noOffailedAttempts = 0;
    Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
    for (Task task : job.getTasks().values()) {
      TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
      for (TaskAttempt taskAttempt : task.getAttempts().values()) {
        TaskAttemptInfo taskAttemptInfo =
            taskInfo.getAllTaskAttempts().get(TypeConverter.fromYarn(taskAttempt.getID()));
        // Verify rack-name for all task attempts
        Assert.assertEquals("rack-name is incorrect",
            taskAttemptInfo.getRackname(), RACK_NAME);
        if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
          noOffailedAttempts++;
        }
      }
    }
    Assert.assertEquals("No of Failed tasks doesn't match.", 2, noOffailedAttempts);
  } finally {
    LOG.info("FINISHED testHistoryParsingForFailedAttempts");
  }
}
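For context, the parse step in this test can also be exercised outside the MRApp harness. Below is a minimal, hypothetical standalone sketch of the same pattern: open a completed job's .jhist file through FileContext, feed it to JobHistoryParser, and count FAILED attempts from the parsed JobHistoryParser.JobInfo. The class name and the command-line path argument are illustrative, not part of the test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;

public class FailedAttemptCounter {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileContext fc = FileContext.getFileContext(conf);
    // Illustrative: pass the path of a completed job's .jhist file as the first argument.
    Path historyFilePath = new Path(args[0]);
    FSDataInputStream in = fc.open(fc.makeQualified(historyFilePath));
    JobHistoryParser parser = new JobHistoryParser(in);
    JobHistoryParser.JobInfo jobInfo = parser.parse();
    int failed = 0;
    for (JobHistoryParser.TaskInfo taskInfo : jobInfo.getAllTasks().values()) {
      for (JobHistoryParser.TaskAttemptInfo attemptInfo
          : taskInfo.getAllTaskAttempts().values()) {
        if ("FAILED".equals(attemptInfo.getTaskStatus())) {
          failed++;
        }
      }
    }
    System.out.println(failed + " failed task attempts in " + historyFilePath);
  }
}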
Use of org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo in project hadoop by apache.
The class TestJobInfo, method testAverageReduceTime.
@Test
public void testAverageReduceTime() {
  Job job = mock(CompletedJob.class);
  final Task task1 = mock(Task.class);
  final Task task2 = mock(Task.class);
  JobId jobId = MRBuilderUtils.newJobId(1L, 1, 1);
  final TaskId taskId1 = MRBuilderUtils.newTaskId(jobId, 1, TaskType.REDUCE);
  final TaskId taskId2 = MRBuilderUtils.newTaskId(jobId, 2, TaskType.REDUCE);
  final TaskAttemptId taskAttemptId1 = MRBuilderUtils.newTaskAttemptId(taskId1, 1);
  final TaskAttemptId taskAttemptId2 = MRBuilderUtils.newTaskAttemptId(taskId2, 2);
  final TaskAttempt taskAttempt1 = mock(TaskAttempt.class);
  final TaskAttempt taskAttempt2 = mock(TaskAttempt.class);
  JobReport jobReport = mock(JobReport.class);
  when(taskAttempt1.getState()).thenReturn(TaskAttemptState.SUCCEEDED);
  when(taskAttempt1.getLaunchTime()).thenReturn(0L);
  when(taskAttempt1.getShuffleFinishTime()).thenReturn(4L);
  when(taskAttempt1.getSortFinishTime()).thenReturn(6L);
  when(taskAttempt1.getFinishTime()).thenReturn(8L);
  when(taskAttempt2.getState()).thenReturn(TaskAttemptState.SUCCEEDED);
  when(taskAttempt2.getLaunchTime()).thenReturn(5L);
  when(taskAttempt2.getShuffleFinishTime()).thenReturn(10L);
  when(taskAttempt2.getSortFinishTime()).thenReturn(22L);
  when(taskAttempt2.getFinishTime()).thenReturn(42L);
  when(task1.getType()).thenReturn(TaskType.REDUCE);
  when(task2.getType()).thenReturn(TaskType.REDUCE);
  when(task1.getAttempts()).thenReturn(new HashMap<TaskAttemptId, TaskAttempt>() {
    {
      put(taskAttemptId1, taskAttempt1);
    }
  });
  when(task2.getAttempts()).thenReturn(new HashMap<TaskAttemptId, TaskAttempt>() {
    {
      put(taskAttemptId2, taskAttempt2);
    }
  });
  when(job.getTasks()).thenReturn(new HashMap<TaskId, Task>() {
    {
      put(taskId1, task1);
      put(taskId2, task2);
    }
  });
  when(job.getID()).thenReturn(jobId);
  when(job.getReport()).thenReturn(jobReport);
  when(job.getName()).thenReturn("TestJobInfo");
  when(job.getState()).thenReturn(JobState.SUCCEEDED);
  JobInfo jobInfo = new JobInfo(job);
  Assert.assertEquals(11L, jobInfo.getAvgReduceTime().longValue());
}
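The expected value of 11 is consistent with treating the reduce phase of each successful attempt as finishTime minus sortFinishTime and averaging over the two attempts: ((8 - 6) + (42 - 22)) / 2 = 11. A minimal sketch of that arithmetic under that assumption (the class below is illustrative, not the DAO's actual implementation):

public class AvgReduceTimeSketch {
  public static void main(String[] args) {
    // {sortFinishTime, finishTime} pairs for the two mocked attempts above.
    long[][] attempts = { { 6L, 8L }, { 22L, 42L } };
    long total = 0;
    for (long[] a : attempts) {
      total += a[1] - a[0]; // (8 - 6) + (42 - 22) = 22
    }
    System.out.println(total / attempts.length); // prints 11, matching the assertion
  }
}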
Use of org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo in project hadoop by apache.
The class TestJobInfo, method testAverageMergeTime.
@Test(timeout = 10000)
public void testAverageMergeTime() throws IOException {
  String historyFileName =
      "job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist";
  String confFileName = "job_1329348432655_0001_conf.xml";
  Configuration conf = new Configuration();
  JobACLsManager jobAclsMgr = new JobACLsManager(conf);
  Path fullHistoryPath =
      new Path(TestJobHistoryEntities.class.getClassLoader().getResource(historyFileName).getFile());
  Path fullConfPath =
      new Path(TestJobHistoryEntities.class.getClassLoader().getResource(confFileName).getFile());
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  when(info.getHistoryFile()).thenReturn(fullHistoryPath);
  JobId jobId = MRBuilderUtils.newJobId(1329348432655L, 1, 1);
  CompletedJob completedJob =
      new CompletedJob(conf, jobId, fullHistoryPath, true, "user", info, jobAclsMgr);
  JobInfo jobInfo = new JobInfo(completedJob);
  // There are 2 tasks with merge times of 45 and 55 respectively, so the average
  // merge time should be 50.
  Assert.assertEquals(50L, jobInfo.getAvgMergeTime().longValue());
}
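By analogy with the mocked timestamps in testAverageReduceTime, the merge phase of a reduce attempt appears to span from shuffle completion to sort completion; the comment above gives per-attempt merge times of 45 ms and 55 ms taken from the bundled history file. A minimal sketch of the averaging this test asserts (illustrative only, not CompletedJob's or JobInfo's actual code):

public class AvgMergeTimeSketch {
  public static void main(String[] args) {
    // Per-attempt merge times (sortFinishTime - shuffleFinishTime) from the test comment.
    long[] mergeTimes = { 45L, 55L };
    long total = 0;
    for (long t : mergeTimes) {
      total += t;
    }
    System.out.println(total / mergeTimes.length); // prints 50, matching the assertion
  }
}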
Use of org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo in project hadoop by apache.
The class TestJobInfo, method testGetStartTimeStr.
@Test
public void testGetStartTimeStr() {
  JobReport jobReport = mock(JobReport.class);
  when(jobReport.getStartTime()).thenReturn(-1L);
  Job job = mock(Job.class);
  when(job.getReport()).thenReturn(jobReport);
  when(job.getName()).thenReturn("TestJobInfo");
  when(job.getState()).thenReturn(JobState.SUCCEEDED);
  JobId jobId = MRBuilderUtils.newJobId(1L, 1, 1);
  when(job.getID()).thenReturn(jobId);
  JobInfo jobInfo = new JobInfo(job);
  // A negative start time should render as the NA placeholder.
  Assert.assertEquals(JobInfo.NA, jobInfo.getStartTimeStr());
  Date date = new Date();
  when(jobReport.getStartTime()).thenReturn(date.getTime());
  jobInfo = new JobInfo(job);
  // A real start time should render via Date.toString().
  Assert.assertEquals(date.toString(), jobInfo.getStartTimeStr());
}
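The two assertions pin down getStartTimeStr's contract: a negative start time renders as JobInfo.NA, and a valid one renders as Date.toString() of the start time. A minimal sketch of that behavior, assuming NA is the string "N/A" (both the constant's value and the helper method below are illustrative, not the class's actual source):

import java.util.Date;

public class StartTimeStrSketch {
  // Assumed value of JobInfo.NA; used here only to keep the sketch self-contained.
  private static final String NA = "N/A";

  static String startTimeStr(long startTime) {
    if (startTime < 0) {
      return NA; // no start time reported yet
    }
    return new Date(startTime).toString(); // matches the second assertion
  }

  public static void main(String[] args) {
    System.out.println(startTimeStr(-1L));                        // N/A
    System.out.println(startTimeStr(System.currentTimeMillis())); // current time as Date.toString()
  }
}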
Use of org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo in project hadoop by apache.
The class JobHistoryFileReplayMapperV2, method writeEntities.
@Override
protected void writeEntities(Configuration tlConf, TimelineCollectorManager manager,
    Context context) throws IOException {
  JobHistoryFileReplayHelper helper = new JobHistoryFileReplayHelper(context);
  int replayMode = helper.getReplayMode();
  JobHistoryFileParser parser = helper.getParser();
  TimelineEntityConverterV2 converter = new TimelineEntityConverterV2();
  // collect the apps it needs to process
  Collection<JobFiles> jobs = helper.getJobFiles();
  if (jobs.isEmpty()) {
    LOG.info(context.getTaskAttemptID().getTaskID() + " will process no jobs");
  } else {
    LOG.info(context.getTaskAttemptID().getTaskID() + " will process " + jobs.size() + " jobs");
  }
  for (JobFiles job : jobs) {
    // process each job
    String jobIdStr = job.getJobId();
    // skip the job if either of its files is missing
    if (job.getJobConfFilePath() == null || job.getJobHistoryFilePath() == null) {
      LOG.info(jobIdStr + " missing either the job history file or the "
          + "configuration file. Skipping.");
      continue;
    }
    LOG.info("processing " + jobIdStr + "...");
    JobId jobId = TypeConverter.toYarn(JobID.forName(jobIdStr));
    ApplicationId appId = jobId.getAppId();
    // create the app-level timeline collector and register it
    AppLevelTimelineCollector collector = new AppLevelTimelineCollector(appId);
    manager.putIfAbsent(appId, collector);
    try {
      // parse the job info and configuration
      JobInfo jobInfo = parser.parseHistoryFile(job.getJobHistoryFilePath());
      Configuration jobConf = parser.parseConfiguration(job.getJobConfFilePath());
      LOG.info("parsed the job history file and the configuration file "
          + "for job " + jobIdStr);
      // set the collector context:
      // flow name = job name, flow run id = submit timestamp, user id = job user
      TimelineCollectorContext tlContext = collector.getTimelineEntityContext();
      tlContext.setFlowName(jobInfo.getJobname());
      tlContext.setFlowRunId(jobInfo.getSubmitTime());
      tlContext.setUserId(jobInfo.getUsername());
      // create entities from the job history and write them
      long totalTime = 0;
      List<TimelineEntity> entitySet = converter.createTimelineEntities(jobInfo, jobConf);
      LOG.info("converted them into timeline entities for job " + jobIdStr);
      // use the current user for this purpose
      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
      long startWrite = System.nanoTime();
      try {
        switch (replayMode) {
          case JobHistoryFileReplayHelper.WRITE_ALL_AT_ONCE:
            writeAllEntities(collector, entitySet, ugi);
            break;
          case JobHistoryFileReplayHelper.WRITE_PER_ENTITY:
            writePerEntity(collector, entitySet, ugi);
            break;
          default:
            break;
        }
      } catch (Exception e) {
        context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_FAILURES).increment(1);
        LOG.error("writing to the timeline service failed", e);
      }
      long endWrite = System.nanoTime();
      totalTime += TimeUnit.NANOSECONDS.toMillis(endWrite - startWrite);
      int numEntities = entitySet.size();
      LOG.info("wrote " + numEntities + " entities in " + totalTime + " ms");
      context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_TIME).increment(totalTime);
      context.getCounter(PerfCounters.TIMELINE_SERVICE_WRITE_COUNTER).increment(numEntities);
    } finally {
      manager.remove(appId);
      // move it along
      context.progress();
    }
  }
}
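The switch above dispatches to writeAllEntities and writePerEntity, whose bodies are not shown in this snippet. A plausible sketch of what they might look like, assuming the collector exposes TimelineCollector.putEntities(TimelineEntities, UserGroupInformation); the helper signatures below are inferred from the call sites above, not copied from the mapper:

// Sketch only: batch write vs. one putEntities call per entity.
private void writeAllEntities(AppLevelTimelineCollector collector,
    List<TimelineEntity> entities, UserGroupInformation ugi) throws IOException {
  TimelineEntities batch = new TimelineEntities();
  for (TimelineEntity entity : entities) {
    batch.addEntity(entity);
  }
  collector.putEntities(batch, ugi);
}

private void writePerEntity(AppLevelTimelineCollector collector,
    List<TimelineEntity> entities, UserGroupInformation ugi) throws IOException {
  for (TimelineEntity entity : entities) {
    TimelineEntities wrapper = new TimelineEntities();
    wrapper.addEntity(entity);
    collector.putEntities(wrapper, ugi);
  }
}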