Usage example of org.apache.hadoop.mapreduce.v2.hs.JobHistory in the Apache Hadoop project: class TestJobHistory, method testTasksCacheLimit.
@Test
public void testTasksCacheLimit() throws Exception {
  // Verifies that the loaded-job cache is evicted based on the total number of
  // tasks held, not just the number of jobs: once large jobs push the task
  // count over the configured threshold, the cache must shrink.
  HistoryFileManager historyManager = mock(HistoryFileManager.class);
  jobHistory = spy(new JobHistory());
  doReturn(historyManager).when(jobHistory).createHistoryFileManager();

  Configuration conf = new Configuration();
  // Set the cache threshold to 50 tasks
  conf.setInt(JHAdminConfig.MR_HISTORY_LOADED_TASKS_CACHE_SIZE, 50);
  jobHistory.init(conf);
  jobHistory.start();

  CachedHistoryStorage storage =
      spy((CachedHistoryStorage) jobHistory.getHistoryStorage());
  assertTrue(storage.getUseLoadedTasksCache());
  // Fixed argument order: JUnit's assertEquals takes (expected, actual).
  assertEquals(50, storage.getLoadedTasksCacheSize());

  // Create a bunch of smaller jobs (<< 50 tasks each)
  Job[] jobs = new Job[10];
  JobId[] jobIds = new JobId[10];
  for (int i = 0; i < jobs.length; i++) {
    jobs[i] = mock(Job.class);
    jobIds[i] = mock(JobId.class);
    when(jobs[i].getID()).thenReturn(jobIds[i]);
    when(jobs[i].getTotalMaps()).thenReturn(10);
    when(jobs[i].getTotalReduces()).thenReturn(2);
  }

  // Create some large jobs that force task-based cache flushing
  Job[] lgJobs = new Job[3];
  JobId[] lgJobIds = new JobId[3];
  for (int i = 0; i < lgJobs.length; i++) {
    lgJobs[i] = mock(Job.class);
    lgJobIds[i] = mock(JobId.class);
    when(lgJobs[i].getID()).thenReturn(lgJobIds[i]);
    when(lgJobs[i].getTotalMaps()).thenReturn(2000);
    when(lgJobs[i].getTotalReduces()).thenReturn(10);
  }

  // Every getFileInfo() returns the same mock; loadJob() is stubbed to hand
  // out the small jobs first, then the large ones, in order.
  HistoryFileInfo fileInfo = mock(HistoryFileInfo.class);
  when(historyManager.getFileInfo(any(JobId.class))).thenReturn(fileInfo);
  when(fileInfo.loadJob())
      .thenReturn(jobs[0]).thenReturn(jobs[1]).thenReturn(jobs[2])
      .thenReturn(jobs[3]).thenReturn(jobs[4]).thenReturn(jobs[5])
      .thenReturn(jobs[6]).thenReturn(jobs[7]).thenReturn(jobs[8])
      .thenReturn(jobs[9])
      .thenReturn(lgJobs[0]).thenReturn(lgJobs[1]).thenReturn(lgJobs[2]);

  // getFullJob will put the job in the cache if it isn't there
  Cache<JobId, Job> jobCache = storage.getLoadedJobCache();
  for (int i = 0; i < jobs.length; i++) {
    storage.getFullJob(jobs[i].getID());
  }
  long prevSize = jobCache.size();

  // Loading the large jobs exceeds the 50-task threshold, so the cache
  // must end up smaller than it was with only small jobs in it.
  for (int i = 0; i < lgJobs.length; i++) {
    storage.getFullJob(lgJobs[i].getID());
  }
  assertTrue(jobCache.size() < prevSize);
}
Usage example of org.apache.hadoop.mapreduce.v2.hs.JobHistory in the Apache Hadoop project: class TestJobHistory, method testRefreshLoadedJobCacheUnSupportedOperation.
@Test
public void testRefreshLoadedJobCacheUnSupportedOperation() {
  // refreshLoadedJobCache() is only meaningful for CachedHistoryStorage.
  // Install a bare-bones HistoryStorage implementation and verify that the
  // refresh call is rejected with UnsupportedOperationException.
  jobHistory = spy(new JobHistory());
  HistoryStorage nonCachingStorage = new HistoryStorage() {
    @Override
    public void setHistoryFileManager(HistoryFileManager hsManager) {
      // no-op: this stub storage keeps no state
    }

    @Override
    public JobsInfo getPartialJobs(Long offset, Long count, String user,
        String queue, Long sBegin, Long sEnd, Long fBegin, Long fEnd,
        JobState jobState) {
      // not exercised by this test
      return null;
    }

    @Override
    public Job getFullJob(JobId jobId) {
      // not exercised by this test
      return null;
    }

    @Override
    public Map<JobId, Job> getAllPartialJobs() {
      // not exercised by this test
      return null;
    }
  };
  doReturn(nonCachingStorage).when(jobHistory).createHistoryStorage();

  jobHistory.init(new Configuration());
  jobHistory.start();

  Throwable thrown = null;
  try {
    jobHistory.refreshLoadedJobCache();
  } catch (Exception e) {
    thrown = e;
  }
  // Also fails (thrown == null) if no exception was raised at all.
  assertTrue(thrown instanceof UnsupportedOperationException);
}
Usage example of org.apache.hadoop.mapreduce.v2.hs.JobHistory in the Apache Hadoop project: class TestJobHistory, method testJobCacheLimitLargerThanMax.
@Test
public void testJobCacheLimitLargerThanMax() throws Exception {
  // Verifies that when every single job is larger than the task-cache
  // threshold, the cache still keeps at least one job loaded rather than
  // evicting everything.
  HistoryFileManager historyManager = mock(HistoryFileManager.class);
  JobHistory jobHistory = spy(new JobHistory());
  doReturn(historyManager).when(jobHistory).createHistoryFileManager();

  Configuration conf = new Configuration();
  // Set the cache threshold to 500 tasks (fixed stale comment: the old
  // copy-pasted text claimed 50 while the code sets 500)
  conf.setInt(JHAdminConfig.MR_HISTORY_LOADED_TASKS_CACHE_SIZE, 500);
  jobHistory.init(conf);
  jobHistory.start();

  CachedHistoryStorage storage =
      spy((CachedHistoryStorage) jobHistory.getHistoryStorage());
  assertTrue(storage.getUseLoadedTasksCache());
  // Fixed argument order: JUnit's assertEquals takes (expected, actual).
  assertEquals(500, storage.getLoadedTasksCacheSize());

  // Create a bunch of large jobs, each with 750 tasks (> the 500 threshold)
  Job[] lgJobs = new Job[10];
  JobId[] lgJobIds = new JobId[10];
  for (int i = 0; i < lgJobs.length; i++) {
    lgJobs[i] = mock(Job.class);
    lgJobIds[i] = mock(JobId.class);
    when(lgJobs[i].getID()).thenReturn(lgJobIds[i]);
    when(lgJobs[i].getTotalMaps()).thenReturn(700);
    when(lgJobs[i].getTotalReduces()).thenReturn(50);
  }

  HistoryFileInfo fileInfo = mock(HistoryFileInfo.class);
  when(historyManager.getFileInfo(any(JobId.class))).thenReturn(fileInfo);
  when(fileInfo.loadJob())
      .thenReturn(lgJobs[0]).thenReturn(lgJobs[1]).thenReturn(lgJobs[2])
      .thenReturn(lgJobs[3]).thenReturn(lgJobs[4]).thenReturn(lgJobs[5])
      .thenReturn(lgJobs[6]).thenReturn(lgJobs[7]).thenReturn(lgJobs[8])
      .thenReturn(lgJobs[9]);

  // getFullJob will put the job in the cache if it isn't there.
  // After each load the cache must not be empty, even though each job alone
  // exceeds the task threshold. (Removed unused local long[] cacheSize.)
  Cache<JobId, Job> jobCache = storage.getLoadedJobCache();
  for (int i = 0; i < lgJobs.length; i++) {
    storage.getFullJob(lgJobs[i].getID());
    assertTrue(jobCache.size() > 0);
  }
}
Usage example of org.apache.hadoop.mapreduce.v2.hs.JobHistory in the Apache Hadoop project: class TestJobHistoryEvents, method testHistoryEvents.
@Test
public void testHistoryEvents() throws Exception {
  // Runs a small MR app (2 maps, 1 reduce) that writes history events, then
  // replays those events through JobHistory and verifies the parsed job.
  Configuration conf = new Configuration();
  MRApp app = new MRAppWithHistory(2, 1, true, this.getClass().getName(), true);
  app.submit(conf);
  Job job = app.getContext().getAllJobs().values().iterator().next();
  JobId jobId = job.getID();
  LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
  app.waitForState(job, JobState.SUCCEEDED);

  // make sure all events are flushed
  app.waitForState(Service.STATE.STOPPED);

  /*
   * Use HistoryContext to read logged events and verify the number of
   * completed maps
   */
  HistoryContext context = new JobHistory();
  // test start and stop states
  ((JobHistory) context).init(conf);
  ((JobHistory) context).start();
  Assert.assertTrue(context.getStartTime() > 0);
  // Fixed argument order: JUnit's assertEquals takes (expected, actual).
  Assert.assertEquals(Service.STATE.STARTED,
      ((JobHistory) context).getServiceState());

  // get job before stopping JobHistory
  Job parsedJob = context.getJob(jobId);

  // stop JobHistory; the parsed job must remain readable afterwards
  ((JobHistory) context).stop();
  Assert.assertEquals(Service.STATE.STOPPED,
      ((JobHistory) context).getServiceState());

  Assert.assertEquals("CompletedMaps not correct", 2,
      parsedJob.getCompletedMaps());
  Assert.assertEquals(System.getProperty("user.name"),
      parsedJob.getUserName());

  Map<TaskId, Task> tasks = parsedJob.getTasks();
  Assert.assertEquals("No of tasks not correct", 3, tasks.size());
  for (Task task : tasks.values()) {
    verifyTask(task);
  }

  Map<TaskId, Task> maps = parsedJob.getTasks(TaskType.MAP);
  Assert.assertEquals("No of maps not correct", 2, maps.size());
  Map<TaskId, Task> reduces = parsedJob.getTasks(TaskType.REDUCE);
  Assert.assertEquals("No of reduces not correct", 1, reduces.size());
  Assert.assertEquals("CompletedReduce not correct", 1,
      parsedJob.getCompletedReduces());
  // Fixed typo in the assertion message: "currect" -> "correct".
  Assert.assertEquals("Job state not correct", JobState.SUCCEEDED,
      parsedJob.getState());
}
Aggregations