Use of org.apache.hadoop.mapreduce.v2.hs.HistoryContext in project hadoop by apache.
The class JobHistoryServer, method serviceInit.
@Override
protected void serviceInit(Configuration conf) throws Exception {
  Configuration config = new YarnConfiguration(conf);
  config.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);
  // This is required for WebApps to use https if enabled.
  MRWebAppUtil.initialize(getConfig());
  try {
    doSecureLogin(conf);
  } catch (IOException ie) {
    throw new YarnRuntimeException("History Server Failed to login", ie);
  }
  jobHistoryService = new JobHistory();
  historyContext = (HistoryContext) jobHistoryService;
  stateStore = createStateStore(conf);
  this.jhsDTSecretManager = createJHSSecretManager(conf, stateStore);
  clientService = createHistoryClientService();
  aggLogDelService = new AggregatedLogDeletionService();
  hsAdminServer = new HSAdminServer(aggLogDelService, jobHistoryService);
  addService(stateStore);
  addService(new HistoryServerSecretManagerService());
  addService(jobHistoryService);
  addService(clientService);
  addService(aggLogDelService);
  addService(hsAdminServer);
  DefaultMetricsSystem.initialize("JobHistoryServer");
  JvmMetrics jm = JvmMetrics.initSingleton("JobHistoryServer", null);
  pauseMonitor = new JvmPauseMonitor();
  addService(pauseMonitor);
  jm.setPauseMonitor(pauseMonitor);
  super.serviceInit(config);
}
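The cast above works because JobHistory implements HistoryContext. A minimal sketch of how a consumer might then read completed jobs through the interface, illustrative only and not taken from the Hadoop source; "jobId" is a hypothetical placeholder:

  // Illustrative sketch: consuming the HistoryContext reference kept by
  // the server. getAllJobs() and getJob(JobId) are inherited from AppContext.
  Map<JobId, Job> completedJobs = historyContext.getAllJobs();
  for (Job j : completedJobs.values()) {
    // Each Job here is a parsed, completed job backed by history files.
    System.out.println(j.getID() + " finished " + j.getCompletedMaps() + " maps");
  }
  Job one = historyContext.getJob(jobId); // single-job lookup by a known JobId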
Use of org.apache.hadoop.mapreduce.v2.hs.HistoryContext in project hadoop by apache.
The class TestJobHistoryEvents, method testEventsFlushOnStop.
/**
 * Verify that all the events are flushed on stopping the HistoryHandler
 * @throws Exception
 */
@Test
public void testEventsFlushOnStop() throws Exception {
  Configuration conf = new Configuration();
  MRApp app = new MRAppWithSpecialHistoryHandler(1, 0, true,
      this.getClass().getName(), true);
  app.submit(conf);
  Job job = app.getContext().getAllJobs().values().iterator().next();
  JobId jobId = job.getID();
  LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
  app.waitForState(job, JobState.SUCCEEDED);
  // make sure all events are flushed
  app.waitForState(Service.STATE.STOPPED);
  /*
   * Use HistoryContext to read logged events and verify the number of
   * completed maps
   */
  HistoryContext context = new JobHistory();
  ((JobHistory) context).init(conf);
  Job parsedJob = context.getJob(jobId);
  Assert.assertEquals("CompletedMaps not correct", 1,
      parsedJob.getCompletedMaps());
  Map<TaskId, Task> tasks = parsedJob.getTasks();
  Assert.assertEquals("No of tasks not correct", 1, tasks.size());
  verifyTask(tasks.values().iterator().next());
  Map<TaskId, Task> maps = parsedJob.getTasks(TaskType.MAP);
  Assert.assertEquals("No of maps not correct", 1, maps.size());
  Assert.assertEquals("Job state not correct", JobState.SUCCEEDED,
      parsedJob.getState());
}
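The verifyTask helper called above is defined elsewhere in TestJobHistoryEvents. A hedged sketch of what such a helper plausibly asserts, an assumption rather than the test's actual body:

  // Hypothetical sketch of the verifyTask helper; the real implementation
  // lives elsewhere in TestJobHistoryEvents and may differ.
  private void verifyTask(Task task) {
    // Every task of a succeeded job should itself have succeeded.
    Assert.assertEquals("Task state not correct",
        TaskState.SUCCEEDED, task.getState());
    // MRApp runs each task once, so exactly one attempt is expected.
    Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
    Assert.assertEquals("No of attempts not correct", 1, attempts.size());
  }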
Use of org.apache.hadoop.mapreduce.v2.hs.HistoryContext in project hadoop by apache.
The class TestJobHistoryEvents, method testAssignedQueue.
@Test
public void testAssignedQueue() throws Exception {
  Configuration conf = new Configuration();
  MRApp app = new MRAppWithHistory(2, 1, true, this.getClass().getName(),
      true, "assignedQueue");
  app.submit(conf);
  Job job = app.getContext().getAllJobs().values().iterator().next();
  JobId jobId = job.getID();
  LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
  app.waitForState(job, JobState.SUCCEEDED);
  // make sure all events are flushed
  app.waitForState(Service.STATE.STOPPED);
  /*
   * Use HistoryContext to read logged events and verify the assigned
   * queue name
   */
  HistoryContext context = new JobHistory();
  // test start and stop states
  ((JobHistory) context).init(conf);
  ((JobHistory) context).start();
  Assert.assertTrue(context.getStartTime() > 0);
  Assert.assertEquals(Service.STATE.STARTED,
      ((JobHistory) context).getServiceState());
  // get job before stopping JobHistory
  Job parsedJob = context.getJob(jobId);
  // stop JobHistory
  ((JobHistory) context).stop();
  Assert.assertEquals(Service.STATE.STOPPED,
      ((JobHistory) context).getServiceState());
  Assert.assertEquals("QueueName not correct", "assignedQueue",
      parsedJob.getQueueName());
}
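The manual init/start/stop sequence above is reused in testHistoryEvents below. A hedged variant with a try/finally guard, so the service stops even when an assertion fails; an illustration, not how the Hadoop test is written:

  // Sketch: guarding the manual Service lifecycle used in these tests.
  JobHistory jobHistory = new JobHistory();
  jobHistory.init(conf);
  jobHistory.start();
  try {
    Job parsed = jobHistory.getJob(jobId);
    Assert.assertEquals("QueueName not correct", "assignedQueue",
        parsed.getQueueName());
  } finally {
    jobHistory.stop(); // release the service even if an assertion fails
  }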
Use of org.apache.hadoop.mapreduce.v2.hs.HistoryContext in project hadoop by apache.
The class TestHsWebServicesAcls, method buildHistoryContext.
private static HistoryContext buildHistoryContext(final Configuration conf)
    throws IOException {
  HistoryContext ctx = new MockHistoryContext(1, 1, 1);
  Map<JobId, Job> jobs = ctx.getAllJobs();
  JobId jobId = jobs.keySet().iterator().next();
  Job mockJob = new MockJobForAcls(jobs.get(jobId), conf);
  jobs.put(jobId, mockJob);
  return ctx;
}
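MockHistoryContext and MockJobForAcls are helpers defined alongside this test. As a rough illustration of the pattern, with names and details assumed rather than copied (the real MockJobForAcls may differ), an ACL-focused wrapper only needs to delegate to the wrapped Job while answering checkAccess from the configured view ACL:

  // Hypothetical sketch of an ACL-aware Job wrapper in the spirit of
  // MockJobForAcls; not the actual helper from TestHsWebServicesAcls.
  static class AclCheckingJob /* implements Job, delegating other methods */ {
    private final Job delegate;
    private final AccessControlList viewAcl;

    AclCheckingJob(Job delegate, Configuration conf) {
      this.delegate = delegate;
      this.viewAcl = new AccessControlList(
          conf.get(MRJobConfig.JOB_ACL_VIEW_JOB, " "));
    }

    public boolean checkAccess(UserGroupInformation ugi, JobACL op) {
      // Grant access when the caller appears in the configured view ACL.
      return viewAcl.isUserAllowed(ugi);
    }
  }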
Use of org.apache.hadoop.mapreduce.v2.hs.HistoryContext in project hadoop by apache.
The class TestJobHistoryEvents, method testHistoryEvents.
@Test
public void testHistoryEvents() throws Exception {
  Configuration conf = new Configuration();
  MRApp app = new MRAppWithHistory(2, 1, true, this.getClass().getName(),
      true);
  app.submit(conf);
  Job job = app.getContext().getAllJobs().values().iterator().next();
  JobId jobId = job.getID();
  LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
  app.waitForState(job, JobState.SUCCEEDED);
  // make sure all events are flushed
  app.waitForState(Service.STATE.STOPPED);
  /*
   * Use HistoryContext to read logged events and verify the number of
   * completed maps
   */
  HistoryContext context = new JobHistory();
  // test start and stop states
  ((JobHistory) context).init(conf);
  ((JobHistory) context).start();
  Assert.assertTrue(context.getStartTime() > 0);
  Assert.assertEquals(Service.STATE.STARTED,
      ((JobHistory) context).getServiceState());
  // get job before stopping JobHistory
  Job parsedJob = context.getJob(jobId);
  // stop JobHistory
  ((JobHistory) context).stop();
  Assert.assertEquals(Service.STATE.STOPPED,
      ((JobHistory) context).getServiceState());
  Assert.assertEquals("CompletedMaps not correct", 2,
      parsedJob.getCompletedMaps());
  Assert.assertEquals(System.getProperty("user.name"),
      parsedJob.getUserName());
  Map<TaskId, Task> tasks = parsedJob.getTasks();
  Assert.assertEquals("No of tasks not correct", 3, tasks.size());
  for (Task task : tasks.values()) {
    verifyTask(task);
  }
  Map<TaskId, Task> maps = parsedJob.getTasks(TaskType.MAP);
  Assert.assertEquals("No of maps not correct", 2, maps.size());
  Map<TaskId, Task> reduces = parsedJob.getTasks(TaskType.REDUCE);
  Assert.assertEquals("No of reduces not correct", 1, reduces.size());
  Assert.assertEquals("CompletedReduce not correct", 1,
      parsedJob.getCompletedReduces());
  Assert.assertEquals("Job state not correct", JobState.SUCCEEDED,
      parsedJob.getState());
}
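The hard-coded counts above (2 maps, 1 reduce, 3 tasks) mirror the MRAppWithHistory constructor arguments. A hedged helper that derives the expected per-type counts instead of repeating magic numbers; an illustration, not part of the Hadoop test:

  // Sketch: assert task counts by type from the declared map/reduce counts.
  private static void assertTaskCounts(Job job, int maps, int reduces) {
    Assert.assertEquals("No of maps not correct",
        maps, job.getTasks(TaskType.MAP).size());
    Assert.assertEquals("No of reduces not correct",
        reduces, job.getTasks(TaskType.REDUCE).size());
    Assert.assertEquals("No of tasks not correct",
        maps + reduces, job.getTasks().size());
  }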