Use of org.apache.hadoop.mapreduce.v2.hs.JobHistory in project hadoop by apache.
The class TestRecovery, method testCrashed.
/**
 * AM with 2 maps and 1 reduce. For the 1st map, one attempt fails, one attempt
 * completely disappears because of a failed launch, one attempt gets killed,
 * and one attempt succeeds. The AM crashes after the first task finishes,
 * then recovers completely and succeeds in the second generation.
 *
 * @throws Exception
 */
@Test
public void testCrashed() throws Exception {
    int runCount = 0;
    long am1StartTimeEst = System.currentTimeMillis();
    MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(),
        true, ++runCount);
    Configuration conf = new Configuration();
    conf.setBoolean("mapred.mapper.new-api", true);
    conf.setBoolean("mapred.reducer.new-api", true);
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    long jobStartTime = job.getReport().getStartTime();
    // the job has 2 map tasks and 1 reduce task
    Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
    Iterator<Task> it = job.getTasks().values().iterator();
    Task mapTask1 = it.next();
    Task mapTask2 = it.next();
    Task reduceTask = it.next();
    // all maps must be running
    app.waitForState(mapTask1, TaskState.RUNNING);
    app.waitForState(mapTask2, TaskState.RUNNING);
    TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
    TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
    // before sending the TA_DONE event, make sure the attempts have reached
    // the RUNNING state
    app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
    app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
    app.waitForState(reduceTask, TaskState.RUNNING);
    /////////// Play some games with the TaskAttempts of the first task //////
    // send the fail signal to the 1st map task attempt
    app.getContext().getEventHandler().handle(
        new TaskAttemptEvent(task1Attempt1.getID(),
            TaskAttemptEventType.TA_FAILMSG));
    app.waitForState(task1Attempt1, TaskAttemptState.FAILED);
    int timeOut = 0;
    while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
        Thread.sleep(2000);
        LOG.info("Waiting for next attempt to start");
    }
    Assert.assertEquals(2, mapTask1.getAttempts().size());
    Iterator<TaskAttempt> itr = mapTask1.getAttempts().values().iterator();
    itr.next();
    TaskAttempt task1Attempt2 = itr.next();
    // wait for the second task attempt to be assigned
    waitForContainerAssignment(task1Attempt2);
    // this attempt will automatically fail because of the way the
    // ContainerLauncher is set up; it 'disappears' from JobHistory and so
    // causes MAPREDUCE-3846
    app.getContext().getEventHandler().handle(
        new TaskAttemptEvent(task1Attempt2.getID(),
            TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
    app.waitForState(task1Attempt2, TaskAttemptState.FAILED);
    timeOut = 0;
    while (mapTask1.getAttempts().size() != 3 && timeOut++ < 10) {
        Thread.sleep(2000);
        LOG.info("Waiting for next attempt to start");
    }
    Assert.assertEquals(3, mapTask1.getAttempts().size());
    itr = mapTask1.getAttempts().values().iterator();
    itr.next();
    itr.next();
    TaskAttempt task1Attempt3 = itr.next();
    app.waitForState(task1Attempt3, TaskAttemptState.RUNNING);
    // send the kill signal to the 3rd attempt of the 1st map
    app.getContext().getEventHandler().handle(
        new TaskAttemptEvent(task1Attempt3.getID(),
            TaskAttemptEventType.TA_KILL));
    app.waitForState(task1Attempt3, TaskAttemptState.KILLED);
    timeOut = 0;
    while (mapTask1.getAttempts().size() != 4 && timeOut++ < 10) {
        Thread.sleep(2000);
        LOG.info("Waiting for next attempt to start");
    }
    Assert.assertEquals(4, mapTask1.getAttempts().size());
    itr = mapTask1.getAttempts().values().iterator();
    itr.next();
    itr.next();
    itr.next();
    TaskAttempt task1Attempt4 = itr.next();
    app.waitForState(task1Attempt4, TaskAttemptState.RUNNING);
    // send the done signal to the 4th attempt of the 1st map
    app.getContext().getEventHandler().handle(
        new TaskAttemptEvent(task1Attempt4.getID(),
            TaskAttemptEventType.TA_DONE));
    /////////// End of games with the TaskAttempts of the first task //////
    // wait for the first map task to complete
    app.waitForState(mapTask1, TaskState.SUCCEEDED);
    long task1StartTime = mapTask1.getReport().getStartTime();
    long task1FinishTime = mapTask1.getReport().getFinishTime();
    // stop the app
    app.stop();
    // rerun: the 1st map will be recovered from the previous run
    long am2StartTimeEst = System.currentTimeMillis();
    app = new MRAppWithHistory(2, 1, false, this.getClass().getName(),
        false, ++runCount);
    conf = new Configuration();
    conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
    conf.setBoolean("mapred.mapper.new-api", true);
    conf.setBoolean("mapred.reducer.new-api", true);
    conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    // the job again has 2 maps and 1 reduce
    Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
    it = job.getTasks().values().iterator();
    mapTask1 = it.next();
    mapTask2 = it.next();
    reduceTask = it.next();
    // the first map will be recovered, no need to send done
    app.waitForState(mapTask1, TaskState.SUCCEEDED);
    app.waitForState(mapTask2, TaskState.RUNNING);
    task2Attempt = mapTask2.getAttempts().values().iterator().next();
    // before sending the TA_DONE event, make sure the attempt has reached
    // the RUNNING state
    app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
    // send the done signal to the 2nd map task
    app.getContext().getEventHandler().handle(
        new TaskAttemptEvent(task2Attempt.getID(),
            TaskAttemptEventType.TA_DONE));
    // wait for it to complete
    app.waitForState(mapTask2, TaskState.SUCCEEDED);
    // wait for the reduce to be running before sending done
    app.waitForState(reduceTask, TaskState.RUNNING);
    // send the done signal to the reduce
    app.getContext().getEventHandler().handle(
        new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),
            TaskAttemptEventType.TA_DONE));
    app.waitForState(job, JobState.SUCCEEDED);
    app.verifyCompleted();
    Assert.assertEquals("Job Start time not correct",
        jobStartTime, job.getReport().getStartTime());
    Assert.assertEquals("Task Start time not correct",
        task1StartTime, mapTask1.getReport().getStartTime());
    Assert.assertEquals("Task Finish time not correct",
        task1FinishTime, mapTask1.getReport().getFinishTime());
    Assert.assertEquals(2, job.getAMInfos().size());
    int attemptNum = 1;
    // verify AMInfo
    for (AMInfo amInfo : job.getAMInfos()) {
        Assert.assertEquals(attemptNum++, amInfo.getAppAttemptId().getAttemptId());
        Assert.assertEquals(amInfo.getAppAttemptId(),
            amInfo.getContainerId().getApplicationAttemptId());
        Assert.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost());
        Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
        Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
    }
    long am1StartTimeReal = job.getAMInfos().get(0).getStartTime();
    long am2StartTimeReal = job.getAMInfos().get(1).getStartTime();
    Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst
        && am1StartTimeReal <= am2StartTimeEst);
    Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst
        && am2StartTimeReal <= System.currentTimeMillis());
    // TODO Add verification of additional data from jobHistory - whatever was
    // available in the failed attempt should be available here
}
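The test above hand-rolls the same poll-and-sleep loop three times while waiting for the next task attempt to appear. As an illustration only (not part of the Hadoop test), the pattern can be factored into a small helper; the WaitFor class and its names below are hypothetical:

import java.util.concurrent.Callable;

// A minimal sketch of the polling pattern used above: retry a condition a
// bounded number of times, sleeping between tries. The class and method
// names are illustrative, not part of Hadoop.
public final class WaitFor {

    private WaitFor() {
    }

    public static boolean await(Callable<Boolean> condition,
            long intervalMillis, int maxTries) throws Exception {
        for (int tries = 0; tries < maxTries; tries++) {
            if (condition.call()) {
                return true;
            }
            Thread.sleep(intervalMillis);
        }
        // one last check after the final sleep, mirroring the test's
        // assert-after-loop structure
        return condition.call();
    }
}

With such a helper, each wait in the test collapses to something like WaitFor.await(() -> mapTask1.getAttempts().size() == 2, 2000, 10) followed by the assertion.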
Use of org.apache.hadoop.mapreduce.v2.hs.JobHistory in project hadoop by apache.
The class ConfBlock, method render.
/*
* (non-Javadoc)
* @see org.apache.hadoop.yarn.webapp.view.HtmlBlock#render(org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block)
*/
@Override
protected void render(Block html) {
    String jid = $(JOB_ID);
    if (jid.isEmpty()) {
        html.p()._("Sorry, can't do anything without a JobID.")._();
        return;
    }
    JobId jobID = MRApps.toJobID(jid);
    Job job = appContext.getJob(jobID);
    if (job == null) {
        html.p()._("Sorry, ", jid, " not found.")._();
        return;
    }
    Path confPath = job.getConfFile();
    try {
        ConfInfo info = new ConfInfo(job);
        html.div().a("/jobhistory/downloadconf/" + jid, confPath.toString());
        TBODY<TABLE<Hamlet>> tbody = html.table("#conf")
            .thead()
                .tr()
                    .th(_TH, "key")
                    .th(_TH, "value")
                    .th(_TH, "source chain")._()._()
            .tbody();
        for (ConfEntryInfo entry : info.getProperties()) {
            StringBuffer buffer = new StringBuffer();
            String[] sources = entry.getSource();
            // skip the last entry, because it is always the same HDFS file,
            // and output the rest in reverse order so the most recent source
            // comes first
            boolean first = true;
            for (int i = sources.length - 2; i >= 0; i--) {
                if (!first) {
                    buffer.append(" <- ");
                }
                first = false;
                buffer.append(sources[i]);
            }
            tbody.tr()
                .td(entry.getName())
                .td(entry.getValue())
                .td(buffer.toString())._();
        }
        tbody._()
            .tfoot()
                .tr()
                    .th().input("search_init").$type(InputType.text)
                        .$name("key").$value("key")._()._()
                    .th().input("search_init").$type(InputType.text)
                        .$name("value").$value("value")._()._()
                    .th().input("search_init").$type(InputType.text)
                        .$name("source chain").$value("source chain")._()._()._()._()._();
    } catch (IOException e) {
        LOG.error("Error while reading " + confPath, e);
        html.p()._("Sorry got an error while reading conf file. ", confPath);
    }
}
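The source-chain loop is the only non-boilerplate logic in this block. Here is a standalone sketch of just that logic, with a hypothetical input to show what it produces (the file names below are illustrative, not taken from a real job):

public class SourceChainSketch {

    // Mirrors the loop in render(): drop the last source (the job's own conf
    // file) and join the rest in reverse order, most recent first.
    static String formatSourceChain(String[] sources) {
        StringBuilder buffer = new StringBuilder();
        for (int i = sources.length - 2; i >= 0; i--) {
            if (buffer.length() > 0) {
                buffer.append(" <- ");
            }
            buffer.append(sources[i]);
        }
        return buffer.toString();
    }

    public static void main(String[] args) {
        // hypothetical source chain, oldest first, conf file last
        String[] sources = { "mapred-default.xml", "mapred-site.xml", "job.xml" };
        // prints: mapred-site.xml <- mapred-default.xml
        System.out.println(formatSourceChain(sources));
    }
}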
Use of org.apache.hadoop.mapreduce.v2.hs.JobHistory in project hadoop by apache.
The class TestFileNameIndexUtils, method testJobNameWithMultibyteChars.
/**
 * Verify that the name of the job history file is no longer than 255 bytes,
 * even if the job name contains multibyte characters.
 */
@Test
public void testJobNameWithMultibyteChars() throws IOException {
    JobIndexInfo info = new JobIndexInfo();
    JobID oldJobId = JobID.forName(JOB_ID);
    JobId jobId = TypeConverter.toYarn(oldJobId);
    info.setJobId(jobId);
    info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
    info.setUser(USER_NAME);
    StringBuilder sb = new StringBuilder();
    info.setFinishTime(Long.parseLong(FINISH_TIME));
    info.setNumMaps(Integer.parseInt(NUM_MAPS));
    info.setNumReduces(Integer.parseInt(NUM_REDUCES));
    info.setJobStatus(JOB_STATUS);
    info.setQueueName(QUEUE_NAME);
    info.setJobStartTime(Long.parseLong(JOB_START_TIME));
    // '%' is a 1-byte character, URL-encoded into 1 x 3 = 3 characters ("%25")
    for (int i = 0; i < 100; i++) {
        sb.append('%');
    }
    String longJobName = sb.toString();
    info.setJobName(longJobName);
    String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info, 50);
    Assert.assertTrue(jobHistoryFile.length() <= 255);
    // 3 is the index of the job name in the file name
    String trimedJobName = jobHistoryFile.split(FileNameIndexUtils.DELIMITER)[3];
    // 3 x 16 < 50 < 3 x 17, so the length of trimedJobName should be 48
    Assert.assertEquals(48, trimedJobName.getBytes(UTF_8).length);
    // validate the trimmed job name by testing round-trip reversibility
    byte[] trimedJobNameInByte = trimedJobName.getBytes(UTF_8);
    String reEncodedTrimedJobName = new String(trimedJobNameInByte, UTF_8);
    Assert.assertArrayEquals(trimedJobNameInByte,
        reEncodedTrimedJobName.getBytes(UTF_8));
    sb.setLength(0);
    // 'Ω' is a 2-byte character, URL-encoded into 2 x 3 = 6 characters ("%CE%A9")
    for (int i = 0; i < 100; i++) {
        // capital omega
        sb.append('Ω');
    }
    longJobName = sb.toString();
    info.setJobName(longJobName);
    jobHistoryFile = FileNameIndexUtils.getDoneFileName(info, 27);
    Assert.assertTrue(jobHistoryFile.length() <= 255);
    // 3 is the index of the job name in the file name
    trimedJobName = jobHistoryFile.split(FileNameIndexUtils.DELIMITER)[3];
    // 6 x 4 < 27 < 6 x 5, so the length of trimedJobName should be 24
    Assert.assertEquals(24, trimedJobName.getBytes(UTF_8).length);
    // validate the trimmed job name by testing round-trip reversibility
    trimedJobNameInByte = trimedJobName.getBytes(UTF_8);
    reEncodedTrimedJobName = new String(trimedJobNameInByte, UTF_8);
    Assert.assertArrayEquals(trimedJobNameInByte,
        reEncodedTrimedJobName.getBytes(UTF_8));
    sb.setLength(0);
    // '→' is a 3-byte character, URL-encoded into 3 x 3 = 9 characters ("%E2%86%92")
    for (int i = 0; i < 100; i++) {
        // rightwards arrow
        sb.append('→');
    }
    longJobName = sb.toString();
    info.setJobName(longJobName);
    jobHistoryFile = FileNameIndexUtils.getDoneFileName(info, 40);
    Assert.assertTrue(jobHistoryFile.length() <= 255);
    // 3 is the index of the job name in the file name
    trimedJobName = jobHistoryFile.split(FileNameIndexUtils.DELIMITER)[3];
    // 9 x 4 < 40 < 9 x 5, so the length of trimedJobName should be 36
    Assert.assertEquals(36, trimedJobName.getBytes(UTF_8).length);
    // validate the trimmed job name by testing round-trip reversibility
    trimedJobNameInByte = trimedJobName.getBytes(UTF_8);
    reEncodedTrimedJobName = new String(trimedJobNameInByte, UTF_8);
    Assert.assertArrayEquals(trimedJobNameInByte,
        reEncodedTrimedJobName.getBytes(UTF_8));
    sb.setLength(0);
    // '𩸽' is a 4-byte character, URL-encoded into 4 x 3 = 12 characters
    for (int i = 0; i < 100; i++) {
        // a fish name in Kanji (a 4-byte UTF-8 character)
        sb.append("𩸽");
    }
    longJobName = sb.toString();
    info.setJobName(longJobName);
    jobHistoryFile = FileNameIndexUtils.getDoneFileName(info, 49);
    Assert.assertTrue(jobHistoryFile.length() <= 255);
    // 3 is the index of the job name in the file name
    trimedJobName = jobHistoryFile.split(FileNameIndexUtils.DELIMITER)[3];
    // 12 x 4 < 49 < 12 x 5, so the length of trimedJobName should be 48
    Assert.assertEquals(48, trimedJobName.getBytes(UTF_8).length);
    // validate the trimmed job name by testing round-trip reversibility
    trimedJobNameInByte = trimedJobName.getBytes(UTF_8);
    reEncodedTrimedJobName = new String(trimedJobNameInByte, UTF_8);
    Assert.assertArrayEquals(trimedJobNameInByte,
        reEncodedTrimedJobName.getBytes(UTF_8));
    sb.setLength(0);
    // Test a combination of 1- to 4-byte UTF-8 characters
    sb.append('猫')    // cat in Kanji (3 bytes, URL-encoded into 9 characters)
      .append("[")     // (1 byte, URL-encoded into 3 characters)
      .append('λ')     // small lambda (2 bytes, URL-encoded into 6 characters)
      .append('/')     // (1 byte, URL-encoded into 3 characters)
      .append('A')     // not URL-encoded (1 byte, 1 character)
      .append("𩹉")    // flying fish in Kanji (4 bytes, URL-encoded into 12 characters)
      .append('犬');   // dog in Kanji (3 bytes, URL-encoded into 9 characters)
    longJobName = sb.toString();
    info.setJobName(longJobName);
    jobHistoryFile = FileNameIndexUtils.getDoneFileName(info, 23);
    Assert.assertTrue(jobHistoryFile.length() <= 255);
    // 3 is the index of the job name in the file name
    trimedJobName = jobHistoryFile.split(FileNameIndexUtils.DELIMITER)[3];
    // the encoded size of the first 5 characters is 22, and adding the 6th
    // would exceed the limit of 23
    Assert.assertEquals(22, trimedJobName.getBytes(UTF_8).length);
    // validate the trimmed job name by testing round-trip reversibility
    trimedJobNameInByte = trimedJobName.getBytes(UTF_8);
    reEncodedTrimedJobName = new String(trimedJobNameInByte, UTF_8);
    Assert.assertArrayEquals(trimedJobNameInByte,
        reEncodedTrimedJobName.getBytes(UTF_8));
}
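The byte arithmetic in the comments above (each UTF-8 byte of an escaped character becomes a 3-character "%XX" sequence) can be checked directly with the JDK. A quick standalone sketch, not part of the test class, and independent of whatever encoder FileNameIndexUtils uses internally:

import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public class UrlEncodeLengths {
    public static void main(String[] args) throws Exception {
        for (String s : new String[] { "%", "Ω", "→", "𩸽" }) {
            String encoded = URLEncoder.encode(s, StandardCharsets.UTF_8.name());
            System.out.printf("%s -> %s (%d UTF-8 bytes, %d encoded chars)%n",
                s, encoded,
                s.getBytes(StandardCharsets.UTF_8).length, encoded.length());
        }
        // expected output:
        // % -> %25 (1 UTF-8 bytes, 3 encoded chars)
        // Ω -> %CE%A9 (2 UTF-8 bytes, 6 encoded chars)
        // → -> %E2%86%92 (3 UTF-8 bytes, 9 encoded chars)
        // 𩸽 -> %F0%A9%B8%BD (4 UTF-8 bytes, 12 encoded chars)
    }
}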
Use of org.apache.hadoop.mapreduce.v2.hs.JobHistory in project hadoop by apache.
The class HistoryClientService, method initializeWebApp.
@VisibleForTesting
protected void initializeWebApp(Configuration conf) {
    webApp = new HsWebApp(history);
    InetSocketAddress bindAddress = MRWebAppUtil.getJHSWebBindAddress(conf);
    // NOTE: there should be a .at(InetSocketAddress)
    WebApps.$for("jobhistory", HistoryClientService.class, this, "ws")
        .with(conf)
        .withHttpSpnegoKeytabKey(JHAdminConfig.MR_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
        .withHttpSpnegoPrincipalKey(JHAdminConfig.MR_WEBAPP_SPNEGO_USER_NAME_KEY)
        .withCSRFProtection(JHAdminConfig.MR_HISTORY_CSRF_PREFIX)
        .withXFSProtection(JHAdminConfig.MR_HISTORY_XFS_PREFIX)
        .at(NetUtils.getHostPortString(bindAddress))
        .start(webApp);
    String connectHost =
        MRWebAppUtil.getJHSWebappURLWithoutScheme(conf).split(":")[0];
    MRWebAppUtil.setJHSWebappURLWithoutScheme(conf,
        connectHost + ":" + webApp.getListenerAddress().getPort());
}
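For reference, the bind address that MRWebAppUtil.getJHSWebBindAddress(conf) resolves is driven by the standard history-server web UI setting. A minimal sketch, assuming stock defaults (0.0.0.0:19888); the class name is illustrative:

import org.apache.hadoop.conf.Configuration;

public class JhsWebAddressSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // mapreduce.jobhistory.webapp.address is the standard key for the
        // JHS web UI; 0.0.0.0:19888 is the stock default
        String addr = conf.get("mapreduce.jobhistory.webapp.address",
            "0.0.0.0:19888");
        System.out.println("JHS web UI would bind at " + addr);
    }
}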
Use of org.apache.hadoop.mapreduce.v2.hs.JobHistory in project hadoop by apache.
The class JobHistoryServer, method serviceInit.
@Override
protected void serviceInit(Configuration conf) throws Exception {
    Configuration config = new YarnConfiguration(conf);
    config.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);
    // This is required for WebApps to use https if enabled.
    MRWebAppUtil.initialize(getConfig());
    try {
        doSecureLogin(conf);
    } catch (IOException ie) {
        throw new YarnRuntimeException("History Server Failed to login", ie);
    }
    jobHistoryService = new JobHistory();
    historyContext = (HistoryContext) jobHistoryService;
    stateStore = createStateStore(conf);
    this.jhsDTSecretManager = createJHSSecretManager(conf, stateStore);
    clientService = createHistoryClientService();
    aggLogDelService = new AggregatedLogDeletionService();
    hsAdminServer = new HSAdminServer(aggLogDelService, jobHistoryService);
    addService(stateStore);
    addService(new HistoryServerSecretManagerService());
    addService(jobHistoryService);
    addService(clientService);
    addService(aggLogDelService);
    addService(hsAdminServer);
    DefaultMetricsSystem.initialize("JobHistoryServer");
    JvmMetrics jm = JvmMetrics.initSingleton("JobHistoryServer", null);
    pauseMonitor = new JvmPauseMonitor();
    addService(pauseMonitor);
    jm.setPauseMonitor(pauseMonitor);
    super.serviceInit(config);
}
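The addService() calls rely on the YARN CompositeService lifecycle: children added before super.serviceInit() are inited and started with the parent and stopped in reverse order. A minimal sketch of that pattern (the TinyServer class is hypothetical, not part of Hadoop):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.CompositeService;

public class TinyServer extends CompositeService {

    public TinyServer() {
        super(TinyServer.class.getName());
    }

    @Override
    protected void serviceInit(Configuration conf) throws Exception {
        // children registered here follow the parent's lifecycle,
        // just like stateStore, jobHistoryService, etc. above
        addService(new AbstractService("child-a") { });
        addService(new AbstractService("child-b") { });
        super.serviceInit(conf);
    }

    public static void main(String[] args) {
        TinyServer server = new TinyServer();
        server.init(new Configuration());
        server.start();
        System.out.println(server.getServiceState()); // STARTED
        server.stop();
    }
}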