Use of org.apache.hadoop.mapreduce.v2.app.job.Job in project hadoop by apache.
The class MRAppMasterTestLaunchTime, method testMRAppMasterJobLaunchTime.
@Test
public void testMRAppMasterJobLaunchTime() throws IOException, InterruptedException {
  String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
  String containerIdStr = "container_1317529182569_0004_000002_1";
  String userName = "TestAppMasterUser";
  JobConf conf = new JobConf();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  conf.setInt(MRJobConfig.NUM_REDUCES, 0);
  conf.set(JHAdminConfig.MR_HS_JHIST_FORMAT, "json");
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.fromString(applicationAttemptIdStr);
  JobId jobId = TypeConverter.toYarn(
      TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
  File dir = new File(MRApps.getStagingAreaDir(conf, userName).toString(), jobId.toString());
  dir.mkdirs();
  File historyFile = new File(JobHistoryUtils.getStagingJobHistoryFile(
      new Path(dir.toURI().toString()), jobId,
      (applicationAttemptId.getAttemptId() - 1)).toUri().getRawPath());
  historyFile.createNewFile();
  FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(historyFile), null);
  EventWriter writer = new EventWriter(out, EventWriter.WriteMode.JSON);
  writer.close();
  FileSystem fs = FileSystem.get(conf);
  JobSplitWriter.createSplitFiles(new Path(dir.getAbsolutePath()), conf, fs,
      new org.apache.hadoop.mapred.InputSplit[0]);
  ContainerId containerId = ContainerId.fromString(containerIdStr);
  MRAppMasterTestLaunchTime appMaster = new MRAppMasterTestLaunchTime(
      applicationAttemptId, containerId, "host", -1, -1, System.currentTimeMillis());
  MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
  appMaster.stop();
  assertTrue("Job launch time should not be negative.", appMaster.jobLaunchTime.get() >= 0);
}
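The jobLaunchTime field asserted above belongs to the MRAppMasterTestLaunchTime test subclass, which is not shown in this snippet. A minimal sketch of how such a subclass can capture the launch time by intercepting the JOB_INITED job-history event follows; it extends MRAppMaster directly and is an illustration of the idea, not the verbatim Hadoop test class.

// Illustrative sketch (assumption: extends MRAppMaster directly, field name taken
// from the assertion above).
static class MRAppMasterTestLaunchTime extends MRAppMaster {

  // Captured from the JOB_INITED history event; read back by the test assertion.
  final AtomicLong jobLaunchTime = new AtomicLong(0L);

  MRAppMasterTestLaunchTime(ApplicationAttemptId applicationAttemptId,
      ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
      long appSubmitTime) {
    super(applicationAttemptId, containerId, nmHost, nmPort, nmHttpPort, appSubmitTime);
  }

  @Override
  protected EventHandler<JobHistoryEvent> createJobHistoryHandler(AppContext context) {
    // Wrap the normal history handler and record the launch time when the job is inited.
    return new JobHistoryEventHandler(context, getStartCount()) {
      @Override
      public void handleEvent(JobHistoryEvent event) {
        if (event.getHistoryEvent().getEventType() == EventType.JOB_INITED) {
          JobInitedEvent jie = (JobInitedEvent) event.getHistoryEvent();
          jobLaunchTime.set(jie.getLaunchTime());
        }
        super.handleEvent(event);
      }
    };
  }
}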
Use of org.apache.hadoop.mapreduce.v2.app.job.Job in project hadoop by apache.
The class TestTaskAttemptListenerImpl, method testCheckpointIDTracking.
@Test
public void testCheckpointIDTracking() throws IOException, InterruptedException {
  SystemClock clock = SystemClock.getInstance();
  org.apache.hadoop.mapreduce.v2.app.job.Task mockTask =
      mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class);
  when(mockTask.canCommit(any(TaskAttemptId.class))).thenReturn(true);
  Job mockJob = mock(Job.class);
  when(mockJob.getTask(any(TaskId.class))).thenReturn(mockTask);
  Dispatcher dispatcher = mock(Dispatcher.class);
  @SuppressWarnings("unchecked")
  EventHandler<Event> ea = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(ea);
  RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
  AppContext appCtx = mock(AppContext.class);
  when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);
  when(appCtx.getClock()).thenReturn(clock);
  when(appCtx.getEventHandler()).thenReturn(ea);
  JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
  final TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
  when(appCtx.getEventHandler()).thenReturn(ea);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(appCtx);
  TaskAttemptListenerImpl listener =
      new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, policy) {

        @Override
        protected void registerHeartbeatHandler(Configuration conf) {
          taskHeartbeatHandler = hbHandler;
        }
      };
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.TASK_PREEMPTION, true);
  //conf.setBoolean("preemption.reduce", true);
  listener.init(conf);
  listener.start();
  TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 0);
  List<Path> partialOut = new ArrayList<Path>();
  partialOut.add(new Path("/prev1"));
  partialOut.add(new Path("/prev2"));
  Counters counters = mock(Counters.class);
  final long CBYTES = 64L * 1024 * 1024;
  final long CTIME = 4344L;
  final Path CLOC = new Path("/test/1");
  Counter cbytes = mock(Counter.class);
  when(cbytes.getValue()).thenReturn(CBYTES);
  Counter ctime = mock(Counter.class);
  when(ctime.getValue()).thenReturn(CTIME);
  when(counters.findCounter(eq(EnumCounter.CHECKPOINT_BYTES))).thenReturn(cbytes);
  when(counters.findCounter(eq(EnumCounter.CHECKPOINT_MS))).thenReturn(ctime);
  // propagating a taskstatus that contains a checkpoint id
  TaskCheckpointID incid = new TaskCheckpointID(new FSCheckpointID(CLOC), partialOut, counters);
  listener.setCheckpointID(org.apache.hadoop.mapred.TaskID.downgrade(tid.getTaskID()), incid);
  // and try to get it back
  CheckpointID outcid = listener.getCheckpointID(tid.getTaskID());
  TaskCheckpointID tcid = (TaskCheckpointID) outcid;
  assertEquals(CBYTES, tcid.getCheckpointBytes());
  assertEquals(CTIME, tcid.getCheckpointTime());
  assertTrue(partialOut.containsAll(tcid.getPartialCommittedOutput()));
  assertTrue(tcid.getPartialCommittedOutput().containsAll(partialOut));
  //assert it worked
  assert outcid == incid;
  listener.stop();
}
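The Counters and Counter objects above are Mockito mocks. For comparison, a brief sketch of the same checkpoint metadata built with real mapred Counters (illustrative only; the values are the constants used in the test):

// Illustrative sketch: a TaskCheckpointID backed by real counters instead of mocks.
Counters counters = new Counters();
counters.findCounter(EnumCounter.CHECKPOINT_BYTES).setValue(64L * 1024 * 1024);
counters.findCounter(EnumCounter.CHECKPOINT_MS).setValue(4344L);
List<Path> partialOut = Arrays.asList(new Path("/prev1"), new Path("/prev2"));
TaskCheckpointID cid =
    new TaskCheckpointID(new FSCheckpointID(new Path("/test/1")), partialOut, counters);
// The accessors read back the counter values set above.
assert cid.getCheckpointBytes() == 64L * 1024 * 1024;
assert cid.getCheckpointTime() == 4344L;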
Use of org.apache.hadoop.mapreduce.v2.app.job.Job in project hadoop by apache.
The class MRApp, method waitForState.
public void waitForState(Job job, JobState finalState) throws Exception {
  int timeoutSecs = 0;
  JobReport report = job.getReport();
  while (!finalState.equals(report.getJobState()) && timeoutSecs++ < 20) {
    System.out.println("Job State is : " + report.getJobState()
        + " Waiting for state : " + finalState
        + " map progress : " + report.getMapProgress()
        + " reduce progress : " + report.getReduceProgress());
    report = job.getReport();
    Thread.sleep(500);
  }
  System.out.println("Job State is : " + report.getJobState());
  Assert.assertEquals("Job state is not correct (timedout)", finalState, job.getState());
}
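MRApp exposes the same polling pattern for tasks and task attempts. A hedged sketch of the task-level counterpart, mirroring the loop above (the exact log and assertion text are assumptions):

public void waitForState(Task task, TaskState finalState) throws Exception {
  // Poll the task report until it reaches the desired state or ~10 seconds elapse.
  int timeoutSecs = 0;
  TaskReport report = task.getReport();
  while (!finalState.equals(report.getTaskState()) && timeoutSecs++ < 20) {
    System.out.println("Task State is : " + report.getTaskState()
        + " Waiting for state : " + finalState);
    report = task.getReport();
    Thread.sleep(500);
  }
  System.out.println("Task State is : " + report.getTaskState());
  Assert.assertEquals("Task state is not correct (timedout)", finalState, report.getTaskState());
}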
Use of org.apache.hadoop.mapreduce.v2.app.job.Job in project hadoop by apache.
The class MRApp, method submit.
public Job submit(Configuration conf, boolean mapSpeculative, boolean reduceSpeculative)
    throws Exception {
  String user = conf.get(MRJobConfig.USER_NAME,
      UserGroupInformation.getCurrentUser().getShortUserName());
  conf.set(MRJobConfig.USER_NAME, user);
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, testAbsPath.toString());
  conf.setBoolean(MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR, true);
  // TODO: fix the bug where the speculator gets events with
  // not-fully-constructed objects. For now, disable speculative exec
  conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, mapSpeculative);
  conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, reduceSpeculative);
  init(conf);
  start();
  DefaultMetricsSystem.shutdown();
  Job job = getContext().getAllJobs().values().iterator().next();
  if (assignedQueue != null) {
    job.setQueueName(assignedQueue);
  }
  // Write job.xml
  String jobFile = MRApps.getJobFile(conf, user, TypeConverter.fromYarn(job.getID()));
  LOG.info("Writing job conf to " + jobFile);
  new File(jobFile).getParentFile().mkdirs();
  conf.writeXml(new FileOutputStream(jobFile));
  return job;
}
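Tests usually call a single-argument convenience overload rather than passing the speculation flags explicitly; a sketch of that overload, assuming speculative execution is disabled by default:

public Job submit(Configuration conf) throws Exception {
  // Delegate to the full overload above with map and reduce speculation turned off.
  return submit(conf, false, false);
}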
Use of org.apache.hadoop.mapreduce.v2.app.job.Job in project hadoop by apache.
The class MRApp, method verifyCompleted.
public void verifyCompleted() {
  for (Job job : getContext().getAllJobs().values()) {
    JobReport jobReport = job.getReport();
    System.out.println("Job start time :" + jobReport.getStartTime());
    System.out.println("Job finish time :" + jobReport.getFinishTime());
    Assert.assertTrue("Job start time is not less than finish time",
        jobReport.getStartTime() <= jobReport.getFinishTime());
    Assert.assertTrue("Job finish time is in future",
        jobReport.getFinishTime() <= System.currentTimeMillis());
    for (Task task : job.getTasks().values()) {
      TaskReport taskReport = task.getReport();
      System.out.println("Task start time : " + taskReport.getStartTime());
      System.out.println("Task finish time : " + taskReport.getFinishTime());
      Assert.assertTrue("Task start time is not less than finish time",
          taskReport.getStartTime() <= taskReport.getFinishTime());
      for (TaskAttempt attempt : task.getAttempts().values()) {
        TaskAttemptReport attemptReport = attempt.getReport();
        Assert.assertTrue("Attempt start time is not less than finish time",
            attemptReport.getStartTime() <= attemptReport.getFinishTime());
      }
    }
  }
}
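Taken together, these helpers give the usual shape of an MRApp-based test: submit a job, wait for it to finish, then verify the reported timestamps. A minimal sketch, assuming the common MRApp constructor (maps, reduces, autoComplete, testName, cleanOnStart):

// Illustrative test flow using the helpers shown above.
MRApp app = new MRApp(2, 1, true, "TestJobFlow", true);
Job job = app.submit(new Configuration());
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
app.stop();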