Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class TestKill, method testKillTaskWaitKillJobBeforeTA_DONE.
@Test
public void testKillTaskWaitKillJobBeforeTA_DONE() throws Exception {
  CountDownLatch latch = new CountDownLatch(1);
  final Dispatcher dispatcher = new MyAsyncDispatch(latch, JobEventType.JOB_KILL);
  MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true) {

    @Override
    public Dispatcher createDispatcher() {
      return dispatcher;
    }
  };
  Job job = app.submit(new Configuration());
  JobId jobId = app.getJobId();
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task mapTask = it.next();
  Task reduceTask = it.next();
  app.waitForState(mapTask, TaskState.RUNNING);
  app.waitForState(reduceTask, TaskState.RUNNING);
  TaskAttempt mapAttempt = mapTask.getAttempts().values().iterator().next();
  app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
  TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
  app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
  // The order in the dispatch event queue, from first to last:
  //   JobEventType.JOB_KILL
  //   TA_DONE
  //   TaskEventType.T_KILL (from JobEventType.JOB_KILL handling)
  //   TaskAttemptEventType.TA_CONTAINER_COMPLETED (from TA_DONE handling)
  //   TaskAttemptEventType.TA_KILL (from TaskEventType.T_KILL handling)
  //   TaskEventType.T_ATTEMPT_SUCCEEDED (from TA_CONTAINER_COMPLETED handling)
  //   TaskEventType.T_ATTEMPT_KILLED (from TA_KILL handling)
  // Now kill the job
  app.getContext().getEventHandler().handle(
      new JobEvent(jobId, JobEventType.JOB_KILL));
  // Finish map
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(mapAttempt.getID(), TaskAttemptEventType.TA_DONE));
  // unblock
  latch.countDown();
  app.waitForInternalState((JobImpl) job, JobStateInternal.KILLED);
}
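The test relies on MyAsyncDispatch, which is defined elsewhere in TestKill and not shown here. Below is a rough, hedged sketch of how such a latch-gated dispatcher could look; the class shape and field names are assumptions, not the actual Hadoop source. The idea is to extend YARN's AsyncDispatcher and hold back events of one type until the test counts the latch down, which is what produces the event ordering listed in the comment inside the test.

// Sketch only; imports assumed: org.apache.hadoop.yarn.event.AsyncDispatcher,
// org.apache.hadoop.yarn.event.Event, java.util.concurrent.CountDownLatch.
class MyAsyncDispatch extends AsyncDispatcher {

  private final CountDownLatch latch;
  private final JobEventType blockedJobEventType;

  MyAsyncDispatch(CountDownLatch latch, JobEventType blockedJobEventType) {
    this.latch = latch;
    this.blockedJobEventType = blockedJobEventType;
  }

  @Override
  protected void dispatch(Event event) {
    // Hold the gated event (here JOB_KILL) back until the test calls
    // latch.countDown(), so later events such as TA_DONE are already
    // queued behind it when it is finally handled.
    if (event.getType() == blockedJobEventType) {
      try {
        latch.await();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
    super.dispatch(event);
  }
}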
Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class MRAppMasterTestLaunchTime, method testMRAppMasterSuccessLock.
@Test
public void testMRAppMasterSuccessLock() throws IOException, InterruptedException {
  String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
  String containerIdStr = "container_1317529182569_0004_000002_1";
  String userName = "TestAppMasterUser";
  JobConf conf = new JobConf();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.fromString(applicationAttemptIdStr);
  JobId jobId = TypeConverter.toYarn(
      TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
  Path start = MRApps.getStartJobCommitFile(conf, userName, jobId);
  Path end = MRApps.getEndJobCommitSuccessFile(conf, userName, jobId);
  FileSystem fs = FileSystem.get(conf);
  fs.create(start).close();
  fs.create(end).close();
  ContainerId containerId = ContainerId.fromString(containerIdStr);
  MRAppMaster appMaster = new MRAppMasterTest(applicationAttemptId, containerId,
      "host", -1, -1, System.currentTimeMillis(), false, false);
  boolean caught = false;
  try {
    MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
  } catch (IOException e) {
    // the IOException is expected
    LOG.info("Caught expected Exception", e);
    caught = true;
  }
  assertTrue(caught);
  assertTrue(appMaster.errorHappenedShutDown);
  assertEquals(JobStateInternal.SUCCEEDED, appMaster.forcedState);
  appMaster.stop();
  // verify the final status is SUCCEEDED
  verifyFailedStatus((MRAppMasterTest) appMaster, "SUCCEEDED");
}
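The JobId used for the commit marker files is derived from the application attempt with a TypeConverter round trip. A small hedged sketch of that derivation, using the same attempt string as the test:

// TypeConverter.fromYarn(ApplicationId) yields the classic
// org.apache.hadoop.mapreduce.JobID, and toYarn converts it to the
// MapReduce v2 JobId record used by the AM-side APIs.
ApplicationAttemptId attemptId =
    ApplicationAttemptId.fromString("appattempt_1317529182569_0004_000002");
org.apache.hadoop.mapreduce.JobID classicJobId =
    TypeConverter.fromYarn(attemptId.getApplicationId());
JobId jobId = TypeConverter.toYarn(classicJobId);
// Expected to print something like "job_1317529182569_0004".
System.out.println(jobId);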
Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class MRAppMasterTestLaunchTime, method testMRAppMasterFailLock.
@Test
public void testMRAppMasterFailLock() throws IOException, InterruptedException {
  String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
  String containerIdStr = "container_1317529182569_0004_000002_1";
  String userName = "TestAppMasterUser";
  JobConf conf = new JobConf();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.fromString(applicationAttemptIdStr);
  JobId jobId = TypeConverter.toYarn(
      TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
  Path start = MRApps.getStartJobCommitFile(conf, userName, jobId);
  Path end = MRApps.getEndJobCommitFailureFile(conf, userName, jobId);
  FileSystem fs = FileSystem.get(conf);
  fs.create(start).close();
  fs.create(end).close();
  ContainerId containerId = ContainerId.fromString(containerIdStr);
  MRAppMaster appMaster = new MRAppMasterTest(applicationAttemptId, containerId,
      "host", -1, -1, System.currentTimeMillis(), false, false);
  boolean caught = false;
  try {
    MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
  } catch (IOException e) {
    // the IOException is expected
    LOG.info("Caught expected Exception", e);
    caught = true;
  }
  assertTrue(caught);
  assertTrue(appMaster.errorHappenedShutDown);
  assertEquals(JobStateInternal.FAILED, appMaster.forcedState);
  appMaster.stop();
  // verify the final status is FAILED
  verifyFailedStatus((MRAppMasterTest) appMaster, "FAILED");
}
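The two lock tests differ only in which end-of-commit marker they create: a success marker forces JobStateInternal.SUCCEEDED, a failure marker forces JobStateInternal.FAILED. The sketch below is an illustration of the decision these tests exercise, not the actual MRAppMaster source; the ERROR branch for a start marker with no end marker is an assumption drawn from the commit-file helpers used above.

// Illustrative sketch only: the forced state implied by whichever commit
// marker files exist when the AM starts up.
Path startFile = MRApps.getStartJobCommitFile(conf, userName, jobId);
Path successFile = MRApps.getEndJobCommitSuccessFile(conf, userName, jobId);
Path failureFile = MRApps.getEndJobCommitFailureFile(conf, userName, jobId);
FileSystem fs = FileSystem.get(conf);

JobStateInternal forcedState = null;
if (fs.exists(successFile)) {
  // testMRAppMasterSuccessLock creates this marker
  forcedState = JobStateInternal.SUCCEEDED;
} else if (fs.exists(failureFile)) {
  // testMRAppMasterFailLock creates this marker
  forcedState = JobStateInternal.FAILED;
} else if (fs.exists(startFile)) {
  // Commit started but never finished: the AM cannot tell whether it is
  // safe to repeat it (assumption for this sketch).
  forcedState = JobStateInternal.ERROR;
}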
Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class MRAppMasterTestLaunchTime, method testMRAppMasterJobLaunchTime.
@Test
public void testMRAppMasterJobLaunchTime() throws IOException, InterruptedException {
  String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
  String containerIdStr = "container_1317529182569_0004_000002_1";
  String userName = "TestAppMasterUser";
  JobConf conf = new JobConf();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  conf.setInt(MRJobConfig.NUM_REDUCES, 0);
  conf.set(JHAdminConfig.MR_HS_JHIST_FORMAT, "json");
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.fromString(applicationAttemptIdStr);
  JobId jobId = TypeConverter.toYarn(
      TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
  File dir = new File(MRApps.getStagingAreaDir(conf, userName).toString(),
      jobId.toString());
  dir.mkdirs();
  File historyFile = new File(JobHistoryUtils.getStagingJobHistoryFile(
      new Path(dir.toURI().toString()), jobId,
      (applicationAttemptId.getAttemptId() - 1)).toUri().getRawPath());
  historyFile.createNewFile();
  FSDataOutputStream out =
      new FSDataOutputStream(new FileOutputStream(historyFile), null);
  EventWriter writer = new EventWriter(out, EventWriter.WriteMode.JSON);
  writer.close();
  FileSystem fs = FileSystem.get(conf);
  JobSplitWriter.createSplitFiles(new Path(dir.getAbsolutePath()), conf, fs,
      new org.apache.hadoop.mapred.InputSplit[0]);
  ContainerId containerId = ContainerId.fromString(containerIdStr);
  MRAppMasterTestLaunchTime appMaster = new MRAppMasterTestLaunchTime(
      applicationAttemptId, containerId, "host", -1, -1, System.currentTimeMillis());
  MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
  appMaster.stop();
  assertTrue("Job launch time should not be negative.",
      appMaster.jobLaunchTime.get() >= 0);
}
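initAndStartAppMaster can only get far enough to record a launch time because the test fabricates the previous attempt's artifacts under the staging directory: an empty JSON-format jhist file (matching the configured MR_HS_JHIST_FORMAT) and split files written by JobSplitWriter. The hypothetical sanity check below reuses the test's conf, dir, and historyFile variables and is not part of the original test; "job.splitmetainfo" is assumed here to be the split metadata file name that createSplitFiles writes.

// Hypothetical check of the fabricated staging layout (assumption, for
// illustration only).
FileSystem localFs = FileSystem.getLocal(conf);
assertTrue(localFs.exists(new Path(historyFile.toURI())));
assertTrue(localFs.exists(new Path(new Path(dir.toURI()), "job.splitmetainfo")));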
Use of org.apache.hadoop.mapreduce.v2.api.records.JobId in project hadoop by apache.
From the class TestLocalContainerLauncher, method testRenameMapOutputForReduce.
@Test
public void testRenameMapOutputForReduce() throws Exception {
  final JobConf conf = new JobConf();
  final MROutputFiles mrOutputFiles = new MROutputFiles();
  mrOutputFiles.setConf(conf);
  // make sure both dirs are distinct
  conf.set(MRConfig.LOCAL_DIR, localDirs[0].toString());
  final Path mapOut = mrOutputFiles.getOutputFileForWrite(1);
  conf.set(MRConfig.LOCAL_DIR, localDirs[1].toString());
  final Path mapOutIdx = mrOutputFiles.getOutputIndexFileForWrite(1);
  Assert.assertNotEquals("Paths must be different!",
      mapOut.getParent(), mapOutIdx.getParent());
  // make both dirs part of LOCAL_DIR
  conf.setStrings(MRConfig.LOCAL_DIR, localDirs);
  final FileContext lfc = FileContext.getLocalFSFileContext(conf);
  lfc.create(mapOut, EnumSet.of(CREATE)).close();
  lfc.create(mapOutIdx, EnumSet.of(CREATE)).close();
  final JobId jobId = MRBuilderUtils.newJobId(12345L, 1, 2);
  final TaskId tid = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  final TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 0);
  LocalContainerLauncher.renameMapOutputForReduce(conf, taid, mrOutputFiles);
}
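The snippet only checks that renameMapOutputForReduce completes without error. As a hedged follow-up, not part of the original snippet, one could also assert that the map-side files were actually moved away, since the method renames the output and index files to the reduce-side input location. The check below reuses the test's lfc, mapOut, and mapOutIdx variables.

// Illustrative follow-up assertions (assumption: the rename removes the
// files from their original map-side paths).
Assert.assertFalse("Map output should have been moved",
    lfc.util().exists(mapOut));
Assert.assertFalse("Map output index should have been moved",
    lfc.util().exists(mapOutIdx));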