Use of org.apache.hadoop.mapreduce.SleepJob in project hadoop by apache.
The class TestMRJobsWithHistoryService, method testJobHistoryData.
@Test(timeout = 90000)
public void testJobHistoryData() throws IOException, InterruptedException,
    AvroRemoteException, ClassNotFoundException {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
        + " not found. Not running test.");
    return;
  }
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(mrCluster.getConfig());
  // Job with 3 maps and 2 reduces
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  // The AppMaster jar itself.
  job.addFileToClassPath(APP_JAR);
  job.waitForCompletion(true);
  Counters counterMR = job.getCounters();
  JobId jobId = TypeConverter.toYarn(job.getJobID());
  ApplicationId appID = jobId.getAppId();
  // Poll (up to 60 seconds) until the RM reports the app in a terminal state.
  int pollElapsed = 0;
  while (true) {
    Thread.sleep(1000);
    pollElapsed += 1000;
    if (TERMINAL_RM_APP_STATES.contains(
        mrCluster.getResourceManager().getRMContext().getRMApps()
            .get(appID).getState())) {
      break;
    }
    if (pollElapsed >= 60000) {
      LOG.warn("Application did not reach terminal state within 60 seconds");
      break;
    }
  }
  Assert.assertEquals(RMAppState.FINISHED,
      mrCluster.getResourceManager().getRMContext().getRMApps()
          .get(appID).getState());
  // Now that the app has finished, getCounters() is served by the history service.
  Counters counterHS = job.getCounters();
  // TODO: the Assert below worked; still need to decide whether to compare
  // each field or convert to a V2 counter and compare.
  LOG.info("CounterHS " + counterHS);
  LOG.info("CounterMR " + counterMR);
  Assert.assertEquals(counterHS, counterMR);
  HSClientProtocol historyClient = instantiateHistoryProxy();
  GetJobReportRequest gjReq = Records.newRecord(GetJobReportRequest.class);
  gjReq.setJobId(jobId);
  JobReport jobReport = historyClient.getJobReport(gjReq).getJobReport();
  verifyJobReport(jobReport, jobId);
}
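The helper instantiateHistoryProxy() is not shown in this snippet. A minimal sketch of how such a proxy is typically built, assuming the history server address is read from JHAdminConfig.MR_HISTORY_ADDRESS and the proxy is created through YarnRPC (an assumption about the helper, not its verbatim body):

private HSClientProtocol instantiateHistoryProxy() {
  // Assumption: the JobHistoryServer address lives in the mini-cluster config.
  final Configuration conf = mrCluster.getConfig();
  final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);
  final YarnRPC rpc = YarnRPC.create(conf);
  // Build an RPC proxy for the history server's client protocol.
  return (HSClientProtocol) rpc.getProxy(HSClientProtocol.class,
      NetUtils.createSocketAddr(serviceAddr), conf);
}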
Use of org.apache.hadoop.mapreduce.SleepJob in project hadoop by apache.
The class TestLocalJobSubmission, method testJobMaxMapConfig.
/**
 * Test the JOB_MAX_MAP configuration.
 * @throws Exception
 */
@Test
public void testJobMaxMapConfig() throws Exception {
  Configuration conf = new Configuration();
  conf.set(MRConfig.FRAMEWORK_NAME, "local");
  // A map limit of 0 means the single map task requested below must be rejected.
  conf.setInt(MRJobConfig.JOB_MAX_MAP, 0);
  final String[] args = { "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
    fail("Job should fail");
  } catch (IllegalArgumentException e) {
    assertTrue(e.getLocalizedMessage().contains(
        "The number of map tasks 1 exceeded limit"));
  }
}
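The IllegalArgumentException comes from submit-time validation of the requested map count against MRJobConfig.JOB_MAX_MAP. A hedged sketch of that guard, approximating (not quoting) the check Hadoop's job submission path performs; numMaps stands for the number of maps the job requests:

// Sketch: an approximation of the submit-time guard this test exercises.
static void checkMaxMaps(Configuration conf, int numMaps) {
  // A negative limit (the default) disables the check.
  int maxMaps = conf.getInt(MRJobConfig.JOB_MAX_MAP, -1);
  if (maxMaps >= 0 && numMaps > maxMaps) {
    throw new IllegalArgumentException("The number of map tasks " + numMaps
        + " exceeded limit " + maxMaps);
  }
}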
Use of org.apache.hadoop.mapreduce.SleepJob in project hadoop by apache.
The class TestLocalJobSubmission, method testLocalJobEncryptedIntermediateData.
/**
 * Test local job submission with intermediate data encryption enabled.
 * @throws IOException
 */
@Test
public void testLocalJobEncryptedIntermediateData() throws IOException {
  Configuration conf = new Configuration();
  conf.set(MRConfig.FRAMEWORK_NAME, "local");
  conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
  final String[] args = { "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("local job res is not 0:", 0, res);
}
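The same toggle can be supplied on the command line through ToolRunner's generic options. A sketch, assuming MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA resolves to the property key shown (an assumption, not confirmed by this snippet):

// Equivalent run with encryption enabled via a generic -D option.
// Assumption: the key matches MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA.
final String[] encryptedArgs = {
    "-D", "mapreduce.job.encrypted-intermediate-data=true",
    "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };
int result = ToolRunner.run(new Configuration(), new SleepJob(), encryptedArgs);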
Use of org.apache.hadoop.mapreduce.SleepJob in project hadoop by apache.
The class TestLocalJobSubmission, method testLocalJobLibjarsOption.
/**
 * Test the local job submission options of -jt local -libjars.
 *
 * @param conf the {@link Configuration} to use
 * @throws IOException thrown if there's an error creating the JAR file
 */
private void testLocalJobLibjarsOption(Configuration conf) throws IOException {
  Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000");
  conf.set(MRConfig.FRAMEWORK_NAME, "local");
  final String[] args = { "-jt", "local", "-libjars", jarPath.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("local job res is not 0:", 0, res);
}
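The makeJar(Path) helper is not shown; -libjars only needs a well-formed JAR file on disk, so a plausible sketch (the entry name and contents are illustrative assumptions):

// Hedged sketch of a makeJar(Path) helper: write a minimal valid JAR.
private Path makeJar(Path p) throws IOException {
  FileOutputStream fos = new FileOutputStream(new File(p.toUri().getPath()));
  JarOutputStream jos = new JarOutputStream(fos);
  ZipEntry ze = new ZipEntry("test.jar.inside");  // illustrative entry name
  jos.putNextEntry(ze);
  jos.write("inside the jar".getBytes());
  jos.closeEntry();
  jos.close();
  return p;
}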
Use of org.apache.hadoop.mapreduce.SleepJob in project hadoop by apache.
The class TestBinaryTokenFile, method testTokenCacheFile.
/**
 * Run a distributed job with the -tokenCacheFile option and
 * verify that no exception occurs.
 * @throws IOException
 */
@Test
public void testTokenCacheFile() throws IOException {
  Configuration conf = mrCluster.getConfig();
  createBinaryTokenFile(conf);
  // Provide namenode names for the job to get the delegation tokens for.
  final String nnUri = dfsCluster.getURI(0).toString();
  conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
  // Pass the token file name as an argument.
  final String[] args = { "-tokenCacheFile", binaryTokenFileName.toString(),
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };
  int res = -1;
  try {
    res = ToolRunner.run(conf, new SleepJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, res);
}
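createBinaryTokenFile(conf) is another test helper not shown here. A hedged sketch of what it needs to do: fetch HDFS delegation tokens into a Credentials object and persist them in the binary token-storage format that -tokenCacheFile reads (the renewer string is a placeholder, and binaryTokenFileName is assumed to be the same Path the test passes on the command line):

// Hedged sketch of createBinaryTokenFile: obtain NN delegation tokens
// and write them out in the binary token-storage format.
private static void createBinaryTokenFile(Configuration conf) throws IOException {
  Credentials creds = new Credentials();
  FileSystem fs = FileSystem.get(conf);
  fs.addDelegationTokens("renewer", creds);  // "renewer" is a placeholder
  // Assumption: binaryTokenFileName is the Path read by -tokenCacheFile above.
  creds.writeTokenStorageFile(binaryTokenFileName, conf);
}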