Example 6 with SleepJob

Use of org.apache.hadoop.mapreduce.SleepJob in project hadoop by apache.

The class TestMRJobsWithHistoryService, method testJobHistoryData.

@Test(timeout = 90000)
public void testJobHistoryData() throws IOException, InterruptedException, AvroRemoteException, ClassNotFoundException {
    if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
        return;
    }
    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(mrCluster.getConfig());
    // Job with 3 maps and 2 reduces
    Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
    job.setJarByClass(SleepJob.class);
    // The AppMaster jar itself.
    job.addFileToClassPath(APP_JAR);
    job.waitForCompletion(true);
    Counters counterMR = job.getCounters();
    JobId jobId = TypeConverter.toYarn(job.getJobID());
    ApplicationId appID = jobId.getAppId();
    int pollElapsed = 0;
    while (true) {
        Thread.sleep(1000);
        pollElapsed += 1000;
        if (TERMINAL_RM_APP_STATES.contains(mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState())) {
            break;
        }
        if (pollElapsed >= 60000) {
            LOG.warn("application did not reach terminal state within 60 seconds");
            break;
        }
    }
    Assert.assertEquals(RMAppState.FINISHED, mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState());
    Counters counterHS = job.getCounters();
    // TODO: the assert below worked, but decide whether we should compare
    // each field or convert to V2 counters and compare.
    LOG.info("CounterHS " + counterHS);
    LOG.info("CounterMR " + counterMR);
    Assert.assertEquals(counterHS, counterMR);
    HSClientProtocol historyClient = instantiateHistoryProxy();
    GetJobReportRequest gjReq = Records.newRecord(GetJobReportRequest.class);
    gjReq.setJobId(jobId);
    JobReport jobReport = historyClient.getJobReport(gjReq).getJobReport();
    verifyJobReport(jobReport, jobId);
}
Also used : HSClientProtocol(org.apache.hadoop.mapreduce.v2.api.HSClientProtocol) SleepJob(org.apache.hadoop.mapreduce.SleepJob) Counters(org.apache.hadoop.mapreduce.Counters) Job(org.apache.hadoop.mapreduce.Job) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) File(java.io.File) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) GetJobReportRequest(org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest) JobReport(org.apache.hadoop.mapreduce.v2.api.records.JobReport) Test(org.junit.Test)
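The six positional arguments to createJob above are easy to misread. For reference, a minimal standalone sketch, assuming SleepJob's createJob(numMapper, numReducer, mapSleepTime, mapSleepCount, reduceSleepTime, reduceSleepCount) signature with sleep times in milliseconds (the class name SleepJobSketch is illustrative, not Hadoop code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.SleepJob;

public class SleepJobSketch {
    public static void main(String[] args) throws Exception {
        SleepJob sleepJob = new SleepJob();
        sleepJob.setConf(new Configuration());
        // Same values as the test above: 3 maps, 2 reduces,
        // each map sleeps 1 x 1000 ms, each reduce sleeps 1 x 500 ms.
        Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}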

Example 7 with SleepJob

Use of org.apache.hadoop.mapreduce.SleepJob in project hadoop by apache.

The class TestLocalJobSubmission, method testJobMaxMapConfig.

/**
   * Test the JOB_MAX_MAP configuration.
   * @throws Exception
   */
@Test
public void testJobMaxMapConfig() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRConfig.FRAMEWORK_NAME, "local");
    conf.setInt(MRJobConfig.JOB_MAX_MAP, 0);
    final String[] args = { "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };
    int res = -1;
    try {
        res = ToolRunner.run(conf, new SleepJob(), args);
        fail("Job should fail");
    } catch (IllegalArgumentException e) {
        assertTrue(e.getLocalizedMessage().contains("The number of map tasks 1 exceeded limit"));
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) SleepJob(org.apache.hadoop.mapreduce.SleepJob) Test(org.junit.Test)
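For contrast, a sketch of the passing case under the same CLI flags (-m maps, -r reduces, -mt map sleep time, -rt reduce sleep time): once the limit covers the one requested map task, the run should return 0. The method name is illustrative; this is a sketch, not Hadoop test code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.SleepJob;
import org.apache.hadoop.util.ToolRunner;
import static org.junit.Assert.assertEquals;

@Test
public void testJobMaxMapAllowsJob() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRConfig.FRAMEWORK_NAME, "local");
    // A limit of 1 admits the single map task requested by "-m", "1".
    conf.setInt(MRJobConfig.JOB_MAX_MAP, 1);
    final String[] args = { "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };
    int res = ToolRunner.run(conf, new SleepJob(), args);
    assertEquals("job should succeed", 0, res);
}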

Example 8 with SleepJob

Use of org.apache.hadoop.mapreduce.SleepJob in project hadoop by apache.

The class TestLocalJobSubmission, method testLocalJobEncryptedIntermediateData.

/**
   * Test local job submission with
   * intermediate data encryption enabled.
   * @throws IOException
   */
@Test
public void testLocalJobEncryptedIntermediateData() throws IOException {
    Configuration conf = new Configuration();
    conf.set(MRConfig.FRAMEWORK_NAME, "local");
    conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
    final String[] args = { "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };
    int res = -1;
    try {
        res = ToolRunner.run(conf, new SleepJob(), args);
    } catch (Exception e) {
        System.out.println("Job failed with " + e.getLocalizedMessage());
        e.printStackTrace(System.out);
        fail("Job failed");
    }
    assertEquals("dist job res is not 0:", 0, res);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) SleepJob(org.apache.hadoop.mapreduce.SleepJob) IOException(java.io.IOException) Test(org.junit.Test)
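Note: MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA turns on encryption of the intermediate map output (spill and shuffle files) written to local disk; the test only verifies that a trivial job still completes with the feature switched on.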

Example 9 with SleepJob

Use of org.apache.hadoop.mapreduce.SleepJob in project hadoop by apache.

The class TestLocalJobSubmission, method testLocalJobLibjarsOption.

/**
   * Test the local job submission options of -jt local -libjars.
   *
   * @param conf the {@link Configuration} to use
   * @throws IOException thrown if there's an error creating the JAR file
   */
private void testLocalJobLibjarsOption(Configuration conf) throws IOException {
    Path jarPath = makeJar(new Path(TEST_ROOT_DIR, "test.jar"));
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:9000");
    conf.set(MRConfig.FRAMEWORK_NAME, "local");
    final String[] args = { "-jt", "local", "-libjars", jarPath.toString(), "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };
    int res = -1;
    try {
        res = ToolRunner.run(conf, new SleepJob(), args);
    } catch (Exception e) {
        System.out.println("Job failed with " + e.getLocalizedMessage());
        e.printStackTrace(System.out);
        fail("Job failed");
    }
    assertEquals("dist job res is not 0:", 0, res);
}
Also used : Path(org.apache.hadoop.fs.Path) SleepJob(org.apache.hadoop.mapreduce.SleepJob) IOException(java.io.IOException)
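makeJar is a private helper of TestLocalJobSubmission that is not shown here. A minimal sketch of what such a helper might look like, assuming a local filesystem path (the entry name and contents are illustrative, not the actual Hadoop test code):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;
import org.apache.hadoop.fs.Path;

// Hypothetical stand-in for the test's private makeJar(Path) helper.
private static Path makeJar(Path p) throws IOException {
    try (JarOutputStream jos = new JarOutputStream(
            new FileOutputStream(new File(p.toUri().getPath())))) {
        // One placeholder entry gives -libjars something to ship.
        jos.putNextEntry(new JarEntry("inside.txt"));
        jos.write("inside the jar".getBytes(StandardCharsets.UTF_8));
        jos.closeEntry();
    }
    return p;
}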

Example 10 with SleepJob

Use of org.apache.hadoop.mapreduce.SleepJob in project hadoop by apache.

The class TestBinaryTokenFile, method testTokenCacheFile.

/**
   * Run a distributed job with the -tokenCacheFile option and
   * verify that no exception occurs.
   * @throws IOException
   */
@Test
public void testTokenCacheFile() throws IOException {
    Configuration conf = mrCluster.getConfig();
    createBinaryTokenFile(conf);
    // provide namenode URIs for the job to get delegation tokens for
    final String nnUri = dfsCluster.getURI(0).toString();
    conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
    // pass the token file name as a command-line argument
    final String[] args = { "-tokenCacheFile", binaryTokenFileName.toString(), "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };
    int res = -1;
    try {
        res = ToolRunner.run(conf, new SleepJob(), args);
    } catch (Exception e) {
        System.out.println("Job failed with " + e.getLocalizedMessage());
        e.printStackTrace(System.out);
        fail("Job failed");
    }
    assertEquals("dist job res is not 0:", 0, res);
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) SleepJob(org.apache.hadoop.mapreduce.SleepJob) IOException(java.io.IOException) Test(org.junit.Test)
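createBinaryTokenFile is likewise a helper defined elsewhere in TestBinaryTokenFile. A hedged sketch of the core idea, assuming the standard Credentials and TokenCache APIs (the parameter names are illustrative; the real helper works against the test's mini DFS cluster):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.security.Credentials;

// Hypothetical stand-in for the test's createBinaryTokenFile(conf) helper.
static void createBinaryTokenFile(Configuration conf, Path nnPath,
        Path tokenFile) throws IOException {
    Credentials creds = new Credentials();
    // Fetch HDFS delegation tokens for the namenode that owns nnPath.
    TokenCache.obtainTokensForNamenodes(creds, new Path[] { nnPath }, conf);
    // Persist them in the binary token-storage format read by -tokenCacheFile.
    creds.writeTokenStorageFile(tokenFile, conf);
}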

Aggregations

SleepJob (org.apache.hadoop.mapreduce.SleepJob): 13
File (java.io.File): 8
Configuration (org.apache.hadoop.conf.Configuration): 8
Job (org.apache.hadoop.mapreduce.Job): 8
Test (org.junit.Test): 8
RandomTextWriterJob (org.apache.hadoop.RandomTextWriterJob): 6
RunningJob (org.apache.hadoop.mapred.RunningJob): 6
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 6
IOException (java.io.IOException): 5
Path (org.apache.hadoop.fs.Path): 4
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 4
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 4
JobConf (org.apache.hadoop.mapred.JobConf): 3
FileStatus (org.apache.hadoop.fs.FileStatus): 2
BufferedReader (java.io.BufferedReader): 1
FileNotFoundException (java.io.FileNotFoundException): 1
InputStreamReader (java.io.InputStreamReader): 1
Matcher (java.util.regex.Matcher): 1
Pattern (java.util.regex.Pattern): 1
JobClient (org.apache.hadoop.mapred.JobClient): 1