Search in sources :

Example 1 with Cluster

use of org.apache.hadoop.mapreduce.Cluster in project hadoop by apache.

From the class TestClientRedirect, method testRedirect:

// End-to-end check that the job client transparently redirects between the
// ResourceManager, the ApplicationMaster, and the HistoryServer as the AM
// starts, restarts, and finally goes away. Relies on shared test flags
// (amContact, amRestarting, hsContact) set by the fake services — TODO
// confirm their exact semantics against the enclosing class, not visible here.
@Test
public void testRedirect() throws Exception {
    // Wire the client to the fake RM and HS endpoints via configuration.
    Configuration conf = new YarnConfiguration();
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
    conf.set(YarnConfiguration.RM_ADDRESS, RMADDRESS);
    conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, HSHOSTADDRESS);
    // Start the RM.
    RMService rmService = new RMService("test");
    rmService.init(conf);
    rmService.start();
    // Start the AM.
    AMService amService = new AMService();
    amService.init(conf);
    amService.start(conf);
    // Start the HS.
    HistoryService historyService = new HistoryService();
    historyService.init(conf);
    historyService.start(conf);
    LOG.info("services started");
    Cluster cluster = new Cluster(conf);
    // mapred.JobID is a subclass of mapreduce.JobID, so this assignment is valid.
    org.apache.hadoop.mapreduce.JobID jobID = new org.apache.hadoop.mapred.JobID("201103121733", 1);
    // While the AM is up, counters must come from the AM (amContact flips true).
    org.apache.hadoop.mapreduce.Counters counters = cluster.getJob(jobID).getCounters();
    validateCounters(counters);
    Assert.assertTrue(amContact);
    LOG.info("Sleeping for 5 seconds before stop for" + " the client socket to not get EOF immediately..");
    Thread.sleep(5000);
    //bring down the AM service
    amService.stop();
    LOG.info("Sleeping for 5 seconds after stop for" + " the server to exit cleanly..");
    Thread.sleep(5000);
    amRestarting = true;
    // Same client
    // While the AM is "restarting", results come from a fake (not started) job:
    // counters are empty rather than an error.
    counters = cluster.getJob(jobID).getCounters();
    Assert.assertEquals(0, counters.countCounters());
    Job job = cluster.getJob(jobID);
    org.apache.hadoop.mapreduce.TaskID taskId = new org.apache.hadoop.mapreduce.TaskID(jobID, TaskType.MAP, 0);
    TaskAttemptID tId = new TaskAttemptID(taskId, 0);
    // Invoke every client-facing Job method to check that none of them throws
    // while the AM is restarting; return values are intentionally ignored.
    job.killJob();
    job.killTask(tId);
    job.failTask(tId);
    job.getTaskCompletionEvents(0, 100);
    job.getStatus();
    job.getTaskDiagnostics(tId);
    job.getTaskReports(TaskType.MAP);
    job.getTrackingURL();
    amRestarting = false;
    // Bring up a fresh AM; the same client must reconnect to it.
    amService = new AMService();
    amService.init(conf);
    amService.start(conf);
    // Reset the contact flag so we can prove the new AM was actually reached.
    amContact = false;
    counters = cluster.getJob(jobID).getCounters();
    validateCounters(counters);
    Assert.assertTrue(amContact);
    // Stop the AM. It is not even restarting. So it should be treated as
    // completed, and the client must fall back to the HistoryServer.
    amService.stop();
    // Same client
    counters = cluster.getJob(jobID).getCounters();
    validateCounters(counters);
    Assert.assertTrue(hsContact);
    rmService.stop();
    historyService.stop();
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) TaskAttemptID(org.apache.hadoop.mapreduce.TaskAttemptID) Cluster(org.apache.hadoop.mapreduce.Cluster) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Job(org.apache.hadoop.mapreduce.Job) Test(org.junit.Test)

Example 2 with Cluster

use of org.apache.hadoop.mapreduce.Cluster in project hadoop by apache.

From the class JobClientUnitTest, method testGetJobWithUnknownJob:

// A cluster that has no record of the requested job id should make
// JobClient.getJob return null rather than throw.
@Test
public void testGetJobWithUnknownJob() throws Exception {
    TestJobClient client = new TestJobClient(new JobConf());
    Cluster cluster = mock(Cluster.class);
    client.setCluster(cluster);
    JobID unknownId = new JobID("unknown", 0);
    // Stub the cluster lookup to report "no such job".
    when(cluster.getJob(unknownId)).thenReturn(null);
    assertNull(client.getJob(unknownId));
}
Also used : Cluster(org.apache.hadoop.mapreduce.Cluster) Test(org.junit.Test)

Example 3 with Cluster

use of org.apache.hadoop.mapreduce.Cluster in project hadoop by apache.

From the class JobClientUnitTest, method testCleanupTaskReportsWithNullJob:

// When the cluster cannot resolve the job id, getCleanupTaskReports must
// return an empty array (never null), and the lookup must actually be made.
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
    TestJobClient client = new TestJobClient(new JobConf());
    Cluster cluster = mock(Cluster.class);
    client.setCluster(cluster);
    JobID missingJobId = new JobID("test", 0);
    // Stub the cluster lookup to report "no such job".
    when(cluster.getJob(missingJobId)).thenReturn(null);
    TaskReport[] reports = client.getCleanupTaskReports(missingJobId);
    assertEquals(0, reports.length);
    // The empty result must come from an actual cluster lookup.
    verify(cluster).getJob(missingJobId);
}
Also used : TaskReport(org.apache.hadoop.mapreduce.TaskReport) Cluster(org.apache.hadoop.mapreduce.Cluster) Test(org.junit.Test)

Example 4 with Cluster

use of org.apache.hadoop.mapreduce.Cluster in project hadoop by apache.

From the class TestCLI, method testListEvents:

// Verifies that "-events" for a job id the cluster does not know about exits
// with -1 even when a different job is registered on the cluster.
@Test
public void testListEvents() throws Exception {
    Cluster mockCluster = mock(Cluster.class);
    CLI cli = spy(new CLI(new Configuration()));
    doReturn(mockCluster).when(cli).createCluster();
    String knownJobId = "job_1234654654_001";
    String unknownJobId = "job_1234654656_002";
    // Register a known RUNNING job on the mock cluster. The returned Job is
    // deliberately not kept: only the stubbing side effect on mockCluster
    // matters here (the original assigned it to an unused local).
    mockJob(mockCluster, knownJobId, State.RUNNING);
    // Check exiting with non existing job
    int exitCode = cli.run(new String[] { "-events", unknownJobId, "0", "10" });
    assertEquals(-1, exitCode);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) Cluster(org.apache.hadoop.mapreduce.Cluster) Job(org.apache.hadoop.mapreduce.Job) Test(org.junit.Test)

Example 5 with Cluster

use of org.apache.hadoop.mapreduce.Cluster in project hadoop by apache.

From the class TestCLI, method testListAttemptIdsWithInvalidInputs:

// Verifies that "-list-attempt-ids" rejects invalid task types (JOB_SETUP,
// JOB_CLEANUP), an invalid task state ("complete"), and an unknown job id,
// each with exit code -1.
@Test
public void testListAttemptIdsWithInvalidInputs() throws Exception {
    JobID jobId = JobID.forName(jobIdStr);
    Cluster mockCluster = mock(Cluster.class);
    Job job = mock(Job.class);
    CLI cli = spy(new CLI(new Configuration()));
    doReturn(mockCluster).when(cli).createCluster();
    // Only jobIdStr resolves to a job; any other id is unknown to the cluster.
    when(mockCluster.getJob(jobId)).thenReturn(job);
    int retCode_JOB_SETUP = cli.run(new String[] { "-list-attempt-ids", jobIdStr, "JOB_SETUP", "running" });
    int retCode_JOB_CLEANUP = cli.run(new String[] { "-list-attempt-ids", jobIdStr, "JOB_CLEANUP", "running" });
    int retCode_invalidTaskState = cli.run(new String[] { "-list-attempt-ids", jobIdStr, "REDUCE", "complete" });
    String jobIdStr2 = "job_1015298225799_0016";
    int retCode_invalidJobId = cli.run(new String[] { "-list-attempt-ids", jobIdStr2, "MAP", "running" });
    // Assertion messages fixed: "input,exit" -> "input, exit", "skippted" -> "skipped".
    assertEquals("JOB_SETUP is an invalid input, exit code should be -1", -1, retCode_JOB_SETUP);
    assertEquals("JOB_CLEANUP is an invalid input, exit code should be -1", -1, retCode_JOB_CLEANUP);
    assertEquals("complete is an invalid input, exit code should be -1", -1, retCode_invalidTaskState);
    assertEquals("Non existing job id should be skipped with -1", -1, retCode_invalidJobId);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) Cluster(org.apache.hadoop.mapreduce.Cluster) Job(org.apache.hadoop.mapreduce.Job) JobID(org.apache.hadoop.mapreduce.JobID) Test(org.junit.Test)

Aggregations

Cluster (org.apache.hadoop.mapreduce.Cluster)22 Test (org.junit.Test)17 Configuration (org.apache.hadoop.conf.Configuration)12 Job (org.apache.hadoop.mapreduce.Job)11 Path (org.apache.hadoop.fs.Path)5 IOException (java.io.IOException)4 JobID (org.apache.hadoop.mapreduce.JobID)4 TaskReport (org.apache.hadoop.mapreduce.TaskReport)4 ArrayList (java.util.ArrayList)2 ByteArrayOutputStream (java.io.ByteArrayOutputStream)1 PrintWriter (java.io.PrintWriter)1 Random (java.util.Random)1 FileStatus (org.apache.hadoop.fs.FileStatus)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 HarFileSystem (org.apache.hadoop.fs.HarFileSystem)1 FsPermission (org.apache.hadoop.fs.permission.FsPermission)1 BackupCopyJob (org.apache.hadoop.hbase.backup.BackupCopyJob)1 SequenceFile (org.apache.hadoop.io.SequenceFile)1 JobStatus (org.apache.hadoop.mapreduce.JobStatus)1 TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID)1