
Example 56 with JobID

Use of org.apache.hadoop.mapreduce.JobID in project hadoop by apache.

Class TestCLI, method testListAttemptIdsWithValidInput.

@Test
public void testListAttemptIdsWithValidInput() throws Exception {
    JobID jobId = JobID.forName(jobIdStr);
    Cluster mockCluster = mock(Cluster.class);
    Job job = mock(Job.class);
    CLI cli = spy(new CLI(new Configuration()));
    doReturn(mockCluster).when(cli).createCluster();
    when(job.getTaskReports(TaskType.MAP)).thenReturn(getTaskReports(jobId, TaskType.MAP));
    when(job.getTaskReports(TaskType.REDUCE)).thenReturn(getTaskReports(jobId, TaskType.REDUCE));
    when(mockCluster.getJob(jobId)).thenReturn(job);
    int retCode_MAP = cli.run(new String[] { "-list-attempt-ids", jobIdStr, "MAP", "running" });
    // testing case insensitive behavior
    int retCode_map = cli.run(new String[] { "-list-attempt-ids", jobIdStr, "map", "running" });
    int retCode_REDUCE = cli.run(new String[] { "-list-attempt-ids", jobIdStr, "REDUCE", "running" });
    int retCode_completed = cli.run(new String[] { "-list-attempt-ids", jobIdStr, "REDUCE", "completed" });
    assertEquals("MAP is a valid input,exit code should be 0", 0, retCode_MAP);
    assertEquals("map is a valid input,exit code should be 0", 0, retCode_map);
    assertEquals("REDUCE is a valid input,exit code should be 0", 0, retCode_REDUCE);
    assertEquals("REDUCE and completed are a valid inputs to -list-attempt-ids,exit code should be 0", 0, retCode_completed);
    verify(job, times(2)).getTaskReports(TaskType.MAP);
    verify(job, times(2)).getTaskReports(TaskType.REDUCE);
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), Cluster (org.apache.hadoop.mapreduce.Cluster), Job (org.apache.hadoop.mapreduce.Job), JobID (org.apache.hadoop.mapreduce.JobID), Test (org.junit.Test)
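For context, a minimal sketch of the round trip behind jobIdStr (a fixture field in the test class), assuming the canonical "job_<jtIdentifier>_<id>" form that JobID.forName expects; the literal ID below is illustrative, not taken from the test fixture.

import org.apache.hadoop.mapreduce.JobID;

public class JobIdRoundTrip {
    public static void main(String[] args) {
        // Parse the canonical string form back into a JobID instance.
        JobID parsed = JobID.forName("job_200707121733_0003");
        System.out.println(parsed.getJtIdentifier()); // 200707121733
        System.out.println(parsed.getId()); // 3
        // toString() re-emits the canonical form, zero-padding the numeric part.
        System.out.println(parsed); // job_200707121733_0003
        // A malformed string makes forName throw IllegalArgumentException.
    }
}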

Example 57 with JobID

Use of org.apache.hadoop.mapreduce.JobID in project hadoop by apache.

Class TestJobHistoryParsing, method testMultipleFailedTasks.

@Test
public void testMultipleFailedTasks() throws Exception {
    JobHistoryParser parser = new JobHistoryParser(Mockito.mock(FSDataInputStream.class));
    EventReader reader = Mockito.mock(EventReader.class);
    // Hack: a mutable counter captured by the mock scripts the sequence of events returned
    final AtomicInteger numEventsRead = new AtomicInteger(0);
    final org.apache.hadoop.mapreduce.TaskType taskType = org.apache.hadoop.mapreduce.TaskType.MAP;
    final TaskID[] tids = new TaskID[2];
    final JobID jid = new JobID("1", 1);
    tids[0] = new TaskID(jid, taskType, 0);
    tids[1] = new TaskID(jid, taskType, 1);
    Mockito.when(reader.getNextEvent()).thenAnswer(new Answer<HistoryEvent>() {

        @Override
        public HistoryEvent answer(InvocationOnMock invocation) throws IOException {
            // send two task start and two task fail events for tasks 0 and 1
            int eventId = numEventsRead.getAndIncrement();
            TaskID tid = tids[eventId & 0x1];
            if (eventId < 2) {
                return new TaskStartedEvent(tid, 0, taskType, "");
            }
            if (eventId < 4) {
                TaskFailedEvent tfe = new TaskFailedEvent(tid, 0, taskType, "failed", "FAILED", null, new Counters());
                // Round-trip the avro datum so both the getDatum and setDatum paths run.
                tfe.setDatum(tfe.getDatum());
                return tfe;
            }
            if (eventId < 5) {
                JobUnsuccessfulCompletionEvent juce = new JobUnsuccessfulCompletionEvent(jid, 100L, 2, 0, "JOB_FAILED", Collections.singletonList("Task failed: " + tids[0].toString()));
                return juce;
            }
            return null;
        }
    });
    JobInfo info = parser.parse(reader);
    assertTrue("Task 0 not implicated", info.getErrorInfo().contains(tids[0].toString()));
}
Also used: EventReader (org.apache.hadoop.mapreduce.jobhistory.EventReader), TaskID (org.apache.hadoop.mapreduce.TaskID), JobUnsuccessfulCompletionEvent (org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent), IOException (java.io.IOException), HistoryEvent (org.apache.hadoop.mapreduce.jobhistory.HistoryEvent), TaskStartedEvent (org.apache.hadoop.mapreduce.jobhistory.TaskStartedEvent), JobHistoryParser (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), JobInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo), InvocationOnMock (org.mockito.invocation.InvocationOnMock), TaskFailedEvent (org.apache.hadoop.mapreduce.jobhistory.TaskFailedEvent), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), Counters (org.apache.hadoop.mapreduce.Counters), JobID (org.apache.hadoop.mapreduce.JobID), Test (org.junit.Test)
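The test leans on the fact that a JobID nests inside every TaskID it spawns. A minimal sketch of that hierarchy, assuming the usual zero-padded string forms (the exact padding widths are implementation details):

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class IdHierarchy {
    public static void main(String[] args) {
        JobID jid = new JobID("1", 1);
        TaskID tid = new TaskID(jid, TaskType.MAP, 0);
        // The job ID embeds in the task ID's string form and is recoverable from it.
        System.out.println(jid); // job_1_0001
        System.out.println(tid); // task_1_0001_m_000000
        System.out.println(tid.getJobID().equals(jid)); // true
    }
}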

Example 58 with JobID

Use of org.apache.hadoop.mapreduce.JobID in project hadoop by apache.

Class ZombieJob, method maskTaskID.

/**
   * Mask the job ID part in a {@link TaskID}.
   * 
   * @param taskId
   *          raw {@link TaskID} read from trace
   * @return masked {@link TaskID} with empty {@link JobID}.
   */
private TaskID maskTaskID(TaskID taskId) {
    JobID jobId = new JobID();
    TaskType taskType = taskId.getTaskType();
    return new TaskID(jobId, taskType, taskId.getId());
}
Also used: TaskID (org.apache.hadoop.mapreduce.TaskID), TaskType (org.apache.hadoop.mapreduce.TaskType), JobID (org.apache.hadoop.mapreduce.JobID)
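Masking exists so that task IDs from different traced jobs can be compared by type and index alone. A hedged sketch of the effect; mask below is a hypothetical stand-in for the private method above:

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class MaskDemo {
    // Hypothetical copy of maskTaskID, which is private in ZombieJob.
    static TaskID mask(TaskID t) {
        return new TaskID(new JobID(), t.getTaskType(), t.getId());
    }

    public static void main(String[] args) {
        TaskID a = new TaskID(new JobID("trace1", 7), TaskType.MAP, 42);
        TaskID b = new TaskID(new JobID("trace2", 9), TaskType.MAP, 42);
        System.out.println(a.equals(b)); // false: the job parts differ
        System.out.println(mask(a).equals(mask(b))); // true: job parts blanked out
    }
}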

Example 59 with JobID

Use of org.apache.hadoop.mapreduce.JobID in project hadoop by apache.

Class ZombieJob, method maskAttemptID.

/**
   * Mask the job ID part in a {@link TaskAttemptID}.
   * 
   * @param attemptId
   *          raw {@link TaskAttemptID} read from trace
   * @return masked {@link TaskAttemptID} with empty {@link JobID}.
   */
private TaskAttemptID maskAttemptID(TaskAttemptID attemptId) {
    JobID jobId = new JobID();
    TaskType taskType = attemptId.getTaskType();
    TaskID taskId = attemptId.getTaskID();
    return new TaskAttemptID(jobId.getJtIdentifier(), jobId.getId(), taskType, taskId.getId(), attemptId.getId());
}
Also used: TaskID (org.apache.hadoop.mapreduce.TaskID), TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID), TaskType (org.apache.hadoop.mapreduce.TaskType), JobID (org.apache.hadoop.mapreduce.JobID)
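An equivalent construction, assuming the public TaskAttemptID(TaskID, int) constructor; this sketch should behave the same as the component-wise version above, since both end up wrapping an empty JobID:

// Hypothetical alternative to maskAttemptID, not part of ZombieJob.
private TaskAttemptID maskAttemptIDAlt(TaskAttemptID attemptId) {
    TaskID maskedTask = new TaskID(new JobID(), attemptId.getTaskType(),
        attemptId.getTaskID().getId());
    return new TaskAttemptID(maskedTask, attemptId.getId());
}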

Example 60 with JobID

Use of org.apache.hadoop.mapreduce.JobID in project hive by apache.

Class TempletonControllerJob, method run.

/**
 * Enqueue the job and print out the job id for later collection.
 * @see org.apache.hive.hcatalog.templeton.CompleteDelegator
 */
@Override
public int run(String[] args) throws IOException, InterruptedException, ClassNotFoundException, TException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Preparing to submit job: " + Arrays.toString(args));
    }
    Configuration conf = getConf();
    conf.set(JAR_ARGS_NAME, TempletonUtils.encodeArray(args));
    String memoryMb = appConf.mapperMemoryMb();
    if (memoryMb != null && memoryMb.length() != 0) {
        conf.set(AppConfig.HADOOP_MAP_MEMORY_MB, memoryMb);
    }
    String amMemoryMB = appConf.amMemoryMb();
    if (amMemoryMB != null && !amMemoryMB.isEmpty()) {
        conf.set(AppConfig.HADOOP_MR_AM_MEMORY_MB, amMemoryMB);
    }
    String amJavaOpts = appConf.controllerAMChildOpts();
    if (amJavaOpts != null && !amJavaOpts.isEmpty()) {
        conf.set(AppConfig.HADOOP_MR_AM_JAVA_OPTS, amJavaOpts);
    }
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    conf.set("user.name", user);
    job = new Job(conf);
    job.setJarByClass(LaunchMapper.class);
    job.setJobName(TempletonControllerJob.class.getSimpleName());
    job.setMapperClass(LaunchMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);
    job.setInputFormatClass(SingleInputFormat.class);
    // NullOutputFormat discards all output; the controller job itself produces none.
    NullOutputFormat<NullWritable, NullWritable> of = new NullOutputFormat<NullWritable, NullWritable>();
    job.setOutputFormatClass(of.getClass());
    job.setNumReduceTasks(0);
    JobClient jc = new JobClient(new JobConf(job.getConfiguration()));
    if (UserGroupInformation.isSecurityEnabled()) {
        Token<DelegationTokenIdentifier> mrdt = jc.getDelegationToken(new Text("mr token"));
        job.getCredentials().addToken(new Text("mr token"), mrdt);
    }
    String metastoreTokenStrForm = addHMSToken(job, user);
    job.submit();
    JobID submittedJobId = job.getJobID();
    if (metastoreTokenStrForm != null) {
        // so that it can be cancelled later from CompleteDelegator
        DelegationTokenCache.getStringFormTokenCache().storeDelegationToken(submittedJobId.toString(), metastoreTokenStrForm);
        LOG.debug("Added metastore delegation token for jobId=" + submittedJobId.toString() + " user=" + user);
    }
    return 0;
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), DelegationTokenIdentifier (org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier), Text (org.apache.hadoop.io.Text), NullWritable (org.apache.hadoop.io.NullWritable), JobClient (org.apache.hadoop.mapred.JobClient), Job (org.apache.hadoop.mapreduce.Job), JobConf (org.apache.hadoop.mapred.JobConf), NullOutputFormat (org.apache.hadoop.mapreduce.lib.output.NullOutputFormat), JobID (org.apache.hadoop.mapreduce.JobID)
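Once the JobID has been printed and cached, a later process can look the job back up by ID. A minimal sketch, assuming a reachable cluster and a made-up ID string (CompleteDelegator's actual mechanics are out of scope here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;

public class PollSubmittedJob {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // In practice the string would be the one the controller printed; this one is invented.
        JobID submittedJobId = JobID.forName("job_1234567890123_0001");
        Cluster cluster = new Cluster(conf);
        Job job = cluster.getJob(submittedJobId); // null if the cluster no longer tracks the job
        if (job != null) {
            System.out.println(job.getStatus().getState());
        }
    }
}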

Aggregations

JobID (org.apache.hadoop.mapreduce.JobID): 61
Test (org.junit.Test): 33
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 17
IOException (java.io.IOException): 16
TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID): 16
TaskID (org.apache.hadoop.mapreduce.TaskID): 16
Configuration (org.apache.hadoop.conf.Configuration): 12
Job (org.apache.hadoop.mapreduce.Job): 8
ArrayList (java.util.ArrayList): 7
Path (org.apache.hadoop.fs.Path): 7
EventHandler (org.apache.hadoop.yarn.event.EventHandler): 7
HashMap (java.util.HashMap): 6
FileSystem (org.apache.hadoop.fs.FileSystem): 6
JobConf (org.apache.hadoop.mapred.JobConf): 6
TaskAttemptInfo (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo): 6
OutputCommitter (org.apache.hadoop.mapreduce.OutputCommitter): 5
Event (org.apache.hadoop.mapreduce.jobhistory.Event): 5
EventType (org.apache.hadoop.mapreduce.jobhistory.EventType): 5
JobHistoryEvent (org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent): 5
JobHistoryEventHandler (org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler): 5