
Example 81 with TaskId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskId in project hadoop by apache.

From the class MockJobs, method newJob.

public static Job newJob(ApplicationId appID, int i, int n, int m, Path confFile, boolean hasFailedTasks) {
    final JobId id = newJobID(appID, i);
    final String name = newJobName();
    final JobReport report = newJobReport(id);
    final Map<TaskId, Task> tasks = newTasks(id, n, m, hasFailedTasks);
    final TaskCount taskCount = getTaskCount(tasks.values());
    final Counters counters = getCounters(tasks.values());
    final Path configFile = confFile;
    Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
    final Configuration conf = new Configuration();
    conf.set(JobACL.VIEW_JOB.getAclName(), "testuser");
    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
    JobACLsManager aclsManager = new JobACLsManager(conf);
    tmpJobACLs = aclsManager.constructJobACLs(conf);
    final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
    return new Job() {

        @Override
        public JobId getID() {
            return id;
        }

        @Override
        public String getName() {
            return name;
        }

        @Override
        public JobState getState() {
            return report.getJobState();
        }

        @Override
        public JobReport getReport() {
            return report;
        }

        @Override
        public float getProgress() {
            return 0;
        }

        @Override
        public Counters getAllCounters() {
            return counters;
        }

        @Override
        public Map<TaskId, Task> getTasks() {
            return tasks;
        }

        @Override
        public Task getTask(TaskId taskID) {
            return tasks.get(taskID);
        }

        @Override
        public int getTotalMaps() {
            return taskCount.maps;
        }

        @Override
        public int getTotalReduces() {
            return taskCount.reduces;
        }

        @Override
        public int getCompletedMaps() {
            return taskCount.completedMaps;
        }

        @Override
        public int getCompletedReduces() {
            return taskCount.completedReduces;
        }

        @Override
        public boolean isUber() {
            return false;
        }

        @Override
        public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(int fromEventId, int maxEvents) {
            return null;
        }

        @Override
        public TaskCompletionEvent[] getMapAttemptCompletionEvents(int startIndex, int maxEvents) {
            return null;
        }

        @Override
        public Map<TaskId, Task> getTasks(TaskType taskType) {
            throw new UnsupportedOperationException("Not supported yet.");
        }

        @Override
        public List<String> getDiagnostics() {
            return Collections.<String>emptyList();
        }

        @Override
        public boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) {
            return true;
        }

        @Override
        public String getUserName() {
            return "mock";
        }

        @Override
        public String getQueueName() {
            return "mockqueue";
        }

        @Override
        public Path getConfFile() {
            return configFile;
        }

        @Override
        public Map<JobACL, AccessControlList> getJobACLs() {
            return jobACLs;
        }

        @Override
        public List<AMInfo> getAMInfos() {
            List<AMInfo> amInfoList = new LinkedList<AMInfo>();
            amInfoList.add(createAMInfo(1));
            amInfoList.add(createAMInfo(2));
            return amInfoList;
        }

        @Override
        public Configuration loadConfFile() throws IOException {
            FileContext fc = FileContext.getFileContext(configFile.toUri(), conf);
            Configuration jobConf = new Configuration(false);
            jobConf.addResource(fc.open(configFile), configFile.toString());
            return jobConf;
        }

        @Override
        public void setQueueName(String queueName) {
        // do nothing
        }

        @Override
        public void setJobPriority(Priority priority) {
        // do nothing
        }
    };
}
Also used : AccessControlList(org.apache.hadoop.security.authorize.AccessControlList) Task(org.apache.hadoop.mapreduce.v2.app.job.Task) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) TaskAttemptCompletionEvent(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent) JobReport(org.apache.hadoop.mapreduce.v2.api.records.JobReport) TaskCompletionEvent(org.apache.hadoop.mapred.TaskCompletionEvent) JobACLsManager(org.apache.hadoop.mapred.JobACLsManager) TaskType(org.apache.hadoop.mapreduce.v2.api.records.TaskType) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Path(org.apache.hadoop.fs.Path) Priority(org.apache.hadoop.yarn.api.records.Priority) LinkedList(java.util.LinkedList) AMInfo(org.apache.hadoop.mapreduce.v2.api.records.AMInfo) Counters(org.apache.hadoop.mapreduce.Counters) JobACL(org.apache.hadoop.mapreduce.JobACL) FileContext(org.apache.hadoop.fs.FileContext)
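For orientation, here is a minimal sketch of how such a mock Job might be driven from a test. The ApplicationId value, the parameter values, and the loop are assumptions for illustration, not taken from the Hadoop sources above:

// Hypothetical driver (assumes MockJobs and the MR app-master test classes are on the classpath).
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
// i selects the job id within the application; n and m are forwarded to newTasks(id, n, m, ...).
Job job = MockJobs.newJob(appId, 0, 4, 2, new Path("/tmp/job.xml"), false);
for (Map.Entry<TaskId, Task> entry : job.getTasks().entrySet()) {
    // Each generated TaskId maps to a mock Task with its own report and counters.
    System.out.println(entry.getKey() + " -> " + entry.getValue().getState());
}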

Example 82 with TaskId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskId in project hadoop by apache.

From the class TestLocalContainerLauncher, method testKillJob.

@SuppressWarnings("rawtypes")
@Test(timeout = 10000)
public void testKillJob() throws Exception {
    JobConf conf = new JobConf();
    AppContext context = mock(AppContext.class);
    // a simple event handler solely to detect the container cleaned event
    final CountDownLatch isDone = new CountDownLatch(1);
    EventHandler<Event> handler = new EventHandler<Event>() {

        @Override
        public void handle(Event event) {
            LOG.info("handling event " + event.getClass() + " with type " + event.getType());
            if (event instanceof TaskAttemptEvent) {
                if (event.getType() == TaskAttemptEventType.TA_CONTAINER_CLEANED) {
                    isDone.countDown();
                }
            }
        }
    };
    when(context.getEventHandler()).thenReturn(handler);
    // create and start the launcher
    LocalContainerLauncher launcher = new LocalContainerLauncher(context, mock(TaskUmbilicalProtocol.class));
    launcher.init(conf);
    launcher.start();
    // create mocked job, task, and task attempt
    // a single-mapper job
    JobId jobId = MRBuilderUtils.newJobId(System.currentTimeMillis(), 1, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
    TaskAttemptId taId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    Job job = mock(Job.class);
    when(job.getTotalMaps()).thenReturn(1);
    when(job.getTotalReduces()).thenReturn(0);
    Map<JobId, Job> jobs = new HashMap<JobId, Job>();
    jobs.put(jobId, job);
    // app context returns the one and only job
    when(context.getAllJobs()).thenReturn(jobs);
    org.apache.hadoop.mapreduce.v2.app.job.Task ytask = mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class);
    when(ytask.getType()).thenReturn(TaskType.MAP);
    when(job.getTask(taskId)).thenReturn(ytask);
    // create a sleeping mapper that runs beyond the test timeout
    MapTask mapTask = mock(MapTask.class);
    when(mapTask.isMapOrReduce()).thenReturn(true);
    when(mapTask.isMapTask()).thenReturn(true);
    TaskAttemptID taskID = TypeConverter.fromYarn(taId);
    when(mapTask.getTaskID()).thenReturn(taskID);
    when(mapTask.getJobID()).thenReturn(taskID.getJobID());
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            // sleep for a long time
            LOG.info("sleeping for 5 minutes...");
            Thread.sleep(5 * 60 * 1000);
            return null;
        }
    }).when(mapTask).run(isA(JobConf.class), isA(TaskUmbilicalProtocol.class));
    // pump in a task attempt launch event
    ContainerLauncherEvent launchEvent = new ContainerRemoteLaunchEvent(taId, null, createMockContainer(), mapTask);
    launcher.handle(launchEvent);
    Thread.sleep(200);
    // now pump in a container clean-up event
    ContainerLauncherEvent cleanupEvent = new ContainerLauncherEvent(taId, null, null, null, ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP);
    launcher.handle(cleanupEvent);
    // wait for the event to fire: this should be received promptly
    isDone.await();
    launcher.close();
}
Also used : TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) HashMap(java.util.HashMap) EventHandler(org.apache.hadoop.yarn.event.EventHandler) ContainerLauncherEvent(org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) CountDownLatch(java.util.concurrent.CountDownLatch) InvocationOnMock(org.mockito.invocation.InvocationOnMock) ContainerRemoteLaunchEvent(org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent) Event(org.apache.hadoop.yarn.event.Event) TaskAttemptEvent(org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent) ContainerLauncherEvent(org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent) ContainerRemoteLaunchEvent(org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent) Test(org.junit.Test)
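As a quick reference, the ID plumbing this test wires together reduces to the following sketch, using the same MRBuilderUtils and TypeConverter calls that appear in the test body:

// Build the YARN-side (v2) record IDs used throughout the MR app master.
JobId jobId = MRBuilderUtils.newJobId(System.currentTimeMillis(), 1, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
// Convert to the classic org.apache.hadoop.mapred ID expected by MapTask.
TaskAttemptID classicId = TypeConverter.fromYarn(attemptId);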

Example 83 with TaskId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskId in project hadoop by apache.

From the class TestTaskAttemptFinishingMonitor, method testFinshingAttemptTimeout.

@Test
public void testFinshingAttemptTimeout() throws IOException, InterruptedException {
    SystemClock clock = SystemClock.getInstance();
    Configuration conf = new Configuration();
    conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT, 100);
    conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS, 10);
    AppContext appCtx = mock(AppContext.class);
    JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
    RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
    MockEventHandler eventHandler = new MockEventHandler();
    TaskAttemptFinishingMonitor taskAttemptFinishingMonitor = new TaskAttemptFinishingMonitor(eventHandler);
    taskAttemptFinishingMonitor.init(conf);
    taskAttemptFinishingMonitor.start();
    when(appCtx.getEventHandler()).thenReturn(eventHandler);
    when(appCtx.getNMHostname()).thenReturn("0.0.0.0");
    when(appCtx.getTaskAttemptFinishingMonitor()).thenReturn(taskAttemptFinishingMonitor);
    when(appCtx.getClock()).thenReturn(clock);
    CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
    policy.init(appCtx);
    TaskAttemptListenerImpl listener = new TaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, policy);
    listener.init(conf);
    listener.start();
    JobId jid = MRBuilderUtils.newJobId(12345, 1, 1);
    TaskId tid = MRBuilderUtils.newTaskId(jid, 0, org.apache.hadoop.mapreduce.v2.api.records.TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(tid, 0);
    appCtx.getTaskAttemptFinishingMonitor().register(attemptId);
    int check = 0;
    while (!eventHandler.timedOut && check++ < 10) {
        Thread.sleep(100);
    }
    taskAttemptFinishingMonitor.stop();
    assertTrue("Finishing attempt didn't time out.", eventHandler.timedOut);
}
Also used : RMHeartbeatHandler(org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) SystemClock(org.apache.hadoop.yarn.util.SystemClock) Configuration(org.apache.hadoop.conf.Configuration) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) CheckpointAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy) TaskAttemptFinishingMonitor(org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor) JobTokenSecretManager(org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Test(org.junit.Test)
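In outline, the monitor lifecycle exercised here looks as follows. The unregister call is the normal, non-timeout path and is shown only as an assumption; the test deliberately never makes it, which is what forces the timeout:

// Tight timeouts so the test completes quickly (same configuration keys as above).
Configuration conf = new Configuration();
conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT, 100);
conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS, 10);
TaskAttemptFinishingMonitor monitor = new TaskAttemptFinishingMonitor(eventHandler);
monitor.init(conf);
monitor.start();
monitor.register(attemptId);     // attempt reported "done"; wait for it to actually exit
// A well-behaved attempt would eventually be unregistered (assumed call):
// monitor.unregister(attemptId);
// If it never is, the monitor emits a timeout event, which the test asserts on.
monitor.stop();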

Example 84 with TaskId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskId in project hadoop by apache.

From the class TestTaskAttemptListenerImpl, method testGetTask.

@Test(timeout = 5000)
public void testGetTask() throws IOException {
    AppContext appCtx = mock(AppContext.class);
    JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
    RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
    TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
    Dispatcher dispatcher = mock(Dispatcher.class);
    @SuppressWarnings("unchecked") EventHandler<Event> ea = mock(EventHandler.class);
    when(dispatcher.getEventHandler()).thenReturn(ea);
    when(appCtx.getEventHandler()).thenReturn(ea);
    CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
    policy.init(appCtx);
    MockTaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, hbHandler, policy);
    Configuration conf = new Configuration();
    listener.init(conf);
    listener.start();
    JVMId id = new JVMId("foo", 1, true, 1);
    WrappedJvmID wid = new WrappedJvmID(id.getJobId(), id.isMap, id.getId());
    // Verify ask before registration.
    //The JVM ID has not been registered yet so we should kill it.
    JvmContext context = new JvmContext();
    context.jvmId = id;
    JvmTask result = listener.getTask(context);
    assertNotNull(result);
    assertTrue(result.shouldDie);
    // Verify ask after registration but before launch. 
    // Don't kill, should be null.
    TaskAttemptId attemptID = mock(TaskAttemptId.class);
    Task task = mock(Task.class);
    //Now put a task with the ID
    listener.registerPendingTask(task, wid);
    result = listener.getTask(context);
    assertNull(result);
    // Unregister for more testing.
    listener.unregister(attemptID, wid);
    // Verify ask after registration and launch
    //Now put a task with the ID
    listener.registerPendingTask(task, wid);
    listener.registerLaunchedTask(attemptID, wid);
    verify(hbHandler).register(attemptID);
    result = listener.getTask(context);
    assertNotNull(result);
    assertFalse(result.shouldDie);
    // Don't unregister yet for more testing.
    //Verify that if we call it again a second time we are told to die.
    result = listener.getTask(context);
    assertNotNull(result);
    assertTrue(result.shouldDie);
    listener.unregister(attemptID, wid);
    // Verify after unregistration.
    result = listener.getTask(context);
    assertNotNull(result);
    assertTrue(result.shouldDie);
    listener.stop();
    // test JVMID
    JVMId jvmid = JVMId.forName("jvm_001_002_m_004");
    assertNotNull(jvmid);
    try {
        JVMId.forName("jvm_001_002_m_004_006");
        fail();
    } catch (IllegalArgumentException e) {
        assertEquals(e.getMessage(), "TaskId string : jvm_001_002_m_004_006 is not properly formed");
    }
}
Also used : RMHeartbeatHandler(org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler) Configuration(org.apache.hadoop.conf.Configuration) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) AppContext(org.apache.hadoop.mapreduce.v2.app.AppContext) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher) CheckpointAMPreemptionPolicy(org.apache.hadoop.mapreduce.v2.app.rm.preemption.CheckpointAMPreemptionPolicy) JobTokenSecretManager(org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager) TaskHeartbeatHandler(org.apache.hadoop.mapreduce.v2.app.TaskHeartbeatHandler) TaskAttemptCompletionEvent(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent) Event(org.apache.hadoop.yarn.event.Event) Test(org.junit.Test)
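The JVM-ID round trip at the end of the test can be summarized in a short sketch; the expected values follow from the ID string exercised above:

// Parse a well-formed JVM ID string of the shape jvm_<jtIdentifier>_<jobId>_<m|r>_<id>.
JVMId parsed = JVMId.forName("jvm_001_002_m_004");
System.out.println(parsed.isMap);    // true, because of the "_m_" segment
System.out.println(parsed.getId());  // 4
// A string with an extra trailing segment is rejected with IllegalArgumentException,
// whose message (somewhat confusingly) complains about a malformed "TaskId string".
JVMId.forName("jvm_001_002_m_004_006");  // throws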

Example 85 with TaskId

Use of org.apache.hadoop.mapreduce.v2.api.records.TaskId in project hadoop by apache.

From the class StartEndTimesBase, method thresholdRuntime.

@Override
public long thresholdRuntime(TaskId taskID) {
    JobId jobID = taskID.getJobId();
    Job job = context.getJob(jobID);
    TaskType type = taskID.getTaskType();
    DataStatistics statistics = dataStatisticsForTask(taskID);
    int completedTasksOfType = type == TaskType.MAP ? job.getCompletedMaps() : job.getCompletedReduces();
    int totalTasksOfType = type == TaskType.MAP ? job.getTotalMaps() : job.getTotalReduces();
    if (completedTasksOfType < MINIMUM_COMPLETE_NUMBER_TO_SPECULATE || (((float) completedTasksOfType) / totalTasksOfType) < MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE) {
        return Long.MAX_VALUE;
    }
    long result = statistics == null ? Long.MAX_VALUE : (long) statistics.outlier(slowTaskRelativeTresholds.get(job));
    return result;
}
Also used : TaskType(org.apache.hadoop.mapreduce.v2.api.records.TaskType) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId)
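Informally, the guard can be read as follows; the numbers below are hypothetical, and the two speculation constants are fields of StartEndTimesBase whose values are not shown on this page:

// Hypothetical walk-through for MAP tasks: 2 of 100 maps have completed.
int completedTasksOfType = 2;
int totalTasksOfType = 100;
boolean tooEarly =
    completedTasksOfType < MINIMUM_COMPLETE_NUMBER_TO_SPECULATE
    || ((float) completedTasksOfType) / totalTasksOfType < MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE;
// If tooEarly is true, thresholdRuntime returns Long.MAX_VALUE, i.e. no task of this
// type is considered slow enough to speculate yet. Otherwise the per-job DataStatistics
// provide an outlier cut-off, scaled by slowTaskRelativeTresholds.get(job), and that
// cut-off is what estimated task runtimes are compared against.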

Aggregations

TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 113 usages
Test (org.junit.Test): 75 usages
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 69 usages
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 60 usages
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 58 usages
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 56 usages
TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt): 42 usages
Configuration (org.apache.hadoop.conf.Configuration): 29 usages
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 24 usages
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 24 usages
Path (org.apache.hadoop.fs.Path): 23 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 22 usages
HashMap (java.util.HashMap): 20 usages
JobConf (org.apache.hadoop.mapred.JobConf): 17 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 17 usages
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 17 usages
MapTaskAttemptImpl (org.apache.hadoop.mapred.MapTaskAttemptImpl): 16 usages
TaskAttemptListener (org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener): 16 usages
InetSocketAddress (java.net.InetSocketAddress): 15 usages
TaskSplitMetaInfo (org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo): 15 usages