
Example 1 with ContainerLauncherEvent

Use of org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent in project hadoop by apache.

From the class TestContainerLauncher, method testPoolSize.

@Test(timeout = 10000)
public void testPoolSize() throws InterruptedException {
    ApplicationId appId = ApplicationId.newInstance(12345, 67);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 3);
    JobId jobId = MRBuilderUtils.newJobId(appId, 8);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
    AppContext context = mock(AppContext.class);
    CustomContainerLauncher containerLauncher = new CustomContainerLauncher(context);
    containerLauncher.init(new Configuration());
    containerLauncher.start();
    ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();
    // No events yet
    Assert.assertEquals(containerLauncher.initialPoolSize, MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE);
    Assert.assertEquals(0, threadPool.getPoolSize());
    Assert.assertEquals(containerLauncher.initialPoolSize, threadPool.getCorePoolSize());
    Assert.assertNull(containerLauncher.foundErrors);
    containerLauncher.expectedCorePoolSize = containerLauncher.initialPoolSize;
    for (int i = 0; i < 10; i++) {
        ContainerId containerId = ContainerId.newContainerId(appAttemptId, i);
        TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, i);
        containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId, containerId, "host" + i + ":1234", null, ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
    }
    waitForEvents(containerLauncher, 10);
    Assert.assertEquals(10, threadPool.getPoolSize());
    Assert.assertNull(containerLauncher.foundErrors);
    // Let the first batch finish, then resend the same set of hosts: the pool size should not change
    containerLauncher.finishEventHandling = true;
    int timeOut = 0;
    while (containerLauncher.numEventsProcessed.get() < 10 && timeOut++ < 200) {
        LOG.info("Waiting for number of events processed to become " + 10 + ". It is now " + containerLauncher.numEventsProcessed.get() + ". Timeout is " + timeOut);
        Thread.sleep(1000);
    }
    Assert.assertEquals(10, containerLauncher.numEventsProcessed.get());
    containerLauncher.finishEventHandling = false;
    for (int i = 0; i < 10; i++) {
        ContainerId containerId = ContainerId.newContainerId(appAttemptId, i + 10);
        TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, i + 10);
        containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId, containerId, "host" + i + ":1234", null, ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
    }
    waitForEvents(containerLauncher, 20);
    Assert.assertEquals(10, threadPool.getPoolSize());
    Assert.assertNull(containerLauncher.foundErrors);
    // A different host: the core thread pool should grow to 21 (11 hosts + 10 buffer),
    // but the live pool size should only reach 11.
    containerLauncher.expectedCorePoolSize = 11 + containerLauncher.initialPoolSize;
    containerLauncher.finishEventHandling = false;
    ContainerId containerId = ContainerId.newContainerId(appAttemptId, 21);
    TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 21);
    containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId, containerId, "host11:1234", null, ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
    waitForEvents(containerLauncher, 21);
    Assert.assertEquals(11, threadPool.getPoolSize());
    Assert.assertNull(containerLauncher.foundErrors);
    containerLauncher.stop();
    // change configuration MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE
    // and verify initialPoolSize value.
    Configuration conf = new Configuration();
    conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE, 20);
    containerLauncher = new CustomContainerLauncher(context);
    containerLauncher.init(conf);
    Assert.assertEquals(containerLauncher.initialPoolSize, 20);
}
Also used: TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), Test (org.junit.Test)
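The test relies on two members of TestContainerLauncher that are not part of this snippet: the CustomContainerLauncher subclass (which exposes the thread pool and the initialPoolSize, expectedCorePoolSize, foundErrors, numEventsProcessed, and finishEventHandling fields used above) and the waitForEvents helper. A rough, hypothetical sketch of the polling idea behind waitForEvents, assuming the custom launcher keeps a numEventsProcessing counter (not the exact Hadoop source):

// Hypothetical sketch of the waitForEvents(...) helper used above. It polls the
// custom launcher's event counter until the expected number of events has been
// picked up by the pool, then asserts on the final count.
private void waitForEvents(CustomContainerLauncher containerLauncher,
        int expectedNumEvents) throws InterruptedException {
    int timeOut = 0;
    while (containerLauncher.numEventsProcessing.get() < expectedNumEvents && timeOut++ < 20) {
        Thread.sleep(1000);
    }
    Assert.assertEquals(expectedNumEvents, containerLauncher.numEventsProcessing.get());
}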

Example 2 with ContainerLauncherEvent

Use of org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent in project hadoop by apache.

From the class TestLocalContainerLauncher, method testKillJob.

@SuppressWarnings("rawtypes")
@Test(timeout = 10000)
public void testKillJob() throws Exception {
    JobConf conf = new JobConf();
    AppContext context = mock(AppContext.class);
    // a simple event handler solely to detect the container cleaned event
    final CountDownLatch isDone = new CountDownLatch(1);
    EventHandler<Event> handler = new EventHandler<Event>() {

        @Override
        public void handle(Event event) {
            LOG.info("handling event " + event.getClass() + " with type " + event.getType());
            if (event instanceof TaskAttemptEvent) {
                if (event.getType() == TaskAttemptEventType.TA_CONTAINER_CLEANED) {
                    isDone.countDown();
                }
            }
        }
    };
    when(context.getEventHandler()).thenReturn(handler);
    // create and start the launcher
    LocalContainerLauncher launcher = new LocalContainerLauncher(context, mock(TaskUmbilicalProtocol.class));
    launcher.init(conf);
    launcher.start();
    // create mocked job, task, and task attempt
    // a single-mapper job
    JobId jobId = MRBuilderUtils.newJobId(System.currentTimeMillis(), 1, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
    TaskAttemptId taId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    Job job = mock(Job.class);
    when(job.getTotalMaps()).thenReturn(1);
    when(job.getTotalReduces()).thenReturn(0);
    Map<JobId, Job> jobs = new HashMap<JobId, Job>();
    jobs.put(jobId, job);
    // app context returns the one and only job
    when(context.getAllJobs()).thenReturn(jobs);
    org.apache.hadoop.mapreduce.v2.app.job.Task ytask = mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class);
    when(ytask.getType()).thenReturn(TaskType.MAP);
    when(job.getTask(taskId)).thenReturn(ytask);
    // create a sleeping mapper that runs beyond the test timeout
    MapTask mapTask = mock(MapTask.class);
    when(mapTask.isMapOrReduce()).thenReturn(true);
    when(mapTask.isMapTask()).thenReturn(true);
    TaskAttemptID taskID = TypeConverter.fromYarn(taId);
    when(mapTask.getTaskID()).thenReturn(taskID);
    when(mapTask.getJobID()).thenReturn(taskID.getJobID());
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            // sleep for a long time
            LOG.info("sleeping for 5 minutes...");
            Thread.sleep(5 * 60 * 1000);
            return null;
        }
    }).when(mapTask).run(isA(JobConf.class), isA(TaskUmbilicalProtocol.class));
    // pump in a task attempt launch event
    ContainerLauncherEvent launchEvent = new ContainerRemoteLaunchEvent(taId, null, createMockContainer(), mapTask);
    launcher.handle(launchEvent);
    Thread.sleep(200);
    // now pump in a container clean-up event
    ContainerLauncherEvent cleanupEvent = new ContainerLauncherEvent(taId, null, null, null, ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP);
    launcher.handle(cleanupEvent);
    // wait for the event to fire: this should be received promptly
    isDone.await();
    launcher.close();
}
Also used: TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId), HashMap (java.util.HashMap), EventHandler (org.apache.hadoop.yarn.event.EventHandler), ContainerLauncherEvent (org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent), CountDownLatch (java.util.concurrent.CountDownLatch), InvocationOnMock (org.mockito.invocation.InvocationOnMock), ContainerRemoteLaunchEvent (org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent), Event (org.apache.hadoop.yarn.event.Event), Test (org.junit.Test)
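The launch event above wraps a mocked Container built by a createMockContainer() helper that is not shown in this snippet. A minimal sketch of such a helper, assuming Mockito plus the YARN Container and NodeId records (the host name and port are placeholders, not values from the Hadoop source):

// Hypothetical sketch of the createMockContainer() helper referenced above.
private static Container createMockContainer() {
    Container container = mock(Container.class);
    // The local launcher only needs a non-null container with a node id;
    // the concrete host and port do not matter for this test.
    NodeId nodeId = NodeId.newInstance("localhost", 1234);
    when(container.getNodeId()).thenReturn(nodeId);
    return container;
}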

Example 3 with ContainerLauncherEvent

Use of org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent in project hadoop by apache.

From the class TestMRApp, method testContainerPassThrough.

@Test
public void testContainerPassThrough() throws Exception {
    MRApp app = new MRApp(0, 1, true, this.getClass().getName(), true) {

        @Override
        protected ContainerLauncher createContainerLauncher(AppContext context) {
            return new MockContainerLauncher() {

                @Override
                public void handle(ContainerLauncherEvent event) {
                    if (event instanceof ContainerRemoteLaunchEvent) {
                        containerObtainedByContainerLauncher = ((ContainerRemoteLaunchEvent) event).getAllocatedContainer();
                    }
                    super.handle(event);
                }
            };
        }
    };
    Job job = app.submit(new Configuration());
    app.waitForState(job, JobState.SUCCEEDED);
    app.verifyCompleted();
    Collection<Task> tasks = job.getTasks().values();
    Collection<TaskAttempt> taskAttempts = tasks.iterator().next().getAttempts().values();
    TaskAttemptImpl taskAttempt = (TaskAttemptImpl) taskAttempts.iterator().next();
    // Container from RM should pass through to the launcher. Container object
    // should be the same.
    Assert.assertTrue(taskAttempt.container == containerObtainedByContainerLauncher);
}
Also used: Task (org.apache.hadoop.mapreduce.v2.app.job.Task), Configuration (org.apache.hadoop.conf.Configuration), ContainerLauncherEvent (org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent), TaskAttemptImpl (org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl), TaskAttempt (org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), ContainerRemoteLaunchEvent (org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent), Test (org.junit.Test)
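containerObtainedByContainerLauncher is a field of the surrounding test class that the overridden handle(...) writes into, and the final assertion deliberately uses == to check reference identity, i.e. that the very Container object allocated by the RM reached the launcher. A minimal sketch of that field (the volatile modifier is an assumption for cross-thread visibility, since the launcher thread writes it and the test thread reads it):

// Hypothetical sketch of the capture field used by the overridden handle(...) above
// (assumes org.apache.hadoop.yarn.api.records.Container is imported).
private volatile Container containerObtainedByContainerLauncher;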

Example 4 with ContainerLauncherEvent

Use of org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent in project hadoop by apache.

From the class TestContainerLauncherImpl, method testContainerCleaned.

@SuppressWarnings({ "rawtypes", "unchecked" })
@Test(timeout = 5000)
public void testContainerCleaned() throws Exception {
    LOG.info("STARTING testContainerCleaned");
    CyclicBarrier startLaunchBarrier = new CyclicBarrier(2);
    CyclicBarrier completeLaunchBarrier = new CyclicBarrier(2);
    AppContext mockContext = mock(AppContext.class);
    EventHandler mockEventHandler = mock(EventHandler.class);
    when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
    ContainerManagementProtocolClient mockCM = new ContainerManagerForTest(startLaunchBarrier, completeLaunchBarrier);
    ContainerLauncherImplUnderTest ut = new ContainerLauncherImplUnderTest(mockContext, mockCM);
    Configuration conf = new Configuration();
    ut.init(conf);
    ut.start();
    try {
        ContainerId contId = makeContainerId(0l, 0, 0, 1);
        TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
        String cmAddress = "127.0.0.1:8000";
        StartContainersResponse startResp = recordFactory.newRecordInstance(StartContainersResponse.class);
        startResp.setAllServicesMetaData(serviceResponse);
        LOG.info("inserting launch event");
        ContainerRemoteLaunchEvent mockLaunchEvent = mock(ContainerRemoteLaunchEvent.class);
        when(mockLaunchEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
        when(mockLaunchEvent.getContainerID()).thenReturn(contId);
        when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
        when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
        when(mockLaunchEvent.getContainerToken()).thenReturn(createNewContainerToken(contId, cmAddress));
        ut.handle(mockLaunchEvent);
        startLaunchBarrier.await();
        LOG.info("inserting cleanup event");
        ContainerLauncherEvent mockCleanupEvent = mock(ContainerLauncherEvent.class);
        when(mockCleanupEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
        when(mockCleanupEvent.getContainerID()).thenReturn(contId);
        when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
        when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
        ut.handle(mockCleanupEvent);
        completeLaunchBarrier.await();
        ut.waitForPoolToIdle();
        ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
        verify(mockEventHandler, atLeast(2)).handle(arg.capture());
        boolean containerCleaned = false;
        for (int i = 0; i < arg.getAllValues().size(); i++) {
            LOG.info(arg.getAllValues().get(i).toString());
            Event currentEvent = arg.getAllValues().get(i);
            if (currentEvent.getType() == TaskAttemptEventType.TA_CONTAINER_CLEANED) {
                containerCleaned = true;
            }
        }
        assert (containerCleaned);
    } finally {
        ut.stop();
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), StartContainersResponse (org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), EventHandler (org.apache.hadoop.yarn.event.EventHandler), CyclicBarrier (java.util.concurrent.CyclicBarrier), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), Event (org.apache.hadoop.yarn.event.Event), Test (org.junit.Test)
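makeContainerId and makeTaskAttemptId are small id-building helpers defined elsewhere in TestContainerLauncherImpl (as are recordFactory, serviceResponse, createNewContainerToken, ContainerManagerForTest, and the ContainerLauncherImplUnderTest test subclass). Rough sketches of the two id helpers, assembled from the same YARN/MR builders used in Example 1; the exact parameter mapping is an assumption:

// Hypothetical sketches of the id helpers used above; they build YARN and
// MapReduce record ids the same way Example 1 does.
private static ContainerId makeContainerId(long ts, int appId, int attemptId, int id) {
    ApplicationId applicationId = ApplicationId.newInstance(ts, appId);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, attemptId);
    return ContainerId.newContainerId(applicationAttemptId, id);
}

private static TaskAttemptId makeTaskAttemptId(long ts, int appId, int taskId, TaskType taskType, int id) {
    ApplicationId applicationId = ApplicationId.newInstance(ts, appId);
    JobId jobId = MRBuilderUtils.newJobId(applicationId, appId);
    TaskId tid = MRBuilderUtils.newTaskId(jobId, taskId, taskType);
    return MRBuilderUtils.newTaskAttemptId(tid, id);
}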

Example 5 with ContainerLauncherEvent

Use of org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent in project hadoop by apache.

From the class TestContainerLauncherImpl, method testOutOfOrder.

@Test(timeout = 5000)
public void testOutOfOrder() throws Exception {
    LOG.info("STARTING testOutOfOrder");
    AppContext mockContext = mock(AppContext.class);
    @SuppressWarnings("unchecked") EventHandler<Event> mockEventHandler = mock(EventHandler.class);
    when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
    ContainerManagementProtocolClient mockCM = mock(ContainerManagementProtocolClient.class);
    ContainerLauncherImplUnderTest ut = new ContainerLauncherImplUnderTest(mockContext, mockCM);
    Configuration conf = new Configuration();
    ut.init(conf);
    ut.start();
    try {
        ContainerId contId = makeContainerId(0l, 0, 0, 1);
        TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
        String cmAddress = "127.0.0.1:8000";
        StartContainersResponse startResp = recordFactory.newRecordInstance(StartContainersResponse.class);
        startResp.setAllServicesMetaData(serviceResponse);
        LOG.info("inserting cleanup event");
        ContainerLauncherEvent mockCleanupEvent = mock(ContainerLauncherEvent.class);
        when(mockCleanupEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
        when(mockCleanupEvent.getContainerID()).thenReturn(contId);
        when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
        when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
        ut.handle(mockCleanupEvent);
        ut.waitForPoolToIdle();
        verify(mockCM, never()).stopContainers(any(StopContainersRequest.class));
        LOG.info("inserting launch event");
        ContainerRemoteLaunchEvent mockLaunchEvent = mock(ContainerRemoteLaunchEvent.class);
        when(mockLaunchEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
        when(mockLaunchEvent.getContainerID()).thenReturn(contId);
        when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
        when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
        when(mockCM.startContainers(any(StartContainersRequest.class))).thenReturn(startResp);
        when(mockLaunchEvent.getContainerToken()).thenReturn(createNewContainerToken(contId, cmAddress));
        ut.handle(mockLaunchEvent);
        ut.waitForPoolToIdle();
        verify(mockCM, never()).startContainers(any(StartContainersRequest.class));
    } finally {
        ut.stop();
    }
}
Also used: StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest), Configuration (org.apache.hadoop.conf.Configuration), StartContainersResponse (org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), Event (org.apache.hadoop.yarn.event.Event), StopContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest), Test (org.junit.Test)
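Both TestContainerLauncherImpl examples call ut.waitForPoolToIdle() before verifying mock interactions, so that the asynchronous launcher has finished processing the queued events. The helper belongs to the ContainerLauncherImplUnderTest subclass and is not shown here; the underlying idea, sketched against a plain ThreadPoolExecutor (the polling interval and the way the pool is obtained are assumptions), is simply to wait until nothing is queued or running:

// Hypothetical sketch of a "wait for the pool to go idle" helper: block until
// the executor has no queued work and no active worker threads.
private static void waitForPoolToIdle(ThreadPoolExecutor pool) throws InterruptedException {
    while (!pool.getQueue().isEmpty() || pool.getActiveCount() > 0) {
        Thread.sleep(10);
    }
}

In testOutOfOrder this matters because the cleanup event that arrives first marks the container as finished, so the launch event handled afterwards is expected to be dropped and startContainers is never invoked on the mocked container manager.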

Aggregations

Test (org.junit.Test): 7
Configuration (org.apache.hadoop.conf.Configuration): 6
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 6
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 6
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 5
Event (org.apache.hadoop.yarn.event.Event): 4
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 3
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 3
StartContainersResponse (org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse): 3
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 2
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 2
ContainerLauncherEvent (org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent): 2
ContainerRemoteLaunchEvent (org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent): 2
StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest): 2
StopContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest): 2
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 2
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 2
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 2
EventHandler (org.apache.hadoop.yarn.event.EventHandler): 2
HashMap (java.util.HashMap): 1