Example 71 with AppContext

Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.

From the class TestContainerLauncherImpl, method testOutOfOrder.

@Test(timeout = 5000)
public void testOutOfOrder() throws Exception {
    LOG.info("STARTING testOutOfOrder");
    AppContext mockContext = mock(AppContext.class);
    @SuppressWarnings("unchecked") EventHandler<Event> mockEventHandler = mock(EventHandler.class);
    when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
    ContainerManagementProtocolClient mockCM = mock(ContainerManagementProtocolClient.class);
    ContainerLauncherImplUnderTest ut = new ContainerLauncherImplUnderTest(mockContext, mockCM);
    Configuration conf = new Configuration();
    ut.init(conf);
    ut.start();
    try {
        ContainerId contId = makeContainerId(0L, 0, 0, 1);
        TaskAttemptId taskAttemptId = makeTaskAttemptId(0L, 0, 0, TaskType.MAP, 0);
        String cmAddress = "127.0.0.1:8000";
        StartContainersResponse startResp = recordFactory.newRecordInstance(StartContainersResponse.class);
        startResp.setAllServicesMetaData(serviceResponse);
        LOG.info("inserting cleanup event");
        ContainerLauncherEvent mockCleanupEvent = mock(ContainerLauncherEvent.class);
        when(mockCleanupEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
        when(mockCleanupEvent.getContainerID()).thenReturn(contId);
        when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
        when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
        ut.handle(mockCleanupEvent);
        ut.waitForPoolToIdle();
        verify(mockCM, never()).stopContainers(any(StopContainersRequest.class));
        LOG.info("inserting launch event");
        ContainerRemoteLaunchEvent mockLaunchEvent = mock(ContainerRemoteLaunchEvent.class);
        when(mockLaunchEvent.getType()).thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
        when(mockLaunchEvent.getContainerID()).thenReturn(contId);
        when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
        when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
        when(mockCM.startContainers(any(StartContainersRequest.class))).thenReturn(startResp);
        when(mockLaunchEvent.getContainerToken()).thenReturn(createNewContainerToken(contId, cmAddress));
        ut.handle(mockLaunchEvent);
        ut.waitForPoolToIdle();
        verify(mockCM, never()).startContainers(any(StartContainersRequest.class));
    } finally {
        ut.stop();
    }
}
Also used: StartContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest), Configuration (org.apache.hadoop.conf.Configuration), StartContainersResponse (org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse), TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), Event (org.apache.hadoop.yarn.event.Event), StopContainersRequest (org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest), Test (org.junit.Test)
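
The helpers makeContainerId and makeTaskAttemptId used above are defined elsewhere in TestContainerLauncherImpl. A minimal sketch of how such helpers can be written follows; the bodies are inferred from the call sites rather than copied from the Hadoop source, and the record builders (ApplicationId.newInstance, ContainerId.newContainerId, MRBuilderUtils) are the usual way to construct these IDs in MapReduce tests.

// Hedged sketch, not the Hadoop source: signatures inferred from the calls above.
public static ContainerId makeContainerId(long ts, int appId, int attemptId, int id) {
    ApplicationId applicationId = ApplicationId.newInstance(ts, appId);
    ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, attemptId);
    return ContainerId.newContainerId(applicationAttemptId, id);
}

public static TaskAttemptId makeTaskAttemptId(long ts, int appId, int taskId, TaskType taskType, int id) {
    // MRBuilderUtils is org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils.
    ApplicationId applicationId = ApplicationId.newInstance(ts, appId);
    JobId jobId = MRBuilderUtils.newJobId(applicationId, appId);
    TaskId taskID = MRBuilderUtils.newTaskId(jobId, taskId, taskType);
    return MRBuilderUtils.newTaskAttemptId(taskID, id);
}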

Example 72 with AppContext

Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.

From the class TestRMContainerAllocator, method testReportedAppProgressWithOnlyMaps.

@Test
public void testReportedAppProgressWithOnlyMaps() throws Exception {
    LOG.info("Running testReportedAppProgressWithOnlyMaps");
    Configuration conf = new Configuration();
    final MyResourceManager rm = new MyResourceManager(conf);
    rm.start();
    DrainDispatcher rmDispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
    // Submit the application
    RMApp rmApp = rm.submitApp(1024);
    rmDispatcher.await();
    MockNM amNodeManager = rm.registerNode("amNM:1234", 11264);
    amNodeManager.nodeHeartbeat(true);
    rmDispatcher.await();
    final ApplicationAttemptId appAttemptId = rmApp.getCurrentAppAttempt().getAppAttemptId();
    rm.sendAMLaunched(appAttemptId);
    rmDispatcher.await();
    MRApp mrApp = new MRApp(appAttemptId, ContainerId.newContainerId(appAttemptId, 0), 10, 0, false, this.getClass().getName(), true, 1) {

        @Override
        protected Dispatcher createDispatcher() {
            return new DrainDispatcher();
        }

        protected ContainerAllocator createContainerAllocator(ClientService clientService, AppContext context) {
            return new MyContainerAllocator(rm, appAttemptId, context);
        }

    };
    Assert.assertEquals(0.0, rmApp.getProgress(), 0.0);
    mrApp.submit(conf);
    Job job = mrApp.getContext().getAllJobs().entrySet().iterator().next().getValue();
    DrainDispatcher amDispatcher = (DrainDispatcher) mrApp.getDispatcher();
    MyContainerAllocator allocator = (MyContainerAllocator) mrApp.getContainerAllocator();
    mrApp.waitForInternalState((JobImpl) job, JobStateInternal.RUNNING);
    amDispatcher.await();
    // Wait till all map attempts have requested containers
    for (Task t : job.getTasks().values()) {
        mrApp.waitForInternalState((TaskAttemptImpl) t.getAttempts().values().iterator().next(), TaskAttemptStateInternal.UNASSIGNED);
    }
    amDispatcher.await();
    allocator.schedule();
    rmDispatcher.await();
    amNodeManager.nodeHeartbeat(true);
    rmDispatcher.await();
    allocator.schedule();
    rmDispatcher.await();
    // Wait for all map-tasks to be running
    for (Task t : job.getTasks().values()) {
        mrApp.waitForState(t, TaskState.RUNNING);
    }
    // Send heartbeat
    allocator.schedule();
    rmDispatcher.await();
    Assert.assertEquals(0.05f, job.getProgress(), 0.001f);
    Assert.assertEquals(0.05f, rmApp.getProgress(), 0.001f);
    Iterator<Task> it = job.getTasks().values().iterator();
    // Finish off 1 map so that map-progress is 10%
    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 1);
    allocator.schedule();
    rmDispatcher.await();
    Assert.assertEquals(0.14f, job.getProgress(), 0.001f);
    Assert.assertEquals(0.14f, rmApp.getProgress(), 0.001f);
    // Finish off 5 more maps so that map-progress is 60%
    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 5);
    allocator.schedule();
    rmDispatcher.await();
    Assert.assertEquals(0.59f, job.getProgress(), 0.001f);
    Assert.assertEquals(0.59f, rmApp.getProgress(), 0.001f);
    // Finish off the remaining 4 maps so that map-progress is 100%
    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 4);
    allocator.schedule();
    rmDispatcher.await();
    Assert.assertEquals(0.95f, job.getProgress(), 0.001f);
    Assert.assertEquals(0.95f, rmApp.getProgress(), 0.001f);
}
Also used: DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), Task (org.apache.hadoop.mapreduce.v2.app.job.Task), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), ClientService (org.apache.hadoop.mapreduce.v2.app.client.ClientService), MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM), RunningAppContext (org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), MRApp (org.apache.hadoop.mapreduce.v2.app.MRApp), Test (org.junit.Test)
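
The progress values asserted above follow from how the MR ApplicationMaster weights its phases: roughly 5% once the AM is up, 5% held back for job cleanup, and the remaining 90% spread over the task phases, all of which goes to the map phase here because the job has no reducers. The snippet below is only an illustration of that arithmetic with assumed weights; it is not code from the test.

// Illustrative only: expected progress for a 10-map, 0-reduce job (weights assumed).
static float expectedMapOnlyProgress(int finishedMaps, int totalMaps) {
    final float startupWeight = 0.05f;  // granted once the AM is running
    final float cleanupWeight = 0.05f;  // withheld until job cleanup completes
    final float mapPhaseWeight = 1.0f - startupWeight - cleanupWeight; // 0.90f
    return startupWeight + mapPhaseWeight * ((float) finishedMaps / totalMaps);
}
// expectedMapOnlyProgress(1, 10)  -> 0.14f
// expectedMapOnlyProgress(6, 10)  -> 0.59f
// expectedMapOnlyProgress(10, 10) -> 0.95f (job cleanup still pending)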

Example 73 with AppContext

Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.

From the class TestRMContainerAllocator, method testHandlingFinishedContainers.

/**
   * MAPREDUCE-6771. Test if RMContainerAllocator generates the events in the
   * right order while processing finished containers.
   */
@Test
public void testHandlingFinishedContainers() {
    EventHandler eventHandler = mock(EventHandler.class);
    AppContext context = mock(RunningAppContext.class);
    when(context.getClock()).thenReturn(new ControlledClock());
    when(context.getClusterInfo()).thenReturn(new ClusterInfo(Resource.newInstance(10240, 1)));
    when(context.getEventHandler()).thenReturn(eventHandler);
    RMContainerAllocator containerAllocator = new RMContainerAllocatorForFinishedContainer(null, context, mock(AMPreemptionPolicy.class));
    ContainerStatus finishedContainer = ContainerStatus.newInstance(mock(ContainerId.class), ContainerState.COMPLETE, "", 0);
    containerAllocator.processFinishedContainer(finishedContainer);
    InOrder inOrder = inOrder(eventHandler);
    inOrder.verify(eventHandler).handle(isA(TaskAttemptDiagnosticsUpdateEvent.class));
    inOrder.verify(eventHandler).handle(isA(TaskAttemptEvent.class));
    inOrder.verifyNoMoreInteractions();
}
Also used: ClusterInfo (org.apache.hadoop.mapreduce.v2.app.ClusterInfo), ContainerStatus (org.apache.hadoop.yarn.api.records.ContainerStatus), InOrder (org.mockito.InOrder), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), TaskAttemptDiagnosticsUpdateEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent), RunningAppContext (org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), EventHandler (org.apache.hadoop.yarn.event.EventHandler), TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent), NoopAMPreemptionPolicy (org.apache.hadoop.mapreduce.v2.app.rm.preemption.NoopAMPreemptionPolicy), AMPreemptionPolicy (org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy), ControlledClock (org.apache.hadoop.yarn.util.ControlledClock), Test (org.junit.Test)
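
For context, MAPREDUCE-6771 concerns the order in which a finished container is processed: the diagnostics update must reach the task attempt before the event that completes it, otherwise the diagnostics can be lost. The sketch below shows the ordering the InOrder verification above expects; the method shape and the completion event type are assumptions for illustration, not the actual RMContainerAllocator code.

// Hedged sketch of the verified ordering: diagnostics first, then completion.
void onFinishedContainer(ContainerStatus status, TaskAttemptId attemptId, EventHandler<Event> eventHandler) {
    eventHandler.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptId, status.getDiagnostics()));
    eventHandler.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_COMPLETED));
}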

Example 74 with AppContext

Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.

From the class TestRMContainerAllocator, method testReportedAppProgress.

@Test
public void testReportedAppProgress() throws Exception {
    LOG.info("Running testReportedAppProgress");
    Configuration conf = new Configuration();
    final MyResourceManager rm = new MyResourceManager(conf);
    rm.start();
    DrainDispatcher rmDispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
    // Submit the application
    RMApp rmApp = rm.submitApp(1024);
    rmDispatcher.await();
    MockNM amNodeManager = rm.registerNode("amNM:1234", 21504);
    amNodeManager.nodeHeartbeat(true);
    rmDispatcher.await();
    final ApplicationAttemptId appAttemptId = rmApp.getCurrentAppAttempt().getAppAttemptId();
    rm.sendAMLaunched(appAttemptId);
    rmDispatcher.await();
    MRApp mrApp = new MRApp(appAttemptId, ContainerId.newContainerId(appAttemptId, 0), 10, 10, false, this.getClass().getName(), true, 1) {

        @Override
        protected Dispatcher createDispatcher() {
            return new DrainDispatcher();
        }

        protected ContainerAllocator createContainerAllocator(ClientService clientService, AppContext context) {
            return new MyContainerAllocator(rm, appAttemptId, context);
        }

    };
    Assert.assertEquals(0.0, rmApp.getProgress(), 0.0);
    mrApp.submit(conf);
    Job job = mrApp.getContext().getAllJobs().entrySet().iterator().next().getValue();
    DrainDispatcher amDispatcher = (DrainDispatcher) mrApp.getDispatcher();
    MyContainerAllocator allocator = (MyContainerAllocator) mrApp.getContainerAllocator();
    mrApp.waitForInternalState((JobImpl) job, JobStateInternal.RUNNING);
    amDispatcher.await();
    // Wait till all map attempts have requested containers
    for (Task t : job.getTasks().values()) {
        if (t.getType() == TaskType.MAP) {
            mrApp.waitForInternalState((TaskAttemptImpl) t.getAttempts().values().iterator().next(), TaskAttemptStateInternal.UNASSIGNED);
        }
    }
    amDispatcher.await();
    allocator.schedule();
    rmDispatcher.await();
    amNodeManager.nodeHeartbeat(true);
    rmDispatcher.await();
    allocator.schedule();
    rmDispatcher.await();
    // Wait for all map-tasks to be running
    for (Task t : job.getTasks().values()) {
        if (t.getType() == TaskType.MAP) {
            mrApp.waitForState(t, TaskState.RUNNING);
        }
    }
    // Send heartbeat
    allocator.schedule();
    rmDispatcher.await();
    Assert.assertEquals(0.05f, job.getProgress(), 0.001f);
    Assert.assertEquals(0.05f, rmApp.getProgress(), 0.001f);
    // Finish off 1 map.
    Iterator<Task> it = job.getTasks().values().iterator();
    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 1);
    allocator.schedule();
    rmDispatcher.await();
    Assert.assertEquals(0.095f, job.getProgress(), 0.001f);
    Assert.assertEquals(0.095f, rmApp.getProgress(), 0.001f);
    // Finish off 7 more so that map-progress is 80%
    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 7);
    allocator.schedule();
    rmDispatcher.await();
    Assert.assertEquals(0.41f, job.getProgress(), 0.001f);
    Assert.assertEquals(0.41f, rmApp.getProgress(), 0.001f);
    // Finish off the 2 remaining maps
    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 2);
    allocator.schedule();
    rmDispatcher.await();
    amNodeManager.nodeHeartbeat(true);
    rmDispatcher.await();
    allocator.schedule();
    rmDispatcher.await();
    // Wait for all reduce-tasks to be running
    for (Task t : job.getTasks().values()) {
        if (t.getType() == TaskType.REDUCE) {
            mrApp.waitForState(t, TaskState.RUNNING);
        }
    }
    // Finish off 2 reduces
    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 2);
    allocator.schedule();
    rmDispatcher.await();
    Assert.assertEquals(0.59f, job.getProgress(), 0.001f);
    Assert.assertEquals(0.59f, rmApp.getProgress(), 0.001f);
    // Finish off the remaining 8 reduces.
    finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 8);
    allocator.schedule();
    rmDispatcher.await();
    // Remaining is JobCleanup
    Assert.assertEquals(0.95f, job.getProgress(), 0.001f);
    Assert.assertEquals(0.95f, rmApp.getProgress(), 0.001f);
}
Also used: DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), Task (org.apache.hadoop.mapreduce.v2.app.job.Task), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), ClientService (org.apache.hadoop.mapreduce.v2.app.client.ClientService), MockNM (org.apache.hadoop.yarn.server.resourcemanager.MockNM), RunningAppContext (org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), Job (org.apache.hadoop.mapreduce.v2.app.job.Job), MRApp (org.apache.hadoop.mapreduce.v2.app.MRApp), Test (org.junit.Test)
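
Here the same weighting is split across two phases: with 10 maps and 10 reduces, the 90% task share divides into 45% per phase, which accounts for the 0.095, 0.41, 0.59 and 0.95 assertions. As before, the snippet below only illustrates the arithmetic with assumed weights; it is not part of the test.

// Illustrative only: expected progress for a 10-map, 10-reduce job (weights assumed).
static float expectedProgress(int finishedMaps, int totalMaps, int finishedReduces, int totalReduces) {
    final float startupWeight = 0.05f;
    final float mapWeight = 0.45f;
    final float reduceWeight = 0.45f;
    return startupWeight
            + mapWeight * ((float) finishedMaps / totalMaps)
            + reduceWeight * ((float) finishedReduces / totalReduces);
}
// expectedProgress(1, 10, 0, 10)   -> 0.095f
// expectedProgress(8, 10, 0, 10)   -> 0.41f
// expectedProgress(10, 10, 2, 10)  -> 0.59f
// expectedProgress(10, 10, 10, 10) -> 0.95f (job cleanup still remaining)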

Example 75 with AppContext

Use of org.apache.hadoop.mapreduce.v2.app.AppContext in project hadoop by apache.

From the class TestRMContainerAllocator, method testHeartbeatHandler.

@Test
public void testHeartbeatHandler() throws Exception {
    LOG.info("Running testHeartbeatHandler");
    Configuration conf = new Configuration();
    conf.setInt(MRJobConfig.MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS, 1);
    ControlledClock clock = new ControlledClock();
    AppContext appContext = mock(AppContext.class);
    when(appContext.getClock()).thenReturn(clock);
    when(appContext.getApplicationID()).thenReturn(ApplicationId.newInstance(1, 1));
    RMContainerAllocator allocator = new RMContainerAllocator(mock(ClientService.class), appContext, new NoopAMPreemptionPolicy()) {

        @Override
        protected void register() {
        }

        @Override
        protected ApplicationMasterProtocol createSchedulerProxy() {
            return mock(ApplicationMasterProtocol.class);
        }

        @Override
        protected synchronized void heartbeat() throws Exception {
        }
    };
    allocator.init(conf);
    allocator.start();
    clock.setTime(5);
    int timeToWaitMs = 5000;
    while (allocator.getLastHeartbeatTime() != 5 && timeToWaitMs > 0) {
        Thread.sleep(10);
        timeToWaitMs -= 10;
    }
    Assert.assertEquals(5, allocator.getLastHeartbeatTime());
    clock.setTime(7);
    timeToWaitMs = 5000;
    while (allocator.getLastHeartbeatTime() != 7 && timeToWaitMs > 0) {
        Thread.sleep(10);
        timeToWaitMs -= 10;
    }
    Assert.assertEquals(7, allocator.getLastHeartbeatTime());
    final AtomicBoolean callbackCalled = new AtomicBoolean(false);
    allocator.runOnNextHeartbeat(new Runnable() {

        @Override
        public void run() {
            callbackCalled.set(true);
        }
    });
    clock.setTime(8);
    timeToWaitMs = 5000;
    while (allocator.getLastHeartbeatTime() != 8 && timeToWaitMs > 0) {
        Thread.sleep(10);
        timeToWaitMs -= 10;
    }
    Assert.assertEquals(8, allocator.getLastHeartbeatTime());
    Assert.assertTrue(callbackCalled.get());
}
Also used: AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), ClientService (org.apache.hadoop.mapreduce.v2.app.client.ClientService), RunningAppContext (org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext), AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext), NoopAMPreemptionPolicy (org.apache.hadoop.mapreduce.v2.app.rm.preemption.NoopAMPreemptionPolicy), ControlledClock (org.apache.hadoop.yarn.util.ControlledClock), Test (org.junit.Test)
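
The three busy-wait loops above share one shape: advance the ControlledClock, then poll getLastHeartbeatTime() until the allocator's heartbeat thread catches up or a 5-second budget runs out. As a hedged aside, that pattern could be factored into a small helper like the one below; the helper name and body are illustrative and not part of the Hadoop test.

// Illustrative helper, not from the Hadoop source: poll until the expected
// heartbeat time is observed or the timeout budget is exhausted.
private static void waitForHeartbeat(RMContainerAllocator allocator, long expectedTime) throws InterruptedException {
    int timeToWaitMs = 5000;
    while (allocator.getLastHeartbeatTime() != expectedTime && timeToWaitMs > 0) {
        Thread.sleep(10);
        timeToWaitMs -= 10;
    }
    Assert.assertEquals(expectedTime, allocator.getLastHeartbeatTime());
}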

Aggregations

Test (org.junit.Test): 74
AppContext (org.apache.hadoop.mapreduce.v2.app.AppContext): 73
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 47
TaskId (org.apache.hadoop.mapreduce.v2.api.records.TaskId): 32
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 32
Configuration (org.apache.hadoop.conf.Configuration): 31
TaskAttemptId (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId): 26
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 22
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 21
Path (org.apache.hadoop.fs.Path): 18
MockAppContext (org.apache.hadoop.mapreduce.v2.app.MockAppContext): 18
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 18
Task (org.apache.hadoop.mapreduce.v2.app.job.Task): 17
Container (org.apache.hadoop.yarn.api.records.Container): 14
JobConf (org.apache.hadoop.mapred.JobConf): 13
TaskSplitMetaInfo (org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo): 13
InetSocketAddress (java.net.InetSocketAddress): 12
ClusterInfo (org.apache.hadoop.mapreduce.v2.app.ClusterInfo): 12
TaskAttemptListener (org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener): 12
TaskAttemptEvent (org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent): 12