
Example 16 with Dispatcher

Use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.

Source: class TestNodeStatusUpdaterForLabels, method testInvalidNodeLabelsFromProvider.

@Test(timeout = 20000)
public void testInvalidNodeLabelsFromProvider() throws InterruptedException, IOException {
    final ResourceTrackerForLabels resourceTracker = new ResourceTrackerForLabels();
    nm = new NodeManager() {

        @Override
        protected NodeLabelsProvider createNodeLabelsProvider(Configuration conf) throws IOException {
            return dummyLabelsProviderRef;
        }

        @Override
        protected NodeStatusUpdater createNodeStatusUpdater(Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker, NodeLabelsProvider labelsProvider) {
            return new NodeStatusUpdaterImpl(context, dispatcher, healthChecker, metrics, labelsProvider) {

                @Override
                protected ResourceTracker getRMClient() {
                    return resourceTracker;
                }

                @Override
                protected void stopRMProxy() {
                    return;
                }
            };
        }
    };
    dummyLabelsProviderRef.setNodeLabels(toNodeLabelSet("P"));
    YarnConfiguration conf = createNMConfigForDistributeNodeLabels();
    conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "0.0.0.0:" + ServerSocketUtil.getPort(8040, 10));
    nm.init(conf);
    resourceTracker.resetNMHeartbeatReceiveFlag();
    nm.start();
    // wait till the first heartbeat
    resourceTracker.waitTillHeartbeat();
    resourceTracker.resetNMHeartbeatReceiveFlag();
    // heartbeat with invalid labels
    dummyLabelsProviderRef.setNodeLabels(toNodeLabelSet("_.P"));
    sendOutofBandHeartBeat();
    resourceTracker.waitTillHeartbeat();
    assertNull("On Invalid Labels we need to retain earlier labels, HB " + "needs to send null", resourceTracker.labels);
    resourceTracker.resetNMHeartbeatReceiveFlag();
    // on the next heartbeat the provider will return the same invalid labels, but
    // the label validation check should not run again and the RM should not be
    // reset with an empty label set
    sendOutofBandHeartBeat();
    resourceTracker.waitTillHeartbeat();
    assertNull("NodeStatusUpdater need not send repeatedly empty labels on " + "invalid labels from provider ", resourceTracker.labels);
    resourceTracker.resetNMHeartbeatReceiveFlag();
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) Configuration(org.apache.hadoop.conf.Configuration) NodeLabelsProvider(org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider) ResourceTracker(org.apache.hadoop.yarn.server.api.ResourceTracker) IOException(java.io.IOException) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher) Test(org.junit.Test)
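
The test above never mocks NodeManager itself; it injects stubs by overriding protected factory methods (createNodeLabelsProvider, createNodeStatusUpdater) and the updater's getRMClient. A stripped-down sketch of that factory-override injection pattern, using hypothetical Service and Worker classes that are not part of Hadoop:

// Hypothetical classes, only to illustrate the pattern used in the test above.
class Worker {
    String fetch() {
        return "production data";
    }
}

class Service {
    private Worker worker;

    // Protected factory method: a test subclass can override it to substitute
    // a stub without changing the production wiring.
    protected Worker createWorker() {
        return new Worker();
    }

    void init() {
        worker = createWorker();
    }

    String run() {
        return worker.fetch();
    }
}

class ServiceTest {
    // An anonymous subclass swaps in the stub, just as the test overrides
    // createNodeStatusUpdater and getRMClient.
    Service serviceUnderTest = new Service() {
        @Override
        protected Worker createWorker() {
            return new Worker() {
                @Override
                String fetch() {
                    return "stubbed data";
                }
            };
        }
    };
}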

Example 17 with Dispatcher

Use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.

Source: class TestNodeManagerResync, method testNMSentContainerStatusOnResync.

// This tests that when the NM gets the resync response from its last heartbeat,
// it can send the container statuses already reported via that heartbeat again
// when it re-registers with the RM.
@Test
public void testNMSentContainerStatusOnResync() throws Exception {
    final ContainerStatus testCompleteContainer = TestNodeStatusUpdater.createContainerStatus(2, ContainerState.COMPLETE);
    final Container container = TestNodeStatusUpdater.getMockContainer(testCompleteContainer);
    NMContainerStatus report = createNMContainerStatus(2, ContainerState.COMPLETE);
    when(container.getNMContainerStatus()).thenReturn(report);
    NodeManager nm = new NodeManager() {

        int registerCount = 0;

        @Override
        protected NodeStatusUpdater createNodeStatusUpdater(Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
            return new TestNodeStatusUpdaterResync(context, dispatcher, healthChecker, metrics) {

                @Override
                protected ResourceTracker createResourceTracker() {
                    return new MockResourceTracker() {

                        @Override
                        public RegisterNodeManagerResponse registerNodeManager(RegisterNodeManagerRequest request) throws YarnException, IOException {
                            if (registerCount == 0) {
                                // first register, no containers info.
                                try {
                                    Assert.assertEquals(0, request.getNMContainerStatuses().size());
                                } catch (AssertionError error) {
                                    error.printStackTrace();
                                    assertionFailedInThread.set(true);
                                }
                                // put the completed container into the context
                                getNMContext().getContainers().put(testCompleteContainer.getContainerId(), container);
                                getNMContext().getApplications().put(testCompleteContainer.getContainerId().getApplicationAttemptId().getApplicationId(), mock(Application.class));
                            } else {
                                // second register contains the completed container info.
                                List<NMContainerStatus> statuses = request.getNMContainerStatuses();
                                try {
                                    Assert.assertEquals(1, statuses.size());
                                    Assert.assertEquals(testCompleteContainer.getContainerId(), statuses.get(0).getContainerId());
                                } catch (AssertionError error) {
                                    error.printStackTrace();
                                    assertionFailedInThread.set(true);
                                }
                            }
                            registerCount++;
                            return super.registerNodeManager(request);
                        }

                        @Override
                        public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) {
                            // first heartBeat contains the completed container info
                            List<ContainerStatus> statuses = request.getNodeStatus().getContainersStatuses();
                            try {
                                Assert.assertEquals(1, statuses.size());
                                Assert.assertEquals(testCompleteContainer.getContainerId(), statuses.get(0).getContainerId());
                            } catch (AssertionError error) {
                                error.printStackTrace();
                                assertionFailedInThread.set(true);
                            }
                            // notify RESYNC on first heartbeat.
                            return YarnServerBuilderUtils.newNodeHeartbeatResponse(1, NodeAction.RESYNC, null, null, null, null, 1000L);
                        }
                    };
                }
            };
        }
    };
    YarnConfiguration conf = createNMConfig();
    nm.init(conf);
    nm.start();
    try {
        syncBarrier.await();
    } catch (BrokenBarrierException e) {
        // ignored: the barrier only synchronizes the test thread with the resync thread
    }
    Assert.assertFalse(assertionFailedInThread.get());
    nm.stop();
}
Also used : FileContext(org.apache.hadoop.fs.FileContext) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) BrokenBarrierException(java.util.concurrent.BrokenBarrierException) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher) RegisterNodeManagerRequest(org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest) NMContainerStatus(org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) Container(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) NodeHeartbeatRequest(org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest) Application(org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application) BaseContainerManagerTest(org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest) Test(org.junit.Test)
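
The syncBarrier and assertionFailedInThread fields used above are declared elsewhere in TestNodeManagerResync and are not shown on this page. Judging from the await() call, syncBarrier is presumably a two-party CyclicBarrier shared between the JUnit thread and the updater thread that handles the RESYNC; an assumed sketch of those declarations:

import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;

public class TestNodeManagerResyncSketch {
    // Two parties: the JUnit thread (which blocks in syncBarrier.await() at the
    // end of the test) and the updater thread, which reaches the barrier once
    // the resync re-registration has completed.
    private final CyclicBarrier syncBarrier = new CyclicBarrier(2);

    // Set from the mocked ResourceTracker callbacks so that assertion failures
    // raised on the updater thread still fail the test on the JUnit thread.
    private final AtomicBoolean assertionFailedInThread = new AtomicBoolean(false);
}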

Example 18 with Dispatcher

Use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.

Source: class TestRM, method testApplicationKillAtAcceptedState.

/**
   * Validate killing an application while it is in the ACCEPTED state.
   * @throws Exception exception
   */
@Test(timeout = 60000)
public void testApplicationKillAtAcceptedState() throws Exception {
    final Dispatcher dispatcher = new DrainDispatcher() {

        @Override
        public EventHandler<Event> getEventHandler() {
            class EventArgMatcher extends ArgumentMatcher<AbstractEvent> {

                @Override
                public boolean matches(Object argument) {
                    if (argument instanceof RMAppAttemptEvent) {
                        if (((RMAppAttemptEvent) argument).getType().equals(RMAppAttemptEventType.KILL)) {
                            return true;
                        }
                    }
                    return false;
                }
            }
            EventHandler handler = spy(super.getEventHandler());
            doNothing().when(handler).handle(argThat(new EventArgMatcher()));
            return handler;
        }
    };
    MockRM rm = new MockRM(conf) {

        @Override
        protected Dispatcher createDispatcher() {
            return dispatcher;
        }
    };
    // test metrics
    QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
    int appsKilled = metrics.getAppsKilled();
    int appsSubmitted = metrics.getAppsSubmitted();
    rm.start();
    MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService());
    nm1.registerNode();
    // submit an app and launch its AM
    RMApp application = rm.submitApp(200);
    MockAM am = MockRM.launchAM(application, rm, nm1);
    rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.LAUNCHED);
    nm1.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.RUNNING);
    rm.waitForState(application.getApplicationId(), RMAppState.ACCEPTED);
    // Now kill the application before a new attempt is launched; the app report
    // will then return the invalid AM host and port.
    KillApplicationRequest request = KillApplicationRequest.newInstance(application.getApplicationId());
    rm.getClientRMService().forceKillApplication(request);
    // Specific test for YARN-1689 follows
    // Suppose a race causes the AM to register at this point; this should not crash the RM.
    am.registerAppAttempt(false);
    // We explicitly intercepted the kill-event to RMAppAttempt, so app should
    // still be in KILLING state.
    rm.waitForState(application.getApplicationId(), RMAppState.KILLING);
    // The AM attempt should now be in the RUNNING state
    rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.RUNNING);
    // Simulate that appAttempt is killed.
    rm.getRMContext().getDispatcher().getEventHandler().handle(new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_KILLED));
    rm.waitForState(application.getApplicationId(), RMAppState.KILLED);
    // test metrics
    metrics = rm.getResourceScheduler().getRootQueueMetrics();
    Assert.assertEquals(appsKilled + 1, metrics.getAppsKilled());
    Assert.assertEquals(appsSubmitted + 1, metrics.getAppsSubmitted());
}
Also used : DrainDispatcher(org.apache.hadoop.yarn.event.DrainDispatcher) RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) EventHandler(org.apache.hadoop.yarn.event.EventHandler) KillApplicationRequest(org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher) RMAppEvent(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent) QueueMetrics(org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics) ArgumentMatcher(org.mockito.ArgumentMatcher) AbstractEvent(org.apache.hadoop.yarn.event.AbstractEvent) Event(org.apache.hadoop.yarn.event.Event) RMAppAttemptEvent(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent) Test(org.junit.Test)
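
The EventArgMatcher above extends the Mockito 1.x abstract ArgumentMatcher class, whose matches method takes an Object. On Mockito 2 and later, org.mockito.ArgumentMatcher is a generic functional interface with a single matches(T) method and argThat lives in org.mockito.ArgumentMatchers, so the same interception can be written as a lambda; a sketch assuming that newer Mockito version:

// Mockito 2+ sketch: ArgumentMatcher is a functional interface, so the KILL
// events can be swallowed with a lambda passed to argThat.
doNothing().when(handler).handle(argThat(
    (ArgumentMatcher<Event>) event ->
        event instanceof RMAppAttemptEvent
            && ((RMAppAttemptEvent) event).getType() == RMAppAttemptEventType.KILL));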

Example 19 with Dispatcher

Use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.

Source: class TestRM, method testKillFinishingApp.

// Test killing an app while it is in the middle of finishing.
@Test(timeout = 30000)
public void testKillFinishingApp() throws Exception {
    // this dispatcher ignores RMAppAttemptEventType.KILL events
    final Dispatcher dispatcher = new DrainDispatcher() {

        @Override
        public EventHandler<Event> getEventHandler() {
            class EventArgMatcher extends ArgumentMatcher<AbstractEvent> {

                @Override
                public boolean matches(Object argument) {
                    if (argument instanceof RMAppAttemptEvent) {
                        if (((RMAppAttemptEvent) argument).getType().equals(RMAppAttemptEventType.KILL)) {
                            return true;
                        }
                    }
                    return false;
                }
            }
            EventHandler handler = spy(super.getEventHandler());
            doNothing().when(handler).handle(argThat(new EventArgMatcher()));
            return handler;
        }
    };
    MockRM rm1 = new MockRM(conf) {

        @Override
        protected Dispatcher createDispatcher() {
            return dispatcher;
        }
    };
    rm1.start();
    MockNM nm1 = new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
    nm1.registerNode();
    RMApp app1 = rm1.submitApp(200);
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    rm1.killApp(app1.getApplicationId());
    FinishApplicationMasterRequest req = FinishApplicationMasterRequest.newInstance(FinalApplicationStatus.SUCCEEDED, "", "");
    am1.unregisterAppAttempt(req, true);
    rm1.waitForState(am1.getApplicationAttemptId(), RMAppAttemptState.FINISHING);
    nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
    rm1.waitForState(am1.getApplicationAttemptId(), RMAppAttemptState.FINISHED);
    rm1.waitForState(app1.getApplicationId(), RMAppState.FINISHED);
}
Also used : DrainDispatcher(org.apache.hadoop.yarn.event.DrainDispatcher) RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) EventHandler(org.apache.hadoop.yarn.event.EventHandler) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher) FinishApplicationMasterRequest(org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest) ArgumentMatcher(org.mockito.ArgumentMatcher) AbstractEvent(org.apache.hadoop.yarn.event.AbstractEvent) Event(org.apache.hadoop.yarn.event.Event) RMAppAttemptEvent(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent) RMAppEvent(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent) Test(org.junit.Test)

Example 20 with Dispatcher

Use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.

Source: class TestSystemMetricsPublisherForV2, method setup.

@BeforeClass
public static void setup() throws Exception {
    if (testRootDir.exists()) {
        // clean up beforehand
        FileContext.getLocalFSFileContext().delete(new Path(testRootDir.getAbsolutePath()), true);
    }
    RMContext rmContext = mock(RMContext.class);
    rmAppsMapInContext = new ConcurrentHashMap<ApplicationId, RMApp>();
    when(rmContext.getRMApps()).thenReturn(rmAppsMapInContext);
    rmTimelineCollectorManager = new RMTimelineCollectorManager(rmContext);
    when(rmContext.getRMTimelineCollectorManager()).thenReturn(rmTimelineCollectorManager);
    Configuration conf = getTimelineV2Conf();
    conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS, FileSystemTimelineWriterImpl.class, TimelineWriter.class);
    rmTimelineCollectorManager.init(conf);
    rmTimelineCollectorManager.start();
    dispatcher.init(conf);
    dispatcher.start();
    metricsPublisher = new TimelineServiceV2Publisher(rmContext) {

        @Override
        protected Dispatcher getDispatcher() {
            return dispatcher;
        }
    };
    metricsPublisher.init(conf);
    metricsPublisher.start();
}
Also used : Path(org.apache.hadoop.fs.Path) RMContext(org.apache.hadoop.yarn.server.resourcemanager.RMContext) RMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp) RMTimelineCollectorManager(org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Dispatcher(org.apache.hadoop.yarn.event.Dispatcher) DrainDispatcher(org.apache.hadoop.yarn.event.DrainDispatcher) BeforeClass(org.junit.BeforeClass)
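
A @BeforeClass like this is usually paired with an @AfterClass that releases what it started. The actual teardown is not shown on this page; a plausible sketch, assuming the fields above are static and stoppable services, would stop them in reverse start order:

@AfterClass
public static void tearDown() throws Exception {
    // Assumed teardown mirroring the setup above: stop services in reverse
    // order of startup, then remove the local test directory.
    if (metricsPublisher != null) {
        metricsPublisher.stop();
    }
    if (rmTimelineCollectorManager != null) {
        rmTimelineCollectorManager.stop();
    }
    dispatcher.stop();
    if (testRootDir.exists()) {
        FileContext.getLocalFSFileContext()
            .delete(new Path(testRootDir.getAbsolutePath()), true);
    }
}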

Aggregations

Dispatcher (org.apache.hadoop.yarn.event.Dispatcher): 56
Test (org.junit.Test): 35
Configuration (org.apache.hadoop.conf.Configuration): 26
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 21
Event (org.apache.hadoop.yarn.event.Event): 18
AsyncDispatcher (org.apache.hadoop.yarn.event.AsyncDispatcher): 15
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext): 14
FileContext (org.apache.hadoop.fs.FileContext): 11
DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher): 11
EventHandler (org.apache.hadoop.yarn.event.EventHandler): 10
NMContext (org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext): 10
RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext): 10
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 9
IOException (java.io.IOException): 8
Job (org.apache.hadoop.mapreduce.v2.app.job.Job): 7
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 7
Container (org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container): 7
Path (org.apache.hadoop.fs.Path): 6
JobId (org.apache.hadoop.mapreduce.v2.api.records.JobId): 6
ArrayList (java.util.ArrayList): 5
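
For orientation, the Dispatcher contract that every example above overrides or wraps boils down to two operations: register an EventHandler for an event-type enum, and expose a generic event handler that producers use to post events. Below is a minimal usage sketch with AsyncDispatcher; the DemoEventType and DemoEvent names are hypothetical and not part of Hadoop.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;

// Hypothetical event type and event class, for illustration only.
enum DemoEventType {
    PING
}

class DemoEvent extends AbstractEvent<DemoEventType> {
    DemoEvent() {
        super(DemoEventType.PING);
    }
}

public class DispatcherDemo {
    public static void main(String[] args) throws Exception {
        AsyncDispatcher dispatcher = new AsyncDispatcher();
        // Route every DemoEventType event to this handler.
        dispatcher.register(DemoEventType.class,
            (EventHandler<DemoEvent>) event ->
                System.out.println("handled " + event.getType()));
        dispatcher.init(new Configuration());
        dispatcher.start();
        // Producers only ever see Dispatcher#getEventHandler(), which is the
        // method the tests above spy on or replace.
        dispatcher.getEventHandler().handle(new DemoEvent());
        // crude wait so the async dispatch thread can process the event
        Thread.sleep(100);
        dispatcher.stop();
    }
}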