Use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.
The class TestNodeStatusUpdaterForLabels, method testInvalidNodeLabelsFromProvider.
@Test(timeout = 20000)
public void testInvalidNodeLabelsFromProvider() throws InterruptedException,
    IOException {
  final ResourceTrackerForLabels resourceTracker =
      new ResourceTrackerForLabels();
  nm = new NodeManager() {
    @Override
    protected NodeLabelsProvider createNodeLabelsProvider(Configuration conf)
        throws IOException {
      return dummyLabelsProviderRef;
    }

    @Override
    protected NodeStatusUpdater createNodeStatusUpdater(Context context,
        Dispatcher dispatcher, NodeHealthCheckerService healthChecker,
        NodeLabelsProvider labelsProvider) {
      return new NodeStatusUpdaterImpl(context, dispatcher, healthChecker,
          metrics, labelsProvider) {
        @Override
        protected ResourceTracker getRMClient() {
          return resourceTracker;
        }

        @Override
        protected void stopRMProxy() {
          return;
        }
      };
    }
  };

  dummyLabelsProviderRef.setNodeLabels(toNodeLabelSet("P"));
  YarnConfiguration conf = createNMConfigForDistributeNodeLabels();
  conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS,
      "0.0.0.0:" + ServerSocketUtil.getPort(8040, 10));
  nm.init(conf);
  resourceTracker.resetNMHeartbeatReceiveFlag();
  nm.start();

  // Wait till the first heartbeat.
  resourceTracker.waitTillHeartbeat();
  resourceTracker.resetNMHeartbeatReceiveFlag();

  // Heartbeat with invalid labels.
  dummyLabelsProviderRef.setNodeLabels(toNodeLabelSet("_.P"));
  sendOutofBandHeartBeat();
  resourceTracker.waitTillHeartbeat();
  assertNull("On invalid labels the earlier labels must be retained; the HB "
      + "needs to send null", resourceTracker.labels);
  resourceTracker.resetNMHeartbeatReceiveFlag();

  // On the next heartbeat the provider returns the same invalid labels, but
  // the label validation check, and the reset of the RM with an empty label
  // set, should not happen again.
  sendOutofBandHeartBeat();
  resourceTracker.waitTillHeartbeat();
  assertNull("NodeStatusUpdater need not repeatedly send empty labels on "
      + "invalid labels from the provider", resourceTracker.labels);
  resourceTracker.resetNMHeartbeatReceiveFlag();
}
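For context, a minimal sketch of what the toNodeLabelSet helper used above might look like, assuming it simply wraps each label name in a NodeLabel record (the actual helper is defined elsewhere in TestNodeStatusUpdaterForLabels):

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.NodeLabel;

// Hypothetical reconstruction of the toNodeLabelSet helper used in the test
// above; the real implementation lives in TestNodeStatusUpdaterForLabels.
private static Set<NodeLabel> toNodeLabelSet(String... labelNames) {
  Set<NodeLabel> nodeLabels = new HashSet<>();
  for (String name : labelNames) {
    nodeLabels.add(NodeLabel.newInstance(name));
  }
  return nodeLabels;
}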
Use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.
The class TestNodeManagerResync, method testNMSentContainerStatusOnResync.
// This test verifies that when the NM gets a RESYNC response to its last
// heartbeat, it can re-send the container statuses already sent via that
// heartbeat when it re-registers with the RM.
@Test
public void testNMSentContainerStatusOnResync() throws Exception {
  final ContainerStatus testCompleteContainer =
      TestNodeStatusUpdater.createContainerStatus(2, ContainerState.COMPLETE);
  final Container container =
      TestNodeStatusUpdater.getMockContainer(testCompleteContainer);
  NMContainerStatus report =
      createNMContainerStatus(2, ContainerState.COMPLETE);
  when(container.getNMContainerStatus()).thenReturn(report);
  NodeManager nm = new NodeManager() {
    int registerCount = 0;

    @Override
    protected NodeStatusUpdater createNodeStatusUpdater(Context context,
        Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
      return new TestNodeStatusUpdaterResync(context, dispatcher,
          healthChecker, metrics) {
        @Override
        protected ResourceTracker createResourceTracker() {
          return new MockResourceTracker() {
            @Override
            public RegisterNodeManagerResponse registerNodeManager(
                RegisterNodeManagerRequest request)
                throws YarnException, IOException {
              if (registerCount == 0) {
                // First registration: no container info yet.
                try {
                  Assert.assertEquals(0,
                      request.getNMContainerStatuses().size());
                } catch (AssertionError error) {
                  error.printStackTrace();
                  assertionFailedInThread.set(true);
                }
                // Put the completed container into the context.
                getNMContext().getContainers().put(
                    testCompleteContainer.getContainerId(), container);
                getNMContext().getApplications().put(
                    testCompleteContainer.getContainerId()
                        .getApplicationAttemptId().getApplicationId(),
                    mock(Application.class));
              } else {
                // The second registration contains the completed container
                // info.
                List<NMContainerStatus> statuses =
                    request.getNMContainerStatuses();
                try {
                  Assert.assertEquals(1, statuses.size());
                  Assert.assertEquals(testCompleteContainer.getContainerId(),
                      statuses.get(0).getContainerId());
                } catch (AssertionError error) {
                  error.printStackTrace();
                  assertionFailedInThread.set(true);
                }
              }
              registerCount++;
              return super.registerNodeManager(request);
            }

            @Override
            public NodeHeartbeatResponse nodeHeartbeat(
                NodeHeartbeatRequest request) {
              // The first heartbeat contains the completed container info.
              List<ContainerStatus> statuses =
                  request.getNodeStatus().getContainersStatuses();
              try {
                Assert.assertEquals(1, statuses.size());
                Assert.assertEquals(testCompleteContainer.getContainerId(),
                    statuses.get(0).getContainerId());
              } catch (AssertionError error) {
                error.printStackTrace();
                assertionFailedInThread.set(true);
              }
              // Notify RESYNC on the first heartbeat.
              return YarnServerBuilderUtils.newNodeHeartbeatResponse(1,
                  NodeAction.RESYNC, null, null, null, null, 1000L);
            }
          };
        }
      };
    }
  };

  YarnConfiguration conf = createNMConfig();
  nm.init(conf);
  nm.start();
  try {
    syncBarrier.await();
  } catch (BrokenBarrierException e) {
    // Ignored: the barrier only synchronizes with the resync thread.
  }
  Assert.assertFalse(assertionFailedInThread.get());
  nm.stop();
}
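The createNMContainerStatus helper referenced above builds the completed-container report that the NM re-sends on re-registration. A hedged sketch, assuming the trunk-era NMContainerStatus.newInstance variant that takes a container version field; the real helper is defined in TestNodeManagerResync and its field values may differ:

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;

// Sketch: build a minimal NMContainerStatus for container <id> of
// application attempt 1; diagnostics and resource values are placeholders.
public static NMContainerStatus createNMContainerStatus(int id,
    ContainerState containerState) {
  ApplicationId applicationId = ApplicationId.newInstance(0, 1);
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.newInstance(applicationId, 1);
  ContainerId containerId =
      ContainerId.newContainerId(applicationAttemptId, id);
  return NMContainerStatus.newInstance(containerId, 0, containerState,
      Resource.newInstance(1024, 1), "test diagnostics", 0,
      Priority.newInstance(10), 0);
}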
Use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.
The class TestRM, method testApplicationKillAtAcceptedState.
/**
 * Validate killing an application while it is in the ACCEPTED state.
 * @throws Exception exception
 */
@Test(timeout = 60000)
public void testApplicationKillAtAcceptedState() throws Exception {
  // This dispatcher intercepts and drops RMAppAttemptEventType.KILL events.
  final Dispatcher dispatcher = new DrainDispatcher() {
    @Override
    public EventHandler<Event> getEventHandler() {
      class EventArgMatcher extends ArgumentMatcher<AbstractEvent> {
        @Override
        public boolean matches(Object argument) {
          if (argument instanceof RMAppAttemptEvent) {
            if (((RMAppAttemptEvent) argument).getType()
                .equals(RMAppAttemptEventType.KILL)) {
              return true;
            }
          }
          return false;
        }
      }
      EventHandler handler = spy(super.getEventHandler());
      doNothing().when(handler).handle(argThat(new EventArgMatcher()));
      return handler;
    }
  };
  MockRM rm = new MockRM(conf) {
    @Override
    protected Dispatcher createDispatcher() {
      return dispatcher;
    }
  };

  // Capture the metrics baseline.
  QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
  int appsKilled = metrics.getAppsKilled();
  int appsSubmitted = metrics.getAppsSubmitted();

  rm.start();
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService());
  nm1.registerNode();

  // Submit an app and drive it to the ACCEPTED state.
  RMApp application = rm.submitApp(200);
  MockAM am = MockRM.launchAM(application, rm, nm1);
  rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.LAUNCHED);
  nm1.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.RUNNING);
  rm.waitForState(application.getApplicationId(), RMAppState.ACCEPTED);

  // Now kill the application before the new attempt is launched; the app
  // report returns the invalid AM host and port.
  KillApplicationRequest request =
      KillApplicationRequest.newInstance(application.getApplicationId());
  rm.getClientRMService().forceKillApplication(request);

  // Specific test for YARN-1689 follows.
  // Now let's say a race causes the AM to register now. This should not
  // crash the RM.
  am.registerAppAttempt(false);

  // We explicitly intercepted the kill event to RMAppAttempt, so the app
  // should still be in the KILLING state.
  rm.waitForState(application.getApplicationId(), RMAppState.KILLING);
  // The AM attempt should now be in the RUNNING state.
  rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.RUNNING);

  // Simulate that the app attempt is killed.
  rm.getRMContext().getDispatcher().getEventHandler().handle(
      new RMAppEvent(application.getApplicationId(),
          RMAppEventType.ATTEMPT_KILLED));
  rm.waitForState(application.getApplicationId(), RMAppState.KILLED);

  // Verify the metrics.
  metrics = rm.getResourceScheduler().getRootQueueMetrics();
  Assert.assertEquals(appsKilled + 1, metrics.getAppsKilled());
  Assert.assertEquals(appsSubmitted + 1, metrics.getAppsSubmitted());
}
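Outside of tests, the same force-kill is normally issued through the YarnClient API rather than by calling ClientRMService directly; a minimal sketch (the wrapper class and method names are illustrative, not part of the test above):

import java.io.IOException;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class KillAppExample {
  // Kill the given application through YarnClient, which sends the same
  // KillApplicationRequest RPC the test above hands to ClientRMService.
  static void forceKill(ApplicationId appId)
      throws IOException, YarnException {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    try {
      client.killApplication(appId);
    } finally {
      client.stop();
    }
  }
}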
Use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.
The class TestRM, method testKillFinishingApp.
// Test killing an app while it is finishing in the meanwhile.
@Test(timeout = 30000)
public void testKillFinishingApp() throws Exception {
  // This dispatcher ignores RMAppAttemptEventType.KILL events.
  final Dispatcher dispatcher = new DrainDispatcher() {
    @Override
    public EventHandler<Event> getEventHandler() {
      class EventArgMatcher extends ArgumentMatcher<AbstractEvent> {
        @Override
        public boolean matches(Object argument) {
          if (argument instanceof RMAppAttemptEvent) {
            if (((RMAppAttemptEvent) argument).getType()
                .equals(RMAppAttemptEventType.KILL)) {
              return true;
            }
          }
          return false;
        }
      }
      EventHandler handler = spy(super.getEventHandler());
      doNothing().when(handler).handle(argThat(new EventArgMatcher()));
      return handler;
    }
  };
  MockRM rm1 = new MockRM(conf) {
    @Override
    protected Dispatcher createDispatcher() {
      return dispatcher;
    }
  };
  rm1.start();
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
  nm1.registerNode();

  RMApp app1 = rm1.submitApp(200);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);

  // Kill the app while the AM is unregistering; the kill event is dropped,
  // so the normal finish path should still complete.
  rm1.killApp(app1.getApplicationId());
  FinishApplicationMasterRequest req = FinishApplicationMasterRequest
      .newInstance(FinalApplicationStatus.SUCCEEDED, "", "");
  am1.unregisterAppAttempt(req, true);
  rm1.waitForState(am1.getApplicationAttemptId(),
      RMAppAttemptState.FINISHING);
  nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1,
      ContainerState.COMPLETE);
  rm1.waitForState(am1.getApplicationAttemptId(), RMAppAttemptState.FINISHED);
  rm1.waitForState(app1.getApplicationId(), RMAppState.FINISHED);
}
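For comparison, a real ApplicationMaster unregisters through AMRMClient, which builds the same FinishApplicationMasterRequest that the test constructs by hand; a minimal sketch (the wrapper class is illustrative):

import java.io.IOException;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class UnregisterAMExample {
  // Unregister a successfully finished AM; AMRMClient wraps the
  // FinishApplicationMasterRequest used in the test above.
  static void finishSuccessfully() throws IOException, YarnException {
    AMRMClient<ContainerRequest> amClient = AMRMClient.createAMRMClient();
    amClient.init(new YarnConfiguration());
    amClient.start();
    try {
      amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,
          "all work completed", /* trackingUrl */ null);
    } finally {
      amClient.stop();
    }
  }
}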
Use of org.apache.hadoop.yarn.event.Dispatcher in project hadoop by apache.
The class TestSystemMetricsPublisherForV2, method setup.
@BeforeClass
public static void setup() throws Exception {
  if (testRootDir.exists()) {
    // Clean up beforehand.
    FileContext.getLocalFSFileContext()
        .delete(new Path(testRootDir.getAbsolutePath()), true);
  }
  RMContext rmContext = mock(RMContext.class);
  rmAppsMapInContext = new ConcurrentHashMap<ApplicationId, RMApp>();
  when(rmContext.getRMApps()).thenReturn(rmAppsMapInContext);
  rmTimelineCollectorManager = new RMTimelineCollectorManager(rmContext);
  when(rmContext.getRMTimelineCollectorManager())
      .thenReturn(rmTimelineCollectorManager);

  Configuration conf = getTimelineV2Conf();
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
      FileSystemTimelineWriterImpl.class, TimelineWriter.class);
  rmTimelineCollectorManager.init(conf);
  rmTimelineCollectorManager.start();

  dispatcher.init(conf);
  dispatcher.start();
  metricsPublisher = new TimelineServiceV2Publisher(rmContext) {
    @Override
    protected Dispatcher getDispatcher() {
      return dispatcher;
    }
  };
  metricsPublisher.init(conf);
  metricsPublisher.start();
}
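The getTimelineV2Conf() helper above is defined in TestSystemMetricsPublisherForV2. A hedged sketch of the configuration it plausibly returns, assuming it enables ATSv2, turns on the system metrics publisher, and points the file-system writer at testRootDir:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;

// Assumed reconstruction; the real helper may set additional properties.
private static Configuration getTimelineV2Conf() {
  Configuration conf = new Configuration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
  conf.setBoolean(YarnConfiguration.SYSTEM_METRICS_PUBLISHER_ENABLED, true);
  conf.set(FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_DIR_ROOT,
      testRootDir.getAbsolutePath());
  return conf;
}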