Use of org.apache.hadoop.yarn.event.DrainDispatcher in project hadoop by apache.
From the class TestClientToAMTokens, method testClientTokenRace:
@Test(timeout = 20000)
public void testClientTokenRace() throws Exception {
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);
  ContainerManagementProtocol containerManager =
      mock(ContainerManagementProtocol.class);
  StartContainersResponse mockResponse = mock(StartContainersResponse.class);
  when(containerManager.startContainers((StartContainersRequest) any()))
      .thenReturn(mockResponse);
  final DrainDispatcher dispatcher = new DrainDispatcher();
  MockRM rm = new MockRMWithCustomAMLauncher(conf, containerManager) {
    protected ClientRMService createClientRMService() {
      return new ClientRMService(this.rmContext, scheduler, this.rmAppManager,
          this.applicationACLsManager, this.queueACLsManager,
          getRMContext().getRMDelegationTokenSecretManager());
    }

    @Override
    protected Dispatcher createDispatcher() {
      return dispatcher;
    }

    @Override
    protected void doSecureLogin() throws IOException {
    }
  };
  rm.start();
  // Submit an app
  RMApp app = rm.submitApp(1024);
  // Set up a node.
  MockNM nm1 = rm.registerNode("localhost:1234", 3072);
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttempt =
      app.getCurrentAppAttempt().getAppAttemptId();
  final MockAM mockAM =
      new MockAM(rm.getRMContext(), rm.getApplicationMasterService(),
          app.getCurrentAppAttempt().getAppAttemptId());
  UserGroupInformation appUgi =
      UserGroupInformation.createRemoteUser(appAttempt.toString());
  RegisterApplicationMasterResponse response =
      appUgi.doAs(new PrivilegedAction<RegisterApplicationMasterResponse>() {

        @Override
        public RegisterApplicationMasterResponse run() {
          RegisterApplicationMasterResponse response = null;
          try {
            response = mockAM.registerAppAttempt();
          } catch (Exception e) {
            Assert.fail("Exception was not expected");
          }
          return response;
        }
      });
  // Get the app-report.
  GetApplicationReportRequest request =
      Records.newRecord(GetApplicationReportRequest.class);
  request.setApplicationId(app.getApplicationId());
  GetApplicationReportResponse reportResponse =
      rm.getClientRMService().getApplicationReport(request);
  ApplicationReport appReport = reportResponse.getApplicationReport();
  org.apache.hadoop.yarn.api.records.Token originalClientToAMToken =
      appReport.getClientToAMToken();
  // ClientToAMToken master key should have been received on register
  // application master response.
  final ByteBuffer clientMasterKey = response.getClientToAMTokenMasterKey();
  Assert.assertNotNull(clientMasterKey);
  Assert.assertTrue(clientMasterKey.array().length > 0);
  // Start the AM with the correct shared-secret.
  ApplicationAttemptId appAttemptId =
      app.getAppAttempts().keySet().iterator().next();
  Assert.assertNotNull(appAttemptId);
  final CustomAM am = new CustomAM(appAttemptId, null);
  am.init(conf);
  am.start();
  // Now the real test!
  // Set up clients to be able to pick up correct tokens.
  SecurityUtil.setSecurityInfoProviders(new CustomSecurityInfo());
  Token<ClientToAMTokenIdentifier> token =
      ConverterUtils.convertFromYarn(originalClientToAMToken, am.address);
  // Schedule the key to be set after a significant delay
  Timer timer = new Timer();
  TimerTask timerTask = new TimerTask() {

    @Override
    public void run() {
      am.setClientSecretKey(clientMasterKey.array());
    }
  };
  timer.schedule(timerTask, 250);
  // connect should pause waiting for the master key to arrive
  verifyValidToken(conf, am, token);
  am.stop();
  rm.stop();
}
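All of the snippets on this page follow the same DrainDispatcher pattern: register handlers, init and start the dispatcher, post events, then call await() so assertions only run once the event queue has drained. Below is a minimal, self-contained sketch of that pattern; it assumes hadoop-yarn-common plus its test jar (which ships DrainDispatcher) on the classpath, and DrainDispatcherSketch, DemoEventType, and DemoEvent are illustrative names, not Hadoop classes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;

public class DrainDispatcherSketch {

  // Hypothetical event type and event, used only to drive the dispatcher.
  enum DemoEventType { PING }

  static class DemoEvent extends AbstractEvent<DemoEventType> {
    DemoEvent() {
      super(DemoEventType.PING);
    }
  }

  public static void main(String[] args) {
    DrainDispatcher dispatcher = new DrainDispatcher();
    final int[] handled = { 0 };
    // Handlers run on the dispatcher's own thread, which is why tests need await().
    dispatcher.register(DemoEventType.class, new EventHandler<DemoEvent>() {
      @Override
      public void handle(DemoEvent event) {
        handled[0]++;
      }
    });
    dispatcher.init(new Configuration());
    dispatcher.start();
    // Post an event; handle() returns immediately and the work happens asynchronously.
    dispatcher.getEventHandler().handle(new DemoEvent());
    // Block until every queued event has been dispatched and handled.
    dispatcher.await();
    if (handled[0] != 1) {
      throw new AssertionError("event was not handled");
    }
    dispatcher.stop();
  }
}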
Use of org.apache.hadoop.yarn.event.DrainDispatcher in project hadoop by apache.
From the class TestReservationSystemWithRMHA, method addNodeCapacityToPlan:
private void addNodeCapacityToPlan(MockRM rm, int memory, int vCores) {
  try {
    rm.registerNode("127.0.0.1:1", memory, vCores);
    int attempts = 10;
    do {
      DrainDispatcher dispatcher =
          (DrainDispatcher) rm.getRMContext().getDispatcher();
      dispatcher.await();
      rm.getRMContext().getReservationSystem()
          .synchronizePlan(ReservationSystemTestUtil.reservationQ, false);
      if (rm.getRMContext().getReservationSystem()
          .getPlan(ReservationSystemTestUtil.reservationQ).getTotalCapacity()
          .getMemorySize() > 0) {
        break;
      }
      LOG.info("Waiting for node capacity to be added to plan");
      Thread.sleep(100);
    } while (attempts-- > 0);
    if (attempts <= 0) {
      Assert.fail("Exhausted attempts in checking if node capacity was "
          + "added to the plan");
    }
  } catch (Exception e) {
    Assert.fail(e.getMessage());
  }
}
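The do/while loop above is the usual way to wait on state that is updated both by dispatcher events and by a background plan follower: drain the dispatcher, check the condition, and sleep before retrying, with a bounded number of attempts. A generalized sketch of the same idea follows; waitForCondition and Condition are hypothetical names for illustration, not Hadoop utilities, and the fragment assumes it sits in a test class with org.junit.Assert and the DrainDispatcher import used above.

// Hypothetical helper generalizing the drain-check-sleep loop above.
interface Condition {
  boolean holds() throws Exception;
}

static void waitForCondition(DrainDispatcher dispatcher, Condition condition,
    int maxAttempts, long sleepMillis) throws Exception {
  for (int attempt = 0; attempt < maxAttempts; attempt++) {
    dispatcher.await();            // flush any queued events first
    if (condition.holds()) {
      return;                      // condition observed; done
    }
    Thread.sleep(sleepMillis);     // give background threads time to catch up
  }
  Assert.fail("Condition not met after " + maxAttempts + " attempts");
}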
Use of org.apache.hadoop.yarn.event.DrainDispatcher in project hadoop by apache.
From the class TestRMContainerImpl, method testExpireWhileRunning:
@Test
public void testExpireWhileRunning() {
  DrainDispatcher drainDispatcher = new DrainDispatcher();
  EventHandler<RMAppAttemptEvent> appAttemptEventHandler = mock(EventHandler.class);
  EventHandler generic = mock(EventHandler.class);
  drainDispatcher.register(RMAppAttemptEventType.class, appAttemptEventHandler);
  drainDispatcher.register(RMNodeEventType.class, generic);
  drainDispatcher.init(new YarnConfiguration());
  drainDispatcher.start();
  NodeId nodeId = BuilderUtils.newNodeId("host", 3425);
  ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
  ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);
  Resource resource = BuilderUtils.newResource(512, 1);
  Priority priority = BuilderUtils.newPriority(5);
  Container container = BuilderUtils.newContainer(containerId, nodeId,
      "host:3465", resource, priority, null);
  ConcurrentMap<ApplicationId, RMApp> appMap = new ConcurrentHashMap<>();
  RMApp rmApp = mock(RMApp.class);
  appMap.putIfAbsent(appId, rmApp);
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
  RMContext rmContext = mock(RMContext.class);
  when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
  when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
  when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
  when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
  YarnConfiguration conf = new YarnConfiguration();
  conf.setBoolean(
      YarnConfiguration.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO,
      true);
  when(rmContext.getYarnConfiguration()).thenReturn(conf);
  when(rmContext.getRMApps()).thenReturn(appMap);
  RMContainer rmContainer = new RMContainerImpl(container,
      SchedulerRequestKey.extractFrom(container), appAttemptId, nodeId,
      "user", rmContext);
  assertEquals(RMContainerState.NEW, rmContainer.getState());
  assertEquals(resource, rmContainer.getAllocatedResource());
  assertEquals(nodeId, rmContainer.getAllocatedNode());
  assertEquals(priority, rmContainer.getAllocatedSchedulerKey().getPriority());
  verify(writer).containerStarted(any(RMContainer.class));
  verify(publisher).containerCreated(any(RMContainer.class), anyLong());
  rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.START));
  drainDispatcher.await();
  assertEquals(RMContainerState.ALLOCATED, rmContainer.getState());
  rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.ACQUIRED));
  drainDispatcher.await();
  assertEquals(RMContainerState.ACQUIRED, rmContainer.getState());
  rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.LAUNCHED));
  drainDispatcher.await();
  assertEquals(RMContainerState.RUNNING, rmContainer.getState());
  assertEquals(
      "http://host:3465/node/containerlogs/container_1_0001_01_000001/user",
      rmContainer.getLogURL());
  // In RUNNING state. Verify EXPIRE and associated actions.
  reset(appAttemptEventHandler);
  ContainerStatus containerStatus = SchedulerUtils.createAbnormalContainerStatus(
      containerId, SchedulerUtils.EXPIRED_CONTAINER);
  rmContainer.handle(new RMContainerFinishedEvent(containerId, containerStatus,
      RMContainerEventType.EXPIRE));
  drainDispatcher.await();
  assertEquals(RMContainerState.RUNNING, rmContainer.getState());
  verify(writer, never()).containerFinished(any(RMContainer.class));
  verify(publisher, never()).containerFinished(any(RMContainer.class), anyLong());
}
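The EXPIRE check above only asserts what must not happen (no containerFinished calls). If a test also needs to inspect what did reach the mocked attempt handler, the common Mockito pattern is to drain the dispatcher and then capture the delivered events. The fragment below is an illustrative companion, not part of the Hadoop test, and assumes org.mockito.ArgumentCaptor plus the statically imported verify and atLeast.

// Illustrative: capture whatever events were routed to the mocked handler.
drainDispatcher.await();
ArgumentCaptor<RMAppAttemptEvent> captor =
    ArgumentCaptor.forClass(RMAppAttemptEvent.class);
verify(appAttemptEventHandler, atLeast(0)).handle(captor.capture());
List<RMAppAttemptEvent> delivered = captor.getAllValues();
// Assert on 'delivered' as needed, e.g. that no unexpected event types arrived.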
Use of org.apache.hadoop.yarn.event.DrainDispatcher in project hadoop by apache.
From the class TestRMAppTransitions, method setUp:
@Before
public void setUp() throws Exception {
  conf = new YarnConfiguration();
  AuthenticationMethod authMethod = AuthenticationMethod.SIMPLE;
  if (isSecurityEnabled) {
    authMethod = AuthenticationMethod.KERBEROS;
  }
  SecurityUtil.setAuthenticationMethod(authMethod, conf);
  UserGroupInformation.setConfiguration(conf);
  rmDispatcher = new DrainDispatcher();
  ContainerAllocationExpirer containerAllocationExpirer =
      mock(ContainerAllocationExpirer.class);
  AMLivelinessMonitor amLivelinessMonitor = mock(AMLivelinessMonitor.class);
  AMLivelinessMonitor amFinishingMonitor = mock(AMLivelinessMonitor.class);
  store = mock(RMStateStore.class);
  writer = mock(RMApplicationHistoryWriter.class);
  DelegationTokenRenewer renewer = mock(DelegationTokenRenewer.class);
  RMContext realRMContext = new RMContextImpl(rmDispatcher,
      containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor,
      renewer, new AMRMTokenSecretManager(conf, this.rmContext),
      new RMContainerTokenSecretManager(conf),
      new NMTokenSecretManagerInRM(conf),
      new ClientToAMTokenSecretManagerInRM());
  ((RMContextImpl) realRMContext).setStateStore(store);
  publisher = mock(SystemMetricsPublisher.class);
  realRMContext.setSystemMetricsPublisher(publisher);
  realRMContext.setRMApplicationHistoryWriter(writer);
  this.rmContext = spy(realRMContext);
  ResourceScheduler resourceScheduler = mock(ResourceScheduler.class);
  doReturn(null).when(resourceScheduler)
      .getAppResourceUsageReport((ApplicationAttemptId) Matchers.any());
  doReturn(resourceScheduler).when(rmContext).getScheduler();
  doReturn(mock(RMTimelineCollectorManager.class)).when(rmContext)
      .getRMTimelineCollectorManager();
  rmDispatcher.register(RMAppAttemptEventType.class,
      new TestApplicationAttemptEventDispatcher(this.rmContext));
  rmDispatcher.register(RMAppEventType.class,
      new TestApplicationEventDispatcher(rmContext));
  rmDispatcher.register(RMAppManagerEventType.class,
      new TestApplicationManagerEventDispatcher());
  schedulerDispatcher = new TestSchedulerEventDispatcher();
  rmDispatcher.register(SchedulerEventType.class, schedulerDispatcher);
  rmDispatcher.init(conf);
  rmDispatcher.start();
}
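Because setUp() wires a DrainDispatcher rather than a plain AsyncDispatcher, tests built on it can post an event and then block until it has been fully handled before asserting. A sketch of such a helper follows; the method name is hypothetical, while RMAppEvent, RMAppEventType, and the rmDispatcher field are the real ones used above.

// Hypothetical helper: post an app event through the drain dispatcher and wait
// for it (and anything it triggers) to be handled before the test asserts.
private void sendAppEventAndDrain(RMApp application, RMAppEventType type) {
  rmDispatcher.getEventHandler().handle(
      new RMAppEvent(application.getApplicationId(), type));
  rmDispatcher.await();
}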
Use of org.apache.hadoop.yarn.event.DrainDispatcher in project hadoop by apache.
From the class TestAbstractYarnScheduler, method testNodemanagerReconnect:
/**
 * Test the behavior of the scheduler when a node reconnects
 * with changed capabilities. This test is to catch any race conditions
 * that might occur due to the use of the RMNode object.
 * @throws Exception
 */
@Test(timeout = 60000)
public void testNodemanagerReconnect() throws Exception {
  configureScheduler();
  Configuration conf = getConf();
  MockRM rm = new MockRM(conf);
  try {
    rm.start();
    conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, false);
    DrainDispatcher privateDispatcher = new DrainDispatcher();
    SleepHandler sleepHandler = new SleepHandler();
    ResourceTrackerService privateResourceTrackerService =
        getPrivateResourceTrackerService(privateDispatcher, rm, sleepHandler);
    // Register node1
    String hostname1 = "localhost1";
    Resource capability = BuilderUtils.newResource(4096, 4);
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
    RegisterNodeManagerRequest request1 =
        recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
    NodeId nodeId1 = NodeId.newInstance(hostname1, 0);
    request1.setNodeId(nodeId1);
    request1.setHttpPort(0);
    request1.setResource(capability);
    privateResourceTrackerService.registerNodeManager(request1);
    privateDispatcher.await();
    Resource clusterResource = rm.getResourceScheduler().getClusterResource();
    Assert.assertEquals("Initial cluster resources don't match", capability,
        clusterResource);
    Resource newCapability = BuilderUtils.newResource(1024, 1);
    RegisterNodeManagerRequest request2 =
        recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
    request2.setNodeId(nodeId1);
    request2.setHttpPort(0);
    request2.setResource(newCapability);
    // hold up the dispatcher and register the same node with lower capability
    sleepHandler.sleepFlag = true;
    privateResourceTrackerService.registerNodeManager(request2);
    privateDispatcher.await();
    Assert.assertEquals("Cluster resources don't match", newCapability,
        rm.getResourceScheduler().getClusterResource());
    privateResourceTrackerService.stop();
  } finally {
    rm.stop();
  }
}
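SleepHandler is defined elsewhere in TestAbstractYarnScheduler and is not shown on this page; its role is to slow down event handling while sleepFlag is set so that the re-registration races with the first node's events. One plausible shape for such a handler is sketched below, purely for illustration; it is not the actual Hadoop implementation.

// Illustrative stand-in for a SleepHandler-style throttle: registered alongside
// the real scheduler handler, it delays dispatch while sleepFlag is set.
private static class ThrottlingHandler implements EventHandler<SchedulerEvent> {
  volatile boolean sleepFlag = false;

  @Override
  public void handle(SchedulerEvent event) {
    if (sleepFlag) {
      try {
        Thread.sleep(20);                     // widen the race window
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  }
}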