Use of org.apache.hadoop.yarn.server.resourcemanager.ResourceManager in project hadoop by apache.
The class TestRMNMSecretKeys, method validateRMNMKeyExchange.
private void validateRMNMKeyExchange(YarnConfiguration conf) throws Exception {
  // Default rolling and activation intervals are large enough, no need to
  // intervene.
  final DrainDispatcher dispatcher = new DrainDispatcher();
  ResourceManager rm = new ResourceManager() {

    @Override
    protected void doSecureLogin() throws IOException {
      // Do nothing.
    }

    @Override
    protected Dispatcher createDispatcher() {
      return dispatcher;
    }

    @Override
    protected void startWepApp() {
      // Don't need it, skip.
    }
  };
  rm.init(conf);
  rm.start();

  // Testing ContainerToken and NMToken
  String containerToken = "Container Token : ";
  String nmToken = "NM Token : ";

  MockNM nm = new MockNM("host:1234", 3072, rm.getResourceTrackerService());
  RegisterNodeManagerResponse registrationResponse = nm.registerNode();

  MasterKey containerTokenMasterKey =
      registrationResponse.getContainerTokenMasterKey();
  Assert.assertNotNull(containerToken
      + "Registration should cause a key-update!", containerTokenMasterKey);
  MasterKey nmTokenMasterKey = registrationResponse.getNMTokenMasterKey();
  Assert.assertNotNull(nmToken
      + "Registration should cause a key-update!", nmTokenMasterKey);
  dispatcher.await();

  NodeHeartbeatResponse response = nm.nodeHeartbeat(true);
  Assert.assertNull(containerToken
      + "First heartbeat after registration shouldn't get any key updates!",
      response.getContainerTokenMasterKey());
  Assert.assertNull(nmToken
      + "First heartbeat after registration shouldn't get any key updates!",
      response.getNMTokenMasterKey());
  dispatcher.await();

  response = nm.nodeHeartbeat(true);
  Assert.assertNull(containerToken
      + "Even second heartbeat after registration shouldn't get any key updates!",
      response.getContainerTokenMasterKey());
  Assert.assertNull(nmToken
      + "Even second heartbeat after registration shouldn't get any key updates!",
      response.getNMTokenMasterKey());
  dispatcher.await();

  // Let's force a roll-over.
  rm.getRMContext().getContainerTokenSecretManager().rollMasterKey();
  rm.getRMContext().getNMTokenSecretManager().rollMasterKey();

  // Heartbeats after roll-over and before activation should be fine.
  response = nm.nodeHeartbeat(true);
  Assert.assertNotNull(containerToken
      + "Heartbeats after roll-over and before activation should not err out.",
      response.getContainerTokenMasterKey());
  Assert.assertNotNull(nmToken
      + "Heartbeats after roll-over and before activation should not err out.",
      response.getNMTokenMasterKey());
  Assert.assertEquals(containerToken
      + "Roll-over should have incremented the key-id only by one!",
      containerTokenMasterKey.getKeyId() + 1,
      response.getContainerTokenMasterKey().getKeyId());
  Assert.assertEquals(nmToken
      + "Roll-over should have incremented the key-id only by one!",
      nmTokenMasterKey.getKeyId() + 1,
      response.getNMTokenMasterKey().getKeyId());
  dispatcher.await();

  response = nm.nodeHeartbeat(true);
  Assert.assertNull(containerToken
      + "Second heartbeat after roll-over shouldn't get any key updates!",
      response.getContainerTokenMasterKey());
  Assert.assertNull(nmToken
      + "Second heartbeat after roll-over shouldn't get any key updates!",
      response.getNMTokenMasterKey());
  dispatcher.await();

  // Let's force activation.
  rm.getRMContext().getContainerTokenSecretManager().activateNextMasterKey();
  rm.getRMContext().getNMTokenSecretManager().activateNextMasterKey();

  response = nm.nodeHeartbeat(true);
  Assert.assertNull(containerToken
      + "Activation shouldn't cause any key updates!",
      response.getContainerTokenMasterKey());
  Assert.assertNull(nmToken
      + "Activation shouldn't cause any key updates!",
      response.getNMTokenMasterKey());
  dispatcher.await();

  response = nm.nodeHeartbeat(true);
  Assert.assertNull(containerToken
      + "Even second heartbeat after activation shouldn't get any key updates!",
      response.getContainerTokenMasterKey());
  Assert.assertNull(nmToken
      + "Even second heartbeat after activation shouldn't get any key updates!",
      response.getNMTokenMasterKey());
  dispatcher.await();

  rm.stop();
}
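
A caller of this private helper has to supply a YarnConfiguration; presumably the surrounding test enables token-based security before invoking the exchange check. A minimal, hypothetical caller is sketched below: the method name, timeout, and configuration value are assumptions, not the project's verbatim code.

@Test(timeout = 1000000)
public void testNMUpdation() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  // Rolling/activation intervals stay at their large defaults so the test
  // drives roll-over and activation explicitly via the secret managers.
  conf.set("hadoop.security.authentication", "kerberos");  // assumed setting
  validateRMNMKeyExchange(conf);
}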
Use of org.apache.hadoop.yarn.server.resourcemanager.ResourceManager in project hadoop by apache.
The class TestCapacityScheduler, method setUp.
@Before
public void setUp() throws Exception {
  resourceManager = new ResourceManager() {

    @Override
    protected RMNodeLabelsManager createNodeLabelManager() {
      RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
      mgr.init(getConfig());
      return mgr;
    }
  };
  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
  setupQueueConfiguration(csConf);
  YarnConfiguration conf = new YarnConfiguration(csConf);
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  resourceManager.init(conf);
  resourceManager.getRMContext().getContainerTokenSecretManager()
      .rollMasterKey();
  resourceManager.getRMContext().getNMTokenSecretManager().rollMasterKey();
  ((AsyncDispatcher) resourceManager.getRMContext().getDispatcher()).start();
  mockContext = mock(RMContext.class);
  when(mockContext.getConfigurationProvider()).thenReturn(
      new LocalConfigurationProvider());
}
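
A setUp like this one is normally paired with a tearDown that stops the ResourceManager so the dispatcher thread and scheduler do not leak across tests. A minimal sketch, assuming only the resourceManager field initialized above:

@After
public void tearDown() throws Exception {
  if (resourceManager != null) {
    // Stops the composite service, including the dispatcher started in setUp.
    resourceManager.stop();
  }
}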
Use of org.apache.hadoop.yarn.server.resourcemanager.ResourceManager in project hadoop by apache.
The class TestZKRMStateStore, method testZKRootPathAcls.
/**
 * Test that the RM can start successfully in HA-disabled mode if it was
 * previously running in HA-enabled mode, and that it can then be started in
 * HA mode again after running with HA disabled. ZooKeeper should not throw a
 * NoAuth exception and the RM should start successfully in each case.
 */
@Test
public void testZKRootPathAcls() throws Exception {
  StateChangeRequestInfo req = new StateChangeRequestInfo(
      HAServiceProtocol.RequestSource.REQUEST_BY_USER);
  String rootPath = YarnConfiguration.DEFAULT_ZK_RM_STATE_STORE_PARENT_PATH
      + "/" + ZKRMStateStore.ROOT_ZNODE_NAME;

  // Start RM with HA enabled
  Configuration conf =
      createHARMConf("rm1,rm2", "rm1", 1234, false, curatorTestingServer);
  ResourceManager rm = new MockRM(conf);
  rm.start();
  rm.getRMContext().getRMAdminService().transitionToActive(req);
  List<ACL> acls =
      ((ZKRMStateStore) rm.getRMContext().getStateStore()).getACL(rootPath);
  assertEquals(acls.size(), 2);
  // CREATE and DELETE permissions for root node based on RM ID
  verifyZKACL("digest", "localhost", Perms.CREATE | Perms.DELETE, acls);
  verifyZKACL("world", "anyone", Perms.ALL ^ (Perms.CREATE | Perms.DELETE),
      acls);
  rm.close();

  // Now start RM with HA disabled. NoAuth Exception should not be thrown.
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, false);
  rm = new MockRM(conf);
  rm.start();
  rm.getRMContext().getRMAdminService().transitionToActive(req);
  acls = ((ZKRMStateStore) rm.getRMContext().getStateStore()).getACL(rootPath);
  assertEquals(acls.size(), 1);
  verifyZKACL("world", "anyone", Perms.ALL, acls);
  rm.close();

  // Start RM with HA enabled.
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  rm = new MockRM(conf);
  rm.start();
  rm.getRMContext().getRMAdminService().transitionToActive(req);
  acls = ((ZKRMStateStore) rm.getRMContext().getStateStore()).getACL(rootPath);
  assertEquals(acls.size(), 2);
  verifyZKACL("digest", "localhost", Perms.CREATE | Perms.DELETE, acls);
  verifyZKACL("world", "anyone", Perms.ALL ^ (Perms.CREATE | Perms.DELETE),
      acls);
  rm.close();
}
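
verifyZKACL is a private helper of the test class and is not shown here. A plausible sketch that matches how it is called above (scheme, id substring, expected permission bits, and the ACL list) might look like the following; the exact matching logic and failure message are assumptions:

private void verifyZKACL(String scheme, String id, int perm, List<ACL> acls) {
  for (ACL acl : acls) {
    if (acl.getId().getScheme().equals(scheme)
        && acl.getId().getId().contains(id)
        && acl.getPerms() == perm) {
      // A matching ACL entry was found.
      return;
    }
  }
  Assert.fail("ACL not found: " + scheme + ":" + id + " with perms " + perm);
}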
Use of org.apache.hadoop.yarn.server.resourcemanager.ResourceManager in project hadoop by apache.
The class TestZKRMStateStore, method testFencing.
@SuppressWarnings("unchecked")
@Test
public void testFencing() throws Exception {
  StateChangeRequestInfo req = new StateChangeRequestInfo(
      HAServiceProtocol.RequestSource.REQUEST_BY_USER);

  Configuration conf1 =
      createHARMConf("rm1,rm2", "rm1", 1234, false, curatorTestingServer);
  ResourceManager rm1 = new MockRM(conf1);
  rm1.start();
  rm1.getRMContext().getRMAdminService().transitionToActive(req);
  assertEquals("RM with ZKStore didn't start", Service.STATE.STARTED,
      rm1.getServiceState());
  assertEquals("RM should be Active",
      HAServiceProtocol.HAServiceState.ACTIVE,
      rm1.getRMContext().getRMAdminService().getServiceStatus().getState());

  Configuration conf2 =
      createHARMConf("rm1,rm2", "rm2", 5678, false, curatorTestingServer);
  ResourceManager rm2 = new MockRM(conf2);
  rm2.start();
  rm2.getRMContext().getRMAdminService().transitionToActive(req);
  assertEquals("RM with ZKStore didn't start", Service.STATE.STARTED,
      rm2.getServiceState());
  assertEquals("RM should be Active",
      HAServiceProtocol.HAServiceState.ACTIVE,
      rm2.getRMContext().getRMAdminService().getServiceStatus().getState());

  // Wait (up to ZK_TIMEOUT_MS) for rm1 to notice it has been fenced.
  for (int i = 0; i < ZK_TIMEOUT_MS / 50; i++) {
    if (HAServiceProtocol.HAServiceState.ACTIVE ==
        rm1.getRMContext().getRMAdminService().getServiceStatus().getState()) {
      Thread.sleep(100);
    }
  }
  assertEquals("RM should have been fenced",
      HAServiceProtocol.HAServiceState.STANDBY,
      rm1.getRMContext().getRMAdminService().getServiceStatus().getState());
  assertEquals("RM should be Active",
      HAServiceProtocol.HAServiceState.ACTIVE,
      rm2.getRMContext().getRMAdminService().getServiceStatus().getState());
}
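
The fixed polling loop above waits for rm1 to observe that it has been fenced by rm2's transition on the shared ZooKeeper-backed state store. An equivalent, slightly more explicit wait helper (a sketch, not part of the original test) could break out as soon as the transition to standby happens:

private static void waitForStandby(ResourceManager rm, long timeoutMs)
    throws Exception {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (System.currentTimeMillis() < deadline
      && rm.getRMContext().getRMAdminService().getServiceStatus().getState()
          != HAServiceProtocol.HAServiceState.STANDBY) {
    // Poll until the RM reports STANDBY or the deadline passes.
    Thread.sleep(100);
  }
}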
Use of org.apache.hadoop.yarn.server.resourcemanager.ResourceManager in project hadoop by apache.
The class TestRMWebApp, method mockFifoRm.
public static ResourceManager mockFifoRm(int apps, int racks, int nodes,
    int mbsPerNode) throws Exception {
  ResourceManager rm = mock(ResourceManager.class);
  RMContext rmContext = mockRMContext(apps, racks, nodes, mbsPerNode);
  ResourceScheduler rs = mockFifoScheduler(rmContext);
  when(rm.getResourceScheduler()).thenReturn(rs);
  when(rm.getRMContext()).thenReturn(rmContext);
  return rm;
}