Example usage of org.apache.hadoop.yarn.conf.YarnConfiguration in the Apache Hadoop project, taken from class TestNMTokenSecretManagerInNM, method testRecovery.
@Test
public void testRecovery() throws IOException {
  // Enable NM recovery so the secret manager persists its keys to the store.
  YarnConfiguration yarnConf = new YarnConfiguration();
  yarnConf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
  final NodeId localNode = NodeId.newInstance("somehost", 1234);
  final ApplicationAttemptId firstAttempt =
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(1, 1), 1);
  final ApplicationAttemptId secondAttempt =
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(2, 2), 2);
  NMTokenKeyGeneratorForTest keyGenerator = new NMTokenKeyGeneratorForTest();
  NMMemoryStateStoreService store = new NMMemoryStateStoreService();
  store.init(yarnConf);
  store.start();

  // Seed a manager with one master key and two live attempt tokens.
  NMTokenSecretManagerInNM manager = new NMTokenSecretManagerInNM(store);
  manager.setNodeId(localNode);
  MasterKey activeKey = keyGenerator.generateKey();
  manager.setMasterKey(activeKey);
  NMTokenIdentifier token1 =
      getNMTokenId(manager.createNMToken(firstAttempt, localNode, "user1"));
  NMTokenIdentifier token2 =
      getNMTokenId(manager.createNMToken(secondAttempt, localNode, "user2"));
  manager.appAttemptStartContainer(token1);
  manager.appAttemptStartContainer(token2);
  assertTrue(manager.isAppAttemptNMTokenKeyPresent(firstAttempt));
  assertTrue(manager.isAppAttemptNMTokenKeyPresent(secondAttempt));
  assertNotNull(manager.retrievePassword(token1));
  assertNotNull(manager.retrievePassword(token2));

  // Simulated restart: a recovered manager must expose the same master key
  // and still validate both tokens.
  manager = new NMTokenSecretManagerInNM(store);
  manager.recover();
  manager.setNodeId(localNode);
  assertEquals(activeKey, manager.getCurrentKey());
  assertTrue(manager.isAppAttemptNMTokenKeyPresent(firstAttempt));
  assertTrue(manager.isAppAttemptNMTokenKeyPresent(secondAttempt));
  assertNotNull(manager.retrievePassword(token1));
  assertNotNull(manager.retrievePassword(token2));

  // Roll the master key and retire the first application.
  activeKey = keyGenerator.generateKey();
  manager.setMasterKey(activeKey);
  manager.appFinished(firstAttempt.getApplicationId());

  // After restart the first attempt's token must still verify because the
  // previous master key was persisted alongside the current one.
  manager = new NMTokenSecretManagerInNM(store);
  manager.recover();
  manager.setNodeId(localNode);
  assertEquals(activeKey, manager.getCurrentKey());
  assertFalse(manager.isAppAttemptNMTokenKeyPresent(firstAttempt));
  assertTrue(manager.isAppAttemptNMTokenKeyPresent(secondAttempt));
  assertNotNull(manager.retrievePassword(token1));
  assertNotNull(manager.retrievePassword(token2));

  // Roll once more and restart: attempt1's key has aged out of the store,
  // while attempt2 remains valid via its persisted per-app key.
  activeKey = keyGenerator.generateKey();
  manager.setMasterKey(activeKey);
  manager = new NMTokenSecretManagerInNM(store);
  manager.recover();
  manager.setNodeId(localNode);
  assertEquals(activeKey, manager.getCurrentKey());
  assertFalse(manager.isAppAttemptNMTokenKeyPresent(firstAttempt));
  assertTrue(manager.isAppAttemptNMTokenKeyPresent(secondAttempt));
  try {
    manager.retrievePassword(token1);
    fail("attempt token should not still be valid");
  } catch (InvalidToken e) {
    // expected
  }
  assertNotNull(manager.retrievePassword(token2));

  // Finish the second application as well; after one more restart neither
  // token should verify.
  manager.appFinished(secondAttempt.getApplicationId());
  manager = new NMTokenSecretManagerInNM(store);
  manager.recover();
  manager.setNodeId(localNode);
  assertEquals(activeKey, manager.getCurrentKey());
  assertFalse(manager.isAppAttemptNMTokenKeyPresent(firstAttempt));
  assertFalse(manager.isAppAttemptNMTokenKeyPresent(secondAttempt));
  try {
    manager.retrievePassword(token1);
    fail("attempt token should not still be valid");
  } catch (InvalidToken e) {
    // expected
  }
  try {
    manager.retrievePassword(token2);
    fail("attempt token should not still be valid");
  } catch (InvalidToken e) {
    // expected
  }
  store.close();
}
Example usage of org.apache.hadoop.yarn.conf.YarnConfiguration in the Apache Hadoop project, taken from class TestCgroupsLCEResourcesHandler, method testcheckAndDeleteCgroup.
@Test
public void testcheckAndDeleteCgroup() throws Exception {
  CgroupsLCEResourcesHandler handler = new CgroupsLCEResourcesHandler();
  handler.setConf(new YarnConfiguration());
  handler.initConfig();
  FileUtils.deleteQuietly(cgroupDir);
  // Test 0: no "tasks" file present -> the cgroup cannot be inspected,
  // so checkAndDeleteCgroup must report false.
  Assert.assertFalse(handler.checkAndDeleteCgroup(cgroupDir));

  File tfile = new File(cgroupDir.getAbsolutePath(), "tasks");
  FileOutputStream fos = FileUtils.openOutputStream(tfile);
  File fspy = Mockito.spy(cgroupDir);
  // Stub delete() via doReturn().when() instead of the deprecated
  // Mockito.stub(...).toReturn(...): stub()/when() on a spy invokes the
  // REAL delete() while setting up the stub, which could actually remove
  // the directory under test.
  Mockito.doReturn(true).when(fspy).delete();
  // Test 1: tasks file exists but is empty -> cgroup has no attached
  // processes and is deletable, expect true.
  Assert.assertTrue(handler.checkAndDeleteCgroup(fspy));

  // Test 2: tasks file now contains a pid -> cgroup is busy and must not
  // be deleted, expect false.
  fos.write("1234".getBytes());
  fos.close();
  Assert.assertFalse(handler.checkAndDeleteCgroup(fspy));
  FileUtils.deleteQuietly(cgroupDir);
}
Example usage of org.apache.hadoop.yarn.conf.YarnConfiguration in the Apache Hadoop project, taken from class TestCgroupsLCEResourcesHandler, method testContainerLimits.
@Test
public void testContainerLimits() throws IOException {
  LinuxContainerExecutor executor = new MockLinuxContainerExecutor();
  CustomCgroupsLCEResourceHandler resourcesHandler =
      new CustomCgroupsLCEResourceHandler();
  resourcesHandler.generateLimitsMode = true;
  YarnConfiguration yarnConf = new YarnConfiguration();
  yarnConf.setBoolean(YarnConfiguration.NM_DISK_RESOURCE_ENABLED, true);
  final int processorCount = 4;
  ResourceCalculatorPlugin calculatorPlugin =
      Mockito.mock(ResourceCalculatorPlugin.class);
  Mockito.doReturn(processorCount).when(calculatorPlugin).getNumProcessors();
  Mockito.doReturn(processorCount).when(calculatorPlugin).getNumCores();
  resourcesHandler.setConf(yarnConf);
  resourcesHandler.initConfig();

  // Fake cgroup cpu mount and mtab so init() can resolve controller paths.
  File cpuMountDir =
      TestCGroupsHandlerImpl.createMockCgroupMount(cgroupDir, "cpu");
  File fakeMtab = TestCGroupsHandlerImpl.createMockMTab(cgroupDir);
  resourcesHandler.setMtabFile(fakeMtab.getAbsolutePath());
  resourcesHandler.init(executor, calculatorPlugin);

  ContainerId containerId = ContainerId.fromString("container_1_1_1_1");
  resourcesHandler.preExecute(containerId, Resource.newInstance(1024, 1));
  // Controller paths map must be populated after preExecute.
  Assert.assertNotNull(resourcesHandler.getControllerPaths());

  // Default case: strict mode off, so no CFS period/quota files are written.
  File containerCpuDir = new File(cpuMountDir, containerId.toString());
  Assert.assertTrue(containerCpuDir.exists());
  Assert.assertTrue(containerCpuDir.isDirectory());
  File periodFile = new File(containerCpuDir, "cpu.cfs_period_us");
  File quotaFile = new File(containerCpuDir, "cpu.cfs_quota_us");
  Assert.assertFalse(periodFile.exists());
  Assert.assertFalse(quotaFile.exists());

  // Strict mode, but the container requests all vcores: no limit files
  // because the full CPU is being used anyway.
  FileUtils.deleteQuietly(containerCpuDir);
  yarnConf.setBoolean(
      YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, true);
  resourcesHandler.initConfig();
  resourcesHandler.preExecute(containerId,
      Resource.newInstance(1024, YarnConfiguration.DEFAULT_NM_VCORES));
  Assert.assertTrue(containerCpuDir.exists());
  Assert.assertTrue(containerCpuDir.isDirectory());
  periodFile = new File(containerCpuDir, "cpu.cfs_period_us");
  quotaFile = new File(containerCpuDir, "cpu.cfs_quota_us");
  Assert.assertFalse(periodFile.exists());
  Assert.assertFalse(quotaFile.exists());

  // Strict mode with half the vcores: limit files must be written.
  FileUtils.deleteQuietly(containerCpuDir);
  yarnConf.setBoolean(
      YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, true);
  resourcesHandler.initConfig();
  resourcesHandler.preExecute(containerId,
      Resource.newInstance(1024, YarnConfiguration.DEFAULT_NM_VCORES / 2));
  Assert.assertTrue(containerCpuDir.exists());
  Assert.assertTrue(containerCpuDir.isDirectory());
  periodFile = new File(containerCpuDir, "cpu.cfs_period_us");
  quotaFile = new File(containerCpuDir, "cpu.cfs_quota_us");
  Assert.assertTrue(periodFile.exists());
  Assert.assertTrue(quotaFile.exists());
  Assert.assertEquals(500 * 1000, readIntFromFile(periodFile));
  Assert.assertEquals(1000 * 1000, readIntFromFile(quotaFile));

  // CGroups capped at 50% of physical CPU, container at 50% of YARN CPU.
  FileUtils.deleteQuietly(containerCpuDir);
  yarnConf.setBoolean(
      YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, true);
  yarnConf.setInt(
      YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 50);
  resourcesHandler.initConfig();
  resourcesHandler.init(executor, calculatorPlugin);
  resourcesHandler.preExecute(containerId,
      Resource.newInstance(1024, YarnConfiguration.DEFAULT_NM_VCORES / 2));
  Assert.assertTrue(containerCpuDir.exists());
  Assert.assertTrue(containerCpuDir.isDirectory());
  periodFile = new File(containerCpuDir, "cpu.cfs_period_us");
  quotaFile = new File(containerCpuDir, "cpu.cfs_quota_us");
  Assert.assertTrue(periodFile.exists());
  Assert.assertTrue(quotaFile.exists());
  Assert.assertEquals(1000 * 1000, readIntFromFile(periodFile));
  Assert.assertEquals(1000 * 1000, readIntFromFile(quotaFile));
  FileUtils.deleteQuietly(cgroupDir);
}
Example usage of org.apache.hadoop.yarn.conf.YarnConfiguration in the Apache Hadoop project, taken from class TestNodeManagerHardwareUtils, method testGetContainerCPU.
@Test
public void testGetContainerCPU() {
  YarnConfiguration yarnConf = new YarnConfiguration();
  final int processorCount = 8;
  final int coreCount = 4;
  ResourceCalculatorPlugin calculatorPlugin =
      Mockito.mock(ResourceCalculatorPlugin.class);
  Mockito.doReturn(processorCount).when(calculatorPlugin).getNumProcessors();
  Mockito.doReturn(coreCount).when(calculatorPlugin).getNumCores();

  // A 0% physical-CPU limit is invalid and must be rejected.
  yarnConf.setInt(
      YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 0);
  try {
    NodeManagerHardwareUtils.getContainersCPUs(calculatorPlugin, yarnConf);
    Assert.fail("getContainerCores should have thrown exception");
  } catch (IllegalArgumentException expected) {
    // expected: zero percent of the node's CPU is not usable
  }

  float cpus;
  // 100% of 4 cores -> all 4 cores available to containers.
  yarnConf.setInt(
      YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 100);
  cpus = NodeManagerHardwareUtils.getContainersCPUs(calculatorPlugin, yarnConf);
  Assert.assertEquals(4, (int) cpus);

  // 50% -> 2 cores.
  yarnConf.setInt(
      YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 50);
  cpus = NodeManagerHardwareUtils.getContainersCPUs(calculatorPlugin, yarnConf);
  Assert.assertEquals(2, (int) cpus);

  // 75% -> 3 cores.
  yarnConf.setInt(
      YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 75);
  cpus = NodeManagerHardwareUtils.getContainersCPUs(calculatorPlugin, yarnConf);
  Assert.assertEquals(3, (int) cpus);

  // 85% -> fractional result of 3.4 cores.
  yarnConf.setInt(
      YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 85);
  cpus = NodeManagerHardwareUtils.getContainersCPUs(calculatorPlugin, yarnConf);
  Assert.assertEquals(3.4, cpus, 0.1);

  // Values above 100% are treated as 100% -> all 4 cores.
  yarnConf.setInt(
      YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT, 110);
  cpus = NodeManagerHardwareUtils.getContainersCPUs(calculatorPlugin, yarnConf);
  Assert.assertEquals(4, (int) cpus);
}
Example usage of org.apache.hadoop.yarn.conf.YarnConfiguration in the Apache Hadoop project, taken from class JobHistoryServer, method launchJobHistoryServer.
/**
 * Creates, initializes, and starts a {@link JobHistoryServer} from
 * command-line arguments, registering a shutdown hook for clean stop.
 * On any startup failure the error is logged and the JVM is terminated.
 */
static JobHistoryServer launchJobHistoryServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(
      new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
  JobHistoryServer server = null;
  try {
    server = new JobHistoryServer();
    // Stop the composite service cleanly when the JVM shuts down.
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(server), SHUTDOWN_HOOK_PRIORITY);
    // Wrapping a JobConf pulls in the MapReduce configuration files;
    // GenericOptionsParser then applies command-line overrides onto conf.
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    new GenericOptionsParser(conf, args);
    server.init(conf);
    server.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting JobHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting JobHistoryServer");
  }
  return server;
}
Aggregations