Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
From the class TestLinuxContainerExecutor, the method testLocalUser.
@Test
public void testLocalUser() throws Exception {
    Assume.assumeTrue(shouldRun());
    try {
        // nonsecure default
        Configuration conf = new YarnConfiguration();
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple");
        UserGroupInformation.setConfiguration(conf);
        LinuxContainerExecutor lce = new LinuxContainerExecutor();
        lce.setConf(conf);
        Assert.assertEquals(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER, lce.getRunAsUser("foo"));
        // nonsecure custom setting
        conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, "bar");
        lce = new LinuxContainerExecutor();
        lce.setConf(conf);
        Assert.assertEquals("bar", lce.getRunAsUser("foo"));
        // nonsecure without limits
        conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY, "bar");
        conf.setBoolean(YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS, false);
        lce = new LinuxContainerExecutor();
        lce.setConf(conf);
        Assert.assertEquals("foo", lce.getRunAsUser("foo"));
        // secure
        conf = new YarnConfiguration();
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        lce = new LinuxContainerExecutor();
        lce.setConf(conf);
        Assert.assertEquals("foo", lce.getRunAsUser("foo"));
    } finally {
        Configuration conf = new YarnConfiguration();
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple");
        UserGroupInformation.setConfiguration(conf);
    }
}
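The three phases above pin down getRunAsUser's mapping: under simple authentication with user limits on, every submitter is mapped to the configured local user (yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user, default "nobody"); with limits off, or under Kerberos, containers run as the submitting user. A minimal sketch of that mapping, reimplemented for illustration only (this is not LinuxContainerExecutor's actual code, just the behavior the assertions encode):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RunAsUserSketch {
    // Illustrative reimplementation of the mapping the test asserts.
    static String runAsUser(Configuration conf, boolean secureMode, String submitter) {
        if (secureMode) {
            return submitter; // Kerberos: run containers as the submitting user
        }
        // the limit-users flag defaults to true
        boolean limitUsers = conf.getBoolean(YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS, true);
        if (!limitUsers) {
            return submitter; // limits disabled: keep the submitting user
        }
        // limits enabled: every submitter maps to one configured local user
        return conf.get(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY,
            YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER);
    }
}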
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
From the class TestDeletionService, the method testStopWithDelayedTasks.
@Test
public void testStopWithDelayedTasks() throws Exception {
    DeletionService del = new DeletionService(Mockito.mock(ContainerExecutor.class));
    Configuration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 60);
    try {
        del.init(conf);
        del.start();
        del.delete("dingo", new Path("/does/not/exist"));
    } finally {
        del.stop();
    }
    assertTrue(del.isTerminated());
}
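The 60-second yarn.nodemanager.delete.debug-delay-sec setting guarantees the deletion task is still queued when stop() runs, so the final assertion checks that the service terminates without draining the delayed work. Where asserting immediately after stop() is too strict, a bounded poll on the same isTerminated() method is one option; a hypothetical helper (the name and timeout handling are illustrative, not part of the Hadoop test):

import org.apache.hadoop.yarn.server.nodemanager.DeletionService;

// Hypothetical helper: poll DeletionService.isTerminated() with a time
// budget instead of asserting right after stop().
static boolean awaitTermination(DeletionService del, long timeoutMs)
        throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!del.isTerminated() && System.currentTimeMillis() < deadline) {
        Thread.sleep(50); // small fixed poll interval
    }
    return del.isTerminated();
}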
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
From the class TestDeletionService, the method testRecovery.
@Test
public void testRecovery() throws Exception {
    Random r = new Random();
    long seed = r.nextLong();
    r.setSeed(seed);
    System.out.println("SEED: " + seed);
    List<Path> baseDirs = buildDirs(r, base, 4);
    createDirs(new Path("."), baseDirs);
    List<Path> content = buildDirs(r, new Path("."), 10);
    for (Path b : baseDirs) {
        createDirs(b, content);
    }
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
    conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 1);
    NMMemoryStateStoreService stateStore = new NMMemoryStateStoreService();
    stateStore.init(conf);
    stateStore.start();
    DeletionService del = new DeletionService(new FakeDefaultContainerExecutor(), stateStore);
    try {
        del.init(conf);
        del.start();
        for (Path p : content) {
            assertTrue(lfs.util().exists(new Path(baseDirs.get(0), p)));
            del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p, baseDirs.toArray(new Path[4]));
        }
        // restart the deletion service
        del.stop();
        del = new DeletionService(new FakeDefaultContainerExecutor(), stateStore);
        del.init(conf);
        del.start();
        // verify paths are still eventually deleted
        int msecToWait = 10 * 1000;
        for (Path p : baseDirs) {
            for (Path q : content) {
                Path fp = new Path(p, q);
                while (msecToWait > 0 && lfs.util().exists(fp)) {
                    Thread.sleep(100);
                    msecToWait -= 100;
                }
                assertFalse(lfs.util().exists(fp));
            }
        }
    } finally {
        del.close();
        stateStore.close();
    }
}
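The test persists each pending deletion through NMMemoryStateStoreService, stops the service, and verifies that a freshly constructed DeletionService recovers the stored tasks and eventually removes every path. The inner while loop is a bounded-wait idiom: poll until the condition flips or a shared time budget (here 10 seconds across all paths) runs out. Extracted as a sketch, with an illustrative Condition interface that is not part of the Hadoop test code:

// Illustrative bounded-wait helper; Condition is a stand-in interface.
interface Condition {
    boolean holds() throws Exception;
}

static boolean waitFor(Condition gone, int msecToWait) throws Exception {
    while (msecToWait > 0 && !gone.holds()) {
        Thread.sleep(100);
        msecToWait -= 100;
    }
    return gone.holds();
}

In the test this would read roughly waitFor(() -> !lfs.util().exists(fp), 10 * 1000), with the caveat that the original shares one budget across all paths rather than resetting it per path.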
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
From the class TestLocalDirsHandlerService, the method testValidPathsDirHandlerService.
@Test
public void testValidPathsDirHandlerService() throws Exception {
    Configuration conf = new YarnConfiguration();
    String localDir1 = new File("file:///" + testDir, "localDir1").getPath();
    String localDir2 = new File("hdfs:///" + testDir, "localDir2").getPath();
    conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
    String logDir1 = new File("file:///" + testDir, "logDir1").getPath();
    conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1);
    LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
    try {
        dirSvc.init(conf);
        Assert.fail("Service should have thrown an exception due to wrong URI");
    } catch (YarnRuntimeException e) {
        // expected: the hdfs:// local dir is rejected during init
    }
    Assert.assertEquals("Service should not be inited", STATE.STOPPED, dirSvc.getServiceState());
    dirSvc.close();
}
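LocalDirsHandlerService requires yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs to point at the local filesystem; the hdfs:// entry makes init() throw YarnRuntimeException, and the service stays in STATE.STOPPED. A hypothetical up-front check that mirrors the constraint the test asserts (the helper name is invented for illustration):

import java.net.URI;

// Hypothetical pre-check: reject non-local URIs before configuring
// yarn.nodemanager.local-dirs / yarn.nodemanager.log-dirs.
static void checkLocalDirs(String commaSeparatedDirs) {
    for (String dir : commaSeparatedDirs.split(",")) {
        String scheme = URI.create(dir.trim()).getScheme();
        if (scheme != null && !"file".equals(scheme)) {
            throw new IllegalArgumentException(
                "NM local/log dirs must be on the local filesystem, got: " + dir);
        }
    }
}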
Use of org.apache.hadoop.yarn.conf.YarnConfiguration in project hadoop by apache.
From the class TestLocalDirsHandlerService, the method testGetFullDirs.
@Test
public void testGetFullDirs() throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
    FileContext localFs = FileContext.getLocalFSFileContext(conf);
    String localDir1 = new File(testDir, "localDir1").getPath();
    String localDir2 = new File(testDir, "localDir2").getPath();
    String logDir1 = new File(testDir, "logDir1").getPath();
    String logDir2 = new File(testDir, "logDir2").getPath();
    Path localDir1Path = new Path(localDir1);
    Path logDir1Path = new Path(logDir1);
    FsPermission dirPermissions = new FsPermission((short) 0410);
    localFs.mkdir(localDir1Path, dirPermissions, true);
    localFs.mkdir(logDir1Path, dirPermissions, true);
    conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
    conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1 + "," + logDir2);
    conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 0.0f);
    NodeManagerMetrics nm = NodeManagerMetrics.create();
    LocalDirsHandlerService dirSvc = new LocalDirsHandlerService(nm);
    dirSvc.init(conf);
    Assert.assertEquals(0, dirSvc.getLocalDirs().size());
    Assert.assertEquals(0, dirSvc.getLogDirs().size());
    Assert.assertEquals(1, dirSvc.getDiskFullLocalDirs().size());
    Assert.assertEquals(1, dirSvc.getDiskFullLogDirs().size());
    // check the metrics
    Assert.assertEquals(2, nm.getBadLocalDirs());
    Assert.assertEquals(2, nm.getBadLogDirs());
    Assert.assertEquals(0, nm.getGoodLocalDirsDiskUtilizationPerc());
    Assert.assertEquals(0, nm.getGoodLogDirsDiskUtilizationPerc());
    Assert.assertEquals("", dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOCAL_DIRS));
    Assert.assertEquals("", dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOG_DIRS));
    Assert.assertEquals(localDir1 + "," + localDir2, dirSvc.getConfig().get(YarnConfiguration.NM_LOCAL_DIRS));
    Assert.assertEquals(logDir1 + "," + logDir2, dirSvc.getConfig().get(YarnConfiguration.NM_LOG_DIRS));
    conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 100.0f);
    nm = NodeManagerMetrics.create();
    dirSvc = new LocalDirsHandlerService(nm);
    dirSvc.init(conf);
    Assert.assertEquals(1, dirSvc.getLocalDirs().size());
    Assert.assertEquals(1, dirSvc.getLogDirs().size());
    Assert.assertEquals(0, dirSvc.getDiskFullLocalDirs().size());
    Assert.assertEquals(0, dirSvc.getDiskFullLogDirs().size());
    // check the metrics
    File dir = new File(localDir1);
    int utilizationPerc = (int) ((dir.getTotalSpace() - dir.getUsableSpace()) * 100 / dir.getTotalSpace());
    Assert.assertEquals(1, nm.getBadLocalDirs());
    Assert.assertEquals(1, nm.getBadLogDirs());
    Assert.assertEquals(utilizationPerc, nm.getGoodLocalDirsDiskUtilizationPerc());
    Assert.assertEquals(utilizationPerc, nm.getGoodLogDirsDiskUtilizationPerc());
    Assert.assertEquals(localDir2, dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOCAL_DIRS));
    Assert.assertEquals(logDir2, dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOG_DIRS));
    Assert.assertEquals(localDir1 + "," + localDir2, dirSvc.getConfig().get(YarnConfiguration.NM_LOCAL_DIRS));
    Assert.assertEquals(logDir1 + "," + logDir2, dirSvc.getConfig().get(YarnConfiguration.NM_LOG_DIRS));
    FileUtils.deleteDirectory(new File(localDir1));
    FileUtils.deleteDirectory(new File(localDir2));
    FileUtils.deleteDirectory(new File(logDir1));
    FileUtils.deleteDirectory(new File(logDir2));
    dirSvc.close();
}
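Setting yarn.nodemanager.max-per-disk-utilization-percentage to 0.0 classifies every otherwise-usable directory as full, while the 0410-permission directories count as bad in both passes; at 100.0 the readable directories come back and the metrics report the utilization of the good disks. The expected percentage is derived from java.io.File space queries; the same arithmetic as a standalone sketch (the zero-total guard is an addition not present in the test):

import java.io.File;

// Utilization as the test computes it: used space over total, as a percent.
static int utilizationPercent(File dir) {
    long total = dir.getTotalSpace();
    if (total == 0) {
        return 0; // guard added for the sketch; the test assumes total > 0
    }
    long used = total - dir.getUsableSpace();
    return (int) (used * 100 / total);
}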