Use of org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService in project hadoop by apache.
From the class TestDiskFailures, method testDirFailuresOnStartup:
/**
 * Make a local and log directory inaccessible during initialization
 * and verify those bad directories are recognized and removed from
 * the list of available local and log directories.
 * @throws IOException
 */
@Test
public void testDirFailuresOnStartup() throws IOException {
  Configuration conf = new YarnConfiguration();
  String localDir1 = new File(testDir, "localDir1").getPath();
  String localDir2 = new File(testDir, "localDir2").getPath();
  String logDir1 = new File(testDir, "logDir1").getPath();
  String logDir2 = new File(testDir, "logDir2").getPath();
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
  conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1 + "," + logDir2);
  prepareDirToFail(localDir1);
  prepareDirToFail(logDir2);
  LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
  dirSvc.init(conf);
  List<String> localDirs = dirSvc.getLocalDirs();
  Assert.assertEquals(1, localDirs.size());
  Assert.assertEquals(new Path(localDir2).toString(), localDirs.get(0));
  List<String> logDirs = dirSvc.getLogDirs();
  Assert.assertEquals(1, logDirs.size());
  Assert.assertEquals(new Path(logDir1).toString(), logDirs.get(0));
}
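The helper prepareDirToFail(dir) used above is not shown in this snippet. A hypothetical reconstruction (an assumption, not necessarily the exact code in TestDiskFailures) is to put a regular file where the directory is expected, so that LocalDirsHandlerService.init(conf) cannot create or use the directory and drops it from the good list:

private void prepareDirToFail(String dir) throws IOException {
  File file = new File(dir);
  if (file.exists()) {
    // Clear anything left over from a previous run.
    FileUtil.fullyDelete(file);
  }
  // A plain file sitting at the directory path makes the dir check fail during init.
  Assert.assertTrue("Could not create file " + dir, file.createNewFile());
}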
Use of org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService in project hadoop by apache.
From the class TestContainerLogsPage, method testContainerLogPageAccess:
@Test(timeout = 10000)
public void testContainerLogPageAccess() throws IOException {
  // SecureIOUtils require Native IO to be enabled. This test will run
  // only if it is enabled.
  assumeTrue(NativeIO.isAvailable());
  String user = "randomUser" + System.currentTimeMillis();
  File absLogDir = null, appDir = null, containerDir = null, syslog = null;
  try {
    // target log directory
    absLogDir = new File("target",
        TestContainerLogsPage.class.getSimpleName() + "LogDir").getAbsoluteFile();
    absLogDir.mkdir();
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.NM_LOG_DIRS, absLogDir.toURI().toString());
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
    healthChecker.init(conf);
    LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
    // Add an application and the corresponding containers
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
    long clusterTimeStamp = 1234;
    ApplicationId appId = BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
    Application app = mock(Application.class);
    when(app.getAppId()).thenReturn(appId);
    // Making sure that application returns a random user. This is required
    // for SecureIOUtils' file owner check.
    when(app.getUser()).thenReturn(user);
    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
    ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
    // Testing secure read access for log files
    // Creating application and container directory and syslog file.
    appDir = new File(absLogDir, appId.toString());
    appDir.mkdir();
    containerDir = new File(appDir, container1.toString());
    containerDir.mkdir();
    syslog = new File(containerDir, "syslog");
    syslog.createNewFile();
    BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(syslog));
    out.write("Log file Content".getBytes());
    out.close();
    Context context = mock(Context.class);
    ConcurrentMap<ApplicationId, Application> appMap =
        new ConcurrentHashMap<ApplicationId, Application>();
    appMap.put(appId, app);
    when(context.getApplications()).thenReturn(appMap);
    ConcurrentHashMap<ContainerId, Container> containers =
        new ConcurrentHashMap<ContainerId, Container>();
    when(context.getContainers()).thenReturn(containers);
    when(context.getLocalDirsHandler()).thenReturn(dirsHandler);
    MockContainer container =
        new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user, appId, 1);
    container.setState(ContainerState.RUNNING);
    context.getContainers().put(container1, container);
    ContainersLogsBlock cLogsBlock = new ContainersLogsBlock(context);
    Map<String, String> params = new HashMap<String, String>();
    params.put(YarnWebParams.CONTAINER_ID, container1.toString());
    params.put(YarnWebParams.CONTAINER_LOG_TYPE, "syslog");
    Injector injector = WebAppTests.testPage(ContainerLogsPage.class,
        ContainersLogsBlock.class, cLogsBlock, params, (Module[]) null);
    PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
    verify(spyPw).write("Exception reading log file. Application submitted by '"
        + user + "' doesn't own requested log file : syslog");
  } finally {
    if (syslog != null) {
      syslog.delete();
    }
    if (containerDir != null) {
      containerDir.delete();
    }
    if (appDir != null) {
      appDir.delete();
    }
    if (absLogDir != null) {
      absLogDir.delete();
    }
  }
}
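The helper createNodeHealthCheckerService(conf) used above is also not shown here. One plausible shape for it (an assumption on my part; the exact NodeHealthCheckerService constructor has changed across Hadoop versions) wires a fresh LocalDirsHandlerService into the health checker so that getDiskHandler() returns it after init:

private NodeHealthCheckerService createNodeHealthCheckerService(Configuration conf) {
  // Assumes the (scriptRunner, dirsHandler) constructor available on recent trunk;
  // older releases used a no-arg constructor that created both services internally.
  NodeHealthScriptRunner scriptRunner = NodeManager.getNodeHealthScriptRunner(conf);
  LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
  return new NodeHealthCheckerService(scriptRunner, dirsHandler);
}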
Use of org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService in project hadoop by apache.
From the class TestContainerLogsPage, method testLogDirWithDriveLetter:
@Test
public void testLogDirWithDriveLetter() throws Exception {
  // To verify that logs paths which include drive letters (Windows)
  // do not lose their drive letter specification
  LocalDirsHandlerService localDirs = mock(LocalDirsHandlerService.class);
  List<String> logDirs = new ArrayList<String>();
  logDirs.add("F:/nmlogs");
  when(localDirs.getLogDirsForRead()).thenReturn(logDirs);
  ApplicationIdPBImpl appId = mock(ApplicationIdPBImpl.class);
  when(appId.toString()).thenReturn("app_id_1");
  ApplicationAttemptIdPBImpl appAttemptId = mock(ApplicationAttemptIdPBImpl.class);
  when(appAttemptId.getApplicationId()).thenReturn(appId);
  ContainerId containerId = mock(ContainerIdPBImpl.class);
  when(containerId.getApplicationAttemptId()).thenReturn(appAttemptId);
  List<File> logDirFiles = ContainerLogsUtils.getContainerLogDirs(containerId, localDirs);
  Assert.assertTrue("logDir lost drive letter " + logDirFiles.get(0),
      logDirFiles.get(0).toString().indexOf("F:" + File.separator + "nmlogs") > -1);
}
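For context, the assertion above relies on ContainerLogsUtils.getContainerLogDirs resolving each directory from getLogDirsForRead() against the container's relative log path. A rough sketch of that expected behavior (an illustration only, with a hypothetical helper name, not the real implementation) looks like:

List<File> expectedContainerLogDirs(String appIdStr, ContainerId containerId,
    LocalDirsHandlerService localDirs) {
  List<File> result = new ArrayList<File>();
  for (String rootLogDir : localDirs.getLogDirsForRead()) {
    // Each candidate is <rootLogDir>/<appId>/<containerId>; building it with
    // java.io.File keeps the "F:" drive prefix intact on Windows.
    result.add(new File(rootLogDir, appIdStr + File.separator + containerId.toString()));
  }
  return result;
}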
Use of org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService in project hadoop by apache.
From the class TestContainerLogsPage, method testContainerLogDirs:
@Test(timeout = 30000)
public void testContainerLogDirs() throws IOException, YarnException {
  File absLogDir = new File("target",
      TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
  String logdirwithFile = absLogDir.toURI().toString();
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
  NodeHealthCheckerService healthChecker = createNodeHealthCheckerService(conf);
  healthChecker.init(conf);
  LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
  NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler,
      new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf);
  // Add an application and the corresponding containers
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(conf);
  String user = "nobody";
  long clusterTimeStamp = 1234;
  ApplicationId appId = BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
  Application app = mock(Application.class);
  when(app.getUser()).thenReturn(user);
  when(app.getAppId()).thenReturn(appId);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
  nmContext.getApplications().put(appId, app);
  MockContainer container =
      new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user, appId, 1);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(container1, container);
  List<File> files = null;
  files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  Assert.assertTrue(!(files.get(0).toString().contains("file:")));
  // After container is completed, it is removed from nmContext
  nmContext.getContainers().remove(container1);
  Assert.assertNull(nmContext.getContainers().get(container1));
  files = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  Assert.assertTrue(!(files.get(0).toString().contains("file:")));
  // Create a new context to check if correct container log dirs are fetched
  // on full disk.
  LocalDirsHandlerService dirsHandlerForFullDisk = spy(dirsHandler);
  // good log dirs are empty and nm log dir is in the full log dir list.
  when(dirsHandlerForFullDisk.getLogDirs()).thenReturn(new ArrayList<String>());
  when(dirsHandlerForFullDisk.getLogDirsForRead())
      .thenReturn(Arrays.asList(new String[] { absLogDir.getAbsolutePath() }));
  nmContext = new NodeManager.NMContext(null, null, dirsHandlerForFullDisk,
      new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf);
  nmContext.getApplications().put(appId, app);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(container1, container);
  List<File> dirs = ContainerLogsUtils.getContainerLogDirs(container1, user, nmContext);
  File containerLogDir = new File(absLogDir, appId + "/" + container1);
  Assert.assertTrue(dirs.contains(containerLogDir));
}
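A note on the full-disk scenario at the end of this test (an inference from the spy setup above, not a statement from the Hadoop documentation): getLogDirs() appears to return only directories currently considered healthy, while getLogDirsForRead() also includes directories that have been marked full, so logs written before the disk filled up remain readable. The spy simulates exactly that state, and the final assertion checks that the container log directory under the full log root is still returned.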
Use of org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService in project hadoop by apache.
From the class TestContainerLogsPage, method testContainerLogFile:
@Test(timeout = 30000)
public void testContainerLogFile() throws IOException, YarnException {
  File absLogDir = new File("target",
      TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
  String logdirwithFile = absLogDir.toURI().toString();
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
  conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 0.0f);
  LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
  dirsHandler.init(conf);
  NMContext nmContext = new NodeManager.NMContext(null, null, dirsHandler,
      new ApplicationACLsManager(conf), new NMNullStateStoreService(), false, conf);
  // Add an application and the corresponding containers
  String user = "nobody";
  long clusterTimeStamp = 1234;
  ApplicationId appId = BuilderUtils.newApplicationId(clusterTimeStamp, 1);
  Application app = mock(Application.class);
  when(app.getUser()).thenReturn(user);
  when(app.getAppId()).thenReturn(appId);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
  nmContext.getApplications().put(appId, app);
  MockContainer container =
      new MockContainer(appAttemptId, new AsyncDispatcher(), conf, user, appId, 1);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(containerId, container);
  File containerLogDir = new File(absLogDir,
      ContainerLaunch.getRelativeContainerLogDir(appId.toString(), containerId.toString()));
  containerLogDir.mkdirs();
  String fileName = "fileName";
  File containerLogFile = new File(containerLogDir, fileName);
  containerLogFile.createNewFile();
  File file = ContainerLogsUtils.getContainerLogFile(containerId, fileName, user, nmContext);
  Assert.assertEquals(containerLogFile.toURI().toString(), file.toURI().toString());
  FileUtil.fullyDelete(absLogDir);
}
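Pulling the common thread of these snippets together, a minimal standalone sketch of the LocalDirsHandlerService lifecycle (the directory values here are placeholders, not paths from the tests above) is:

Configuration conf = new YarnConfiguration();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, "/tmp/nm-local-dir");  // placeholder path
conf.set(YarnConfiguration.NM_LOG_DIRS, "/tmp/nm-log-dir");      // placeholder path

LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
dirsHandler.init(conf);   // validates the configured dirs and drops bad ones
dirsHandler.start();      // starts the periodic disk health monitoring

List<String> goodLocalDirs = dirsHandler.getLocalDirs();         // healthy local dirs only
List<String> readableLogDirs = dirsHandler.getLogDirsForRead();  // includes full-but-readable dirs
// ... hand these directories to whatever component needs them ...
dirsHandler.stop();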