use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
the class TestNonAggregatingLogHandler method testFailedDirLogDeletion.
/*
 * Test to ensure that we handle the cleanup of directories that may not have
 * the application log dirs we're trying to delete, or may have other problems.
 * The test creates 7 log dirs, fails the directory check for 4 of them, and
 * then verifies that we tried to delete only the ones that passed the check.
 */
@Test
public void testFailedDirLogDeletion() throws Exception {
  File[] localLogDirs = getLocalLogDirFiles(this.getClass().getName(), 7);
  final List<String> localLogDirPaths =
      new ArrayList<String>(localLogDirs.length);
  for (int i = 0; i < localLogDirs.length; i++) {
    localLogDirPaths.add(localLogDirs[i].getAbsolutePath());
  }
  String localLogDirsString = StringUtils.join(localLogDirPaths, ",");

  conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
  conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false);
  conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 0L);

  LocalDirsHandlerService mockDirsHandler = mock(LocalDirsHandlerService.class);
  NonAggregatingLogHandler rawLogHandler = new NonAggregatingLogHandler(
      dispatcher, mockDelService, mockDirsHandler,
      new NMNullStateStoreService());
  NonAggregatingLogHandler logHandler = spy(rawLogHandler);

  // Back the handler's local FileContext with a spied AbstractFileSystem so
  // that runMockedFailedDirs can stub per-directory filesystem calls.
  AbstractFileSystem spylfs =
      spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
  FileContext lfs = FileContext.getFileContext(spylfs, conf);
  doReturn(lfs).when(logHandler).getLocalFileContext(isA(Configuration.class));

  logHandler.init(conf);
  logHandler.start();
  runMockedFailedDirs(logHandler, appId, user, mockDelService, mockDirsHandler,
      conf, spylfs, lfs, localLogDirs);
  logHandler.close();
}
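The test hinges on handing the log handler a FileContext built around a Mockito spy of the local AbstractFileSystem, so that individual filesystem calls can be stubbed to simulate bad log directories. A minimal sketch of that pattern, assuming Mockito and the Hadoop FileContext API (the helper name and badDir path are illustrative):

import java.io.FileNotFoundException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;

public final class SpyFileContextSketch {
  // Build a FileContext whose backing AbstractFileSystem is a spy, with one
  // directory stubbed to look missing; all other calls pass through.
  public static FileContext failingDirFileContext(Configuration conf,
      Path badDir) throws Exception {
    AbstractFileSystem spylfs =
        spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
    doThrow(new FileNotFoundException(badDir + " does not exist"))
        .when(spylfs).getFileStatus(badDir);
    return FileContext.getFileContext(spylfs, conf);
  }
}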
use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
the class TestLogAggregationService method testFailedDirsLocalFileDeletionAfterUpload.
/*
 * Test to make sure we handle cases where the directories we get back from
 * the LocalDirsHandler may have issues, including the log dir not being
 * present, as well as other problems. The test reuses helper functions from
 * TestNonAggregatingLogHandler.
 */
@Test
public void testFailedDirsLocalFileDeletionAfterUpload() throws Exception {
  // setup conf and services
  DeletionService mockDelService = mock(DeletionService.class);
  File[] localLogDirs = TestNonAggregatingLogHandler.getLocalLogDirFiles(
      this.getClass().getName(), 7);
  final List<String> localLogDirPaths =
      new ArrayList<String>(localLogDirs.length);
  for (int i = 0; i < localLogDirs.length; i++) {
    localLogDirPaths.add(localLogDirs[i].getAbsolutePath());
  }
  String localLogDirsString = StringUtils.join(localLogDirPaths, ",");

  this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
  this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
      this.remoteRootLogDir.getAbsolutePath());
  this.conf.setLong(YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS, 500);

  ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(application1, 1);

  this.dirsHandler = new LocalDirsHandlerService();
  LocalDirsHandlerService mockDirsHandler = mock(LocalDirsHandlerService.class);
  LogAggregationService logAggregationService = spy(new LogAggregationService(
      dispatcher, this.context, mockDelService, mockDirsHandler));

  // As in testFailedDirLogDeletion, use a FileContext backed by a spied
  // AbstractFileSystem so the shared helper can simulate failing directories.
  AbstractFileSystem spylfs =
      spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
  FileContext lfs = FileContext.getFileContext(spylfs, conf);
  doReturn(lfs).when(logAggregationService)
      .getLocalFileContext(isA(Configuration.class));

  logAggregationService.init(this.conf);
  logAggregationService.start();
  TestNonAggregatingLogHandler.runMockedFailedDirs(logAggregationService,
      application1, user, mockDelService, mockDirsHandler, conf, spylfs, lfs,
      localLogDirs);
  logAggregationService.stop();

  assertEquals(0, logAggregationService.getNumAggregators());
  verify(logAggregationService).closeFileSystems(
      any(UserGroupInformation.class));

  ApplicationEvent[] expectedEvents = new ApplicationEvent[] {
      new ApplicationEvent(appAttemptId.getApplicationId(),
          ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
      new ApplicationEvent(appAttemptId.getApplicationId(),
          ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED) };
  checkEvents(appEventHandler, expectedEvents, true, "getType",
      "getApplicationID");
}
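The final assertion relies on checkEvents, which compares the dispatched ApplicationEvents to the expected ones on the named getter methods. A hypothetical stand-in for that helper (not the actual Hadoop test utility), assuming JUnit and java.lang.reflect, shows what the comparison amounts to:

import java.lang.reflect.Method;
import java.util.List;
import static org.junit.Assert.assertEquals;

public final class EventAssert {
  // Compare actual and expected events pairwise on the getters named by the
  // caller, e.g. "getType" and "getApplicationID".
  public static <T> void assertEventsMatch(List<T> actual, T[] expected,
      String... getters) throws Exception {
    assertEquals("event count", expected.length, actual.size());
    for (int i = 0; i < expected.length; i++) {
      for (String name : getters) {
        Method getter = expected[i].getClass().getMethod(name);
        assertEquals(name, getter.invoke(expected[i]),
            getter.invoke(actual.get(i)));
      }
    }
  }
}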
use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
the class TestFsck method testFsckSymlink.
/** Test fsck with symlinks in the filesystem. */
@Test
public void testFsckSymlink() throws Exception {
  final DFSTestUtil util = new DFSTestUtil.Builder()
      .setName(getClass().getSimpleName()).setNumFiles(1).build();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
  final long precision = 1L;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  FileSystem fs = cluster.getFileSystem();
  final String fileName = "/srcdat";
  util.createFiles(fs, fileName);
  final FileContext fc = FileContext.getFileContext(cluster.getConfiguration(0));
  final Path file = new Path(fileName);
  final Path symlink = new Path("/srcdat-symlink");
  fc.createSymlink(file, symlink, false);
  util.waitReplication(fs, fileName, (short) 3);

  long aTime = fc.getFileStatus(symlink).getAccessTime();
  Thread.sleep(precision);
  setupAuditLogs();
  String outStr = runFsck(conf, 0, true, "/");
  verifyAuditLogs();
  // fsck should not have updated the symlink's access time.
  assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
  System.out.println(outStr);
  assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
  assertTrue(outStr.contains("Total symlinks:\t\t1"));
  util.cleanup(fs, fileName);
}
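For reference, the symlink calls the fsck test exercises, as a minimal standalone sketch assuming a filesystem with symlink support enabled; the paths are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public final class SymlinkSketch {
  public static void main(String[] args) throws Exception {
    // FileContext for the default (fs.defaultFS) filesystem; symlinks must
    // be supported there.
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path target = new Path("/srcdat");
    Path link = new Path("/srcdat-symlink");
    fc.createSymlink(target, link, false /* createParent */);
    // getFileStatus resolves the link, getFileLinkStatus does not.
    System.out.println(fc.getFileStatus(link).isSymlink());     // false
    System.out.println(fc.getFileLinkStatus(link).isSymlink()); // true
  }
}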
use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
the class TestPermissionSymlinks method doRenameLinkTargetNotWritableFC.
private void doRenameLinkTargetNotWritableFC() throws Exception {
  // Rename the link when the target and its parent are not writable.
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws IOException {
      // Renaming the link itself requires no permissions on the link's target.
      FileContext myfc = FileContext.getFileContext(conf);
      Path newlink = new Path(linkParent, "newlink");
      myfc.rename(link, newlink, Rename.NONE);
      Path linkTarget = myfc.getLinkTarget(newlink);
      assertEquals("Expected link's target to match target!", target,
          linkTarget);
      return null;
    }
  });
  assertTrue("Expected target to exist", wrapper.exists(target));
}
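A minimal sketch of the same pattern outside the test harness, assuming the Hadoop UserGroupInformation and FileContext APIs (user, group, and paths are illustrative). The point is that rename moves only the link inode, so it succeeds even when the link's target is not writable:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public final class RenameLinkSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    // Run the rename as an unprivileged user (illustrative user/group names).
    UserGroupInformation user = UserGroupInformation.createUserForTesting(
        "someUser", new String[] { "someGroup" });
    user.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        FileContext fc = FileContext.getFileContext(conf);
        // Moves the link inode only; the (possibly unwritable) target is
        // untouched, and the renamed link still resolves to it.
        fc.rename(new Path("/dir/link"), new Path("/dir/newlink"), Rename.NONE);
        System.out.println(fc.getLinkTarget(new Path("/dir/newlink")));
        return null;
      }
    });
  }
}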
use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
the class TestPermissionSymlinks method testAccess.
@Test
public void testAccess() throws Exception {
  fs.setPermission(target, new FsPermission((short) 0002));
  fs.setAcl(target, Arrays.asList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, NONE),
      aclEntry(ACCESS, USER, user.getShortUserName(), WRITE),
      aclEntry(ACCESS, OTHER, WRITE)));
  FileContext myfc = user.doAs(new PrivilegedExceptionAction<FileContext>() {
    @Override
    public FileContext run() throws IOException {
      return FileContext.getFileContext(conf);
    }
  });

  // The user has WRITE access to the target via the symlink...
  myfc.access(link, FsAction.WRITE);
  // ...but not ALL access.
  try {
    myfc.access(link, FsAction.ALL);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    // expected
  }

  // A path that treats the link's file target as a directory must fail, and
  // the error should name the resolved target rather than the bad path.
  Path badPath = new Path(link, "bad");
  try {
    myfc.access(badPath, FsAction.READ);
    fail("The access call should have failed.");
  } catch (AccessControlException ace) {
    // expected
    String message = ace.getMessage();
    assertTrue(message, message.contains("is not a directory"));
    assertTrue(message.contains(target.toString()));
    assertFalse(message.contains(badPath.toString()));
  }
}
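As the test shows, FileContext.access signals denial by throwing AccessControlException rather than returning a boolean. A small hypothetical convenience wrapper, assuming only the Hadoop APIs used above:

import java.io.IOException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public final class AccessCheck {
  // Probe whether the current user may perform the given action on the path;
  // access() throws on denial, so wrap it in try/catch.
  public static boolean canAccess(FileContext fc, Path path, FsAction action) {
    try {
      fc.access(path, action);
      return true;
    } catch (AccessControlException denied) {
      return false;
    } catch (IOException e) {
      // Path missing, unresolved link, or other I/O problem.
      return false;
    }
  }
}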