Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestEncryptionZones, method testGetEZAsNonSuperUser.
/**
* Test getEncryptionZoneForPath as a non-super user.
*/
@Test
public void testGetEZAsNonSuperUser() throws Exception {
final UserGroupInformation user = UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
final Path testRoot = new Path("/tmp/TestEncryptionZones");
final Path superPath = new Path(testRoot, "superuseronly");
final Path superPathFile = new Path(superPath, "file1");
final Path allPath = new Path(testRoot, "accessall");
final Path allPathFile = new Path(allPath, "file1");
final Path nonEZDir = new Path(testRoot, "nonEZDir");
final Path nonEZFile = new Path(nonEZDir, "file1");
final Path nonexistent = new Path("/nonexistent");
final int len = 8192;
fsWrapper.mkdir(testRoot, new FsPermission((short) 0777), true);
fsWrapper.mkdir(superPath, new FsPermission((short) 0700), false);
fsWrapper.mkdir(allPath, new FsPermission((short) 0777), false);
fsWrapper.mkdir(nonEZDir, new FsPermission((short) 0777), false);
dfsAdmin.createEncryptionZone(superPath, TEST_KEY, NO_TRASH);
dfsAdmin.createEncryptionZone(allPath, TEST_KEY, NO_TRASH);
dfsAdmin.allowSnapshot(new Path("/"));
final Path newSnap = fs.createSnapshot(new Path("/"));
DFSTestUtil.createFile(fs, superPathFile, len, (short) 1, 0xFEED);
DFSTestUtil.createFile(fs, allPathFile, len, (short) 1, 0xFEED);
DFSTestUtil.createFile(fs, nonEZFile, len, (short) 1, 0xFEED);
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final HdfsAdmin userAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
// Check null arg
try {
userAdmin.getEncryptionZoneForPath(null);
fail("should have thrown NPE");
} catch (NullPointerException e) {
/*
* It would be nice if we could use assertExceptionContains here, but the
* NPE that is thrown has no message text.
*/
}
// Check operation with accessible paths
assertEquals("expected ez path", allPath.toString(), userAdmin.getEncryptionZoneForPath(allPath).getPath().toString());
assertEquals("expected ez path", allPath.toString(), userAdmin.getEncryptionZoneForPath(allPathFile).getPath().toString());
// Check operation with inaccessible (lack of permissions) path
try {
userAdmin.getEncryptionZoneForPath(superPathFile);
fail("expected AccessControlException");
} catch (AccessControlException e) {
assertExceptionContains("Permission denied:", e);
}
assertNull("expected null for nonexistent path", userAdmin.getEncryptionZoneForPath(nonexistent));
// Check operation with non-ez paths
assertNull("expected null for non-ez path", userAdmin.getEncryptionZoneForPath(nonEZDir));
assertNull("expected null for non-ez path", userAdmin.getEncryptionZoneForPath(nonEZFile));
// Check operation with snapshots
String snapshottedAllPath = newSnap.toString() + allPath.toString();
assertEquals("expected ez path", allPath.toString(), userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
/*
* Delete the file from the non-snapshot and test that it is still ok
* in the ez.
*/
fs.delete(allPathFile, false);
assertEquals("expected ez path", allPath.toString(), userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
// Delete the ez and make sure ss's ez is still ok.
fs.delete(allPath, true);
assertEquals("expected ez path", allPath.toString(), userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
assertNull("expected null for deleted file path", userAdmin.getEncryptionZoneForPath(allPathFile));
assertNull("expected null for deleted directory path", userAdmin.getEncryptionZoneForPath(allPath));
return null;
}
});
}
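As a side note on the FsPermission values used throughout this test, the same permission bits can be built either from an octal short (as above) or from explicit FsAction components. A minimal standalone sketch; the class name is illustrative only:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionSketch {
  public static void main(String[] args) {
    // Octal short constructor, as in the test: 0700 = owner-only access.
    FsPermission superOnly = new FsPermission((short) 0700);
    // Equivalent construction from explicit FsAction components.
    FsPermission sameBits = new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
    System.out.println(superOnly.toShort() == sameBits.toShort()); // true
    System.out.println(superOnly); // rwx------
  }
}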
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestExtendedAcls, method testDefaultAclExistingDirFile.
/**
* Set a default ACL on a directory and make sure existing sub dirs/files
* do not pick up the default ACL.
* @throws Exception
*/
@Test
public void testDefaultAclExistingDirFile() throws Exception {
Path parent = new Path("/testDefaultAclExistingDirFile");
hdfs.mkdirs(parent);
// the old acls
List<AclEntry> acls1 = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
// the new acls
List<AclEntry> acls2 = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
// set parent to old acl
hdfs.setAcl(parent, acls1);
Path childDir = new Path(parent, "childDir");
hdfs.mkdirs(childDir);
// the sub directory should also have the old acl
AclEntry[] childDirExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
AclStatus childDirAcl = hdfs.getAclStatus(childDir);
assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
Path childFile = new Path(childDir, "childFile");
// the sub file should also have the old acl
hdfs.create(childFile).close();
AclEntry[] childFileExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE) };
AclStatus childFileAcl = hdfs.getAclStatus(childFile);
assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
// now change parent to new acls
hdfs.setAcl(parent, acls2);
// sub directory and sub file should still have the old acls
childDirAcl = hdfs.getAclStatus(childDir);
assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
childFileAcl = hdfs.getAclStatus(childFile);
assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
// now remove the parent acls
hdfs.removeAcl(parent);
// sub directory and sub file should still have the old acls
childDirAcl = hdfs.getAclStatus(childDir);
assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
childFileAcl = hdfs.getAclStatus(childFile);
assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
// check changing the access mode of the file
// restrict the mode so that "other" has no access, for the check below
hdfs.setPermission(childFile, new FsPermission((short) 0640));
boolean canAccess = tryAccess(childFile, "other", new String[] { "other" }, READ);
assertFalse(canAccess);
hdfs.delete(parent, true);
}
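The tryAccess helper referenced above is defined elsewhere in TestExtendedAcls and is not shown on this page. A rough, hypothetical reconstruction of what such a helper might look like, assuming hdfs is the test's DistributedFileSystem field and the usual FileSystem, UserGroupInformation, FsAction, AccessControlException, and PrivilegedExceptionAction imports:

// Hypothetical sketch; the real helper in TestExtendedAcls may differ in detail.
private boolean tryAccess(final Path path, String user, String[] groups, final FsAction action) throws Exception {
  UserGroupInformation testUgi = UserGroupInformation.createUserForTesting(user, groups);
  return testUgi.doAs(new PrivilegedExceptionAction<Boolean>() {
    @Override
    public Boolean run() throws Exception {
      FileSystem userFs = FileSystem.get(hdfs.getUri(), hdfs.getConf());
      try {
        // FileSystem#access throws AccessControlException when the caller lacks the permission.
        userFs.access(path, action);
        return true;
      } catch (AccessControlException e) {
        return false;
      } finally {
        userFs.close();
      }
    }
  });
}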
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestExtendedAcls, method testGradSubdirMoreAccess.
/**
* Create a parent dir and set a default ACL that grants user foo read/execute access.
* Create a sub dir and set a default ACL that grants group bar read/write access.
* A file directly under the parent dir cannot be viewed/appended by the bar group.
* A file under parent dir/child dir can be viewed/appended by the bar group.
* @throws Exception
*/
@Test
public void testGradSubdirMoreAccess() throws Exception {
Path parent = new Path("/testGradSubdirMoreAccess");
hdfs.mkdirs(parent);
List<AclEntry> aclsParent = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
List<AclEntry> aclsChild = Lists.newArrayList(aclEntry(DEFAULT, GROUP, "bar", READ_WRITE));
hdfs.setAcl(parent, aclsParent);
AclEntry[] parentDirExpectedAcl = new AclEntry[] { aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
AclStatus parentAcl = hdfs.getAclStatus(parent);
assertArrayEquals(parentDirExpectedAcl, parentAcl.getEntries().toArray());
Path childDir = new Path(parent, "childDir");
hdfs.mkdirs(childDir);
hdfs.modifyAclEntries(childDir, aclsChild);
// child dir should inherit the default acls from parent, plus bar group
AclEntry[] childDirExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ_EXECUTE), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, GROUP, "bar", READ_WRITE), aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
AclStatus childDirAcl = hdfs.getAclStatus(childDir);
assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
Path parentFile = new Path(parent, "parentFile");
hdfs.create(parentFile).close();
hdfs.setPermission(parentFile, new FsPermission((short) 0640));
// parent dir/parent file allows foo to access but not bar group
AclEntry[] parentFileExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ_EXECUTE), aclEntry(ACCESS, GROUP, READ_EXECUTE) };
AclStatus parentFileAcl = hdfs.getAclStatus(parentFile);
assertArrayEquals(parentFileExpectedAcl, parentFileAcl.getEntries().toArray());
Path childFile = new Path(childDir, "childFile");
hdfs.create(childFile).close();
hdfs.setPermission(childFile, new FsPermission((short) 0640));
// child dir/child file allows foo user and bar group to access
AclEntry[] childFileExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ_EXECUTE), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ_WRITE) };
AclStatus childFileAcl = hdfs.getAclStatus(childFile);
assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
// parent file should not be accessible for bar group
assertFalse(tryAccess(parentFile, "barUser", new String[] { "bar" }, READ));
// child file should be accessible for bar group
assertTrue(tryAccess(childFile, "barUser", new String[] { "bar" }, READ));
// parent file should be accessible for foo user
assertTrue(tryAccess(parentFile, "foo", new String[] { "fooGroup" }, READ));
// child file should be accessible for foo user
assertTrue(tryAccess(childFile, "foo", new String[] { "fooGroup" }, READ));
hdfs.delete(parent, true);
}
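The aclEntry(...) calls in these tests come from Hadoop's ACL test helpers; each is roughly shorthand for the public AclEntry.Builder API. For example, aclEntry(DEFAULT, GROUP, "bar", READ_WRITE) corresponds approximately to:

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class AclEntrySketch {
  public static void main(String[] args) {
    // Build a default-scope entry for group "bar" with read/write permission.
    AclEntry barDefault = new AclEntry.Builder()
        .setScope(AclEntryScope.DEFAULT)
        .setType(AclEntryType.GROUP)
        .setName("bar")
        .setPermission(FsAction.READ_WRITE)
        .build();
    System.out.println(barDefault); // default:group:bar:rw-
  }
}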
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestEditLogRace, method testSaveImageWhileSyncInProgress.
/**
* The logSync() method in FSEditLog is unsynchronized while syncing
* so that other threads can concurrently enqueue edits while the prior
* sync is ongoing. This test checks that the log is saved correctly
* if the saveImage occurs while the syncing thread is in the unsynchronized middle section.
*
* This replicates the following manual test proposed by Konstantin:
* I start the name-node in debugger.
* I do -mkdir and stop the debugger in logSync() just before it does flush.
* Then I enter safe mode with another client
* I start saveNamespace and stop the debugger in
* FSImage.saveFSImage() -> FSEditLog.createEditLogFile()
* -> EditLogFileOutputStream.create() ->
* after truncating the file but before writing LAYOUT_VERSION into it.
* Then I let logSync() run.
* Then I terminate the name-node.
* After that the name-node won't start, since the edits file is broken.
*/
@Test
public void testSaveImageWhileSyncInProgress() throws Exception {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem = FSNamesystem.loadFromDisk(conf);
try {
FSImage fsimage = namesystem.getFSImage();
FSEditLog editLog = fsimage.getEditLog();
JournalAndStream jas = editLog.getJournals().get(0);
EditLogFileOutputStream spyElos = spy((EditLogFileOutputStream) jas.getCurrentStream());
jas.setCurrentStreamForTests(spyElos);
final AtomicReference<Throwable> deferredException = new AtomicReference<Throwable>();
final CountDownLatch waitToEnterFlush = new CountDownLatch(1);
final Thread doAnEditThread = new Thread() {
@Override
public void run() {
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test", new PermissionStatus("test", "test", new FsPermission((short) 00755)), true);
LOG.info("mkdirs complete");
} catch (Throwable ioe) {
LOG.fatal("Got exception", ioe);
deferredException.set(ioe);
waitToEnterFlush.countDown();
}
}
};
Answer<Void> blockingFlush = new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
LOG.info("Flush called");
if (useAsyncEditLog || Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it to flush section...");
// Signal to main thread that the edit thread is in the racy section
waitToEnterFlush.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME * 1000);
LOG.info("Going through to flush. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("Flush complete");
return null;
}
};
doAnswer(blockingFlush).when(spyElos).flush();
doAnEditThread.start();
// Wait for the edit thread to get to the logsync unsynchronized section
LOG.info("Main thread: waiting to enter flush...");
waitToEnterFlush.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync is in unsynchronized section.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since flush will sleep that long");
long st = Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et = Time.now();
LOG.info("Entered safe mode");
// Make sure we really waited for the flush to complete!
assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
// Once we're in safe mode, save namespace.
namesystem.saveNamespace(0, 0);
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// We did 3 edits: begin, txn, and end
assertEquals(3, verifyEditLogs(namesystem, fsimage, NNStorage.getFinalizedEditsFileName(1, 3), 1));
// after the save, just the one "begin"
assertEquals(1, verifyEditLogs(namesystem, fsimage, NNStorage.getInProgressEditsFileName(4), 4));
} finally {
LOG.info("Closing nn");
if (namesystem != null)
namesystem.close();
}
}
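For reference, the mkdirs call above wraps an FsPermission in a PermissionStatus by hand. A small standalone sketch of that construction; the umask handling is illustrative only and not part of the test, and the class name is my own:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;

public class PermissionStatusSketch {
  public static void main(String[] args) {
    // The test passes an explicit 0755 mode; outside tests, a requested mode is normally
    // combined with the configured umask (fs.permissions.umask-mode, default 022).
    Configuration conf = new Configuration();
    FsPermission requested = new FsPermission((short) 0755);
    FsPermission effective = requested.applyUMask(FsPermission.getUMask(conf));
    PermissionStatus status = new PermissionStatus("test", "test", effective);
    System.out.println(status); // test:test:rwxr-xr-x with the default umask
  }
}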
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestFSPermissionChecker, method setUp.
@Before
public void setUp() throws IOException {
Configuration conf = new Configuration();
FSNamesystem fsn = mock(FSNamesystem.class);
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
FsPermission perm = (FsPermission) args[0];
return new PermissionStatus(SUPERUSER, SUPERGROUP, perm);
}
}).when(fsn).createFsOwnerPermissions(any(FsPermission.class));
dir = new FSDirectory(fsn, conf);
inodeRoot = dir.getRoot();
}
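The permission checks that TestFSPermissionChecker exercises ultimately reduce to FsAction.implies() tests against the owner/group/other triplet of an FsPermission. A simplified, standalone illustration (the real FSPermissionChecker also handles superusers, ACLs, and the sticky bit):

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class ImpliesSketch {
  public static void main(String[] args) {
    FsPermission perm = new FsPermission((short) 0640); // rw-r-----
    System.out.println(perm.getUserAction().implies(FsAction.READ_WRITE)); // true
    System.out.println(perm.getGroupAction().implies(FsAction.READ));      // true
    System.out.println(perm.getOtherAction().implies(FsAction.READ));      // false
  }
}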