Use of org.apache.hadoop.fs.permission.AclEntry in project hive by apache.
The class TestStorageBasedMetastoreAuthorizationProviderWithACL, method allowWriteAccessViaAcl.
protected void allowWriteAccessViaAcl(String userName, String location) throws Exception {
    // Set the FS perms to read-only access, and create ACL entries allowing write access.
    List<AclEntry> aclSpec = Lists.newArrayList(
        aclEntry(ACCESS, USER, FsAction.READ_EXECUTE),
        aclEntry(ACCESS, GROUP, FsAction.READ_EXECUTE),
        aclEntry(ACCESS, OTHER, FsAction.READ_EXECUTE),
        aclEntry(ACCESS, USER, userName, FsAction.ALL));
    FileSystem fs = FileSystem.get(new URI(location), clientHiveConf);
    fs.setAcl(new Path(location), aclSpec);
}
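The aclEntry(...) calls above are static test helpers (in the Hadoop tests they come from AclTestHelpers); each one is a thin wrapper over AclEntry.Builder, the same builder used directly in the TestFSImageWithAcl snippet further down. A minimal sketch of the two overloads used here, assuming only the builder API shown in this document:

// Hypothetical local equivalents of the aclEntry helpers used above.
static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, FsAction permission) {
    return new AclEntry.Builder()
        .setScope(scope)           // ACCESS or DEFAULT
        .setType(type)             // USER, GROUP, MASK, or OTHER
        .setPermission(permission)
        .build();
}

static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, String name, FsAction permission) {
    return new AclEntry.Builder()
        .setScope(scope)
        .setType(type)
        .setName(name)             // named user/group entry
        .setPermission(permission)
        .build();
}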
Use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.
The class TestFSImageWithAcl, method doTestDefaultAclNewChildren.
private void doTestDefaultAclNewChildren(boolean persistNamespace) throws IOException {
    Path dirPath = new Path("/dir");
    Path filePath = new Path(dirPath, "file1");
    Path subdirPath = new Path(dirPath, "subdir1");
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.mkdirs(dirPath);
    // Put a default ACL on the parent, then create children that should inherit it.
    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
    fs.setAcl(dirPath, aclSpec);
    fs.create(filePath).close();
    fs.mkdirs(subdirPath);
    // A file copies only the access entries; a subdirectory also copies the default entries.
    AclEntry[] fileExpected = new AclEntry[] {
        aclEntry(ACCESS, USER, "foo", ALL),
        aclEntry(ACCESS, GROUP, READ_EXECUTE) };
    AclEntry[] subdirExpected = new AclEntry[] {
        aclEntry(ACCESS, USER, "foo", ALL),
        aclEntry(ACCESS, GROUP, READ_EXECUTE),
        aclEntry(DEFAULT, USER, ALL),
        aclEntry(DEFAULT, USER, "foo", ALL),
        aclEntry(DEFAULT, GROUP, READ_EXECUTE),
        aclEntry(DEFAULT, MASK, ALL),
        aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
    AclEntry[] fileReturned = fs.getAclStatus(filePath).getEntries().toArray(new AclEntry[0]);
    Assert.assertArrayEquals(fileExpected, fileReturned);
    AclEntry[] subdirReturned = fs.getAclStatus(subdirPath).getEntries().toArray(new AclEntry[0]);
    Assert.assertArrayEquals(subdirExpected, subdirReturned);
    assertPermission(fs, subdirPath, (short) 010755);
    // The inherited entries must survive a restart, with or without a saved namespace.
    restart(fs, persistNamespace);
    fileReturned = fs.getAclStatus(filePath).getEntries().toArray(new AclEntry[0]);
    Assert.assertArrayEquals(fileExpected, fileReturned);
    subdirReturned = fs.getAclStatus(subdirPath).getEntries().toArray(new AclEntry[0]);
    Assert.assertArrayEquals(subdirExpected, subdirReturned);
    assertPermission(fs, subdirPath, (short) 010755);
    // Modifying the parent's default ACL must not retroactively change existing children.
    aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE));
    fs.modifyAclEntries(dirPath, aclSpec);
    fileReturned = fs.getAclStatus(filePath).getEntries().toArray(new AclEntry[0]);
    Assert.assertArrayEquals(fileExpected, fileReturned);
    subdirReturned = fs.getAclStatus(subdirPath).getEntries().toArray(new AclEntry[0]);
    Assert.assertArrayEquals(subdirExpected, subdirReturned);
    assertPermission(fs, subdirPath, (short) 010755);
    restart(fs, persistNamespace);
    fileReturned = fs.getAclStatus(filePath).getEntries().toArray(new AclEntry[0]);
    Assert.assertArrayEquals(fileExpected, fileReturned);
    subdirReturned = fs.getAclStatus(subdirPath).getEntries().toArray(new AclEntry[0]);
    Assert.assertArrayEquals(subdirExpected, subdirReturned);
    assertPermission(fs, subdirPath, (short) 010755);
    // Removing the parent's ACL entirely must also leave existing children untouched.
    fs.removeAcl(dirPath);
    fileReturned = fs.getAclStatus(filePath).getEntries().toArray(new AclEntry[0]);
    Assert.assertArrayEquals(fileExpected, fileReturned);
    subdirReturned = fs.getAclStatus(subdirPath).getEntries().toArray(new AclEntry[0]);
    Assert.assertArrayEquals(subdirExpected, subdirReturned);
    assertPermission(fs, subdirPath, (short) 010755);
    restart(fs, persistNamespace);
    fileReturned = fs.getAclStatus(filePath).getEntries().toArray(new AclEntry[0]);
    Assert.assertArrayEquals(fileExpected, fileReturned);
    subdirReturned = fs.getAclStatus(subdirPath).getEntries().toArray(new AclEntry[0]);
    Assert.assertArrayEquals(subdirExpected, subdirReturned);
    assertPermission(fs, subdirPath, (short) 010755);
}
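The (short) 010755 expected by assertPermission packs two things into one octal literal: the high 01 bit is HDFS's flag that an extended ACL is present on the inode, and the low 0755 holds the regular permission bits, where the group triad actually stores the ACL mask. A quick sanity check of that encoding, using only standard Java (class name hypothetical):

// Decode the packed permission asserted above.
public class AclPermCheck {
    public static void main(String[] args) {
        short packed = (short) 010755;            // as asserted in the test
        boolean aclBit = (packed & 010000) != 0;  // high bit: extended ACL present
        int perms = packed & 07777;               // rwx bits; group triad = ACL mask
        System.out.println(aclBit);                        // true
        System.out.println(Integer.toOctalString(perms));  // 755
    }
}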
Use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.
The class TestFSImageWithAcl, method testAcl.
private void testAcl(boolean persistNamespace) throws IOException {
    Path p = new Path("/p");
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.create(p).close();
    fs.mkdirs(new Path("/23"));
    AclEntry e = new AclEntry.Builder()
        .setName("foo")
        .setPermission(READ_EXECUTE)
        .setScope(ACCESS)
        .setType(USER)
        .build();
    fs.modifyAclEntries(p, Lists.newArrayList(e));
    // The ACL must survive a restart, with or without an explicit namespace save.
    restart(fs, persistNamespace);
    AclStatus s = cluster.getNamesystem().getAclStatus(p.toString());
    AclEntry[] returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
    Assert.assertArrayEquals(new AclEntry[] {
        aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
        aclEntry(ACCESS, GROUP, READ) }, returned);
    fs.removeAcl(p);
    if (persistNamespace) {
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        fs.saveNamespace();
        fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    }
    cluster.restartNameNode();
    cluster.waitActive();
    // After removal, no entries should reappear across the restart.
    s = cluster.getNamesystem().getAclStatus(p.toString());
    returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
    Assert.assertArrayEquals(new AclEntry[] {}, returned);
    // Re-adding the entry works on the restarted NameNode.
    fs.modifyAclEntries(p, Lists.newArrayList(e));
    s = cluster.getNamesystem().getAclStatus(p.toString());
    returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
    Assert.assertArrayEquals(new AclEntry[] {
        aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
        aclEntry(ACCESS, GROUP, READ) }, returned);
}
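The restart(fs, persistNamespace) helper called in both TestFSImageWithAcl methods is not shown here, but the inlined sequence at the end of testAcl suggests its shape. A plausible reconstruction, assuming the same cluster field:

// Hypothetical sketch of the restart helper, mirroring the inlined logic above.
private void restart(DistributedFileSystem fs, boolean persistNamespace) throws IOException {
    if (persistNamespace) {
        // Force the namespace into the fsimage so the restart exercises image
        // loading rather than just edit-log replay.
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        fs.saveNamespace();
        fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    }
    cluster.restartNameNode();
    cluster.waitActive();
}

Calling it with persistNamespace both true and false is what lets one test body cover both the fsimage and edit-log persistence paths for ACLs.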
Use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.
The class TestEditLog, method testResetThreadLocalCachedOps.
/**
* Edit log op instances are cached internally using thread-local storage.
* This test checks that the cached instances are reset in between different
* transactions processed on the same thread, so that we don't accidentally
* apply incorrect attributes to an inode.
*
* @throws IOException if there is an I/O error
*/
@Test
public void testResetThreadLocalCachedOps() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    // Set single handler thread, so all transactions hit same thread-local ops.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 1);
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        fileSys = cluster.getFileSystem();
        // Create /dir1 with a default ACL.
        Path dir1 = new Path("/dir1");
        fileSys.mkdirs(dir1);
        List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
        fileSys.modifyAclEntries(dir1, aclSpec);
        // /dir1/dir2 is expected to clone the default ACL.
        Path dir2 = new Path("/dir1/dir2");
        fileSys.mkdirs(dir2);
        // /dir1/file1 is expected to clone the default ACL.
        Path file1 = new Path("/dir1/file1");
        fileSys.create(file1).close();
        // /dir3 is not a child of /dir1, so must not clone the default ACL.
        Path dir3 = new Path("/dir3");
        fileSys.mkdirs(dir3);
        // /file2 is not a child of /dir1, so must not clone the default ACL.
        Path file2 = new Path("/file2");
        fileSys.create(file2).close();
        // Restart and assert the above stated expectations.
        IOUtils.cleanup(LOG, fileSys);
        cluster.restartNameNode();
        fileSys = cluster.getFileSystem();
        assertFalse(fileSys.getAclStatus(dir1).getEntries().isEmpty());
        assertFalse(fileSys.getAclStatus(dir2).getEntries().isEmpty());
        assertFalse(fileSys.getAclStatus(file1).getEntries().isEmpty());
        assertTrue(fileSys.getAclStatus(dir3).getEntries().isEmpty());
        assertTrue(fileSys.getAclStatus(file2).getEntries().isEmpty());
    } finally {
        IOUtils.cleanup(LOG, fileSys);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
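The bug class this test guards against is generic: a mutable op object cached per thread, whose optional fields (like ACL entries) leak from one logged transaction into the next unless the cache reset clears them. A minimal illustration of the pattern and its fix, with entirely hypothetical names:

// Hypothetical illustration of thread-local op caching and the required reset.
final class CachedMkdirOp {
    String path;
    List<AclEntry> aclEntries; // optional field; stale values must not leak

    private static final ThreadLocal<CachedMkdirOp> CACHE =
        ThreadLocal.withInitial(CachedMkdirOp::new);

    static CachedMkdirOp getInstance() {
        CachedMkdirOp op = CACHE.get();
        // Without this reset, a previous transaction's ACL on the same handler
        // thread would be silently re-applied to the next inode.
        op.reset();
        return op;
    }

    private void reset() {
        path = null;
        aclEntries = null;
    }
}

Pinning the NameNode to one handler thread, as the test does, guarantees every transaction reuses the same cached instance, making any missing reset observable.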
Use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.
The class TestAclWithSnapshot, method doSnapshotContentsChangeAssertions.
private static void doSnapshotContentsChangeAssertions(Path filePath, Path fileSnapshotPath, Path subdirPath, Path subdirSnapshotPath) throws Exception {
    // The live file and subdirectory carry the new ACL: diana has access, bruce does not.
    AclEntry[] expected = new AclEntry[] {
        aclEntry(ACCESS, USER, "diana", ALL),
        aclEntry(ACCESS, GROUP, NONE) };
    AclStatus s = hdfs.getAclStatus(filePath);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short) 010570, filePath);
    assertFilePermissionDenied(fsAsBruce, BRUCE, filePath);
    assertFilePermissionGranted(fsAsDiana, DIANA, filePath);
    s = hdfs.getAclStatus(subdirPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short) 010570, subdirPath);
    assertDirPermissionDenied(fsAsBruce, BRUCE, subdirPath);
    assertDirPermissionGranted(fsAsDiana, DIANA, subdirPath);
    // The snapshot paths still show the pre-change ACL: bruce has access, diana does not.
    expected = new AclEntry[] {
        aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
        aclEntry(ACCESS, GROUP, NONE) };
    s = hdfs.getAclStatus(fileSnapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short) 010550, fileSnapshotPath);
    assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
    assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
    s = hdfs.getAclStatus(subdirSnapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(expected, returned);
    assertPermission((short) 010550, subdirSnapshotPath);
    assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
}
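The setup that these assertions verify can be reproduced by hand: snapshot a directory, then replace an ACL on the live path and read both paths back. A minimal sketch, assuming hdfs is a DistributedFileSystem handle with superuser rights and hypothetical path names:

// Hypothetical reproduction of the snapshot/ACL isolation checked above.
Path dir = new Path("/snapdir");
Path file = new Path(dir, "file1");
hdfs.mkdirs(dir);
hdfs.create(file).close();
// Pre-snapshot ACL: bruce may read and execute.
hdfs.setAcl(file, Lists.newArrayList(
    aclEntry(ACCESS, USER, READ_WRITE),
    aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
    aclEntry(ACCESS, GROUP, NONE),
    aclEntry(ACCESS, OTHER, NONE)));
hdfs.allowSnapshot(dir);
Path snapshotRoot = hdfs.createSnapshot(dir, "s1");
Path fileSnapshotPath = new Path(snapshotRoot, "file1");
// Post-snapshot ACL change: diana replaces bruce on the live path only.
hdfs.setAcl(file, Lists.newArrayList(
    aclEntry(ACCESS, USER, READ_WRITE),
    aclEntry(ACCESS, USER, "diana", ALL),
    aclEntry(ACCESS, GROUP, NONE),
    aclEntry(ACCESS, OTHER, NONE)));
// hdfs.getAclStatus(file) now shows diana; hdfs.getAclStatus(fileSnapshotPath)
// still shows bruce, exactly as the assertions in the method above expect.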