Use of org.apache.hadoop.fs.permission.AclStatus in project hadoop by apache.
The class JsonUtil, method toJsonString.
/** Convert an AclStatus object to a JSON string. */
public static String toJsonString(final AclStatus status) {
  if (status == null) {
    return null;
  }
  final Map<String, Object> m = new TreeMap<String, Object>();
  m.put("owner", status.getOwner());
  m.put("group", status.getGroup());
  m.put("stickyBit", status.isStickyBit());
  final List<String> stringEntries = new ArrayList<>();
  for (AclEntry entry : status.getEntries()) {
    stringEntries.add(entry.toStringStable());
  }
  m.put("entries", stringEntries);
  FsPermission perm = status.getPermission();
  if (perm != null) {
    m.put("permission", toString(perm));
    if (perm.getAclBit()) {
      m.put("aclBit", true);
    }
    if (perm.getEncryptedBit()) {
      m.put("encBit", true);
    }
  }
  final Map<String, Map<String, Object>> finalMap =
      new TreeMap<String, Map<String, Object>>();
  finalMap.put(AclStatus.class.getSimpleName(), m);
  try {
    return MAPPER.writeValueAsString(finalMap);
  } catch (IOException ignored) {
    // Serializing a plain map should not fail; fall through and return null.
  }
  return null;
}
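A minimal usage sketch follows; the owner, group, and single ACL entry are illustrative values, not part of the original snippet, and it assumes the surrounding JsonUtil class is accessible:

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;

// Build a small AclStatus and serialize it with the method above.
// Expected shape: {"AclStatus":{"entries":[...],"group":"supergroup",...}}
AclStatus status = new AclStatus.Builder()
    .owner("hdfs")
    .group("supergroup")
    .stickyBit(false)
    .addEntry(new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.USER)
        .setName("foo")
        .setPermission(FsAction.READ_WRITE)
        .build())
    .build();
String json = JsonUtil.toJsonString(status);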
Use of org.apache.hadoop.fs.permission.AclStatus in project hadoop by apache.
The class TestExtendedAcls, method testRestrictAtSubDir.
/**
 * Verify that a sub-directory can restrict the ACL it inherits from its parent.
 * Create a parent dir and set its default ACL to allow foo and bar full access.
 * Create a sub dir and set its default ACL to restrict bar to no access.
 *
 * parent dir/file can be viewed by foo
 * parent dir/child dir/file can be viewed by foo
 * parent dir/child dir/file cannot be viewed by bar
 *
 * @throws Exception
 */
@Test
public void testRestrictAtSubDir() throws Exception {
  Path parent = new Path("/testRestrictAtSubDir");
  hdfs.mkdirs(parent);
  List<AclEntry> aclsParent = Lists.newArrayList(
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, "bar", ALL));
  hdfs.setAcl(parent, aclsParent);
  AclEntry[] parentDirExpectedAcl = new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, "bar", ALL),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
  AclStatus parentAcl = hdfs.getAclStatus(parent);
  assertArrayEquals(parentDirExpectedAcl, parentAcl.getEntries().toArray());
  Path parentFile = new Path(parent, "parentFile");
  hdfs.create(parentFile).close();
  hdfs.setPermission(parentFile, new FsPermission((short) 0640));
  AclEntry[] parentFileExpectedAcl = new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, GROUP, "bar", ALL) };
  AclStatus parentFileAcl = hdfs.getAclStatus(parentFile);
  assertArrayEquals(parentFileExpectedAcl, parentFileAcl.getEntries().toArray());
  Path childDir = new Path(parent, "childDir");
  hdfs.mkdirs(childDir);
  List<AclEntry> newAclsChild = Lists.newArrayList(
      aclEntry(DEFAULT, GROUP, "bar", NONE));
  hdfs.modifyAclEntries(childDir, newAclsChild);
  AclEntry[] childDirExpectedAcl = new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, GROUP, "bar", ALL),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, "bar", NONE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
  AclStatus childDirAcl = hdfs.getAclStatus(childDir);
  assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
  Path childFile = new Path(childDir, "childFile");
  hdfs.create(childFile).close();
  hdfs.setPermission(childFile, new FsPermission((short) 0640));
  AclEntry[] childFileExpectedAcl = new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, GROUP, "bar", NONE) };
  AclStatus childFileAcl = hdfs.getAclStatus(childFile);
  assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
  // child file should not be accessible for bar group
  assertFalse(tryAccess(childFile, "barUser", new String[] { "bar" }, READ));
  // child file should be accessible for foo user
  assertTrue(tryAccess(childFile, "foo", new String[] { "fooGroup" }, READ));
  // parent file should be accessible for bar group
  assertTrue(tryAccess(parentFile, "barUser", new String[] { "bar" }, READ));
  // parent file should be accessible for foo user
  assertTrue(tryAccess(parentFile, "foo", new String[] { "fooGroup" }, READ));
  hdfs.delete(parent, true);
}
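The aclEntry(...) factory calls come from an HDFS test utility (AclTestHelpers), and the DEFAULT/ACCESS, USER/GROUP/MASK/OTHER, and ALL/READ_EXECUTE/NONE constants are static imports of AclEntryScope, AclEntryType, and FsAction. A minimal local equivalent of the named-entry overload, assuming only the standard AclEntry.Builder API, would be:

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

// Builds a named entry such as user:foo or group:bar; an unnamed overload
// would simply omit setName.
static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
    String name, FsAction permission) {
  return new AclEntry.Builder()
      .setScope(scope)
      .setType(type)
      .setName(name)
      .setPermission(permission)
      .build();
}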
Use of org.apache.hadoop.fs.permission.AclStatus in project hadoop by apache.
The class AdlFileSystem, method getAclStatus.
/**
 * Gets the ACL of a file or directory.
 *
 * @param path Path to the file or directory
 * @return AclStatus describing the ACL of the file or directory
 * @throws IOException if an ACL could not be read
 */
@Override
public AclStatus getAclStatus(final Path path) throws IOException {
  statistics.incrementReadOps(1);
  com.microsoft.azure.datalake.store.acl.AclStatus adlStatus =
      adlClient.getAclStatus(toRelativeFilePath(path), oidOrUpn);
  // Translate the ADL SDK's AclStatus into Hadoop's AclStatus representation.
  AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
  aclStatusBuilder.owner(adlStatus.owner);
  aclStatusBuilder.group(adlStatus.group);
  // octalPermissions is a string such as "750"; parse it as a base-8 short.
  aclStatusBuilder.setPermission(
      new FsPermission(Short.valueOf(adlStatus.octalPermissions, 8)));
  aclStatusBuilder.stickyBit(adlStatus.stickyBit);
  String aclListString =
      com.microsoft.azure.datalake.store.acl.AclEntry.aclListToString(adlStatus.aclSpec);
  List<AclEntry> aclEntries = AclEntry.parseAclSpec(aclListString, true);
  aclStatusBuilder.addEntries(aclEntries);
  return aclStatusBuilder.build();
}
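A hedged usage sketch follows; the adl:// account URI and the path are placeholders, not values from the original code:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclStatus;

Configuration conf = new Configuration();
// Placeholder account; a real client also needs ADL credentials configured.
FileSystem fs = FileSystem.get(
    URI.create("adl://example.azuredatalakestore.net"), conf);
AclStatus acl = fs.getAclStatus(new Path("/data/sample"));
System.out.println(acl.getOwner() + ":" + acl.getGroup()
    + " sticky=" + acl.isStickyBit() + " entries=" + acl.getEntries());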
Use of org.apache.hadoop.fs.permission.AclStatus in project hadoop by apache.
The class TestPBHelper, method testAclStatusProto.
@Test
public void testAclStatusProto() {
  AclEntry e = new AclEntry.Builder()
      .setName("test")
      .setPermission(FsAction.READ_EXECUTE)
      .setScope(AclEntryScope.DEFAULT)
      .setType(AclEntryType.OTHER)
      .build();
  AclStatus s = new AclStatus.Builder()
      .owner("foo")
      .group("bar")
      .addEntry(e)
      .build();
  Assert.assertEquals(s, PBHelperClient.convert(PBHelperClient.convert(s)));
}
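The nested convert calls are two different PBHelperClient overloads: the inner call maps the AclStatus to its protobuf representation and the outer call maps it back, so the assertion verifies that the round trip is lossless (which in turn relies on AclStatus implementing equals()).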
Use of org.apache.hadoop.fs.permission.AclStatus in project hive by apache.
The class TestMetaStoreMultipleEncryptionZones, method testCmRootAclPermissions.
@Test
public void testCmRootAclPermissions() throws Exception {
  HiveConf hiveConfAclPermissions = new HiveConf(TestReplChangeManager.class);
  hiveConfAclPermissions.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true);
  hiveConfAclPermissions.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60);
  hiveConfAclPermissions.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname,
      "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort()
          + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal);
  String cmRootAclPermissions = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort()
      + "/cmRootAclPermissions";
  hiveConfAclPermissions.set(HiveConf.ConfVars.REPLCMDIR.varname, cmRootAclPermissions);
  Warehouse warehouseCmPermissions = new Warehouse(hiveConfAclPermissions);
  FileSystem cmfs = new Path(cmRootAclPermissions).getFileSystem(hiveConfAclPermissions);
  cmfs.mkdirs(warehouseCmPermissions.getWhRoot());
  FileSystem fsWarehouse =
      warehouseCmPermissions.getWhRoot().getFileSystem(hiveConfAclPermissions);
  // change the group of the warehouse for testing
  Path warehouse =
      new Path(hiveConfAclPermissions.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname));
  fsWarehouse.setOwner(warehouse, null, "testgroup");
  long now = System.currentTimeMillis();
  Path dirDb = new Path(warehouseCmPermissions.getWhRoot(), "db_perm");
  fsWarehouse.delete(dirDb, true);
  fsWarehouse.mkdirs(dirDb);
  Path dirTbl1 = new Path(dirDb, "tbl1");
  fsWarehouse.mkdirs(dirTbl1);
  EncryptionZoneUtils.createEncryptionZone(dirTbl1, "test_key_db", conf);
  Path part11 = new Path(dirTbl1, "part1");
  createFile(part11, "testClearer11");
  String fileChksum11 = ReplChangeManager.checksumFor(part11, fsWarehouse);
  Path part12 = new Path(dirTbl1, "part2");
  createFile(part12, "testClearer12");
  String fileChksum12 = ReplChangeManager.checksumFor(part12, fsWarehouse);
  Path dirTbl2 = new Path(dirDb, "tbl2");
  fsWarehouse.mkdirs(dirTbl2);
  EncryptionZoneUtils.createEncryptionZone(dirTbl2, "test_key_db", conf);
  Path part21 = new Path(dirTbl2, "part1");
  createFile(part21, "testClearer21");
  String fileChksum21 = ReplChangeManager.checksumFor(part21, fsWarehouse);
  Path part22 = new Path(dirTbl2, "part2");
  createFile(part22, "testClearer22");
  String fileChksum22 = ReplChangeManager.checksumFor(part22, fsWarehouse);
  Path dirTbl3 = new Path(dirDb, "tbl3");
  fsWarehouse.mkdirs(dirTbl3);
  EncryptionZoneUtils.createEncryptionZone(dirTbl3, "test_key_cm", conf);
  Path part31 = new Path(dirTbl3, "part1");
  createFile(part31, "testClearer31");
  String fileChksum31 = ReplChangeManager.checksumFor(part31, fsWarehouse);
  Path part32 = new Path(dirTbl3, "part2");
  createFile(part32, "testClearer32");
  String fileChksum32 = ReplChangeManager.checksumFor(part32, fsWarehouse);
  final UserGroupInformation proxyUserUgi =
      UserGroupInformation.createUserForTesting("impala", new String[] { "testgroup" });
  fsWarehouse.setOwner(dirDb, "impala", "default");
  fsWarehouse.setOwner(dirTbl1, "impala", "default");
  fsWarehouse.setOwner(dirTbl2, "impala", "default");
  fsWarehouse.setOwner(dirTbl3, "impala", "default");
  fsWarehouse.setOwner(part11, "impala", "default");
  fsWarehouse.setOwner(part12, "impala", "default");
  fsWarehouse.setOwner(part21, "impala", "default");
  fsWarehouse.setOwner(part22, "impala", "default");
  fsWarehouse.setOwner(part31, "impala", "default");
  fsWarehouse.setOwner(part32, "impala", "default");
  proxyUserUgi.doAs((PrivilegedExceptionAction<Void>) () -> {
    try {
      // impala doesn't have access itself, but it belongs to a group that has
      // access through the ACL.
      ReplChangeManager.getInstance(hiveConfAclPermissions)
          .recycle(dirTbl1, RecycleType.MOVE, false);
      ReplChangeManager.getInstance(hiveConfAclPermissions)
          .recycle(dirTbl2, RecycleType.MOVE, false);
      ReplChangeManager.getInstance(hiveConfAclPermissions)
          .recycle(dirTbl3, RecycleType.MOVE, true);
    } catch (Exception e) {
      Assert.fail();
    }
    return null;
  });
  String cmEncrypted = hiveConf.get(HiveConf.ConfVars.REPLCMENCRYPTEDDIR.varname, cmrootEncrypted);
  AclStatus aclStatus = fsWarehouse.getAclStatus(new Path(dirTbl1 + Path.SEPARATOR + cmEncrypted));
  AclStatus aclStatus2 = fsWarehouse.getAclStatus(new Path(dirTbl2 + Path.SEPARATOR + cmEncrypted));
  AclStatus aclStatus3 = fsWarehouse.getAclStatus(new Path(dirTbl3 + Path.SEPARATOR + cmEncrypted));
  // each cm dir should carry a group ACL entry matching the warehouse group's
  // permissions
  AclEntry expectedAcl = new AclEntry.Builder()
      .setScope(ACCESS)
      .setType(GROUP)
      .setName("testgroup")
      .setPermission(fsWarehouse.getFileStatus(warehouse).getPermission().getGroupAction())
      .build();
  Assert.assertTrue(aclStatus.getEntries().contains(expectedAcl));
  Assert.assertTrue(aclStatus2.getEntries().contains(expectedAcl));
  Assert.assertTrue(aclStatus3.getEntries().contains(expectedAcl));
  assertTrue(fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfAclPermissions,
      part11.getName(), fileChksum11,
      ReplChangeManager.getInstance(conf).getCmRoot(part11).toString())));
  assertTrue(fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfAclPermissions,
      part12.getName(), fileChksum12,
      ReplChangeManager.getInstance(conf).getCmRoot(part12).toString())));
  assertTrue(fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfAclPermissions,
      part21.getName(), fileChksum21,
      ReplChangeManager.getInstance(conf).getCmRoot(part21).toString())));
  assertTrue(fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfAclPermissions,
      part22.getName(), fileChksum22,
      ReplChangeManager.getInstance(conf).getCmRoot(part22).toString())));
  assertTrue(fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfAclPermissions,
      part31.getName(), fileChksum31,
      ReplChangeManager.getInstance(conf).getCmRoot(part31).toString())));
  assertTrue(fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfAclPermissions,
      part32.getName(), fileChksum32,
      ReplChangeManager.getInstance(conf).getCmRoot(part32).toString())));
  // age the recycled copies of part11, part21, part31 and part32 so the
  // clearer will remove them
  fsWarehouse.setTimes(ReplChangeManager.getCMPath(hiveConfAclPermissions, part11.getName(),
      fileChksum11, ReplChangeManager.getInstance(conf).getCmRoot(part11).toString()),
      now - 7 * 86400 * 1000 * 2, now - 7 * 86400 * 1000 * 2);
  fsWarehouse.setTimes(ReplChangeManager.getCMPath(hiveConfAclPermissions, part21.getName(),
      fileChksum21, ReplChangeManager.getInstance(conf).getCmRoot(part21).toString()),
      now - 7 * 86400 * 1000 * 2, now - 7 * 86400 * 1000 * 2);
  fsWarehouse.setTimes(ReplChangeManager.getCMPath(hiveConfAclPermissions, part31.getName(),
      fileChksum31, ReplChangeManager.getInstance(conf).getCmRoot(part31).toString()),
      now - 7 * 86400 * 1000 * 2, now - 7 * 86400 * 1000 * 2);
  fsWarehouse.setTimes(ReplChangeManager.getCMPath(hiveConfAclPermissions, part32.getName(),
      fileChksum32, ReplChangeManager.getInstance(conf).getCmRoot(part32).toString()),
      now - 7 * 86400 * 1000 * 2, now - 7 * 86400 * 1000 * 2);
  ReplChangeManager.scheduleCMClearer(hiveConfAclPermissions);
  long start = System.currentTimeMillis();
  long end;
  boolean cleared = false;
  do {
    Thread.sleep(200);
    end = System.currentTimeMillis();
    if (end - start > 5000) {
      Assert.fail("timeout, cmroot has not been cleared");
    }
    // the aged copies (part11, part21, part31, part32) should be gone, while
    // the un-aged copies (part12, part22) should remain
    if (!fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfAclPermissions,
            part11.getName(), fileChksum11,
            ReplChangeManager.getInstance(conf).getCmRoot(part11).toString()))
        && fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfAclPermissions,
            part12.getName(), fileChksum12,
            ReplChangeManager.getInstance(conf).getCmRoot(part12).toString()))
        && !fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfAclPermissions,
            part21.getName(), fileChksum21,
            ReplChangeManager.getInstance(conf).getCmRoot(part21).toString()))
        && fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfAclPermissions,
            part22.getName(), fileChksum22,
            ReplChangeManager.getInstance(conf).getCmRoot(part22).toString()))
        && !fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfAclPermissions,
            part31.getName(), fileChksum31,
            ReplChangeManager.getInstance(conf).getCmRoot(part31).toString()))
        && !fsWarehouse.exists(ReplChangeManager.getCMPath(hiveConfAclPermissions,
            part32.getName(), fileChksum32,
            ReplChangeManager.getInstance(conf).getCmRoot(part32).toString()))) {
      cleared = true;
    }
  } while (!cleared);
}
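The recurring assertion in this test, that a path's ACL contains an expected group entry, can be distilled into a small helper. This is a sketch; the helper name hasGroupAcl is ours, not something from the original test:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;

// Hypothetical helper: true if the path's ACL contains an entry exactly
// matching scope ACCESS, type GROUP, the given group name, and permission.
static boolean hasGroupAcl(FileSystem fs, Path p, String group, FsAction perm)
    throws java.io.IOException {
  AclEntry expected = new AclEntry.Builder()
      .setScope(AclEntryScope.ACCESS)
      .setType(AclEntryType.GROUP)
      .setName(group)
      .setPermission(perm)
      .build();
  AclStatus status = fs.getAclStatus(p);
  return status.getEntries().contains(expected);
}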