use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.
the class AclTransformation method filterAclEntriesByAclSpec.
/**
* Filters (discards) any existing ACL entries that have the same scope, type
* and name as any entry in the ACL spec. If necessary, recalculates the mask
* entries. If necessary, default entries may be inferred by copying the
* permissions of the corresponding access entries. It is invalid to request
* removal of the mask entry from an ACL that would otherwise require a mask
* entry, due to existing named entries or an unnamed group entry.
*
* @param existingAcl List<AclEntry> existing ACL
* @param inAclSpec List<AclEntry> ACL spec describing entries to filter
* @return List<AclEntry> new ACL
* @throws AclException if validation fails
*/
public static List<AclEntry> filterAclEntriesByAclSpec(
    List<AclEntry> existingAcl, List<AclEntry> inAclSpec) throws AclException {
  ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
  ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
  EnumMap<AclEntryScope, AclEntry> providedMask =
      Maps.newEnumMap(AclEntryScope.class);
  EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
  EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
  for (AclEntry existingEntry : existingAcl) {
    if (aclSpec.containsKey(existingEntry)) {
      scopeDirty.add(existingEntry.getScope());
      if (existingEntry.getType() == MASK) {
        maskDirty.add(existingEntry.getScope());
      }
    } else {
      if (existingEntry.getType() == MASK) {
        providedMask.put(existingEntry.getScope(), existingEntry);
      } else {
        aclBuilder.add(existingEntry);
      }
    }
  }
  copyDefaultsIfNeeded(aclBuilder);
  calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
  return buildAndValidateAcl(aclBuilder);
}
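As a rough illustration of the behavior described in the javadoc above (not part of the Hadoop sources; the chosen entries and the direct call into the NameNode-internal AclTransformation class are assumptions made only for this sketch), the snippet below removes the named-user entry for "bruce" from an access ACL and lets the mask, which is not supplied in the spec, be recalculated from the remaining group-class entries:

// Sketch only: placed in the NameNode package because AclTransformation is
// internal to it; this is not how production code would manipulate ACLs.
package org.apache.hadoop.hdfs.server.namenode;

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class FilterAclSpecSketch {
  public static void main(String[] args) throws Exception {
    // Existing ACL: owner, named user "bruce", owning group, mask, other.
    List<AclEntry> existing = Arrays.asList(
        entry(AclEntryType.USER, null, FsAction.ALL),
        entry(AclEntryType.USER, "bruce", FsAction.READ_WRITE),
        entry(AclEntryType.GROUP, null, FsAction.READ_EXECUTE),
        entry(AclEntryType.MASK, null, FsAction.ALL),
        entry(AclEntryType.OTHER, null, FsAction.READ_EXECUTE));
    // The spec only needs scope, type and name to identify entries to discard.
    List<AclEntry> spec = Arrays.asList(
        new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.USER).setName("bruce").build());
    // "bruce" is dropped and, since the spec provides no mask, the mask entry
    // is recalculated from the remaining group-class entries.
    List<AclEntry> filtered =
        AclTransformation.filterAclEntriesByAclSpec(existing, spec);
    System.out.println(filtered);
  }

  private static AclEntry entry(AclEntryType type, String name, FsAction perm) {
    AclEntry.Builder b = new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS).setType(type).setPermission(perm);
    if (name != null) {
      b.setName(name);
    }
    return b.build();
  }
}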
use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.
the class TestAclCommands method testMultipleAclSpecParsingWithoutPermissions.
@Test
public void testMultipleAclSpecParsingWithoutPermissions() throws Exception {
  List<AclEntry> parsedList = AclEntry.parseAclSpec(
      "user::,user:user1:,group::,group:group1:,mask::,other::,"
          + "default:user:user1::,default:mask::", false);
  AclEntry owner = new AclEntry.Builder().setType(AclEntryType.USER).build();
  AclEntry namedUser = new AclEntry.Builder().setType(AclEntryType.USER)
      .setName("user1").build();
  AclEntry group = new AclEntry.Builder().setType(AclEntryType.GROUP).build();
  AclEntry namedGroup = new AclEntry.Builder().setType(AclEntryType.GROUP)
      .setName("group1").build();
  AclEntry mask = new AclEntry.Builder().setType(AclEntryType.MASK).build();
  AclEntry other = new AclEntry.Builder().setType(AclEntryType.OTHER).build();
  AclEntry defaultUser = new AclEntry.Builder().setScope(AclEntryScope.DEFAULT)
      .setType(AclEntryType.USER).setName("user1").build();
  AclEntry defaultMask = new AclEntry.Builder().setScope(AclEntryScope.DEFAULT)
      .setType(AclEntryType.MASK).build();
  List<AclEntry> expectedList = new ArrayList<AclEntry>();
  expectedList.add(owner);
  expectedList.add(namedUser);
  expectedList.add(group);
  expectedList.add(namedGroup);
  expectedList.add(mask);
  expectedList.add(other);
  expectedList.add(defaultUser);
  expectedList.add(defaultMask);
  assertEquals("Parsed Acl not correct", expectedList, parsedList);
}
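Specs parsed with includePermission set to false, as in the test above, are the form consumed by operations that only need to identify entries, for example FileSystem#removeAclEntries. A minimal usage sketch (the path and the user/group names are placeholders chosen for illustration):

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;

public class RemoveAclEntriesSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Permissions are irrelevant when removing entries, so they are omitted.
    List<AclEntry> toRemove = AclEntry.parseAclSpec(
        "user:user1:,group:group1:,default:user:user1:", false);
    fs.removeAclEntries(new Path("/tmp/acl-demo"), toRemove);
  }
}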
use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.
the class TestAclCommands method testMultipleAclSpecParsing.
@Test
public void testMultipleAclSpecParsing() throws Exception {
  List<AclEntry> parsedList = AclEntry.parseAclSpec(
      "group::rwx,user:user1:rwx,user:user2:rw-,"
          + "group:group1:rw-,default:group:group1:rw-", true);
  AclEntry basicAcl = new AclEntry.Builder().setType(AclEntryType.GROUP)
      .setPermission(FsAction.ALL).build();
  AclEntry user1Acl = new AclEntry.Builder().setType(AclEntryType.USER)
      .setPermission(FsAction.ALL).setName("user1").build();
  AclEntry user2Acl = new AclEntry.Builder().setType(AclEntryType.USER)
      .setPermission(FsAction.READ_WRITE).setName("user2").build();
  AclEntry group1Acl = new AclEntry.Builder().setType(AclEntryType.GROUP)
      .setPermission(FsAction.READ_WRITE).setName("group1").build();
  AclEntry defaultAcl = new AclEntry.Builder().setType(AclEntryType.GROUP)
      .setPermission(FsAction.READ_WRITE).setName("group1")
      .setScope(AclEntryScope.DEFAULT).build();
  List<AclEntry> expectedList = new ArrayList<AclEntry>();
  expectedList.add(basicAcl);
  expectedList.add(user1Acl);
  expectedList.add(user2Acl);
  expectedList.add(group1Acl);
  expectedList.add(defaultAcl);
  assertEquals("Parsed Acl not correct", expectedList, parsedList);
}
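When includePermission is true, the parsed entries carry permissions and can be applied directly. A minimal sketch (hypothetical path) of merging such a spec into a path's ACL via FileSystem#modifyAclEntries:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;

public class ModifyAclEntriesSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Same shape of spec as in the test above, with permissions included.
    List<AclEntry> spec = AclEntry.parseAclSpec(
        "user:user1:rwx,group:group1:rw-,default:group:group1:rw-", true);
    // modifyAclEntries merges these entries into the path's existing ACL.
    fs.modifyAclEntries(new Path("/tmp/acl-demo"), spec);
  }
}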
use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.
the class TestExtendedAcls method testDefaultAclNewChildDirFile.
/**
 * Set a default ACL on a directory.
 * A newly created subdirectory must inherit the default ACL entries.
 * A newly created file should receive the corresponding access entries.
 * @throws IOException
 */
@Test
public void testDefaultAclNewChildDirFile() throws IOException {
  Path parent = new Path("/testDefaultAclNewChildDirFile");
  List<AclEntry> acls = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  hdfs.mkdirs(parent);
  hdfs.setAcl(parent, acls);
  // create sub directory
  Path childDir = new Path(parent, "childDir");
  hdfs.mkdirs(childDir);
  // the sub directory should have inherited the default acls
  AclEntry[] childDirExpectedAcl = new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
  AclStatus childDirAcl = hdfs.getAclStatus(childDir);
  assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
  // create sub file
  Path childFile = new Path(parent, "childFile");
  hdfs.create(childFile).close();
  // the sub file should have the access acls derived from the defaults
  AclEntry[] childFileExpectedAcl = new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE) };
  AclStatus childFileAcl = hdfs.getAclStatus(childFile);
  assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
  hdfs.delete(parent, true);
}
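The aclEntry(...) calls in these tests come from a static test helper (AclTestHelpers in the HDFS test tree); conceptually each overload is just shorthand for an AclEntry.Builder chain. A sketch of the idea, with the caveat that the helper's exact overloads may differ:

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

final class AclEntryShorthand {
  // Unnamed entry, e.g. aclEntry(DEFAULT, GROUP, READ_EXECUTE).
  static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, FsAction perm) {
    return new AclEntry.Builder()
        .setScope(scope).setType(type).setPermission(perm).build();
  }

  // Named entry, e.g. aclEntry(ACCESS, USER, "foo", ALL).
  static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, String name,
      FsAction perm) {
    return new AclEntry.Builder()
        .setScope(scope).setType(type).setName(name).setPermission(perm).build();
  }
}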
use of org.apache.hadoop.fs.permission.AclEntry in project hadoop by apache.
the class TestExtendedAcls method testDefaultAclExistingDirFile.
/**
 * Set a default ACL on a directory and verify that existing sub-directories
 * and files keep the ACLs they received at creation time: they are not
 * affected by later changes to, or removal of, the parent's default ACL.
 * @throws Exception
 */
@Test
public void testDefaultAclExistingDirFile() throws Exception {
  Path parent = new Path("/testDefaultAclExistingDirFile");
  hdfs.mkdirs(parent);
  // the old acls
  List<AclEntry> acls1 = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  // the new acls
  List<AclEntry> acls2 =
      Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
  // set parent to old acl
  hdfs.setAcl(parent, acls1);
  Path childDir = new Path(parent, "childDir");
  hdfs.mkdirs(childDir);
  // the sub directory should also have the old acl
  AclEntry[] childDirExpectedAcl = new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
  AclStatus childDirAcl = hdfs.getAclStatus(childDir);
  assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
  Path childFile = new Path(childDir, "childFile");
  // the sub file should also have the old acl
  hdfs.create(childFile).close();
  AclEntry[] childFileExpectedAcl = new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE) };
  AclStatus childFileAcl = hdfs.getAclStatus(childFile);
  assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
  // now change parent to new acls
  hdfs.setAcl(parent, acls2);
  // sub directory and sub file should still have the old acls
  childDirAcl = hdfs.getAclStatus(childDir);
  assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
  childFileAcl = hdfs.getAclStatus(childFile);
  assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
  // now remove the parent acls
  hdfs.removeAcl(parent);
  // sub directory and sub file should still have the old acls
  childDirAcl = hdfs.getAclStatus(childDir);
  assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
  childFileAcl = hdfs.getAclStatus(childFile);
  assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
  // check changing the access mode of the file:
  // mask out the access of group and other for testing
  hdfs.setPermission(childFile, new FsPermission((short) 0640));
  boolean canAccess =
      tryAccess(childFile, "other", new String[] { "other" }, READ);
  assertFalse(canAccess);
  hdfs.delete(parent, true);
}
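tryAccess is another helper defined by this test class. A rough sketch of the idea, built on the real Hadoop APIs UserGroupInformation.createUserForTesting and FileSystem#access; the signature shown here, including the explicit Configuration parameter, is an assumption for illustration:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;

final class AccessCheckSketch {
  // Impersonate a test user and ask the file system whether 'action' is allowed.
  static boolean tryAccess(final Configuration conf, final Path path, String user,
      String[] groups, final FsAction action) throws Exception {
    UserGroupInformation ugi =
        UserGroupInformation.createUserForTesting(user, groups);
    return ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
      @Override
      public Boolean run() throws Exception {
        FileSystem fs = FileSystem.get(conf);
        try {
          fs.access(path, action);
          return true;
        } catch (AccessControlException e) {
          return false;
        }
      }
    });
  }
}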