Use of org.apache.hadoop.fs.permission.AclStatus in the Apache Hadoop project: class TestAclWithSnapshot, method testChangeAclExceedsQuota.
@Test
public void testChangeAclExceedsQuota() throws Exception {
  Path filePath = new Path(path, "file1");
  Path fileSnapshotPath = new Path(snapshotPath, "file1");
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0755));
  hdfs.allowSnapshot(path);
  hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
  FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short) 0600)).close();
  hdfs.setPermission(filePath, FsPermission.createImmutable((short) 0600));
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "bruce", READ_WRITE));
  hdfs.modifyAclEntries(filePath, aclSpec);
  hdfs.createSnapshot(path, snapshotName);
  AclStatus s = hdfs.getAclStatus(filePath);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_WRITE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);
  assertPermission((short) 010660, filePath);
  s = hdfs.getAclStatus(fileSnapshotPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_WRITE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);
  assertPermission((short) 010660, filePath);
  aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", READ));
  hdfs.modifyAclEntries(filePath, aclSpec);
}
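The aclEntry(...) calls used throughout these tests come from a static test helper (AclTestHelpers.aclEntry in the Hadoop test sources); the sketch below is an assumption about its shape rather than the actual helper, but it shows how each call maps onto the public AclEntry.Builder API that modifyAclEntries and setAcl consume:

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

// Illustrative only: roughly what aclEntry(ACCESS, USER, "bruce", READ_WRITE)
// and aclEntry(ACCESS, GROUP, NONE) expand to.
public final class AclEntryExamples {

  // Named-user access entry, e.g. aclEntry(ACCESS, USER, "bruce", READ_WRITE).
  static AclEntry namedUserEntry(String name, FsAction permission) {
    return new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)   // ACCESS vs. DEFAULT scope
        .setType(AclEntryType.USER)       // USER, GROUP, MASK or OTHER
        .setName(name)                    // omit the name for the unnamed owner entry
        .setPermission(permission)        // e.g. FsAction.READ_WRITE
        .build();
  }

  // Unnamed group access entry, e.g. aclEntry(ACCESS, GROUP, NONE).
  static AclEntry unnamedGroupEntry(FsAction permission) {
    return new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.GROUP)
        .setPermission(permission)
        .build();
  }
}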
Use of org.apache.hadoop.fs.permission.AclStatus in the Apache Hadoop project: class TestAclWithSnapshot, method testDefaultAclNotCopiedToAccessAclOfNewSnapshot.
@Test
public void testDefaultAclNotCopiedToAccessAclOfNewSnapshot() throws Exception {
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE));
  hdfs.modifyAclEntries(path, aclSpec);
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
  AclStatus s = hdfs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, NONE),
      aclEntry(DEFAULT, MASK, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission((short) 010700, path);
  s = hdfs.getAclStatus(snapshotPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, NONE),
      aclEntry(DEFAULT, MASK, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission((short) 010700, snapshotPath);
  assertDirPermissionDenied(fsAsBruce, BRUCE, snapshotPath);
}
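assertDirPermissionDenied is a private helper of the test class and is not shown on this page. A minimal sketch of what such a check plausibly looks like (its exact name, signature, and behavior are assumptions here) is an operation attempted as the restricted user that must fail with an AccessControlException, in the same method-level style as the tests above:

// Sketch only, assuming a helper of this shape exists in the test class:
// verify that the given user cannot list the directory.
private static void assertDirPermissionDenied(FileSystem fs,
    UserGroupInformation user, Path pathToCheck) throws Exception {
  try {
    fs.listStatus(pathToCheck);
    fail("expected AccessControlException for user " + user + ", path = " + pathToCheck);
  } catch (AccessControlException e) {
    // expected: the effective ACL denies this user
  }
}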
Use of org.apache.hadoop.fs.permission.AclStatus in the Apache Hadoop project: class TestAclWithSnapshot, method testOriginalAclEnforcedForSnapshotRootAfterChange.
@Test
public void testOriginalAclEnforcedForSnapshotRootAfterChange() throws Exception {
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE),
      aclEntry(ACCESS, OTHER, NONE));
  hdfs.setAcl(path, aclSpec);
  assertDirPermissionGranted(fsAsBruce, BRUCE, path);
  assertDirPermissionDenied(fsAsDiana, DIANA, path);
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
  // Both the original directory and the snapshot still have the same ACL.
  AclStatus s = hdfs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);
  assertPermission((short) 010750, path);
  s = hdfs.getAclStatus(snapshotPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);
  assertPermission((short) 010750, snapshotPath);
  assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
  assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
  aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, READ_EXECUTE),
      aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE),
      aclEntry(ACCESS, OTHER, NONE));
  hdfs.setAcl(path, aclSpec);
  // The original has changed, but the snapshot still has the old ACL.
  doSnapshotRootChangeAssertions(path, snapshotPath);
  restart(false);
  doSnapshotRootChangeAssertions(path, snapshotPath);
  restart(true);
  doSnapshotRootChangeAssertions(path, snapshotPath);
}
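doSnapshotRootChangeAssertions is another private helper of the test class that is not shown here (the removal test that follows uses an analogous doSnapshotRootRemovalAssertions). Judging from the ACLs set above, it would plausibly verify that the live directory now reflects the new diana entry while the snapshot root still returns the ACL captured at snapshot time; the sketch below is an assumption in that spirit, not the actual helper:

// Hypothetical sketch, assuming the helper mirrors the assertions above.
private static void doSnapshotRootChangeAssertions(Path path, Path snapshotPath)
    throws Exception {
  // The live directory now carries the new ACL (diana, not bruce).
  AclStatus s = hdfs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);

  // The snapshot root still carries the ACL captured at snapshot time.
  s = hdfs.getAclStatus(snapshotPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);

  // Enforcement follows the stored ACLs: diana on the live path, bruce on the snapshot.
  assertDirPermissionDenied(fsAsBruce, BRUCE, path);
  assertDirPermissionGranted(fsAsDiana, DIANA, path);
  assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
  assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
}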
Use of org.apache.hadoop.fs.permission.AclStatus in the Apache Hadoop project: class TestAclWithSnapshot, method testOriginalAclEnforcedForSnapshotRootAfterRemoval.
@Test
public void testOriginalAclEnforcedForSnapshotRootAfterRemoval() throws Exception {
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE),
      aclEntry(ACCESS, OTHER, NONE));
  hdfs.setAcl(path, aclSpec);
  assertDirPermissionGranted(fsAsBruce, BRUCE, path);
  assertDirPermissionDenied(fsAsDiana, DIANA, path);
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
  // Both the original directory and the snapshot still have the same ACL.
  AclStatus s = hdfs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);
  assertPermission((short) 010750, path);
  s = hdfs.getAclStatus(snapshotPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE) }, returned);
  assertPermission((short) 010750, snapshotPath);
  assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
  assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
  hdfs.removeAcl(path);
  // The original has changed, but the snapshot still has the old ACL.
  doSnapshotRootRemovalAssertions(path, snapshotPath);
  restart(false);
  doSnapshotRootRemovalAssertions(path, snapshotPath);
  restart(true);
  doSnapshotRootRemovalAssertions(path, snapshotPath);
}
Use of org.apache.hadoop.fs.permission.AclStatus in the Apache Hadoop project: class TestJsonUtil, method testToAclStatus.
@Test
public void testToAclStatus() throws IOException {
  String jsonString = "{\"AclStatus\":{\"entries\":[\"user::rwx\",\"user:user1:rw-\",\"group::rw-\",\"other::r-x\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
  ObjectReader reader = new ObjectMapper().readerFor(Map.class);
  Map<?, ?> json = reader.readValue(jsonString);
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "user1", READ_WRITE),
      aclEntry(ACCESS, GROUP, READ_WRITE),
      aclEntry(ACCESS, OTHER, READ_EXECUTE));
  AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
  aclStatusBuilder.owner("testuser");
  aclStatusBuilder.group("supergroup");
  aclStatusBuilder.addEntries(aclSpec);
  aclStatusBuilder.stickyBit(false);
  Assert.assertEquals("Should be equal", aclStatusBuilder.build(),
      JsonUtilClient.toAclStatus(json));
}
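For comparison, the entry strings carried in the JSON ("user::rwx", "user:user1:rw-", and so on) use the same spec syntax accepted by the public AclEntry.parseAclSpec helper. The standalone sketch below (the class name and main method are illustrative only) parses that spec and rebuilds an equivalent AclStatus with the same builder calls as the test:

import java.util.List;

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;

public class AclSpecParseExample {
  public static void main(String[] args) {
    // The same entries the JSON above carries, in the comma-separated spec form
    // understood by AclEntry.parseAclSpec (and by "hdfs dfs -setfacl").
    List<AclEntry> entries =
        AclEntry.parseAclSpec("user::rwx,user:user1:rw-,group::rw-,other::r-x", true);

    // Build an equivalent AclStatus from the parsed entries.
    AclStatus.Builder builder = new AclStatus.Builder();
    builder.owner("testuser");
    builder.group("supergroup");
    builder.stickyBit(false);
    builder.addEntries(entries);
    AclStatus status = builder.build();

    System.out.println(status);
  }
}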