
Example 56 with AclStatus

Use of org.apache.hadoop.fs.permission.AclStatus in the Apache Hadoop project.

From class TestAclWithSnapshot, method testChangeAclExceedsQuota.

@Test
public void testChangeAclExceedsQuota() throws Exception {
    Path filePath = new Path(path, "file1");
    Path fileSnapshotPath = new Path(snapshotPath, "file1");
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0755));
    hdfs.allowSnapshot(path);
    hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
    FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short) 0600)).close();
    hdfs.setPermission(filePath, FsPermission.createImmutable((short) 0600));
    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", READ_WRITE));
    hdfs.modifyAclEntries(filePath, aclSpec);
    hdfs.createSnapshot(path, snapshotName);
    AclStatus s = hdfs.getAclStatus(filePath);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_WRITE), aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short) 010660, filePath);
    s = hdfs.getAclStatus(fileSnapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_WRITE), aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short) 010660, filePath);
    aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", READ));
    hdfs.modifyAclEntries(filePath, aclSpec);
}
Also used: Path (org.apache.hadoop.fs.Path), AclStatus (org.apache.hadoop.fs.permission.AclStatus), AclEntry (org.apache.hadoop.fs.permission.AclEntry), FSAclBaseTest (org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest), Test (org.junit.Test)
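
The aclEntry(...) calls used throughout these snippets are static helpers from Hadoop's ACL test utilities (AclTestHelpers); each one wraps org.apache.hadoop.fs.permission.AclEntry.Builder. The following is a hedged sketch of what such a helper amounts to, not a copy of the project's code:

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

// Illustrative helper: builds a named entry such as
// aclEntry(ACCESS, USER, "bruce", READ_WRITE).
static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
        String name, FsAction permission) {
    return new AclEntry.Builder()
        // ACCESS or DEFAULT
        .setScope(scope)
        // USER, GROUP, MASK, or OTHER
        .setType(type)
        // named entry, e.g. "bruce"; unnamed variants simply omit this call
        .setName(name)
        .setPermission(permission)
        .build();
}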

Example 57 with AclStatus

Use of org.apache.hadoop.fs.permission.AclStatus in the Apache Hadoop project.

From class TestAclWithSnapshot, method testDefaultAclNotCopiedToAccessAclOfNewSnapshot.

@Test
public void testDefaultAclNotCopiedToAccessAclOfNewSnapshot() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE));
    hdfs.modifyAclEntries(path, aclSpec);
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    AclStatus s = hdfs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] { aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE), aclEntry(DEFAULT, GROUP, NONE), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned);
    assertPermission((short) 010700, path);
    s = hdfs.getAclStatus(snapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] { aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE), aclEntry(DEFAULT, GROUP, NONE), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE) }, returned);
    assertPermission((short) 010700, snapshotPath);
    assertDirPermissionDenied(fsAsBruce, BRUCE, snapshotPath);
}
Also used: AclStatus (org.apache.hadoop.fs.permission.AclStatus), AclEntry (org.apache.hadoop.fs.permission.AclEntry), FSAclBaseTest (org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest), Test (org.junit.Test)
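
getAclStatus returns ACCESS and DEFAULT entries in a single list, so a caller that only cares about the inheritable defaults asserted above has to filter by scope. A minimal sketch, assuming an existing FileSystem handle fs and a Path dir (both names are placeholders):

// Collect only the DEFAULT-scope entries of a directory's ACL.
AclStatus status = fs.getAclStatus(dir);
List<AclEntry> defaults = new ArrayList<>();
for (AclEntry e : status.getEntries()) {
    if (e.getScope() == AclEntryScope.DEFAULT) {
        defaults.add(e);
    }
}
// For the directory in this test the list would hold the five DEFAULT
// entries asserted above; files and subdirectories created later derive
// their initial ACCESS entries from them.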

Example 58 with AclStatus

Use of org.apache.hadoop.fs.permission.AclStatus in the Apache Hadoop project.

From class TestAclWithSnapshot, method testOriginalAclEnforcedForSnapshotRootAfterChange.

@Test
public void testOriginalAclEnforcedForSnapshotRootAfterChange() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE), aclEntry(ACCESS, OTHER, NONE));
    hdfs.setAcl(path, aclSpec);
    assertDirPermissionGranted(fsAsBruce, BRUCE, path);
    assertDirPermissionDenied(fsAsDiana, DIANA, path);
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    // Both original and snapshot still have same ACL.
    AclStatus s = hdfs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short) 010750, path);
    s = hdfs.getAclStatus(snapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short) 010750, snapshotPath);
    assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
    aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, READ_EXECUTE), aclEntry(ACCESS, USER, "diana", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE), aclEntry(ACCESS, OTHER, NONE));
    hdfs.setAcl(path, aclSpec);
    // Original has changed, but snapshot still has old ACL.
    doSnapshotRootChangeAssertions(path, snapshotPath);
    restart(false);
    doSnapshotRootChangeAssertions(path, snapshotPath);
    restart(true);
    doSnapshotRootChangeAssertions(path, snapshotPath);
}
Also used: AclStatus (org.apache.hadoop.fs.permission.AclStatus), AclEntry (org.apache.hadoop.fs.permission.AclEntry), FSAclBaseTest (org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest), Test (org.junit.Test)
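
doSnapshotRootChangeAssertions is a private helper of the test class and is not reproduced here. From a client's point of view, the property it checks can be approximated by reading both ACLs and confirming they have diverged; a hedged sketch under that assumption:

// After the second setAcl, the live directory and its snapshot copy are
// expected to report different ACL entries.
AclStatus liveStatus = hdfs.getAclStatus(path);
AclStatus snapStatus = hdfs.getAclStatus(snapshotPath);
// The snapshot should still list the "bruce" entry, while the live
// directory now lists "diana" instead.
assertFalse(liveStatus.getEntries().equals(snapStatus.getEntries()));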

Example 59 with AclStatus

Use of org.apache.hadoop.fs.permission.AclStatus in the Apache Hadoop project.

From class TestAclWithSnapshot, method testOriginalAclEnforcedForSnapshotRootAfterRemoval.

@Test
public void testOriginalAclEnforcedForSnapshotRootAfterRemoval() throws Exception {
    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE), aclEntry(ACCESS, OTHER, NONE));
    hdfs.setAcl(path, aclSpec);
    assertDirPermissionGranted(fsAsBruce, BRUCE, path);
    assertDirPermissionDenied(fsAsDiana, DIANA, path);
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    // Both original and snapshot still have same ACL.
    AclStatus s = hdfs.getAclStatus(path);
    AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short) 010750, path);
    s = hdfs.getAclStatus(snapshotPath);
    returned = s.getEntries().toArray(new AclEntry[0]);
    assertArrayEquals(new AclEntry[] { aclEntry(ACCESS, USER, "bruce", READ_EXECUTE), aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short) 010750, snapshotPath);
    assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
    assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
    hdfs.removeAcl(path);
    // Original has changed, but snapshot still has old ACL.
    doSnapshotRootRemovalAssertions(path, snapshotPath);
    restart(false);
    doSnapshotRootRemovalAssertions(path, snapshotPath);
    restart(true);
    doSnapshotRootRemovalAssertions(path, snapshotPath);
}
Also used: AclStatus (org.apache.hadoop.fs.permission.AclStatus), AclEntry (org.apache.hadoop.fs.permission.AclEntry), FSAclBaseTest (org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest), Test (org.junit.Test)
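
Similarly, doSnapshotRootRemovalAssertions is a private helper that is not shown. Assuming only removeAcl(path) has run since the snapshot was taken, the visible effect is roughly this:

// removeAcl strips all extended entries from the live directory, while
// the snapshot copy keeps the ACL it had when it was created.
AclStatus liveStatus = hdfs.getAclStatus(path);
AclStatus snapStatus = hdfs.getAclStatus(snapshotPath);
assertTrue(liveStatus.getEntries().isEmpty());
assertFalse(snapStatus.getEntries().isEmpty());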

Example 60 with AclStatus

Use of org.apache.hadoop.fs.permission.AclStatus in the Apache Hadoop project.

From class TestJsonUtil, method testToAclStatus.

@Test
public void testToAclStatus() throws IOException {
    String jsonString = "{\"AclStatus\":{\"entries\":[\"user::rwx\",\"user:user1:rw-\",\"group::rw-\",\"other::r-x\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
    ObjectReader reader = new ObjectMapper().readerFor(Map.class);
    Map<?, ?> json = reader.readValue(jsonString);
    List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "user1", READ_WRITE), aclEntry(ACCESS, GROUP, READ_WRITE), aclEntry(ACCESS, OTHER, READ_EXECUTE));
    AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
    aclStatusBuilder.owner("testuser");
    aclStatusBuilder.group("supergroup");
    aclStatusBuilder.addEntries(aclSpec);
    aclStatusBuilder.stickyBit(false);
    Assert.assertEquals("Should be equal", aclStatusBuilder.build(), JsonUtilClient.toAclStatus(json));
}
Also used: AclStatus (org.apache.hadoop.fs.permission.AclStatus), AclEntry (org.apache.hadoop.fs.permission.AclEntry), ObjectReader (com.fasterxml.jackson.databind.ObjectReader), ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper), Test (org.junit.Test)
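
The expected entry list in this test is built with the aclEntry helper, but the same four entries could also be produced from the textual spec via AclEntry.parseAclSpec; a short sketch of that alternative:

// Parse a comma-separated ACL spec string into AclEntry objects,
// permissions included, instead of constructing each entry by hand.
List<AclEntry> parsed = AclEntry.parseAclSpec(
    "user::rwx,user:user1:rw-,group::rw-,other::r-x", true);
AclStatus expected = new AclStatus.Builder()
    .owner("testuser")
    .group("supergroup")
    .addEntries(parsed)
    .stickyBit(false)
    .build();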

Aggregations

AclStatus (org.apache.hadoop.fs.permission.AclStatus): 96
AclEntry (org.apache.hadoop.fs.permission.AclEntry): 84
Test (org.junit.Test): 76
Path (org.apache.hadoop.fs.Path): 42
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 14
FSAclBaseTest (org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest): 10
FileSystem (org.apache.hadoop.fs.FileSystem): 8
Configuration (org.apache.hadoop.conf.Configuration): 5
FileStatus (org.apache.hadoop.fs.FileStatus): 5
IOException (java.io.IOException): 3
ArrayList (java.util.ArrayList): 3
List (java.util.List): 3
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 3
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 3
Builder (org.apache.hadoop.fs.ContentSummary.Builder): 2
FsShell (org.apache.hadoop.fs.FsShell): 2
MetastoreUnitTest (org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest): 2
AccessControlException (org.apache.hadoop.security.AccessControlException): 2
Pair (alluxio.collections.Pair): 1