
Example 1 with AclStatus

Use of org.apache.hadoop.fs.permission.AclStatus in project hadoop by apache.

From the class TestACLFeatures, method getAclStatusAsExpected:

@Test
public void getAclStatusAsExpected() throws URISyntaxException, IOException {
    // Queue a canned ACL-status JSON payload on the mock ADLS REST endpoint.
    getMockServer().enqueue(new MockResponse().setResponseCode(200).setBody(TestADLResponseData.getGetAclStatusJSONResponse()));
    AclStatus aclStatus = getMockAdlFileSystem().getAclStatus(new Path("/test1/test2"));
    // JUnit's assertEquals convention is (expected, actual).
    Assert.assertEquals("supergroup", aclStatus.getGroup());
    Assert.assertEquals("hadoop", aclStatus.getOwner());
    // "775" parsed in radix 8 is the octal mode rwxrwxr-x.
    Assert.assertEquals(Short.valueOf("775", 8), (Short) aclStatus.getPermission().toShort());
    for (AclEntry entry : aclStatus.getEntries()) {
        if (!(entry.toString().equalsIgnoreCase("user:carla:rw-") || entry.toString().equalsIgnoreCase("group::r-x"))) {
            Assert.fail("Unexpected entry : " + entry.toString());
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MockResponse (com.squareup.okhttp.mockwebserver.MockResponse), AclStatus (org.apache.hadoop.fs.permission.AclStatus), AclEntry (org.apache.hadoop.fs.permission.AclEntry), Test (org.junit.Test)
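
For reference, the AclStatus that the mocked JSON response must describe can be assembled with Hadoop's AclStatus.Builder and AclEntry.Builder APIs. A minimal sketch; the owner, group, mode, and entries mirror the assertions above, while the variable name is ours:

AclStatus expected = new AclStatus.Builder()
    .owner("hadoop")
    .group("supergroup")
    .setPermission(new FsPermission((short) 0775)) // same bits Short.valueOf("775", 8) yields
    .addEntry(new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.USER)
        .setName("carla")
        .setPermission(FsAction.READ_WRITE) // toString() renders "user:carla:rw-"
        .build())
    .addEntry(new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.GROUP) // unnamed group entry: "group::r-x"
        .setPermission(FsAction.READ_EXECUTE)
        .build())
    .build();

AclEntry.toString() produces exactly the "user:carla:rw-" / "group::r-x" forms, which is why the loop in the test can match entries by their string representation.

Also used: AclEntryScope (org.apache.hadoop.fs.permission.AclEntryScope), AclEntryType (org.apache.hadoop.fs.permission.AclEntryType), FsAction (org.apache.hadoop.fs.permission.FsAction), FsPermission (org.apache.hadoop.fs.permission.FsPermission)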

Example 2 with AclStatus

Use of org.apache.hadoop.fs.permission.AclStatus in project hadoop by apache.

From the class TestExtendedAcls, method testDefaultAclNewChildDirFile:

/**
   * Set a default ACL on a directory.
   * A newly created subdirectory must have the default ACL entries applied.
   * A newly created file must also have the default ACL entries applied.
   * @throws IOException
   */
@Test
public void testDefaultAclNewChildDirFile() throws IOException {
    Path parent = new Path("/testDefaultAclNewChildDirFile");
    List<AclEntry> acls = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
    hdfs.mkdirs(parent);
    hdfs.setAcl(parent, acls);
    // create sub directory
    Path childDir = new Path(parent, "childDir");
    hdfs.mkdirs(childDir);
    // the sub directory should have the default acls
    AclEntry[] childDirExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
    AclStatus childDirAcl = hdfs.getAclStatus(childDir);
    assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
    // create sub file
    Path childFile = new Path(parent, "childFile");
    hdfs.create(childFile).close();
    // the sub file should have the default acls
    AclEntry[] childFileExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE) };
    AclStatus childFileAcl = hdfs.getAclStatus(childFile);
    assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
    hdfs.delete(parent, true);
}
Also used: Path (org.apache.hadoop.fs.Path), AclStatus (org.apache.hadoop.fs.permission.AclStatus), AclEntry (org.apache.hadoop.fs.permission.AclEntry), Test (org.junit.Test)
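
The aclEntry(...) factory used throughout these tests is a static helper (Hadoop's test sources ship an equivalent in AclTestHelpers). A minimal sketch of the two overloads seen above:

// Named entry, e.g. aclEntry(DEFAULT, USER, "foo", ALL) -> "default:user:foo:rwx"
static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, String name, FsAction permission) {
    return new AclEntry.Builder()
        .setScope(scope)           // ACCESS or DEFAULT
        .setType(type)             // USER, GROUP, MASK, or OTHER
        .setName(name)
        .setPermission(permission)
        .build();
}

// Unnamed entry, e.g. aclEntry(DEFAULT, GROUP, READ_EXECUTE) -> "default:group::r-x"
static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, FsAction permission) {
    return new AclEntry.Builder()
        .setScope(scope)
        .setType(type)
        .setPermission(permission)
        .build();
}

Also used: AclEntryScope (org.apache.hadoop.fs.permission.AclEntryScope), AclEntryType (org.apache.hadoop.fs.permission.AclEntryType), FsAction (org.apache.hadoop.fs.permission.FsAction)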

Example 3 with AclStatus

Use of org.apache.hadoop.fs.permission.AclStatus in project hadoop by apache.

From the class TestExtendedAcls, method testDefaultAclExistingDirFile:

/**
   * Set a default ACL on a directory, then verify that later changing or
   * removing it does not alter the ACLs of existing subdirectories and files.
   * @throws Exception
   */
@Test
public void testDefaultAclExistingDirFile() throws Exception {
    Path parent = new Path("/testDefaultAclExistingDirFile");
    hdfs.mkdirs(parent);
    // the old acls
    List<AclEntry> acls1 = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
    // the new acls
    List<AclEntry> acls2 = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
    // set parent to old acl
    hdfs.setAcl(parent, acls1);
    Path childDir = new Path(parent, "childDir");
    hdfs.mkdirs(childDir);
    // the sub directory should also have the old acl
    AclEntry[] childDirExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
    AclStatus childDirAcl = hdfs.getAclStatus(childDir);
    assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
    Path childFile = new Path(childDir, "childFile");
    // the sub file should also have the old acl
    hdfs.create(childFile).close();
    AclEntry[] childFileExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE) };
    AclStatus childFileAcl = hdfs.getAclStatus(childFile);
    assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
    // now change parent to new acls
    hdfs.setAcl(parent, acls2);
    // sub directory and sub file should still have the old acls
    childDirAcl = hdfs.getAclStatus(childDir);
    assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
    childFileAcl = hdfs.getAclStatus(childFile);
    assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
    // now remove the parent acls
    hdfs.removeAcl(parent);
    // sub directory and sub file should still have the old acls
    childDirAcl = hdfs.getAclStatus(childDir);
    assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
    childFileAcl = hdfs.getAclStatus(childFile);
    assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
    // Change the file's access mode: 0640 removes read access for 'other',
    // so an unrelated user can no longer read the file.
    hdfs.setPermission(childFile, new FsPermission((short) 0640));
    boolean canAccess = tryAccess(childFile, "other", new String[] { "other" }, READ);
    assertFalse(canAccess);
    hdfs.delete(parent, true);
}
Also used: Path (org.apache.hadoop.fs.Path), AclStatus (org.apache.hadoop.fs.permission.AclStatus), AclEntry (org.apache.hadoop.fs.permission.AclEntry), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Test (org.junit.Test)
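
tryAccess(...) is another helper defined by the test class. A minimal sketch of what it plausibly does, assuming the mini-cluster Configuration is in scope as conf (a hypothetical name): run a probe as a freshly minted test user and translate AccessControlException into false.

static boolean tryAccess(final Path path, String user, String[] groups, final FsAction action) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(user, groups);
    return ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
        @Override
        public Boolean run() throws IOException {
            // Re-acquire the FileSystem as the impersonated test user.
            FileSystem fs = FileSystem.get(conf); // conf: assumed mini-cluster config
            try {
                fs.access(path, action); // throws AccessControlException on denial
                return true;
            } catch (AccessControlException e) {
                return false;
            }
        }
    });
}

Also used: UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), AccessControlException (org.apache.hadoop.security.AccessControlException), PrivilegedExceptionAction (java.security.PrivilegedExceptionAction), FileSystem (org.apache.hadoop.fs.FileSystem)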

Example 4 with AclStatus

Use of org.apache.hadoop.fs.permission.AclStatus in project hadoop by apache.

From the class TestExtendedAcls, method testAccessAclNotInherited:

/**
   * Verify that access ACL entries are not inherited by a newly created
   * subdirectory/file; only the default ACL entries are.
   * @throws IOException
   */
@Test
public void testAccessAclNotInherited() throws IOException {
    Path parent = new Path("/testAccessAclNotInherited");
    hdfs.mkdirs(parent);
    // parent has both access and default ACL entries
    List<AclEntry> acls = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), aclEntry(ACCESS, USER, READ_WRITE), aclEntry(ACCESS, GROUP, READ), aclEntry(ACCESS, OTHER, READ), aclEntry(ACCESS, USER, "bar", ALL));
    hdfs.setAcl(parent, acls);
    Path childDir = new Path(parent, "childDir");
    hdfs.mkdirs(childDir);
    // subdirectory should only have the default acl inherited
    AclEntry[] childDirExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ_EXECUTE), aclEntry(ACCESS, GROUP, READ), aclEntry(DEFAULT, USER, READ_WRITE), aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, READ) };
    AclStatus childDirAcl = hdfs.getAclStatus(childDir);
    assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
    Path childFile = new Path(parent, "childFile");
    hdfs.create(childFile).close();
    // sub file should only have the default acl inherited
    AclEntry[] childFileExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ_EXECUTE), aclEntry(ACCESS, GROUP, READ) };
    AclStatus childFileAcl = hdfs.getAclStatus(childFile);
    assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
    hdfs.delete(parent, true);
}
Also used: Path (org.apache.hadoop.fs.Path), AclStatus (org.apache.hadoop.fs.permission.AclStatus), AclEntry (org.apache.hadoop.fs.permission.AclEntry), Test (org.junit.Test)
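
A note on why the expected arrays above never list unnamed user:: or other:: access entries: getAclStatus() returns only the extended ACL entries, while the base user::/other:: entries, and the access mask when an extended ACL is present, are carried in the file's permission bits. A small illustrative probe (variable names are ours):

AclStatus status = hdfs.getAclStatus(childFile);
FsPermission perm = status.getPermission();
System.out.println("user::  " + perm.getUserAction());  // base owner entry
System.out.println("mask::  " + perm.getGroupAction()); // serves as the mask for an extended ACL
System.out.println("other:: " + perm.getOtherAction()); // base other entry
for (AclEntry entry : status.getEntries()) {
    System.out.println("extended: " + entry); // e.g. "user:foo:r-x", "group::r--"
}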

Example 5 with AclStatus

Use of org.apache.hadoop.fs.permission.AclStatus in project hadoop by apache.

From the class TestExtendedAcls, method testGradSubdirMoreAccess:

/**
   * Create a parent dir with a default ACL granting user foo read/execute
   * access. Create a child dir and add a default ACL granting group bar
   * read/write access. A file directly under the parent dir must not be
   * readable by group bar, while a file under the child dir must be
   * readable by both user foo and group bar.
   * @throws Exception
   */
@Test
public void testGradSubdirMoreAccess() throws Exception {
    Path parent = new Path("/testGradSubdirMoreAccess");
    hdfs.mkdirs(parent);
    List<AclEntry> aclsParent = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
    List<AclEntry> aclsChild = Lists.newArrayList(aclEntry(DEFAULT, GROUP, "bar", READ_WRITE));
    hdfs.setAcl(parent, aclsParent);
    AclEntry[] parentDirExpectedAcl = new AclEntry[] { aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, MASK, READ_EXECUTE), aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
    AclStatus parentAcl = hdfs.getAclStatus(parent);
    assertArrayEquals(parentDirExpectedAcl, parentAcl.getEntries().toArray());
    Path childDir = new Path(parent, "childDir");
    hdfs.mkdirs(childDir);
    hdfs.modifyAclEntries(childDir, aclsChild);
    // child dir should inherit the default acls from parent, plus bar group
    AclEntry[] childDirExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ_EXECUTE), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, USER, "foo", READ_EXECUTE), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, GROUP, "bar", READ_WRITE), aclEntry(DEFAULT, MASK, ALL), aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
    AclStatus childDirAcl = hdfs.getAclStatus(childDir);
    assertArrayEquals(childDirExpectedAcl, childDirAcl.getEntries().toArray());
    Path parentFile = new Path(parent, "parentFile");
    hdfs.create(parentFile).close();
    hdfs.setPermission(parentFile, new FsPermission((short) 0640));
    // parent dir/parent file allows foo to access but not bar group
    AclEntry[] parentFileExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ_EXECUTE), aclEntry(ACCESS, GROUP, READ_EXECUTE) };
    AclStatus parentFileAcl = hdfs.getAclStatus(parentFile);
    assertArrayEquals(parentFileExpectedAcl, parentFileAcl.getEntries().toArray());
    Path childFile = new Path(childDir, "childFile");
    hdfs.create(childFile).close();
    hdfs.setPermission(childFile, new FsPermission((short) 0640));
    // child dir/child file allows foo user and bar group to access
    AclEntry[] childFileExpectedAcl = new AclEntry[] { aclEntry(ACCESS, USER, "foo", READ_EXECUTE), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ_WRITE) };
    AclStatus childFileAcl = hdfs.getAclStatus(childFile);
    assertArrayEquals(childFileExpectedAcl, childFileAcl.getEntries().toArray());
    // parent file should not be accessible for bar group
    assertFalse(tryAccess(parentFile, "barUser", new String[] { "bar" }, READ));
    // child file should be accessible for bar group
    assertTrue(tryAccess(childFile, "barUser", new String[] { "bar" }, READ));
    // parent file should be accessible for foo user
    assertTrue(tryAccess(parentFile, "foo", new String[] { "fooGroup" }, READ));
    // child file should be accessible for foo user
    assertTrue(tryAccess(childFile, "foo", new String[] { "fooGroup" }, READ));
    hdfs.delete(parent, true);
}
Also used: Path (org.apache.hadoop.fs.Path), AclStatus (org.apache.hadoop.fs.permission.AclStatus), AclEntry (org.apache.hadoop.fs.permission.AclEntry), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Test (org.junit.Test)
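
Why mode 0640 plays out differently for the two files above: when a file carries an extended ACL, the group bits of its mode act as the ACL mask, and a named entry's effective rights are its granted rights ANDed with that mask. A hedged sketch of the arithmetic with FsAction:

// Mode 0640: the middle octal digit (6 = rw-) becomes the ACL mask.
FsAction mask = new FsPermission((short) 0640).getGroupAction();       // rw-
FsAction fooEffective = FsAction.READ_EXECUTE.and(mask);               // r--: foo can still read
FsAction barEffective = FsAction.READ_WRITE.and(mask);                 // rw-: bar keeps read/write
FsAction otherBits = new FsPermission((short) 0640).getOtherAction();  // ---: plain 'other' is denied

This is why tryAccess succeeds for foo on both files and for group bar on childFile, while parentFile, whose ACL has no bar entry at all, falls through to the empty other bits and denies bar.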

Aggregations

AclStatus (org.apache.hadoop.fs.permission.AclStatus): 96
AclEntry (org.apache.hadoop.fs.permission.AclEntry): 84
Test (org.junit.Test): 76
Path (org.apache.hadoop.fs.Path): 42
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 14
FSAclBaseTest (org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest): 10
FileSystem (org.apache.hadoop.fs.FileSystem): 8
Configuration (org.apache.hadoop.conf.Configuration): 5
FileStatus (org.apache.hadoop.fs.FileStatus): 5
IOException (java.io.IOException): 3
ArrayList (java.util.ArrayList): 3
List (java.util.List): 3
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 3
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 3
Builder (org.apache.hadoop.fs.ContentSummary.Builder): 2
FsShell (org.apache.hadoop.fs.FsShell): 2
MetastoreUnitTest (org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest): 2
AccessControlException (org.apache.hadoop.security.AccessControlException): 2
Pair (alluxio.collections.Pair): 1