
Example 91 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestDFSPermission method testAccessGroupMember.

@Test
public void testAccessGroupMember() throws IOException, InterruptedException {
    FileSystem rootFs = FileSystem.get(conf);
    Path p2 = new Path("/p2");
    rootFs.mkdirs(p2);
    rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
    // 0740: owner rwx, group (GROUP1) read-only, others none
    rootFs.setPermission(p2, new FsPermission((short) 0740));
    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {

        @Override
        public FileSystem run() throws Exception {
            return FileSystem.get(conf);
        }
    });
    // USER1 is a member of GROUP1, so the group READ bit satisfies this check.
    fs.access(p2, FsAction.READ);
    try {
        fs.access(p2, FsAction.EXECUTE);
        fail("The access call should have failed.");
    } catch (AccessControlException e) {
        assertTrue("Permission denied messages must carry the username", e.getMessage().contains(USER1_NAME));
        assertTrue("Permission denied messages must carry the path parent", e.getMessage().contains(p2.getParent().toUri().getPath()));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) AccessControlException(org.apache.hadoop.security.AccessControlException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) Test(org.junit.Test)
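
As a side note, the octal short passed to new FsPermission((short) 0740) above can also be expressed with the FsAction-based constructor. A minimal standalone sketch (the class name FsPermissionOctalDemo is invented for illustration) of why a group member passes the READ check but fails EXECUTE:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionOctalDemo {
    public static void main(String[] args) {
        // 0740: owner rwx, group r--, others ---
        FsPermission fromOctal = new FsPermission((short) 0740);
        // Equivalent construction from explicit FsAction triples.
        FsPermission fromActions = new FsPermission(FsAction.ALL, FsAction.READ, FsAction.NONE);
        System.out.println(fromOctal);                     // rwxr-----
        System.out.println(fromOctal.equals(fromActions)); // true
        // A group member therefore passes a READ check but fails EXECUTE,
        // which is what the test asserts via fs.access().
        System.out.println(fromOctal.getGroupAction().implies(FsAction.READ));    // true
        System.out.println(fromOctal.getGroupAction().implies(FsAction.EXECUTE)); // false
    }
}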

Example 92 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestDFSPermission method testOwnership.

/* check ownership is set correctly for a file or directory */
private void testOwnership(OpType op) throws Exception {
    // case 1: the superuser creates a file/directory
    fs = FileSystem.get(conf);
    create(op, FILE_DIR_PATH, DEFAULT_UMASK, new FsPermission(DEFAULT_PERMISSION));
    checkOwnership(FILE_DIR_PATH, SUPERUSER.getShortUserName(), getGroup(FILE_DIR_PATH.getParent()));
    // case 2: superuser changes FILE_DIR_PATH's owner to be <user1, group3>
    setOwner(FILE_DIR_PATH, USER1.getShortUserName(), GROUP3_NAME, false);
    // case 3: user1 changes FILE_DIR_PATH's owner to be user2
    fs = DFSTestUtil.login(fs, conf, USER1);
    setOwner(FILE_DIR_PATH, USER2.getShortUserName(), null, true);
    // case 4: user1 changes FILE_DIR_PATH's group to group1, to which it belongs
    setOwner(FILE_DIR_PATH, null, GROUP1_NAME, false);
    // case 5: user1 changes FILE_DIR_PATH's group to group3, to which it does not belong
    setOwner(FILE_DIR_PATH, null, GROUP3_NAME, true);
    // case 6: user2 (non-owner) changes FILE_DIR_PATH's group to group3
    fs = DFSTestUtil.login(fs, conf, USER2);
    setOwner(FILE_DIR_PATH, null, GROUP3_NAME, true);
    // case 7: user2 (non-owner) changes FILE_DIR_PATH's owner to user2
    setOwner(FILE_DIR_PATH, USER2.getShortUserName(), null, true);
    // delete the file/directory
    fs = DFSTestUtil.login(fs, conf, SUPERUSER);
    fs.delete(FILE_DIR_PATH, true);
}
Also used : FsPermission(org.apache.hadoop.fs.permission.FsPermission)
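
The create, setOwner, and checkOwnership calls above are private helpers of TestDFSPermission and are not shown in this excerpt. Assuming checkOwnership simply reads the path's FileStatus back, a rough equivalent using only the public FileSystem API might look like this:

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import static org.junit.Assert.assertEquals;

final class OwnershipCheck {
    // Reads the owner and group of a path back from the namenode and
    // compares them with the expected values.
    static void checkOwnership(FileSystem fs, Path path, String expectedOwner, String expectedGroup)
            throws IOException {
        FileStatus status = fs.getFileStatus(path);
        assertEquals(expectedOwner, status.getOwner());
        assertEquals(expectedGroup, status.getGroup());
    }
}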

Example 93 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestDFSPermission method testPermissionCheckingPerUser.

/* Check if namenode performs permission checking correctly
   * for the given user for operations mkdir, open, setReplication,
   * getFileInfo, isDirectory, exists, getContentLength, list, rename,
   * and delete */
private void testPermissionCheckingPerUser(UserGroupInformation ugi, short[] ancestorPermission, short[] parentPermission, short[] filePermission, Path[] parentDirs, Path[] files, Path[] dirs) throws Exception {
    boolean[] isDirEmpty = new boolean[NUM_TEST_PERMISSIONS];
    fs = DFSTestUtil.login(fs, conf, SUPERUSER);
    for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
        create(OpType.CREATE, files[i]);
        create(OpType.MKDIRS, dirs[i]);
        fs.setOwner(files[i], USER1_NAME, GROUP2_NAME);
        fs.setOwner(dirs[i], USER1_NAME, GROUP2_NAME);
        checkOwnership(dirs[i], USER1_NAME, GROUP2_NAME);
        checkOwnership(files[i], USER1_NAME, GROUP2_NAME);
        FsPermission fsPermission = new FsPermission(filePermission[i]);
        fs.setPermission(files[i], fsPermission);
        fs.setPermission(dirs[i], fsPermission);
        isDirEmpty[i] = (fs.listStatus(dirs[i]).length == 0);
    }
    fs = DFSTestUtil.login(fs, conf, ugi);
    for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
        testCreateMkdirs(ugi, new Path(parentDirs[i], FILE_DIR_NAME), ancestorPermission[i], parentPermission[i]);
        testOpen(ugi, files[i], ancestorPermission[i], parentPermission[i], filePermission[i]);
        testSetReplication(ugi, files[i], ancestorPermission[i], parentPermission[i], filePermission[i]);
        testSetTimes(ugi, files[i], ancestorPermission[i], parentPermission[i], filePermission[i]);
        testStats(ugi, files[i], ancestorPermission[i], parentPermission[i]);
        testList(ugi, files[i], dirs[i], ancestorPermission[i], parentPermission[i], filePermission[i]);
        int next = i == NUM_TEST_PERMISSIONS - 1 ? 0 : i + 1;
        testRename(ugi, files[i], files[next], ancestorPermission[i], parentPermission[i], ancestorPermission[next], parentPermission[next]);
        testDeleteFile(ugi, files[i], ancestorPermission[i], parentPermission[i]);
        testDeleteDir(ugi, dirs[i], ancestorPermission[i], parentPermission[i], filePermission[i], null, isDirEmpty[i]);
    }
    // test a non-existent file
    checkNonExistentFile();
}
Also used : Path(org.apache.hadoop.fs.Path) FsPermission(org.apache.hadoop.fs.permission.FsPermission)
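
The ancestorPermission, parentPermission, and filePermission arrays hold raw mode shorts that are wrapped in FsPermission before being applied. A small sketch of that round trip (the literal 0644 and the umask are illustrative values, not taken from the test):

import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionRoundTrip {
    public static void main(String[] args) {
        // Wrap a raw octal mode in an FsPermission, as the test does per entry.
        FsPermission perm = new FsPermission((short) 0644);
        System.out.println(perm);           // rw-r--r--
        System.out.println(perm.toShort()); // 420, the decimal value of octal 0644
        // Apply a umask, as HDFS does when creating new files and directories.
        FsPermission umask = new FsPermission((short) 0022);
        System.out.println(perm.applyUMask(umask)); // rw-r--r-- (0644 has no group/other write bits to mask)
    }
}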

Example 94 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestDFSPermission method testTrashPermission.

@Test(timeout = 30000)
public void testTrashPermission() throws Exception {
    //  /BSS                  user1:group2 777
    //   /BSS/user1            user1:group2 755
    //   /BSS/user1/test       user1:group1 600
    Path rootDir = new Path("/BSS");
    Path user1Dir = new Path("/BSS/user1");
    Path user1File = new Path("/BSS/user1/test");
    try {
        conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "10");
        fs = FileSystem.get(conf);
        fs.mkdirs(rootDir);
        fs.setPermission(rootDir, new FsPermission((short) 0777));
        fs = DFSTestUtil.login(fs, conf, USER1);
        fs.mkdirs(user1Dir);
        fs.setPermission(user1Dir, new FsPermission((short) 0755));
        fs.setOwner(user1Dir, USER1.getShortUserName(), GROUP2_NAME);
        create(OpType.CREATE, user1File);
        fs.setOwner(user1File, USER1.getShortUserName(), GROUP1_NAME);
        fs.setPermission(user1File, new FsPermission((short) 0600));
        try {
            // log in as user2 and attempt to delete /BSS/user1;
            // this should fail because user2 has no permission on
            // its subdirectory.
            fs = DFSTestUtil.login(fs, conf, USER2);
            fs.delete(user1Dir, true);
            fail("User2 should not be allowed to delete user1's dir.");
        } catch (AccessControlException e) {
            e.printStackTrace();
            assertTrue("Permission denied messages must carry the username", e.getMessage().contains(USER2_NAME));
        }
        // ensure the /BSS/user1 still exists
        assertTrue(fs.exists(user1Dir));
        try {
            fs = DFSTestUtil.login(fs, conf, SUPERUSER);
            Trash trash = new Trash(fs, conf);
            Path trashRoot = trash.getCurrentTrashDir(user1Dir);
            while (true) {
                trashRoot = trashRoot.getParent();
                if (trashRoot.getParent().isRoot()) {
                    break;
                }
            }
            fs.mkdirs(trashRoot);
            fs.setPermission(trashRoot, new FsPermission((short) 0777));
            // log in as user2 and attempt to move /BSS/user1 to trash;
            // this should also fail, otherwise the directory would be
            // removed by the trash emptier (which runs as the superuser)
            fs = DFSTestUtil.login(fs, conf, USER2);
            Trash userTrash = new Trash(fs, conf);
            assertTrue(userTrash.isEnabled());
            userTrash.moveToTrash(user1Dir);
            fail("User2 should not be allowed to move" + "user1's dir to trash");
        } catch (IOException e) {
            // expect the exception is caused by permission denied
            assertTrue(e.getCause() instanceof AccessControlException);
            e.printStackTrace();
            assertTrue("Permission denied messages must carry the username", e.getCause().getMessage().contains(USER2_NAME));
        }
        // ensure /BSS/user1 still exists
        assertTrue(fs.exists(user1Dir));
    } finally {
        fs = DFSTestUtil.login(fs, conf, SUPERUSER);
        fs.delete(rootDir, true);
        conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "0");
    }
}
Also used : Path(org.apache.hadoop.fs.Path) AccessControlException(org.apache.hadoop.security.AccessControlException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) IOException(java.io.IOException) Trash(org.apache.hadoop.fs.Trash) Test(org.junit.Test)
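
For context, the Trash behaviour exercised above is controlled entirely by fs.trash.interval. A minimal, hedged sketch of enabling trash and moving a path into it (the path /tmp/demo is a placeholder, not taken from the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class TrashSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Trash is only active when fs.trash.interval is greater than zero
        // (the value is the checkpoint lifetime in minutes).
        conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "10");
        FileSystem fs = FileSystem.get(conf);

        Trash trash = new Trash(fs, conf);
        if (trash.isEnabled()) {
            // Moves the path into the caller's trash directory instead of
            // deleting it; a permission failure surfaces as an IOException
            // caused by an AccessControlException, which is what the test
            // above expects when USER2 tries this on another user's directory.
            boolean moved = trash.moveToTrash(new Path("/tmp/demo"));
            System.out.println("moved to trash: " + moved);
        }
    }
}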

Example 95 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class FSXAttrBaseTest method testGetXAttrs.

/**
   * getxattr tests. Test that getxattr throws an exception if any of
   * the following are true:
   * an xattr that was requested doesn't exist
   * the caller specifies an unknown namespace
   * the caller doesn't have access to the namespace
   * the caller doesn't have permission to get the value of the xattr
   * the caller does not have search access to the parent directory
   * the caller has only read access to the owning directory
   * the caller has only search access to the owning directory and
   * execute/search access to the actual entity
   * the caller does not have search access to the owning directory and read
   * access to the actual entity
   */
@Test(timeout = 120000)
public void testGetXAttrs() throws Exception {
    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    // The namespace prefix is matched case-insensitively, so "USER.a2" resolves to name2 ("user.a2").
    final byte[] theValue = fs.getXAttr(path, "USER.a2");
    Assert.assertArrayEquals(value2, theValue);
    /* An XAttr that was requested does not exist. */
    try {
        final byte[] value = fs.getXAttr(path, name3);
        Assert.fail("expected IOException");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("At least one of the attributes provided was not found.", e);
    }
    /* Throw an exception if an xattr that was requested does not exist. */
    {
        final List<String> names = Lists.newArrayList();
        names.add(name1);
        names.add(name2);
        names.add(name3);
        try {
            final Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
            Assert.fail("expected IOException");
        } catch (IOException e) {
            GenericTestUtils.assertExceptionContains("At least one of the attributes provided was not found.", e);
        }
    }
    fs.removeXAttr(path, name1);
    fs.removeXAttr(path, name2);
    /* Unknown namespace should throw an exception. */
    try {
        final byte[] xattr = fs.getXAttr(path, "wackynamespace.foo");
        Assert.fail("expected IOException");
    } catch (Exception e) {
        GenericTestUtils.assertExceptionContains("An XAttr name must be prefixed with " + "user/trusted/security/system/raw, " + "followed by a '.'", e);
    }
    /*
     * The 'trusted' namespace should not be accessible and should throw an
     * exception.
     */
    final UserGroupInformation user = UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
    fs.setXAttr(path, "trusted.foo", "1234".getBytes());
    try {
        user.doAs(new PrivilegedExceptionAction<Object>() {

            @Override
            public Object run() throws Exception {
                final FileSystem userFs = dfsCluster.getFileSystem();
                final byte[] xattr = userFs.getXAttr(path, "trusted.foo");
                return null;
            }
        });
        Assert.fail("expected IOException");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
    }
    fs.setXAttr(path, name1, "1234".getBytes());
    /*
     * Test that an exception is thrown if the caller doesn't have permission to
     * get the value of the xattr.
     */
    /* Set access so that only the owner has access. */
    fs.setPermission(path, new FsPermission((short) 0700));
    try {
        user.doAs(new PrivilegedExceptionAction<Object>() {

            @Override
            public Object run() throws Exception {
                final FileSystem userFs = dfsCluster.getFileSystem();
                final byte[] xattr = userFs.getXAttr(path, name1);
                return null;
            }
        });
        Assert.fail("expected IOException");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Permission denied", e);
    }
    /*
     * The caller must have search access to the parent directory.
     */
    final Path childDir = new Path(path, "child" + pathCount);
    /* Set access to parent so that only the owner has access. */
    FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short) 0700));
    fs.setXAttr(childDir, name1, "1234".getBytes());
    try {
        user.doAs(new PrivilegedExceptionAction<Object>() {

            @Override
            public Object run() throws Exception {
                final FileSystem userFs = dfsCluster.getFileSystem();
                final byte[] xattr = userFs.getXAttr(childDir, name1);
                return null;
            }
        });
        Assert.fail("expected IOException");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Permission denied", e);
    }
    /* Check that read access to the owning directory is not good enough. */
    fs.setPermission(path, new FsPermission((short) 0704));
    try {
        user.doAs(new PrivilegedExceptionAction<Object>() {

            @Override
            public Object run() throws Exception {
                final FileSystem userFs = dfsCluster.getFileSystem();
                final byte[] xattr = userFs.getXAttr(childDir, name1);
                return null;
            }
        });
        Assert.fail("expected IOException");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Permission denied", e);
    }
    /*
     * Check that search access to the owning directory and search/execute
     * access to the actual entity with extended attributes is not good enough.
     */
    fs.setPermission(path, new FsPermission((short) 0701));
    fs.setPermission(childDir, new FsPermission((short) 0701));
    try {
        user.doAs(new PrivilegedExceptionAction<Object>() {

            @Override
            public Object run() throws Exception {
                final FileSystem userFs = dfsCluster.getFileSystem();
                final byte[] xattr = userFs.getXAttr(childDir, name1);
                return null;
            }
        });
        Assert.fail("expected IOException");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Permission denied", e);
    }
    /*
     * Check that search access to the owning directory and read access to
     * the actual entity with the extended attribute is good enough.
     */
    fs.setPermission(path, new FsPermission((short) 0701));
    fs.setPermission(childDir, new FsPermission((short) 0704));
    user.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            final FileSystem userFs = dfsCluster.getFileSystem();
            final byte[] xattr = userFs.getXAttr(childDir, name1);
            return null;
        }
    });
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) List(java.util.List) IOException(java.io.IOException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Map(java.util.Map) HadoopIllegalArgumentException(org.apache.hadoop.HadoopIllegalArgumentException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
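
Setting aside the permission checks, the basic xattr calls used throughout the test follow one simple pattern. A compact sketch (the path /xattr-demo and the attribute name are placeholders) assuming a FileSystem obtained from the configuration:

import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.FsPermission;

public class XAttrSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path dir = new Path("/xattr-demo");
        // Owner-only directory, like the 0700 case exercised in the test.
        FileSystem.mkdirs(fs, dir, FsPermission.createImmutable((short) 0700));

        // CREATE fails if the xattr already exists; REPLACE fails if it does not.
        fs.setXAttr(dir, "user.a1", "hello".getBytes(), EnumSet.of(XAttrSetFlag.CREATE));
        byte[] value = fs.getXAttr(dir, "user.a1");   // single attribute
        List<String> names = fs.listXAttrs(dir);      // all visible names
        Map<String, byte[]> all = fs.getXAttrs(dir);  // all visible name/value pairs
        fs.removeXAttr(dir, "user.a1");

        System.out.println(new String(value) + " " + names + " " + all.keySet());
    }
}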

Aggregations

FsPermission (org.apache.hadoop.fs.permission.FsPermission) 427
Path (org.apache.hadoop.fs.Path) 267
Test (org.junit.Test) 180
IOException (java.io.IOException) 120
FileSystem (org.apache.hadoop.fs.FileSystem) 93
Configuration (org.apache.hadoop.conf.Configuration) 89
FileStatus (org.apache.hadoop.fs.FileStatus) 87
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 52
AccessControlException (org.apache.hadoop.security.AccessControlException) 43
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation) 36
FileNotFoundException (java.io.FileNotFoundException) 33
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 29
File (java.io.File) 26
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 26
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 26
AclEntry (org.apache.hadoop.fs.permission.AclEntry) 25
ArrayList (java.util.ArrayList) 22
HashMap (java.util.HashMap) 19
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 16
URI (java.net.URI) 15