Example 26 with UserGroupInformation

Use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.

The class TestAclsEndToEnd, method testGoodWithKeyAcls.

/**
   * Test the full life cycle of a key using a config with key ACLs.
   * The configuration used is the correct configuration to pass the full ACL
   * test in {@link #doFullAclTest()}.
   *
   * @throws Exception thrown on test failure
   */
@Test
public void testGoodWithKeyAcls() throws Exception {
    UserGroupInformation hdfsUgi = UserGroupInformation.createProxyUserForTesting("hdfs", realUgi, new String[] { "supergroup" });
    UserGroupInformation keyadminUgi = UserGroupInformation.createProxyUserForTesting("keyadmin", realUgi, new String[] { "keyadmin" });
    UserGroupInformation userUgi = UserGroupInformation.createProxyUserForTesting("user", realUgi, new String[] { "staff" });
    Configuration conf = getBaseConf(hdfsUgi, keyadminUgi);
    setBlacklistAcls(conf, hdfsUgi);
    setKeyAcls(conf, KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".", hdfsUgi, keyadminUgi, userUgi);
    doFullAclTest(conf, hdfsUgi, keyadminUgi, userUgi);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) KMSConfiguration(org.apache.hadoop.crypto.key.kms.server.KMSConfiguration) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
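
Both this example and the next build proxy UGIs with createProxyUserForTesting, which wraps a real, authenticated UGI (realUgi) so that code run under doAs() is attributed to the named user and groups without requiring Kerberos. A minimal standalone sketch of that pattern (the class name and setup are illustrative, not part of the test):

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUgiSketch {
    public static void main(String[] args) throws Exception {
        // The real (authenticated) user that backs the proxy.
        UserGroupInformation realUgi = UserGroupInformation.getCurrentUser();
        // Test-only proxy UGI: user "hdfs" in group "supergroup".
        UserGroupInformation hdfsUgi = UserGroupInformation
            .createProxyUserForTesting("hdfs", realUgi, new String[] { "supergroup" });
        // Code run under doAs() sees the proxied identity.
        String name = hdfsUgi.doAs(
            (PrivilegedExceptionAction<String>) () ->
                UserGroupInformation.getCurrentUser().getUserName());
        System.out.println("Running as: " + name); // prints "hdfs"
    }
}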

Example 27 with UserGroupInformation

Use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.

The class TestAclsEndToEnd, method testGoodWithWhitelistWithoutBlacklist.

/**
   * Test the full life cycle of a key using a config with whitelist key ACLs
   * and without blacklist ACLs.  The configuration used is the correct
   * configuration to pass the full ACL test in {@link #doFullAclTest()}.
   *
   * @throws Exception thrown on test failure
   */
@Test
public void testGoodWithWhitelistWithoutBlacklist() throws Exception {
    UserGroupInformation hdfsUgi = UserGroupInformation.createProxyUserForTesting("hdfs", realUgi, new String[] { "supergroup" });
    UserGroupInformation keyadminUgi = UserGroupInformation.createProxyUserForTesting("keyadmin", realUgi, new String[] { "keyadmin" });
    UserGroupInformation userUgi = UserGroupInformation.createProxyUserForTesting("user", realUgi, new String[] { "staff" });
    Configuration conf = getBaseConf(hdfsUgi, keyadminUgi);
    setKeyAcls(conf, KMSConfiguration.WHITELIST_KEY_ACL_PREFIX, hdfsUgi, keyadminUgi, userUgi);
    doFullAclTest(conf, hdfsUgi, keyadminUgi, userUgi);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) KMSConfiguration(org.apache.hadoop.crypto.key.kms.server.KMSConfiguration) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
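
The only difference from Example 26 is the prefix passed to setKeyAcls: per-key ACLs use the KeyAuthorizationKeyProvider.KEY_ACL prefix ("key.acl."), which scopes the ACL to one key, while KMSConfiguration.WHITELIST_KEY_ACL_PREFIX ("whitelist.key.acl.") grants access across all keys. A hedged sketch of the resulting property names, using the standard KMS key-operation names; the exact entries written by the test's setKeyAcls() helper are an assumption:

import org.apache.hadoop.conf.Configuration;

public class KmsAclSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // Per-key ACLs (Example 26): key.acl.<keyName>.<OPERATION>
        conf.set("key.acl.key1.MANAGEMENT", "keyadmin");
        conf.set("key.acl.key1.GENERATE_EEK", "hdfs");
        conf.set("key.acl.key1.DECRYPT_EEK", "user");
        // Whitelist ACLs (this example): whitelist.key.acl.<OPERATION>, all keys
        conf.set("whitelist.key.acl.MANAGEMENT", "keyadmin");
        conf.set("whitelist.key.acl.GENERATE_EEK", "hdfs");
        conf.set("whitelist.key.acl.DECRYPT_EEK", "user");
    }
}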

Example 28 with UserGroupInformation

Use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.

The class TestSafeMode, method testOperationsWhileInSafeMode.

/**
   * Run various fs operations while the NN is in safe mode,
   * assert that they are either allowed or fail as expected.
   */
@Test
public void testOperationsWhileInSafeMode() throws IOException, InterruptedException {
    final Path file1 = new Path("/file1");
    assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
    DFSTestUtil.createFile(fs, file1, 1024, (short) 1, 0);
    assertTrue("Could not enter SM", dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
    runFsFun("Set quota while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            ((DistributedFileSystem) fs).setQuota(file1, 1, 1);
        }
    });
    runFsFun("Set perm while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.setPermission(file1, FsPermission.getDefault());
        }
    });
    runFsFun("Set owner while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.setOwner(file1, "user", "group");
        }
    });
    runFsFun("Set repl while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.setReplication(file1, (short) 1);
        }
    });
    runFsFun("Append file while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            DFSTestUtil.appendFile(fs, file1, "new bytes");
        }
    });
    runFsFun("Truncate file while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.truncate(file1, 0);
        }
    });
    runFsFun("Delete file while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.delete(file1, false);
        }
    });
    runFsFun("Rename file while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.rename(file1, new Path("file2"));
        }
    });
    runFsFun("Set time while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.setTimes(file1, 0, 0);
        }
    });
    runFsFun("modifyAclEntries while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.modifyAclEntries(file1, Lists.<AclEntry>newArrayList());
        }
    });
    runFsFun("removeAclEntries while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.removeAclEntries(file1, Lists.<AclEntry>newArrayList());
        }
    });
    runFsFun("removeDefaultAcl while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.removeDefaultAcl(file1);
        }
    });
    runFsFun("removeAcl while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.removeAcl(file1);
        }
    });
    runFsFun("setAcl while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.setAcl(file1, Lists.<AclEntry>newArrayList());
        }
    });
    runFsFun("setXAttr while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.setXAttr(file1, "user.a1", null);
        }
    });
    runFsFun("removeXAttr while in SM", new FSRun() {

        @Override
        public void run(FileSystem fs) throws IOException {
            fs.removeXAttr(file1, "user.a1");
        }
    });
    try {
        DFSTestUtil.readFile(fs, file1);
    } catch (IOException ioe) {
        fail("Set times failed while in SM");
    }
    try {
        fs.getAclStatus(file1);
    } catch (IOException ioe) {
        fail("getAclStatus failed while in SM");
    }
    // Test access
    UserGroupInformation ugiX = UserGroupInformation.createRemoteUser("userX");
    FileSystem myfs = ugiX.doAs(new PrivilegedExceptionAction<FileSystem>() {

        @Override
        public FileSystem run() throws IOException {
            return FileSystem.get(conf);
        }
    });
    myfs.access(file1, FsAction.READ);
    try {
        myfs.access(file1, FsAction.WRITE);
        fail("The access call should have failed.");
    } catch (AccessControlException e) {
    // expected
    }
    assertFalse("Could not leave SM", dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) AclEntry(org.apache.hadoop.fs.permission.AclEntry) AccessControlException(org.apache.hadoop.security.AccessControlException) IOException(java.io.IOException) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
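
The test drives each operation through a runFsFun helper and FSRun callback that are not shown in this excerpt. A plausible reconstruction as a hedged sketch (the interface shape matches the anonymous classes above; the helper body is an assumption about how TestSafeMode asserts the safe-mode rejection):

import static org.junit.Assert.fail;

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.ipc.RemoteException;

// Hypothetical reconstruction; the real TestSafeMode helper may differ.
interface FSRun {
    void run(FileSystem fs) throws IOException;
}

// Runs the callback against the test's FileSystem field and asserts that
// the NameNode rejects the mutation while in safe mode.
void runFsFun(String msg, FSRun f) {
    try {
        f.run(fs); // 'fs' is the test class's FileSystem field
        fail(msg + ": expected a safe-mode rejection");
    } catch (RemoteException re) {
        // expected: wraps SafeModeException ("Name node is in safe mode")
    } catch (IOException ioe) {
        fail(msg + ": unexpected exception " + ioe);
    }
}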

Example 29 with UserGroupInformation

Use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.

The class TestSecureEncryptionZoneWithKMS, method testSecureEncryptionZoneWithKMS.

@Test
public void testSecureEncryptionZoneWithKMS() throws IOException, InterruptedException {
    final Path zonePath = new Path(TEST_PATH, "TestEZ1");
    fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), true);
    fsWrapper.setOwner(zonePath, OOZIE_PROXIED_USER_NAME, "supergroup");
    dfsAdmin.createEncryptionZone(zonePath, testKey, NO_TRASH);
    UserGroupInformation oozieUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(ooziePrincipal, keytab);
    UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUser(OOZIE_PROXIED_USER_NAME, oozieUgi);
    proxyUserUgi.doAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws IOException {
            // Get a client handler within the proxy user context for createFile
            try (DistributedFileSystem dfs = cluster.getFileSystem()) {
                for (int i = 0; i < 3; i++) {
                    Path filePath = new Path(zonePath, "testData." + i + ".dat");
                    DFSTestUtil.createFile(dfs, filePath, 1024, (short) 3, 1L);
                }
                return null;
            } catch (IOException e) {
                throw new IOException(e);
            }
        }
    });
}
Also used : Path(org.apache.hadoop.fs.Path) IOException(java.io.IOException) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
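
Unlike createProxyUserForTesting, the plain createProxyUser call only works if the cluster's configuration authorizes the real user (here, the oozie principal) to impersonate others. A hedged sketch of the standard hadoop.proxyuser.* settings such a secure test cluster would need; the wildcard values are permissive placeholders, not the test's actual settings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.ProxyUsers;

public class ProxyUserConfSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Allow the "oozie" user to impersonate anyone from any host.
        conf.set("hadoop.proxyuser.oozie.hosts", "*");
        conf.set("hadoop.proxyuser.oozie.groups", "*");
        // Reload the impersonation rules from the configuration.
        ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    }
}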

Example 30 with UserGroupInformation

Use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.

The class TestReservedRawPaths, method testAdminAccessOnly.

@Test(timeout = 120000)
public void testAdminAccessOnly() throws Exception {
    final Path zone = new Path("zone");
    final Path slashZone = new Path("/", zone);
    fs.mkdirs(slashZone);
    dfsAdmin.createEncryptionZone(slashZone, TEST_KEY, NO_TRASH);
    final Path base = new Path("base");
    final Path reservedRaw = new Path("/.reserved/raw");
    final int len = 8192;
    /* Test failure of create file in reserved/raw as non admin */
    final UserGroupInformation user = UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
    user.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            final DistributedFileSystem fs = cluster.getFileSystem();
            try {
                final Path ezRawEncFile = new Path(new Path(reservedRaw, zone), base);
                DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
                fail("access to /.reserved/raw is superuser-only operation");
            } catch (AccessControlException e) {
                assertExceptionContains("Superuser privilege is required", e);
            }
            return null;
        }
    });
    /* Test failure of getFileStatus in reserved/raw as non admin */
    final Path ezRawEncFile = new Path(new Path(reservedRaw, zone), base);
    DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
    user.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            final DistributedFileSystem fs = cluster.getFileSystem();
            try {
                fs.getFileStatus(ezRawEncFile);
                fail("access to /.reserved/raw is superuser-only operation");
            } catch (AccessControlException e) {
                assertExceptionContains("Superuser privilege is required", e);
            }
            return null;
        }
    });
    /* Test failure of listStatus in reserved/raw as non admin */
    user.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            final DistributedFileSystem fs = cluster.getFileSystem();
            try {
                fs.listStatus(ezRawEncFile);
                fail("access to /.reserved/raw is superuser-only operation");
            } catch (AccessControlException e) {
                assertExceptionContains("Superuser privilege is required", e);
            }
            return null;
        }
    });
    fs.setPermission(new Path("/"), new FsPermission((short) 0777));
    /* Test failure of mkdir in reserved/raw as non admin */
    user.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            final DistributedFileSystem fs = cluster.getFileSystem();
            final Path d1 = new Path(reservedRaw, "dir1");
            try {
                fs.mkdirs(d1);
                fail("access to /.reserved/raw is superuser-only operation");
            } catch (AccessControlException e) {
                assertExceptionContains("Superuser privilege is required", e);
            }
            return null;
        }
    });
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) AccessControlException(org.apache.hadoop.security.AccessControlException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
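
For contrast with the failures above, a superuser can read through /.reserved/raw, which exposes the raw ciphertext of files inside encryption zones (this is how tools like distcp copy encrypted data without key access). A minimal sketch, assuming the caller passes a superuser FileSystem and that /zone/base exists as in the test:

import java.io.InputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: no KMS decryption happens on the /.reserved/raw path,
// so the bytes read are ciphertext, not plaintext.
static void readRawCiphertext(FileSystem fs) throws Exception {
    Path raw = new Path("/.reserved/raw/zone/base");
    try (InputStream in = fs.open(raw)) {
        byte[] buf = new byte[8192];
        int n = in.read(buf);
        System.out.println("Read " + n + " raw (encrypted) bytes");
    }
}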

Aggregations

UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 621
IOException (java.io.IOException): 274
Test (org.junit.Test): 220
Configuration (org.apache.hadoop.conf.Configuration): 138
Path (org.apache.hadoop.fs.Path): 91
FileSystem (org.apache.hadoop.fs.FileSystem): 59
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 57
AccessControlException (org.apache.hadoop.security.AccessControlException): 54
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 50
Path (javax.ws.rs.Path): 47
Produces (javax.ws.rs.Produces): 45
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 45
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 43
AuthorizationException (org.apache.hadoop.security.authorize.AuthorizationException): 39
Token (org.apache.hadoop.security.token.Token): 39
ArrayList (java.util.ArrayList): 38
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 36
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 36
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 35
Text (org.apache.hadoop.io.Text): 34