
Example 26 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in the Apache Hadoop project.

From class TestFileAppend2, method testSimpleAppend.

/**
   * Creates one file, writes a few bytes to it, and then closes it.
   * Reopens the same file for appending, writes the remaining blocks, and then closes it.
   * Verifies that all data exists in the file.
   * @throws IOException if an error occurs while writing or verifying the file
   */
@Test
public void testSimpleAppend() throws IOException {
    final Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    try {
        {
            // test appending to a file.
            // create a new file.
            Path file1 = new Path("/simpleAppend.dat");
            FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
            System.out.println("Created file simpleAppend.dat");
            // write to file
            int mid = 186; // io.bytes.per.checksum bytes
            System.out.println("Writing " + mid + " bytes to file " + file1);
            stm.write(fileContents, 0, mid);
            stm.close();
            System.out.println("Wrote and Closed first part of file.");
            // write to file
            int mid2 = 607; // io.bytes.per.checksum bytes
            System.out.println("Writing " + (mid2 - mid) + " bytes to file " + file1);
            stm = fs.append(file1);
            stm.write(fileContents, mid, mid2 - mid);
            stm.close();
            System.out.println("Wrote and Closed second part of file.");
            // write the remainder of the file
            stm = fs.append(file1);
            // ensure getPos is set to reflect existing size of the file
            assertTrue(stm.getPos() > 0);
            System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2) + " bytes to file " + file1);
            stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
            System.out.println("Written second part of file");
            stm.close();
            System.out.println("Wrote and Closed second part of file.");
            // verify that entire file is good
            AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
        }
        {
            // test appending to a non-existing file.
            FSDataOutputStream out = null;
            try {
                out = fs.append(new Path("/non-existing.dat"));
                fail("Expected to have FileNotFoundException");
            } catch (java.io.FileNotFoundException fnfe) {
                System.out.println("Good: got " + fnfe);
                fnfe.printStackTrace(System.out);
            } finally {
                IOUtils.closeStream(out);
            }
        }
        {
            // test append permission.
            // set root to be writable by everyone
            Path root = new Path("/");
            fs.setPermission(root, new FsPermission((short) 0777));
            fs.close();
            // login as a different user
            final UserGroupInformation superuser = UserGroupInformation.getCurrentUser();
            String username = "testappenduser";
            String group = "testappendgroup";
            assertFalse(superuser.getShortUserName().equals(username));
            assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
            UserGroupInformation appenduser = UserGroupInformation.createUserForTesting(username, new String[] { group });
            fs = DFSTestUtil.getFileSystemAs(appenduser, conf);
            // create a file
            Path dir = new Path(root, getClass().getSimpleName());
            Path foo = new Path(dir, "foo.dat");
            FSDataOutputStream out = null;
            int offset = 0;
            try {
                out = fs.create(foo);
                int len = 10 + AppendTestUtil.nextInt(100);
                out.write(fileContents, offset, len);
                offset += len;
            } finally {
                IOUtils.closeStream(out);
            }
            // change dir and foo to minimal permissions.
            fs.setPermission(dir, new FsPermission((short) 0100));
            fs.setPermission(foo, new FsPermission((short) 0200));
            // try append, should succeed
            out = null;
            try {
                out = fs.append(foo);
                int len = 10 + AppendTestUtil.nextInt(100);
                out.write(fileContents, offset, len);
                offset += len;
            } finally {
                IOUtils.closeStream(out);
            }
            // open up dir fully, but remove write permission on foo.
            fs.setPermission(foo, new FsPermission((short) 0577));
            fs.setPermission(dir, new FsPermission((short) 0777));
            // try append, should fail
            out = null;
            try {
                out = fs.append(foo);
                fail("Expected to have AccessControlException");
            } catch (AccessControlException ace) {
                System.out.println("Good: got " + ace);
                ace.printStackTrace(System.out);
            } finally {
                IOUtils.closeStream(out);
            }
        }
    } catch (IOException e) {
        System.out.println("Exception :" + e);
        throw e;
    } catch (Throwable e) {
        System.out.println("Throwable :" + e);
        e.printStackTrace();
        throw new IOException("Throwable : " + e);
    } finally {
        fs.close();
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) AccessControlException(org.apache.hadoop.security.AccessControlException) IOException(java.io.IOException) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FsPermission(org.apache.hadoop.fs.permission.FsPermission) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
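The test above relies on DFSTestUtil.getFileSystemAs to obtain a FileSystem handle that acts as the restricted test user. Below is a minimal sketch of the UserGroupInformation.doAs pattern that helper wraps; the class name AppendAsUserSketch and the method tryAppendAs are illustrative only, and the Configuration is assumed to point at an already-running MiniDFSCluster.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;

public class AppendAsUserSketch {

    // Hypothetical helper: open a FileSystem as a test user and attempt an append.
    static void tryAppendAs(String user, String[] groups, final Configuration conf, Path file)
            throws Exception {
        UserGroupInformation ugi = UserGroupInformation.createUserForTesting(user, groups);
        // doAs runs the action with the test user's credentials, so the
        // NameNode evaluates permission checks against that user.
        FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
            @Override
            public FileSystem run() throws Exception {
                return FileSystem.get(conf);
            }
        });
        try {
            fs.append(file).close();
            System.out.println("append succeeded for " + user);
        } catch (AccessControlException ace) {
            // Expected when the file lacks write permission for this user.
            System.out.println("append denied for " + user + ": " + ace.getMessage());
        } finally {
            fs.close();
        }
    }
}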

Example 27 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in the Apache Hadoop project.

From class TestHDFSTrash, method testDeleteTrash.

@Test
public void testDeleteTrash() throws Exception {
    Configuration testConf = new Configuration(conf);
    testConf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "10");
    Path user1Tmp = new Path(TEST_ROOT, "test-del-u1");
    Path user2Tmp = new Path(TEST_ROOT, "test-del-u2");
    // login as user1, move something to trash
    // verify user1 can remove its own trash dir
    fs = DFSTestUtil.login(fs, testConf, user1);
    fs.mkdirs(user1Tmp);
    Trash u1Trash = getPerUserTrash(user1, fs, testConf);
    Path u1t = u1Trash.getCurrentTrashDir(user1Tmp);
    assertTrue(String.format("Failed to move %s to trash", user1Tmp), u1Trash.moveToTrash(user1Tmp));
    assertTrue(String.format("%s should be allowed to remove its own trash directory %s", user1.getUserName(), u1t), fs.delete(u1t, true));
    assertFalse(fs.exists(u1t));
    // login as user2, move something to trash
    fs = DFSTestUtil.login(fs, testConf, user2);
    fs.mkdirs(user2Tmp);
    Trash u2Trash = getPerUserTrash(user2, fs, testConf);
    u2Trash.moveToTrash(user2Tmp);
    Path u2t = u2Trash.getCurrentTrashDir(user2Tmp);
    try {
        // user1 should not be able to remove user2's trash dir
        fs = DFSTestUtil.login(fs, testConf, user1);
        fs.delete(u2t, true);
        fail(String.format("%s should not be able to remove %s trash directory", USER1_NAME, USER2_NAME));
    } catch (AccessControlException e) {
        assertTrue(e instanceof AccessControlException);
        assertTrue("Permission denied messages must carry the username", e.getMessage().contains(USER1_NAME));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) AccessControlException(org.apache.hadoop.security.AccessControlException) Trash(org.apache.hadoop.fs.Trash) TestTrash(org.apache.hadoop.fs.TestTrash) Test(org.junit.Test)
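The same protection can be probed outside a JUnit test. Here is a rough sketch of the pattern the test exercises, assuming two already-open FileSystem handles belonging to different users; the class TrashPermissionSketch and the method otherUserCanDelete are hypothetical names.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.security.AccessControlException;

public class TrashPermissionSketch {

    // Move a path to the owner's trash, then report whether a FileSystem
    // handle opened as a different user may delete that trash directory.
    static boolean otherUserCanDelete(FileSystem ownerFs, FileSystem otherFs,
                                      Configuration conf, Path victim) throws Exception {
        Trash trash = new Trash(ownerFs, conf);
        if (!trash.moveToTrash(victim)) {
            throw new IllegalStateException("moveToTrash failed for " + victim);
        }
        Path trashDir = trash.getCurrentTrashDir(victim);
        try {
            return otherFs.delete(trashDir, true);
        } catch (AccessControlException ace) {
            // HDFS rejects the delete: the trash dir lives under the owner's
            // home directory, which other users cannot write to.
            return false;
        }
    }
}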

Example 28 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in the Apache Hadoop project.

From class TestDelegationToken, method testDelegationTokenSecretManager.

@Test
public void testDelegationTokenSecretManager() throws Exception {
    Token<DelegationTokenIdentifier> token = generateDelegationToken("SomeUser", "JobTracker");
    // Fake renewer should not be able to renew
    try {
        dtSecretManager.renewToken(token, "FakeRenewer");
        Assert.fail("should have failed");
    } catch (AccessControlException ace) {
    // PASS
    }
    dtSecretManager.renewToken(token, "JobTracker");
    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
    byte[] tokenId = token.getIdentifier();
    identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
    LOG.info("Sleep to expire the token");
    Thread.sleep(6000);
    // Token should be expired
    try {
        dtSecretManager.retrievePassword(identifier);
        // Should not come here
        Assert.fail("Token should have expired");
    } catch (InvalidToken e) {
        // Success
    }
    dtSecretManager.renewToken(token, "JobTracker");
    LOG.info("Sleep beyond the max lifetime");
    Thread.sleep(5000);
    try {
        dtSecretManager.renewToken(token, "JobTracker");
        Assert.fail("should have been expired");
    } catch (InvalidToken it) {
    // PASS
    }
}
Also used : DelegationTokenIdentifier(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier) ByteArrayInputStream(java.io.ByteArrayInputStream) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) AccessControlException(org.apache.hadoop.security.AccessControlException) DataInputStream(java.io.DataInputStream) Test(org.junit.Test)
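The excerpt does not show the generateDelegationToken helper. A plausible reconstruction is sketched below, with the test's DelegationTokenSecretManager passed in explicitly; the class name DelegationTokenHelperSketch is made up for illustration.

import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;

class DelegationTokenHelperSketch {

    // Build an identifier for the given owner/renewer pair and let the secret
    // manager sign it, producing a token that only the named renewer may renew.
    static Token<DelegationTokenIdentifier> generateDelegationToken(
            DelegationTokenSecretManager dtSecretManager, String owner, String renewer) {
        DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(
                new Text(owner), new Text(renewer), null);
        return new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
    }
}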

Example 29 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in the Apache Hadoop project.

From class WebHdfsFileSystem, method getDelegationToken.

@Override
public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer) throws IOException {
    final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
    Token<DelegationTokenIdentifier> token = new FsPathResponseRunner<Token<DelegationTokenIdentifier>>(op, null, new RenewerParam(renewer)) {

        @Override
        Token<DelegationTokenIdentifier> decodeResponse(Map<?, ?> json) throws IOException {
            return JsonUtilClient.toDelegationToken(json);
        }
    }.run();
    if (token != null) {
        token.setService(tokenServiceName);
    } else if (disallowFallbackToInsecureCluster) {
        throw new AccessControlException(CANT_FALLBACK_TO_INSECURE_MSG);
    }
    return token;
}
Also used : DelegationTokenIdentifier(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier) Op(org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op) AccessControlException(org.apache.hadoop.security.AccessControlException) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) Token(org.apache.hadoop.security.token.Token) IOException(java.io.IOException)
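From the caller's side, a null return only happens when falling back to an insecure cluster is permitted; otherwise the AccessControlException above propagates. A hedged sketch of handling that case follows, with the class FetchTokenSketch and the method fetchOrExplain as invented names.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.token.Token;

class FetchTokenSketch {

    // Request a delegation token and wrap a refusal in a more descriptive error.
    static Token<?> fetchOrExplain(FileSystem webHdfs, String renewer) throws IOException {
        try {
            // May return null when the remote cluster is insecure and fallback is allowed.
            return webHdfs.getDelegationToken(renewer);
        } catch (AccessControlException ace) {
            // The remote cluster did not issue a token and the client is not
            // allowed to proceed without one.
            throw new IOException("Secure WebHDFS endpoint refused to issue a token", ace);
        }
    }
}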

Example 30 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in the Apache Hadoop project.

From class CacheManager, method listCacheDirectives.

public BatchedListEntries<CacheDirectiveEntry> listCacheDirectives(long prevId, CacheDirectiveInfo filter, FSPermissionChecker pc) throws IOException {
    assert namesystem.hasReadLock();
    final int NUM_PRE_ALLOCATED_ENTRIES = 16;
    String filterPath = null;
    if (filter.getPath() != null) {
        filterPath = validatePath(filter);
    }
    if (filter.getReplication() != null) {
        throw new InvalidRequestException("Filtering by replication is unsupported.");
    }
    // Querying for a single ID
    final Long id = filter.getId();
    if (id != null) {
        if (!directivesById.containsKey(id)) {
            throw new InvalidRequestException("Did not find requested id " + id);
        }
        // Since we use a tailMap on directivesById, setting prev to id-1 gets
        // us the directive with the id (if present)
        prevId = id - 1;
    }
    ArrayList<CacheDirectiveEntry> replies = new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
    int numReplies = 0;
    SortedMap<Long, CacheDirective> tailMap = directivesById.tailMap(prevId + 1);
    for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
        if (numReplies >= maxListCacheDirectivesNumResponses) {
            return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
        }
        CacheDirective curDirective = cur.getValue();
        CacheDirectiveInfo info = cur.getValue().toInfo();
        // When querying for a single ID, the tailMap starts at that directive,
        // so once we reach an entry with a different ID we have already
        // returned the requested item and should break out.
        if (id != null && !(info.getId().equals(id))) {
            break;
        }
        if (filter.getPool() != null && !info.getPool().equals(filter.getPool())) {
            continue;
        }
        if (filterPath != null && !info.getPath().toUri().getPath().equals(filterPath)) {
            continue;
        }
        boolean hasPermission = true;
        if (pc != null) {
            try {
                pc.checkPermission(curDirective.getPool(), FsAction.READ);
            } catch (AccessControlException e) {
                hasPermission = false;
            }
        }
        if (hasPermission) {
            replies.add(new CacheDirectiveEntry(info, cur.getValue().toStats()));
            numReplies++;
        }
    }
    return new BatchedListEntries<CacheDirectiveEntry>(replies, false);
}
Also used : ArrayList(java.util.ArrayList) AccessControlException(org.apache.hadoop.security.AccessControlException) CacheDirectiveInfo(org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo) CacheDirective(org.apache.hadoop.hdfs.protocol.CacheDirective) BatchedListEntries(org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries) CacheDirectiveEntry(org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry) InvalidRequestException(org.apache.hadoop.fs.InvalidRequestException)
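Note how the permission check treats AccessControlException as a per-entry filter rather than a failure of the whole call. The generic shape of that pattern is sketched below; the PermissionCheck interface and the class name FilterByPermissionSketch are illustrative stand-ins, not Hadoop APIs.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.security.AccessControlException;

class FilterByPermissionSketch {

    // A check that either passes silently or throws AccessControlException.
    interface PermissionCheck<T> {
        void check(T item) throws AccessControlException;
    }

    // Return only the items the caller is allowed to see; a denied check
    // skips the item instead of aborting the listing.
    static <T> List<T> visibleTo(List<T> items, PermissionCheck<T> check) {
        List<T> visible = new ArrayList<>();
        for (T item : items) {
            try {
                check.check(item);
                visible.add(item);
            } catch (AccessControlException denied) {
                // Not an error: the caller simply does not get this entry.
            }
        }
        return visible;
    }
}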

Aggregations

AccessControlException (org.apache.hadoop.security.AccessControlException): 128 usages
Test (org.junit.Test): 59 usages
Path (org.apache.hadoop.fs.Path): 53 usages
IOException (java.io.IOException): 52 usages
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 35 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 32 usages
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 22 usages
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 21 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 19 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 14 usages
Configuration (org.apache.hadoop.conf.Configuration): 11 usages
FileNotFoundException (java.io.FileNotFoundException): 10 usages
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 8 usages
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 7 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 6 usages
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 6 usages
Text (org.apache.hadoop.io.Text): 5 usages
InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken): 5 usages
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 5 usages
ArrayList (java.util.ArrayList): 4 usages