
Example 41 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class DFSTestUtil method createStripedFile.

/**
   * Creates the metadata of a file in striped layout. This method only
   * manipulates the NameNode state without injecting any data into the
   * DataNodes. Disable periodic heartbeats before using it.
   * @param cluster MiniDFSCluster whose NameNode state is manipulated
   * @param file Path of the file to create
   * @param dir Parent path of the file
   * @param numBlocks Number of striped block groups to add to the file
   * @param numStripesPerBlk Number of striped cells in each block
   * @param toMkdir Whether to create the parent directory and set the
   *                erasure coding policy on it
   * @param ecPolicy Erasure coding policy to apply to the created file. A
   *                 null value means using the default erasure coding policy.
   */
public static void createStripedFile(MiniDFSCluster cluster, Path file, Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir, ErasureCodingPolicy ecPolicy) throws Exception {
    DistributedFileSystem dfs = cluster.getFileSystem();
    // If the outer test has already set the EC policy, pass toMkdir=false; dir may then be left null.
    if (toMkdir) {
        assert dir != null;
        dfs.mkdirs(dir);
        try {
            // Guard the documented null-means-default case (assumption: a null policy name selects the default policy on this release).
            dfs.getClient().setErasureCodingPolicy(dir.toString(), ecPolicy == null ? null : ecPolicy.getName());
        } catch (IOException e) {
            if (!e.getMessage().contains("non-empty directory")) {
                throw e;
            }
        }
    }
    cluster.getNameNodeRpc().create(file.toString(), new FsPermission((short) 0755), dfs.getClient().getClientName(), new EnumSetWritable<>(EnumSet.of(CreateFlag.CREATE)), false, (short) 1, 128 * 1024 * 1024L, null);
    FSNamesystem ns = cluster.getNamesystem();
    FSDirectory fsdir = ns.getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
    ExtendedBlock previous = null;
    for (int i = 0; i < numBlocks; i++) {
        Block newBlock = addBlockToFile(true, cluster.getDataNodes(), dfs, ns, file.toString(), fileNode, dfs.getClient().getClientName(), previous, numStripesPerBlk, 0);
        previous = new ExtendedBlock(ns.getBlockPoolId(), newBlock);
    }
    dfs.getClient().namenode.complete(file.toString(), dfs.getClient().getClientName(), previous, fileNode.getId());
}
Also used : Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile)
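
The method above is a NameNode-only shortcut: it fabricates striped block groups in the namespace without moving any bytes. A minimal driver sketch follows; the cluster sizing, paths, and stripe counts are illustrative assumptions, and StripedFileTestUtil.getDefaultECPolicy() is the usual test helper for picking a policy (a Hadoop 3.x test classpath is assumed).

// Sketch only: slow down heartbeats per the Javadoc, size the cluster to
// the policy's data + parity units, then fabricate a striped file.
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3600L);
ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy();
int totalUnits = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalUnits).build();
try {
    cluster.waitActive();
    // On releases where policies must be enabled first, also call:
    // cluster.getFileSystem().enableErasureCodingPolicy(ecPolicy.getName());
    Path dir = new Path("/striped");
    Path file = new Path(dir, "file");
    // Two block groups, four stripes each; create dir and set its EC policy.
    DFSTestUtil.createStripedFile(cluster, file, dir, 2, 4, true, ecPolicy);
} finally {
    cluster.shutdown();
}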

Example 42 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestSymlinkHdfs method testSetPermissionAffectsTarget.

/** setPermission affects the target, not the link. */
@Test(timeout = 10000)
public void testSetPermissionAffectsTarget() throws IOException {
    Path file = new Path(testBaseDir1(), "file");
    Path dir = new Path(testBaseDir2());
    Path linkToFile = new Path(testBaseDir1(), "linkToFile");
    Path linkToDir = new Path(testBaseDir1(), "linkToDir");
    createAndWriteFile(file);
    wrapper.createSymlink(file, linkToFile, false);
    wrapper.createSymlink(dir, linkToDir, false);
    // Changing the permissions using the link does not modify
    // the permissions of the link.
    FsPermission perms = wrapper.getFileLinkStatus(linkToFile).getPermission();
    wrapper.setPermission(linkToFile, new FsPermission((short) 0664));
    wrapper.setOwner(linkToFile, "user", "group");
    assertEquals(perms, wrapper.getFileLinkStatus(linkToFile).getPermission());
    // but the file's permissions were adjusted appropriately
    FileStatus stat = wrapper.getFileStatus(file);
    assertEquals(0664, stat.getPermission().toShort());
    assertEquals("user", stat.getOwner());
    assertEquals("group", stat.getGroup());
    // Getting the file's permissions via the link is the same
    // as getting the permissions directly.
    assertEquals(stat.getPermission(), wrapper.getFileStatus(linkToFile).getPermission());
    // Ditto for a link to a directory
    perms = wrapper.getFileLinkStatus(linkToDir).getPermission();
    wrapper.setPermission(linkToDir, new FsPermission((short) 0664));
    wrapper.setOwner(linkToDir, "user", "group");
    assertEquals(perms, wrapper.getFileLinkStatus(linkToDir).getPermission());
    stat = wrapper.getFileStatus(dir);
    assertEquals(0664, stat.getPermission().toShort());
    assertEquals("user", stat.getOwner());
    assertEquals("group", stat.getGroup());
    assertEquals(stat.getPermission(), wrapper.getFileStatus(linkToDir).getPermission());
}
Also used : FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
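
The octal short literals used throughout these examples map directly onto FsPermission's other factories. A small orientation sketch of equivalent spellings of the rw-rw-r-- (0664) permission above, using only the standard org.apache.hadoop.fs.permission API:

// Three equivalent ways to express rw-rw-r-- (0664).
FsPermission fromOctal = new FsPermission((short) 0664);
FsPermission fromActions = new FsPermission(FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ);
FsPermission fromSymbolic = FsPermission.valueOf("-rw-rw-r--");
assert fromOctal.equals(fromActions) && fromActions.equals(fromSymbolic);
assert fromOctal.toShort() == 0664;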

Example 43 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestAuditLogs method testAuditWebHdfsStat.

/** Test that a stat via WebHDFS puts the proper entry in the audit log. */
@Test
public void testAuditWebHdfsStat() throws Exception {
    final Path file = new Path(fnames[0]);
    fs.setPermission(file, new FsPermission((short) 0644));
    fs.setOwner(file, "root", null);
    setupAuditLogs();
    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsConstants.WEBHDFS_SCHEME);
    FileStatus st = webfs.getFileStatus(file);
    verifyAuditLogs(true);
    assertTrue("failed to stat file", st != null && st.isFile());
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) FsPermission(org.apache.hadoop.fs.permission.FsPermission) WebHdfsFileSystem(org.apache.hadoop.hdfs.web.WebHdfsFileSystem) Test(org.junit.Test)
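
For orientation, verifyAuditLogs(true) performs the kind of check sketched below: scan the audit log and assert on the allowed= field of the last entry. The helper name and details here are hypothetical, not the actual TestAuditLogs implementation; java.io and org.junit.Assert imports are assumed.

// Hypothetical audit-log check. Entries look roughly like:
//   allowed=true  ugi=user (auth:SIMPLE)  ip=/127.0.0.1  cmd=getfileinfo  src=/file  dst=null  perm=null  proto=webhdfs
static void assertLastAuditEntry(File auditLog, boolean expectAllowed) throws IOException {
    String last = null;
    try (BufferedReader reader = new BufferedReader(new FileReader(auditLog))) {
        for (String line; (line = reader.readLine()) != null; ) {
            if (line.contains("allowed=")) {
                last = line;
            }
        }
    }
    assertNotNull("no audit entries found", last);
    assertTrue("unexpected audit entry: " + last, last.contains("allowed=" + expectAllowed));
}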

Example 44 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestAuditLogs method testAuditDenied.

/** Test that a denied operation puts the proper entry in the audit log. */
@Test
public void testAuditDenied() throws Exception {
    final Path file = new Path(fnames[0]);
    FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
    fs.setPermission(file, new FsPermission((short) 0600));
    fs.setOwner(file, "root", null);
    setupAuditLogs();
    try {
        userfs.open(file);
        fail("open must not succeed");
    } catch (AccessControlException e) {
        System.out.println("got access denied, as expected.");
    }
    verifyAuditLogs(false);
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) WebHdfsFileSystem(org.apache.hadoop.hdfs.web.WebHdfsFileSystem) AccessControlException(org.apache.hadoop.security.AccessControlException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
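
The try/fail/catch pattern above is the classic JUnit 4 idiom. With JUnit 4.13+ (or JUnit 5) on the classpath, the same expectation can be written more compactly; this is a sketch of the alternative, not how TestAuditLogs itself is written:

// Equivalent denial check using assertThrows.
AccessControlException ace = assertThrows(AccessControlException.class, () -> userfs.open(file));
System.out.println("got access denied, as expected: " + ace.getMessage());
verifyAuditLogs(false);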

Example 45 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestAuditLogs method testAuditWebHdfsDenied.

/** Test that denied access via WebHDFS puts the proper entry in the audit log. */
@Test
public void testAuditWebHdfsDenied() throws Exception {
    final Path file = new Path(fnames[0]);
    fs.setPermission(file, new FsPermission((short) 0600));
    fs.setOwner(file, "root", null);
    setupAuditLogs();
    try {
        WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsConstants.WEBHDFS_SCHEME);
        InputStream istream = webfs.open(file);
        int val = istream.read();
        fail("open+read must not succeed, got " + val);
    } catch (AccessControlException e) {
        System.out.println("got access denied, as expected.");
    }
    verifyAuditLogsRepeat(false, 2);
}
Also used : Path(org.apache.hadoop.fs.Path) InputStream(java.io.InputStream) AccessControlException(org.apache.hadoop.security.AccessControlException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) WebHdfsFileSystem(org.apache.hadoop.hdfs.web.WebHdfsFileSystem) Test(org.junit.Test)
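
Outside of tests, a WebHdfsFileSystem is normally obtained through the generic FileSystem factory rather than WebHdfsTestUtil. A minimal sketch; the namenode host and port are placeholders (9870 is the usual Hadoop 3 NameNode HTTP port):

// Open a file over WebHDFS via the standard FileSystem factory.
Configuration conf = new Configuration();
FileSystem webfs = FileSystem.get(URI.create("webhdfs://namenode.example.com:9870"), conf);
try (InputStream in = webfs.open(new Path("/some/file"))) {
    int firstByte = in.read();  // data is streamed from a DataNode HTTP endpoint
}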

Aggregations

FsPermission (org.apache.hadoop.fs.permission.FsPermission) 427
Path (org.apache.hadoop.fs.Path) 267
Test (org.junit.Test) 180
IOException (java.io.IOException) 120
FileSystem (org.apache.hadoop.fs.FileSystem) 93
Configuration (org.apache.hadoop.conf.Configuration) 89
FileStatus (org.apache.hadoop.fs.FileStatus) 87
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 52
AccessControlException (org.apache.hadoop.security.AccessControlException) 43
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation) 36
FileNotFoundException (java.io.FileNotFoundException) 33
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 29
File (java.io.File) 26
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 26
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 26
AclEntry (org.apache.hadoop.fs.permission.AclEntry) 25
ArrayList (java.util.ArrayList) 22
HashMap (java.util.HashMap) 19
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 16
URI (java.net.URI) 15