
Example 26 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestFileAppend2 method testSimpleAppend.

/**
   * Creates one file, writes a few bytes to it, and then closes it.
   * Reopens the same file for appending, writes the remaining bytes, and then closes it.
   * Verifies that all the data exists in the file.
   * @throws IOException if an I/O error occurs
   */
@Test
public void testSimpleAppend() throws IOException {
    final Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
    fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    try {
        {
            // test appending to a file.
            // create a new file.
            Path file1 = new Path("/simpleAppend.dat");
            FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
            System.out.println("Created file simpleAppend.dat");
            // write to file
            // io.bytes.per.checksum bytes
            int mid = 186;
            System.out.println("Writing " + mid + " bytes to file " + file1);
            stm.write(fileContents, 0, mid);
            stm.close();
            System.out.println("Wrote and Closed first part of file.");
            // write to file
            // io.bytes.per.checksum bytes
            int mid2 = 607;
            System.out.println("Writing " + mid + " bytes to file " + file1);
            stm = fs.append(file1);
            stm.write(fileContents, mid, mid2 - mid);
            stm.close();
            System.out.println("Wrote and Closed second part of file.");
            // write the remainder of the file
            stm = fs.append(file1);
            // ensure getPos is set to reflect existing size of the file
            assertTrue(stm.getPos() > 0);
            System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2) + " bytes to file " + file1);
            stm.write(fileContents, mid2, AppendTestUtil.FILE_SIZE - mid2);
            System.out.println("Written second part of file");
            stm.close();
            System.out.println("Wrote and Closed second part of file.");
            // verify that entire file is good
            AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
        }
        {
            // test appending to a non-existent file.
            FSDataOutputStream out = null;
            try {
                out = fs.append(new Path("/non-existing.dat"));
                fail("Expected to have FileNotFoundException");
            } catch (java.io.FileNotFoundException fnfe) {
                System.out.println("Good: got " + fnfe);
                fnfe.printStackTrace(System.out);
            } finally {
                IOUtils.closeStream(out);
            }
        }
        {
            // test append permission.
            // set root to be world-writable
            Path root = new Path("/");
            fs.setPermission(root, new FsPermission((short) 0777));
            fs.close();
            // login as a different user
            final UserGroupInformation superuser = UserGroupInformation.getCurrentUser();
            String username = "testappenduser";
            String group = "testappendgroup";
            assertFalse(superuser.getShortUserName().equals(username));
            assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
            UserGroupInformation appenduser = UserGroupInformation.createUserForTesting(username, new String[] { group });
            fs = DFSTestUtil.getFileSystemAs(appenduser, conf);
            // create a file
            Path dir = new Path(root, getClass().getSimpleName());
            Path foo = new Path(dir, "foo.dat");
            FSDataOutputStream out = null;
            int offset = 0;
            try {
                out = fs.create(foo);
                int len = 10 + AppendTestUtil.nextInt(100);
                out.write(fileContents, offset, len);
                offset += len;
            } finally {
                IOUtils.closeStream(out);
            }
            // change dir and foo to minimal permissions.
            fs.setPermission(dir, new FsPermission((short) 0100));
            fs.setPermission(foo, new FsPermission((short) 0200));
            // try append; it should succeed
            out = null;
            try {
                out = fs.append(foo);
                int len = 10 + AppendTestUtil.nextInt(100);
                out.write(fileContents, offset, len);
                offset += len;
            } finally {
                IOUtils.closeStream(out);
            }
            // grant full permissions on dir, but remove the write bit on foo.
            fs.setPermission(foo, new FsPermission((short) 0577));
            fs.setPermission(dir, new FsPermission((short) 0777));
            // try append, should fail
            out = null;
            try {
                out = fs.append(foo);
                fail("Expected to have AccessControlException");
            } catch (AccessControlException ace) {
                System.out.println("Good: got " + ace);
                ace.printStackTrace(System.out);
            } finally {
                IOUtils.closeStream(out);
            }
        }
    } catch (IOException e) {
        System.out.println("Exception :" + e);
        throw e;
    } catch (Throwable e) {
        System.out.println("Throwable :" + e);
        e.printStackTrace();
        throw new IOException("Throwable : " + e);
    } finally {
        fs.close();
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) AccessControlException(org.apache.hadoop.security.AccessControlException) IOException(java.io.IOException) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FsPermission(org.apache.hadoop.fs.permission.FsPermission) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
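
A quick aside on the permission values used above: the test constructs every FsPermission from an octal short. Below is a minimal standalone sketch of the equivalent constructions (the class name PermissionSketch is ours; the FsPermission constructors themselves are standard Hadoop API):

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionSketch {
    public static void main(String[] args) {
        // 0777 => rwxrwxrwx, the value used above to open up the root directory
        FsPermission all = new FsPermission((short) 0777);
        // The same permission built from explicit user/group/other actions
        FsPermission sameAsAll = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
        System.out.println(all.equals(sameAsAll)); // true
        // 0200 => owner write-only; append needs only the write bit, which is
        // exactly what the "minimal permissions" step of the test relies on
        FsPermission writeOnly = new FsPermission((short) 0200);
        System.out.println(writeOnly); // -w-------
    }
}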

Example 27 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestFsck method testFsckFileNotFound.

/** Test fsck with FileNotFound. */
@Test
public void testFsckFileNotFound() throws Exception {
    // Number of replicas to actually start
    final short numReplicas = 1;
    NameNode namenode = mock(NameNode.class);
    NetworkTopology nettop = mock(NetworkTopology.class);
    Map<String, String[]> pmap = new HashMap<>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    FSNamesystem fsName = mock(FSNamesystem.class);
    FSDirectory fsd = mock(FSDirectory.class);
    BlockManager blockManager = mock(BlockManager.class);
    DatanodeManager dnManager = mock(DatanodeManager.class);
    INodesInPath iip = mock(INodesInPath.class);
    when(namenode.getNamesystem()).thenReturn(fsName);
    when(fsName.getBlockManager()).thenReturn(blockManager);
    when(fsName.getFSDirectory()).thenReturn(fsd);
    when(fsd.getFSNamesystem()).thenReturn(fsName);
    when(fsd.resolvePath(anyObject(), anyString(), any(DirOp.class))).thenReturn(iip);
    when(blockManager.getDatanodeManager()).thenReturn(dnManager);
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, numReplicas, remoteAddress);
    String pathString = "/tmp/testFile";
    long length = 123L;
    boolean isDir = false;
    int blockReplication = 1;
    long blockSize = 128 * 1024L;
    long modTime = 123123123L;
    long accessTime = 123123120L;
    FsPermission perms = FsPermission.getDefault();
    String owner = "foo";
    String group = "bar";
    byte[] symlink = null;
    byte[] path = DFSUtil.string2Bytes(pathString);
    long fileId = 312321L;
    int numChildren = 1;
    byte storagePolicy = 0;
    HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication, blockSize, modTime, accessTime, perms, owner, group, symlink, path, fileId, numChildren, null, storagePolicy, null);
    Result replRes = new ReplicationResult(conf);
    Result ecRes = new ErasureCodingResult(conf);
    try {
        fsck.check(pathString, file, replRes, ecRes);
    } catch (Exception e) {
        fail("Unexpected exception " + e.getMessage());
    }
    assertTrue(replRes.isHealthy());
}
Also used : DirOp(org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp) HashMap(java.util.HashMap) Matchers.anyString(org.mockito.Matchers.anyString) Result(org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result) ReplicationResult(org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult) ErasureCodingResult(org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult) StringWriter(java.io.StringWriter) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FsPermission(org.apache.hadoop.fs.permission.FsPermission) PrintWriter(java.io.PrintWriter) IOException(java.io.IOException) ChecksumException(org.apache.hadoop.fs.ChecksumException) TimeoutException(java.util.concurrent.TimeoutException) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) NetworkTopology(org.apache.hadoop.net.NetworkTopology) InetAddress(java.net.InetAddress) Writer(java.io.Writer) HostsFileWriter(org.apache.hadoop.hdfs.util.HostsFileWriter) Test(org.junit.Test)
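
The mocked HdfsFileStatus above gets its permissions from FsPermission.getDefault(). For context, here is a hedged sketch of how the defaults relate to the configured umask (DefaultPermsSketch is our name; getDefault, getFileDefault, getUMask, and applyUMask are standard FsPermission methods):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;

public class DefaultPermsSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // getDefault() is 0777; getFileDefault() is 0666 (no execute bits)
        FsPermission dirDefault = FsPermission.getDefault();
        FsPermission fileDefault = FsPermission.getFileDefault();
        // At create time HDFS applies the client umask: effective = default & ~umask
        FsPermission umask = FsPermission.getUMask(conf); // fs.permissions.umask-mode, "022" by default
        System.out.printf("dir:  %s -> %s%n", dirDefault, dirDefault.applyUMask(umask));
        System.out.printf("file: %s -> %s%n", fileDefault, fileDefault.applyUMask(umask));
    }
}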

Example 28 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestINodeFile method testValidSymlinkTarget.

private void testValidSymlinkTarget(NamenodeProtocols nnRpc, String target, String link) throws IOException {
    FsPermission perm = FsPermission.createImmutable((short) 0755);
    nnRpc.createSymlink(target, link, perm, false);
    assertEquals(target, nnRpc.getLinkTarget(link));
}
Also used : FsPermission(org.apache.hadoop.fs.permission.FsPermission)
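
Note the use of FsPermission.createImmutable rather than the plain constructor. A tiny sketch of the difference (ImmutablePermSketch is our name; the factory method and the short round trip are standard API):

import org.apache.hadoop.fs.permission.FsPermission;

public class ImmutablePermSketch {
    public static void main(String[] args) {
        // createImmutable returns an FsPermission that rejects later mutation
        // through the Writable interface, making it safe as a shared constant.
        FsPermission perm = FsPermission.createImmutable((short) 0755);
        System.out.println(perm);                   // rwxr-xr-x
        System.out.println(perm.toShort() == 0755); // true: round-trips the octal value
    }
}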

Example 29 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestINodeFile method testInodeIdBasedPaths.

/**
   * Tests for addressing files using /.reserved/.inodes/<inodeID> in file system
   * operations.
   */
@Test
public void testInodeIdBasedPaths() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
        // FileSystem#mkdirs "/testInodeIdBasedPaths"
        Path baseDir = getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths");
        Path baseDirRegPath = new Path("/testInodeIdBasedPaths");
        fs.mkdirs(baseDir);
        fs.exists(baseDir);
        long baseDirFileId = nnRpc.getFileInfo(baseDir.toString()).getFileId();
        // FileSystem#create file and FileSystem#close
        Path testFileInodePath = getInodePath(baseDirFileId, "test1");
        Path testFileRegularPath = new Path(baseDir, "test1");
        final int testFileBlockSize = 1024;
        FileSystemTestHelper.createFile(fs, testFileInodePath, 1, testFileBlockSize);
        assertTrue(fs.exists(testFileInodePath));
        // FileSystem#setPermission
        FsPermission perm = new FsPermission((short) 0666);
        fs.setPermission(testFileInodePath, perm);
        // FileSystem#getFileStatus and FileSystem#getPermission
        FileStatus fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(perm, fileStatus.getPermission());
        // FileSystem#setOwner
        fs.setOwner(testFileInodePath, fileStatus.getOwner(), fileStatus.getGroup());
        // FileSystem#setTimes
        fs.setTimes(testFileInodePath, 0, 0);
        fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(0, fileStatus.getModificationTime());
        assertEquals(0, fileStatus.getAccessTime());
        // FileSystem#setReplication
        fs.setReplication(testFileInodePath, (short) 3);
        fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(3, fileStatus.getReplication());
        fs.setReplication(testFileInodePath, (short) 1);
        // ClientProtocol#getPreferredBlockSize
        assertEquals(testFileBlockSize, nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
        /*
       * HDFS-6749 added missing calls to FSDirectory.resolvePath in the
       * following four methods. The calls below ensure that
       * /.reserved/.inodes paths work properly. No need to check return
       * values as these methods are tested elsewhere.
       */
        {
            fs.isFileClosed(testFileInodePath);
            fs.getAclStatus(testFileInodePath);
            fs.getXAttrs(testFileInodePath);
            fs.listXAttrs(testFileInodePath);
            fs.access(testFileInodePath, FsAction.READ_WRITE);
        }
        // symbolic link related tests
        // Reserved path is not allowed as a target
        String invalidTarget = new Path(baseDir, "invalidTarget").toString();
        String link = new Path(baseDir, "link").toString();
        testInvalidSymlinkTarget(nnRpc, invalidTarget, link);
        // Test creating a link using reserved inode path
        String validTarget = "/validtarget";
        testValidSymlinkTarget(nnRpc, validTarget, link);
        // FileSystem#append
        fs.append(testFileInodePath);
        // DistributedFileSystem#recoverLease
        fs.recoverLease(testFileInodePath);
        // Namenode#getBlockLocations
        LocatedBlocks l1 = nnRpc.getBlockLocations(testFileInodePath.toString(), 0, Long.MAX_VALUE);
        LocatedBlocks l2 = nnRpc.getBlockLocations(testFileRegularPath.toString(), 0, Long.MAX_VALUE);
        checkEquals(l1, l2);
        // FileSystem#rename - both the variants
        Path renameDst = getInodePath(baseDirFileId, "test2");
        fileStatus = fs.getFileStatus(testFileInodePath);
        // Rename variant 1: rename and rename back
        fs.rename(testFileInodePath, renameDst);
        fs.rename(renameDst, testFileInodePath);
        assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
        // Rename variant 2: rename and rename back
        fs.rename(testFileInodePath, renameDst, Rename.OVERWRITE);
        fs.rename(renameDst, testFileInodePath, Rename.OVERWRITE);
        assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
        // FileSystem#getContentSummary
        assertEquals(fs.getContentSummary(testFileRegularPath).toString(), fs.getContentSummary(testFileInodePath).toString());
        // FileSystem#listFiles
        checkEquals(fs.listFiles(baseDirRegPath, false), fs.listFiles(baseDir, false));
        // FileSystem#delete
        fs.delete(testFileInodePath, true);
        assertFalse(fs.exists(testFileInodePath));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FsPermission(org.apache.hadoop.fs.permission.FsPermission) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
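
The getInodePath helper used throughout this test is not shown in the excerpt. Below is a plausible minimal reconstruction, inferred from how the test calls it (the actual helper in TestINodeFile may differ in detail; the /.reserved/.inodes/<inodeID> layout itself is standard HDFS behavior):

import org.apache.hadoop.fs.Path;

// Hypothetical reconstruction: builds /.reserved/.inodes/<inodeID>/<name>
// so a file can be addressed by inode ID rather than by its full path.
private static Path getInodePath(long inodeId, String remainingPath) {
    return new Path("/.reserved/.inodes/" + inodeId + "/" + remainingPath);
}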

Example 30 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestNativeIO method assertPermissions.

private void assertPermissions(File f, int expected) throws IOException {
    FileSystem localfs = FileSystem.getLocal(new Configuration());
    FsPermission perms = localfs.getFileStatus(new Path(f.getAbsolutePath())).getPermission();
    assertEquals(expected, perms.toShort());
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FsPermission(org.apache.hadoop.fs.permission.FsPermission)
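
Here is a hedged, self-contained sketch of the same round trip assertPermissions checks (LocalPermsSketch is our name; the calls are standard Hadoop local-filesystem API, though setPermission on the local FS ultimately issues a chmod and so assumes a POSIX host):

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class LocalPermsSketch {
    public static void main(String[] args) throws Exception {
        File f = File.createTempFile("perm-sketch", ".dat");
        f.deleteOnExit();
        FileSystem localfs = FileSystem.getLocal(new Configuration());
        Path p = new Path(f.getAbsolutePath());
        // Set a mode through the Hadoop local filesystem, then read it back,
        // mirroring what assertPermissions verifies above.
        localfs.setPermission(p, new FsPermission((short) 0600));
        FsPermission perms = localfs.getFileStatus(p).getPermission();
        System.out.printf("mode = %04o%n", perms.toShort()); // expect 0600
    }
}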

Aggregations

FsPermission (org.apache.hadoop.fs.permission.FsPermission): 427 usages
Path (org.apache.hadoop.fs.Path): 267 usages
Test (org.junit.Test): 180 usages
IOException (java.io.IOException): 120 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 93 usages
Configuration (org.apache.hadoop.conf.Configuration): 89 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 87 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 52 usages
AccessControlException (org.apache.hadoop.security.AccessControlException): 43 usages
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 36 usages
FileNotFoundException (java.io.FileNotFoundException): 33 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 29 usages
File (java.io.File): 26 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 26 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 26 usages
AclEntry (org.apache.hadoop.fs.permission.AclEntry): 25 usages
ArrayList (java.util.ArrayList): 22 usages
HashMap (java.util.HashMap): 19 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 16 usages
URI (java.net.URI): 15 usages