Example 16 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

The class TestReservedRawPaths, method testAdminAccessOnly.

@Test(timeout = 120000)
public void testAdminAccessOnly() throws Exception {
    final Path zone = new Path("zone");
    final Path slashZone = new Path("/", zone);
    fs.mkdirs(slashZone);
    dfsAdmin.createEncryptionZone(slashZone, TEST_KEY, NO_TRASH);
    final Path base = new Path("base");
    final Path reservedRaw = new Path("/.reserved/raw");
    final int len = 8192;
    /* Test failure of create file in reserved/raw as non admin */
    final UserGroupInformation user = UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
    user.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            final DistributedFileSystem fs = cluster.getFileSystem();
            try {
                final Path ezRawEncFile = new Path(new Path(reservedRaw, zone), base);
                DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
                fail("access to /.reserved/raw is superuser-only operation");
            } catch (AccessControlException e) {
                assertExceptionContains("Superuser privilege is required", e);
            }
            return null;
        }
    });
    /* Test failure of getFileStatus in reserved/raw as non admin */
    final Path ezRawEncFile = new Path(new Path(reservedRaw, zone), base);
    DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
    user.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            final DistributedFileSystem fs = cluster.getFileSystem();
            try {
                fs.getFileStatus(ezRawEncFile);
                fail("access to /.reserved/raw is superuser-only operation");
            } catch (AccessControlException e) {
                assertExceptionContains("Superuser privilege is required", e);
            }
            return null;
        }
    });
    /* Test failure of listStatus in reserved/raw as non admin */
    user.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            final DistributedFileSystem fs = cluster.getFileSystem();
            try {
                fs.listStatus(ezRawEncFile);
                fail("access to /.reserved/raw is superuser-only operation");
            } catch (AccessControlException e) {
                assertExceptionContains("Superuser privilege is required", e);
            }
            return null;
        }
    });
    fs.setPermission(new Path("/"), new FsPermission((short) 0777));
    /* Test failure of mkdir in reserved/raw as non admin */
    user.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            final DistributedFileSystem fs = cluster.getFileSystem();
            final Path d1 = new Path(reservedRaw, "dir1");
            try {
                fs.mkdirs(d1);
                fail("access to /.reserved/raw is superuser-only operation");
            } catch (AccessControlException e) {
                assertExceptionContains("Superuser privilege is required", e);
            }
            return null;
        }
    });
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) AccessControlException(org.apache.hadoop.security.AccessControlException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)

Example 17 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

The class TestDiskError, method testLocalDirs.

/**
   * Check that the permissions of the local DN directories are as expected.
   */
@Test
public void testLocalDirs() throws Exception {
    Configuration conf = new Configuration();
    final String permStr = conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY);
    FsPermission expected = new FsPermission(permStr);
    // Check permissions on directories in 'dfs.datanode.data.dir'
    FileSystem localFS = FileSystem.getLocal(conf);
    for (DataNode dn : cluster.getDataNodes()) {
        try (FsDatasetSpi.FsVolumeReferences volumes = dn.getFSDataset().getFsVolumeReferences()) {
            for (FsVolumeSpi vol : volumes) {
                Path dataDir = new Path(vol.getStorageLocation().getNormalizedUri());
                FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
                assertEquals("Permission for dir: " + dataDir + ", is " + actual + ", while expected is " + expected, expected, actual);
            }
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)

Example 18 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

The class TestOfflineImageViewer, method createOriginalFSImage.

// Create a populated namespace for later testing. Save its contents to a
// data structure and store its fsimage location.
// We only want to generate the fsimage file once and use it for
// multiple tests.
@BeforeClass
public static void createOriginalFSImage() throws IOException {
    tempDir = Files.createTempDir();
    MiniDFSCluster cluster = null;
    try {
        final ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.XOR_2_1_POLICY_ID);
        Configuration conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL, "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
        conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, ecPolicy.getName());
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        // Create a reasonable namespace
        for (int i = 0; i < NUM_DIRS; i++, dirCount++) {
            Path dir = new Path("/dir" + i);
            hdfs.mkdirs(dir);
            writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
            for (int j = 0; j < FILES_PER_DIR; j++) {
                Path file = new Path(dir, "file" + j);
                FSDataOutputStream o = hdfs.create(file);
                o.write(23);
                o.close();
                writtenFiles.put(file.toString(), pathToFileEntry(hdfs, file.toString()));
            }
        }
        // Create an empty directory
        Path emptydir = new Path("/emptydir");
        hdfs.mkdirs(emptydir);
        dirCount++;
        writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));
        //Create a directory whose name should be escaped in XML
        Path invalidXMLDir = new Path("/dirContainingInvalidXMLCharhere");
        hdfs.mkdirs(invalidXMLDir);
        dirCount++;
        //Create a directory with sticky bits
        Path stickyBitDir = new Path("/stickyBit");
        hdfs.mkdirs(stickyBitDir);
        hdfs.setPermission(stickyBitDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true));
        dirCount++;
        writtenFiles.put(stickyBitDir.toString(), hdfs.getFileStatus(stickyBitDir));
        // Get delegation tokens so we log the delegation token op
        Token<?>[] delegationTokens = hdfs.addDelegationTokens(TEST_RENEWER, null);
        for (Token<?> t : delegationTokens) {
            LOG.debug("got token " + t);
        }
        // Create INodeReference
        final Path src = new Path("/src");
        hdfs.mkdirs(src);
        dirCount++;
        writtenFiles.put(src.toString(), hdfs.getFileStatus(src));
        // Create snapshot and snapshotDiff.
        final Path orig = new Path("/src/orig");
        hdfs.mkdirs(orig);
        final Path file1 = new Path("/src/file");
        FSDataOutputStream o = hdfs.create(file1);
        o.write(23);
        o.write(45);
        o.close();
        hdfs.allowSnapshot(src);
        hdfs.createSnapshot(src, "snapshot");
        final Path dst = new Path("/dst");
        // Rename a directory in the snapshot directory to add snapshotCopy
        // field to the dirDiff entry.
        hdfs.rename(orig, dst);
        dirCount++;
        writtenFiles.put(dst.toString(), hdfs.getFileStatus(dst));
        // Truncate a file in the snapshot directory to add snapshotCopy and
        // blocks fields to the fileDiff entry.
        hdfs.truncate(file1, 1);
        writtenFiles.put(file1.toString(), hdfs.getFileStatus(file1));
        // Set XAttrs so the fsimage contains XAttr ops
        final Path xattr = new Path("/xattr");
        hdfs.mkdirs(xattr);
        dirCount++;
        hdfs.setXAttr(xattr, "user.a1", new byte[] { 0x31, 0x32, 0x33 });
        hdfs.setXAttr(xattr, "user.a2", new byte[] { 0x37, 0x38, 0x39 });
        // OIV should be able to handle empty value XAttrs
        hdfs.setXAttr(xattr, "user.a3", null);
        // OIV should be able to handle XAttr values that can't be expressed
        // as UTF8
        hdfs.setXAttr(xattr, "user.a4", new byte[] { -0x3d, 0x28 });
        writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));
        // Set ACLs
        hdfs.setAcl(xattr, Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE), aclEntry(ACCESS, OTHER, EXECUTE)));
        // Create an Erasure Coded dir
        Path ecDir = new Path("/ec");
        hdfs.mkdirs(ecDir);
        dirCount++;
        hdfs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
        writtenFiles.put(ecDir.toString(), hdfs.getFileStatus(ecDir));
        // Create an empty Erasure Coded file
        Path emptyECFile = new Path(ecDir, "EmptyECFile.txt");
        hdfs.create(emptyECFile).close();
        writtenFiles.put(emptyECFile.toString(), pathToFileEntry(hdfs, emptyECFile.toString()));
        filesECCount++;
        // Create a small Erasure Coded file
        Path smallECFile = new Path(ecDir, "SmallECFile.txt");
        FSDataOutputStream out = hdfs.create(smallECFile);
        Random r = new Random();
        byte[] bytes = new byte[1024 * 10];
        r.nextBytes(bytes);
        out.write(bytes);
        writtenFiles.put(smallECFile.toString(), pathToFileEntry(hdfs, smallECFile.toString()));
        filesECCount++;
        // Write results to the fsimage file
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
        hdfs.saveNamespace();
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
        // Determine location of fsimage file
        originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
        if (originalFsimage == null) {
            throw new RuntimeException("Didn't generate or can't find fsimage");
        }
        LOG.debug("original FS image file is " + originalFsimage);
    } finally {
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) Token(org.apache.hadoop.security.token.Token) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Random(java.util.Random) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FsPermission(org.apache.hadoop.fs.permission.FsPermission) BeforeClass(org.junit.BeforeClass)

Example 19 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

The class TestFSMainOperationsWebHdfs, method setupCluster.

@BeforeClass
public static void setupCluster() {
    final Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        cluster.waitActive();
        //change root permission to 777
        cluster.getFileSystem().setPermission(new Path("/"), new FsPermission((short) 0777));
        final String uri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
        //get file system as a non-superuser
        final UserGroupInformation current = UserGroupInformation.getCurrentUser();
        final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(current.getShortUserName() + "x", new String[] { "user" });
        fileSystem = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {

            @Override
            public FileSystem run() throws Exception {
                return FileSystem.get(new URI(uri), conf);
            }
        });
        defaultWorkingDirectory = fileSystem.getWorkingDirectory();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) FsPermission(org.apache.hadoop.fs.permission.FsPermission) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) URI(java.net.URI) IOException(java.io.IOException) AccessControlException(org.apache.hadoop.security.AccessControlException) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) BeforeClass(org.junit.BeforeClass)

Example 20 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

The class BaseTestHttpFSWith, method testCreate.

private void testCreate(Path path, boolean override) throws Exception {
    FileSystem fs = getHttpFSFileSystem();
    FsPermission permission = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    OutputStream os = fs.create(new Path(path.toUri().getPath()), permission, override, 1024, (short) 2, 100 * 1024 * 1024, null);
    os.write(1);
    os.close();
    fs.close();
    fs = FileSystem.get(getProxiedFSConf());
    FileStatus status = fs.getFileStatus(path);
    if (!isLocalFS()) {
        assertEquals(status.getReplication(), 2);
        assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
    }
    assertEquals(status.getPermission(), permission);
    InputStream is = fs.open(path);
    assertEquals(is.read(), 1);
    is.close();
    fs.close();
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) InputStream(java.io.InputStream) FileSystem(org.apache.hadoop.fs.FileSystem) OutputStream(java.io.OutputStream) FileOutputStream(java.io.FileOutputStream) FsPermission(org.apache.hadoop.fs.permission.FsPermission)

Aggregations

FsPermission (org.apache.hadoop.fs.permission.FsPermission): 427
Path (org.apache.hadoop.fs.Path): 267
Test (org.junit.Test): 180
IOException (java.io.IOException): 120
FileSystem (org.apache.hadoop.fs.FileSystem): 93
Configuration (org.apache.hadoop.conf.Configuration): 89
FileStatus (org.apache.hadoop.fs.FileStatus): 87
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 52
AccessControlException (org.apache.hadoop.security.AccessControlException): 43
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 36
FileNotFoundException (java.io.FileNotFoundException): 33
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 29
File (java.io.File): 26
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 26
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 26
AclEntry (org.apache.hadoop.fs.permission.AclEntry): 25
ArrayList (java.util.ArrayList): 22
HashMap (java.util.HashMap): 19
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 16
URI (java.net.URI): 15