
Example 11 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class FileSystemContractBaseTest, method testMkdirsWithUmask.

public void testMkdirsWithUmask() throws Exception {
    if (!isS3(fs)) {
        Configuration conf = fs.getConf();
        String oldUmask = conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY);
        try {
            conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, TEST_UMASK);
            final Path dir = path("newDir");
            assertTrue(fs.mkdirs(dir, new FsPermission((short) 0777)));
            FileStatus status = fs.getFileStatus(dir);
            assertTrue(status.isDirectory());
            assertEquals((short) 0715, status.getPermission().toShort());
        } finally {
            conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, oldUmask);
        }
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), FsPermission (org.apache.hadoop.fs.permission.FsPermission)
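
The expected mode 0715 is simply the requested 0777 with the test umask applied. Below is a minimal arithmetic sketch, assuming TEST_UMASK resolves to octal 062 (the constant is defined elsewhere in FileSystemContractBaseTest and is not shown above, so treat that value as an assumption):

import org.apache.hadoop.fs.permission.FsPermission;

public class UmaskArithmeticSketch {
    public static void main(String[] args) {
        // Assumption: TEST_UMASK resolves to octal 062 (not shown in the excerpt above).
        FsPermission requested = new FsPermission((short) 0777);
        FsPermission umask = new FsPermission((short) 0062);
        // applyUMask clears the umask bits: 0777 & ~0062 = 0715
        FsPermission effective = requested.applyUMask(umask);
        System.out.println(Integer.toOctalString(effective.toShort())); // prints 715
    }
}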

Example 12 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class FileSystemTestWrapper, method create.

@Override
public FSDataOutputStream create(Path f, EnumSet<CreateFlag> createFlag, CreateOpts... opts) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException, ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
    // Need to translate the FileContext-style options into FileSystem-style
    // Permissions with umask
    CreateOpts.Perms permOpt = CreateOpts.getOpt(CreateOpts.Perms.class, opts);
    FsPermission umask = FsPermission.getUMask(fs.getConf());
    FsPermission permission = (permOpt != null) ? permOpt.getValue() : FsPermission.getFileDefault().applyUMask(umask);
    permission = permission.applyUMask(umask);
    // Overwrite
    boolean overwrite = createFlag.contains(CreateFlag.OVERWRITE);
    // bufferSize
    int bufferSize = fs.getConf().getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
    CreateOpts.BufferSize bufOpt = CreateOpts.getOpt(CreateOpts.BufferSize.class, opts);
    bufferSize = (bufOpt != null) ? bufOpt.getValue() : bufferSize;
    // replication
    short replication = fs.getDefaultReplication(f);
    CreateOpts.ReplicationFactor repOpt = CreateOpts.getOpt(CreateOpts.ReplicationFactor.class, opts);
    replication = (repOpt != null) ? repOpt.getValue() : replication;
    // blockSize
    long blockSize = fs.getDefaultBlockSize(f);
    CreateOpts.BlockSize blockOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, opts);
    blockSize = (blockOpt != null) ? blockOpt.getValue() : blockSize;
    // Progressable
    Progressable progress = null;
    CreateOpts.Progress progressOpt = CreateOpts.getOpt(CreateOpts.Progress.class, opts);
    progress = (progressOpt != null) ? progressOpt.getValue() : progress;
    return fs.create(f, permission, overwrite, bufferSize, replication, blockSize, progress);
}
Also used: CreateOpts (org.apache.hadoop.fs.Options.CreateOpts), Progressable (org.apache.hadoop.util.Progressable), FsPermission (org.apache.hadoop.fs.permission.FsPermission), BlockSize (org.apache.hadoop.fs.Options.CreateOpts.BlockSize)
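
For context, here is a caller-side sketch of the wrapper above. FileSystemTestWrapper is a test-side helper from hadoop-common's test sources, and the CreateOpts factory methods (perms, bufferSize, repFac, blockSize) are the standard ones on org.apache.hadoop.fs.Options.CreateOpts; the path and option values are hypothetical, and a local FileSystem is assumed:

import java.nio.charset.StandardCharsets;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestWrapper;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class WrapperCreateSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        FileSystemTestWrapper wrapper = new FileSystemTestWrapper(fs);
        // Hypothetical path and option values; each CreateOpts entry overrides the
        // corresponding default that create() would otherwise resolve.
        FSDataOutputStream out = wrapper.create(
            new Path("/tmp/fs-permission-demo.txt"),
            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
            CreateOpts.perms(new FsPermission((short) 0644)),
            CreateOpts.bufferSize(8192),
            CreateOpts.repFac((short) 1),
            CreateOpts.blockSize(128 * 1024 * 1024));
        out.write("hello".getBytes(StandardCharsets.UTF_8));
        out.close();
    }
}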

Example 13 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class TestTrash, method verifyTrashPermission.

/**
   * Create a set of files with different permissions; after they are moved
   * to trash, verify that each file is at the expected location in the trash
   * directory and that its permission is preserved.
   *
   * @throws IOException
   */
public static void verifyTrashPermission(FileSystem fs, Configuration conf) throws IOException {
    Path caseRoot = new Path(GenericTestUtils.getTempPath("testTrashPermission"));
    try (FileSystem fileSystem = fs) {
        Trash trash = new Trash(fileSystem, conf);
        FileSystemTestWrapper wrapper = new FileSystemTestWrapper(fileSystem);
        short[] filePermissions = { (short) 0600, (short) 0644, (short) 0660, (short) 0700, (short) 0750, (short) 0755, (short) 0775, (short) 0777 };
        for (int i = 0; i < filePermissions.length; i++) {
            // Set a different permission on each file
            FsPermission fsPermission = new FsPermission(filePermissions[i]);
            Path file = new Path(caseRoot, "file" + i);
            byte[] randomBytes = new byte[new Random().nextInt(10)];
            wrapper.writeFile(file, randomBytes);
            wrapper.setPermission(file, fsPermission);
            // Move file to trash
            trash.moveToTrash(file);
            // Verify the file was moved to trash, at the expected location
            Path trashDir = trash.getCurrentTrashDir(file);
            if (!file.isAbsolute()) {
                file = wrapper.makeQualified(file);
            }
            Path fileInTrash = Path.mergePaths(trashDir, file);
            FileStatus fstat = wrapper.getFileStatus(fileInTrash);
            assertTrue(String.format("File %s is not moved to trash", fileInTrash.toString()), wrapper.exists(fileInTrash));
            // Verify the permission did not change
            assertTrue(String.format("Expected file: %s is %s, but actual is %s", fileInTrash.toString(), fsPermission.toString(), fstat.getPermission().toString()), fstat.getPermission().equals(fsPermission));
        }
        // Verify the trash directory can be removed
        Path trashRoot = trash.getCurrentTrashDir();
        assertTrue(wrapper.delete(trashRoot, true));
    }
}
Also used: Random (java.util.Random), FsPermission (org.apache.hadoop.fs.permission.FsPermission)
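
The expected location inside the trash is computed with Path.mergePaths, which appends the second path's components onto the first. A small illustrative sketch with hypothetical paths:

import org.apache.hadoop.fs.Path;

public class MergePathsSketch {
    public static void main(String[] args) {
        // Hypothetical trash root and source path, mirroring how the test computes
        // the expected location of a trashed file.
        Path trashDir = new Path("/user/alice/.Trash/Current");
        Path original = new Path("/data/file0");
        // Prints /user/alice/.Trash/Current/data/file0
        System.out.println(Path.mergePaths(trashDir, original));
    }
}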

Example 14 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class StorageLocation, method makeBlockPoolDir.

/**
   * Create the physical directory for a block pool on the data node.
   *
   * @param blockPoolID
   *          the block pool id
   * @param conf
   *          Configuration instance to use.
   * @throws IOException on errors
   */
public void makeBlockPoolDir(String blockPoolID, Configuration conf) throws IOException {
    if (conf == null) {
        conf = new HdfsConfiguration();
    }
    LocalFileSystem localFS = FileSystem.getLocal(conf);
    FsPermission permission = new FsPermission(conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
    File data = new File(getBpURI(blockPoolID, Storage.STORAGE_DIR_CURRENT));
    try {
        DiskChecker.checkDir(localFS, new Path(data.toURI()), permission);
    } catch (IOException e) {
        DataStorage.LOG.warn("Invalid directory in: " + data.getCanonicalPath() + ": " + e.getMessage());
    }
}
Also used: Path (org.apache.hadoop.fs.Path), LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem), FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), File (java.io.File)
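
The permission itself comes from dfs.datanode.data.dir.perm (DFS_DATANODE_DATA_DIR_PERMISSION_KEY), whose default is "700". A minimal sketch, assuming a hypothetical local directory, of how that string becomes an FsPermission (the same String constructor used above) and how DiskChecker applies it:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DiskChecker;

public class DataDirPermissionSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        LocalFileSystem localFS = FileSystem.getLocal(conf);
        // "700" is the documented default of dfs.datanode.data.dir.perm.
        FsPermission perm = new FsPermission("700");
        // Hypothetical local directory; checkDir creates it if missing and
        // verifies it is accessible with the given permission.
        DiskChecker.checkDir(localFS, new Path("/tmp/bp-demo/current"), perm);
    }
}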

Example 15 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class FSPermissionChecker, method hasPermission.

// Return whether access is permitted. Note that it neither requires a path
// nor throws, so the caller can build the path only if required for an
// exception. This is very beneficial for subaccess checks!
private boolean hasPermission(INodeAttributes inode, FsAction access) {
    if (inode == null) {
        return true;
    }
    final FsPermission mode = inode.getFsPermission();
    final AclFeature aclFeature = inode.getAclFeature();
    if (aclFeature != null) {
        // It's possible that the inode has a default ACL but no access ACL.
        int firstEntry = aclFeature.getEntryAt(0);
        if (AclEntryStatusFormat.getScope(firstEntry) == AclEntryScope.ACCESS) {
            return hasAclPermission(inode, access, mode, aclFeature);
        }
    }
    final FsAction checkAction;
    if (getUser().equals(inode.getUserName())) {
        //user class
        checkAction = mode.getUserAction();
    } else if (isMemberOfGroup(inode.getGroupName())) {
        //group class
        checkAction = mode.getGroupAction();
    } else {
        //other class
        checkAction = mode.getOtherAction();
    }
    return checkAction.implies(access);
}
Also used: FsAction (org.apache.hadoop.fs.permission.FsAction), FsPermission (org.apache.hadoop.fs.permission.FsPermission)
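
The final comparison reduces to FsAction.implies: the action bits of whichever class matched (user, group, or other) must cover the requested access. A small sketch against a 0750 mode:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class ImpliesSketch {
    public static void main(String[] args) {
        // 0750: user rwx, group r-x, other ---
        FsPermission mode = new FsPermission((short) 0750);
        System.out.println(mode.getUserAction().implies(FsAction.READ_WRITE));  // true  (rwx covers rw)
        System.out.println(mode.getGroupAction().implies(FsAction.WRITE));      // false (r-x lacks w)
        System.out.println(mode.getOtherAction().implies(FsAction.READ));       // false (--- lacks r)
    }
}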

Aggregations

FsPermission (org.apache.hadoop.fs.permission.FsPermission): 427
Path (org.apache.hadoop.fs.Path): 267
Test (org.junit.Test): 180
IOException (java.io.IOException): 120
FileSystem (org.apache.hadoop.fs.FileSystem): 93
Configuration (org.apache.hadoop.conf.Configuration): 89
FileStatus (org.apache.hadoop.fs.FileStatus): 87
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 52
AccessControlException (org.apache.hadoop.security.AccessControlException): 43
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 36
FileNotFoundException (java.io.FileNotFoundException): 33
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 29
File (java.io.File): 26
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 26
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 26
AclEntry (org.apache.hadoop.fs.permission.AclEntry): 25
ArrayList (java.util.ArrayList): 22
HashMap (java.util.HashMap): 19
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 16
URI (java.net.URI): 15