Example 51 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

The class FSNamesystem, method modifyAclEntries.

void modifyAclEntries(final String src, List<AclEntry> aclSpec) throws IOException {
    final String operationName = "modifyAclEntries";
    HdfsFileStatus auditStat = null;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot modify ACL entries on " + src);
        auditStat = FSDirAclOp.modifyAclEntries(dir, src, aclSpec);
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
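
Below is a minimal client-side sketch (not part of the Hadoop source above) of how this server path is typically reached and how the rethrown AccessControlException surfaces to the caller. It uses the public FileSystem#modifyAclEntries API; the path and the ACL entry for user "alice" are hypothetical.

import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class ModifyAclEntriesClient {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Grant read/write access to a (hypothetical) user "alice".
        AclEntry entry = new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.USER)
            .setName("alice")
            .setPermission(FsAction.READ_WRITE)
            .build();
        try {
            // On HDFS this reaches FSNamesystem#modifyAclEntries via ClientProtocol.
            fs.modifyAclEntries(new Path("/data/reports"), Collections.singletonList(entry));
        } catch (AccessControlException ace) {
            // The NameNode logged a failed "modifyAclEntries" audit event before rethrowing.
            System.err.println("Permission denied: " + ace.getMessage());
        } finally {
            fs.close();
        }
    }
}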

Example 52 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

The class FSNamesystem, method setOwner.

/**
   * Set owner for an existing file.
   * @throws IOException
   */
void setOwner(String src, String username, String group) throws IOException {
    final String operationName = "setOwner";
    HdfsFileStatus auditStat;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot set owner for " + src);
        auditStat = FSDirAttrOp.setOwner(dir, src, username, group);
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
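
As a companion to the server code above, here is a minimal client sketch showing how the AccessControlException propagates to the caller. Note that changing the owner is a superuser operation on HDFS, so this is the common way to hit the failure branch; the path and the owner/group names are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class SetOwnerClient {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        try {
            // Triggers ClientProtocol#setOwner, which lands in FSNamesystem#setOwner.
            fs.setOwner(new Path("/data/reports"), "etl", "analytics");
        } catch (AccessControlException ace) {
            // Thrown when the caller is not the HDFS superuser; the NameNode
            // records a failed "setOwner" audit event before rethrowing.
            System.err.println("setOwner denied: " + ace.getMessage());
        } finally {
            fs.close();
        }
    }
}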

Example 53 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

The class FSNamesystem, method getContentSummary.

/**
   * Get the content summary for a specific file/dir.
   *
   * @param src The string representation of the path to the file
   *
   * @throws AccessControlException if access is denied
   * @throws UnresolvedLinkException if a symlink is encountered.
   * @throws FileNotFoundException if no file exists
   * @throws StandbyException
   * @throws IOException for issues with writing to the audit log
   *
   * @return object containing information regarding the file
   *         or null if file not found
   */
ContentSummary getContentSummary(final String src) throws IOException {
    checkOperation(OperationCategory.READ);
    final String operationName = "contentSummary";
    readLock();
    boolean success = true;
    ContentSummary cs;
    try {
        checkOperation(OperationCategory.READ);
        cs = FSDirStatAndListingOp.getContentSummary(dir, src);
    } catch (AccessControlException ace) {
        success = false;
        logAuditEvent(success, operationName, src);
        throw ace;
    } finally {
        readUnlock(operationName);
    }
    logAuditEvent(success, operationName, src);
    return cs;
}
Also used : ContentSummary(org.apache.hadoop.fs.ContentSummary) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
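
A minimal client sketch of the corresponding read path, assuming the standard FileSystem#getContentSummary API; the path is hypothetical. ContentSummary carries the counts the NameNode computed under its read lock.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class ContentSummaryClient {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        try {
            // Reaches FSNamesystem#getContentSummary on HDFS.
            ContentSummary cs = fs.getContentSummary(new Path("/data"));
            System.out.printf("files=%d dirs=%d bytes=%d%n",
                cs.getFileCount(), cs.getDirectoryCount(), cs.getLength());
        } catch (AccessControlException ace) {
            // Audit-logged as a failed "contentSummary" event, then rethrown.
            System.err.println("contentSummary denied: " + ace.getMessage());
        } finally {
            fs.close();
        }
    }
}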

Example 54 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

The class FSNamesystem, method getBlockLocations.

/**
   * Get block locations within the specified range.
   * @see ClientProtocol#getBlockLocations(String, long, long)
   */
LocatedBlocks getBlockLocations(String clientMachine, String srcArg, long offset, long length) throws IOException {
    final String operationName = "open";
    checkOperation(OperationCategory.READ);
    GetBlockLocationsResult res = null;
    FSPermissionChecker pc = getPermissionChecker();
    readLock();
    try {
        checkOperation(OperationCategory.READ);
        res = FSDirStatAndListingOp.getBlockLocations(dir, pc, srcArg, offset, length, true);
        if (isInSafeMode()) {
            for (LocatedBlock b : res.blocks.getLocatedBlocks()) {
                // if safemode & no block locations yet then throw safemodeException
                if ((b.getLocations() == null) || (b.getLocations().length == 0)) {
                    SafeModeException se = newSafemodeException("Zero blocklocations for " + srcArg);
                    if (haEnabled && haContext != null && haContext.getState().getServiceState() == HAServiceState.ACTIVE) {
                        throw new RetriableException(se);
                    } else {
                        throw se;
                    }
                }
            }
        }
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, srcArg);
        throw e;
    } finally {
        readUnlock(operationName);
    }
    logAuditEvent(true, operationName, srcArg);
    if (!isInSafeMode() && res.updateAccessTime()) {
        String src = srcArg;
        writeLock();
        final long now = now();
        try {
            checkOperation(OperationCategory.WRITE);
            /**
         * Resolve the path again and update the atime only when the file
         * exists.
         *
         * XXX: Races can still occur even after resolving the path again.
         * For example:
         *
         * <ul>
         *   <li>Get the block location for "/a/b"</li>
         *   <li>Rename "/a/b" to "/c/b"</li>
         *   <li>The second resolution still points to "/a/b", which is
         *   wrong.</li>
         * </ul>
         *
         * The behavior is incorrect but consistent with the one before
         * HDFS-7463. A better fix is to change the edit log of SetTime to
         * use inode id instead of a path.
         */
            final INodesInPath iip = dir.resolvePath(pc, srcArg, DirOp.READ);
            src = iip.getPath();
            INode inode = iip.getLastINode();
            boolean updateAccessTime = inode != null && now > inode.getAccessTime() + dir.getAccessTimePrecision();
            if (!isInSafeMode() && updateAccessTime) {
                boolean changed = FSDirAttrOp.setTimes(dir, iip, -1, now, false);
                if (changed) {
                    getEditLog().logTimes(src, -1, now);
                }
            }
        } catch (Throwable e) {
            LOG.warn("Failed to update the access time of " + src, e);
        } finally {
            writeUnlock(operationName);
        }
    }
    LocatedBlocks blocks = res.blocks;
    sortLocatedBlocks(clientMachine, blocks);
    return blocks;
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException) RetriableException(org.apache.hadoop.ipc.RetriableException)
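
FSNamesystem#getBlockLocations backs ClientProtocol#getBlockLocations, which clients reach when opening files or asking for block placement. A minimal sketch using the public FileSystem#getFileBlockLocations API follows; the file path and the 1 MB range are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class BlockLocationsClient {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        try {
            // Asks the NameNode for block locations covering bytes [0, 1 MB).
            BlockLocation[] locs = fs.getFileBlockLocations(
                new Path("/data/big.log"), 0L, 1024L * 1024L);
            for (BlockLocation loc : locs) {
                System.out.println(String.join(",", loc.getHosts())
                    + " offset=" + loc.getOffset() + " len=" + loc.getLength());
            }
        } catch (AccessControlException ace) {
            // Audit-logged as a failed "open" on the NameNode, then rethrown.
            System.err.println("open denied: " + ace.getMessage());
        } finally {
            fs.close();
        }
    }
}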

Example 55 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

The class FSNamesystem, method unsetStoragePolicy.

/**
   * Unset the storage policy set for a given file or directory.
   *
   * @param src file/directory path
   */
void unsetStoragePolicy(String src) throws IOException {
    final String operationName = "unsetStoragePolicy";
    HdfsFileStatus auditStat;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot unset storage policy for " + src);
        auditStat = FSDirAttrOp.unsetStoragePolicy(dir, blockManager, src);
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
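
A minimal client sketch for this path, assuming a Hadoop release that exposes FileSystem#unsetStoragePolicy (2.9+); the path is hypothetical. Unsetting the policy makes the file or directory inherit its parent's storage policy again.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class UnsetStoragePolicyClient {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        try {
            // Reaches FSNamesystem#unsetStoragePolicy on HDFS.
            fs.unsetStoragePolicy(new Path("/data/cold"));
        } catch (AccessControlException ace) {
            // Audit-logged as a failed "unsetStoragePolicy" event, then rethrown.
            System.err.println("unsetStoragePolicy denied: " + ace.getMessage());
        } finally {
            fs.close();
        }
    }
}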

Aggregations

AccessControlException (org.apache.hadoop.security.AccessControlException): 165 usages
Path (org.apache.hadoop.fs.Path): 72 usages
IOException (java.io.IOException): 69 usages
Test (org.junit.Test): 60 usages
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 44 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 41 usages
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 35 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 33 usages
Configuration (org.apache.hadoop.conf.Configuration): 25 usages
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 21 usages
FileNotFoundException (java.io.FileNotFoundException): 19 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 14 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13 usages
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 11 usages
HashMap (java.util.HashMap): 10 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 10 usages
ClientResponse (com.sun.jersey.api.client.ClientResponse): 9 usages
PrivilegedAction (java.security.PrivilegedAction): 9 usages
ParentNotDirectoryException (org.apache.hadoop.fs.ParentNotDirectoryException): 9 usages
RESTResponse (org.apache.ranger.admin.client.datatype.RESTResponse): 9 usages