Example 11 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From the class FSNamesystem, method mkdirs:

/**
   * Create all the necessary directories
   */
boolean mkdirs(String src, PermissionStatus permissions, boolean createParent) throws IOException {
    final String operationName = "mkdirs";
    HdfsFileStatus auditStat = null;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot create directory " + src);
        auditStat = FSDirMkdirOp.mkdirs(this, src, permissions, createParent);
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
    return true;
}
Also used: HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
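
On the client side this pattern surfaces as a thrown AccessControlException. Below is a minimal sketch of triggering it through FileSystem#mkdirs; the fs.defaultFS URI and the /restricted path are hypothetical placeholders, and the catch block mirrors the failed-audit branch above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;

public class MkdirsExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical NameNode address; adjust to your cluster.
        conf.set("fs.defaultFS", "hdfs://namenode:8020");
        try (FileSystem fs = FileSystem.get(conf)) {
            // mkdirs creates missing parents, like mkdir -p; on HDFS the
            // NameNode runs the FSNamesystem#mkdirs shown above and
            // records an audit event for the call.
            fs.mkdirs(new Path("/restricted/newdir"), FsPermission.getDirDefault());
        } catch (AccessControlException e) {
            // Thrown when the caller lacks write permission on the parent;
            // the NameNode logged a failed "mkdirs" audit event before rethrowing.
            System.err.println("Permission denied: " + e.getMessage());
        }
    }
}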

Example 12 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From the class FSNamesystem, method removeDefaultAcl:

void removeDefaultAcl(final String src) throws IOException {
    final String operationName = "removeDefaultAcl";
    HdfsFileStatus auditStat = null;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
        auditStat = FSDirAclOp.removeDefaultAcl(dir, src);
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
}
Also used: HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
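
A matching client-side sketch for the ACL call uses FileSystem#removeDefaultAcl, which DistributedFileSystem implements; the URI and path below are hypothetical placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class RemoveDefaultAclExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://namenode:8020"); // hypothetical address
        try (FileSystem fs = FileSystem.get(conf)) {
            // Strips all default ACL entries from the directory; access ACL
            // entries are left untouched. Requires ownership or superuser.
            fs.removeDefaultAcl(new Path("/data/shared"));
        } catch (AccessControlException e) {
            // Recorded as a failed "removeDefaultAcl" audit event on the NameNode.
            System.err.println("Permission denied: " + e.getMessage());
        }
    }
}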

Example 13 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From the class FSNamesystem, method truncate:

/**
   * Truncate file to a lower length.
   * Truncate cannot be reverted / recovered from as it causes data loss.
   * Truncation at block boundary is atomic, otherwise it requires
   * block recovery to truncate the last block of the file.
   *
   * @return true if client does not need to wait for block recovery,
   *         false if client needs to wait for block recovery.
   */
boolean truncate(String src, long newLength, String clientName, String clientMachine, long mtime) throws IOException, UnresolvedLinkException {
    final String operationName = "truncate";
    requireEffectiveLayoutVersionForFeature(Feature.TRUNCATE);
    final FSDirTruncateOp.TruncateResult r;
    try {
        NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src={} newLength={}", src, newLength);
        if (newLength < 0) {
            throw new HadoopIllegalArgumentException("Cannot truncate to a negative file size: " + newLength + ".");
        }
        final FSPermissionChecker pc = getPermissionChecker();
        checkOperation(OperationCategory.WRITE);
        writeLock();
        BlocksMapUpdateInfo toRemoveBlocks = new BlocksMapUpdateInfo();
        try {
            checkOperation(OperationCategory.WRITE);
            checkNameNodeSafeMode("Cannot truncate for " + src);
            r = FSDirTruncateOp.truncate(this, src, newLength, clientName, clientMachine, mtime, toRemoveBlocks, pc);
        } finally {
            writeUnlock(operationName);
        }
        getEditLog().logSync();
        if (!toRemoveBlocks.getToDeleteList().isEmpty()) {
            removeBlocks(toRemoveBlocks);
            toRemoveBlocks.clear();
        }
        logAuditEvent(true, operationName, src, null, r.getFileStatus());
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    }
    return r.getResult();
}
Also used: BlocksMapUpdateInfo (org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo), HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
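
The boolean contract described in the javadoc is visible from the client through FileSystem#truncate. A minimal sketch, with a hypothetical URI, file path, and target length:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class TruncateExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://namenode:8020"); // hypothetical address
        try (FileSystem fs = FileSystem.get(conf)) {
            Path file = new Path("/data/events.log");     // hypothetical file
            // Returns true if the truncation finished immediately (cut fell on
            // a block boundary); false means block recovery is in progress and
            // the file cannot be reopened for writing until it completes.
            boolean done = fs.truncate(file, 1024L);
            if (!done) {
                System.out.println("Waiting for block recovery on " + file);
            }
        } catch (AccessControlException e) {
            System.err.println("Permission denied: " + e.getMessage());
        }
    }
}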

Example 14 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From the class FSNamesystem, method setTimes:

/**
   * Stores the modification and access time for this inode.
   * The access time is precise up to an hour. The transaction, if needed, is
   * written to the edits log but is not flushed.
   */
void setTimes(String src, long mtime, long atime) throws IOException {
    final String operationName = "setTimes";
    HdfsFileStatus auditStat;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot set times " + src);
        auditStat = FSDirAttrOp.setTimes(dir, src, mtime, atime);
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
}
Also used: HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
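
From the client, the same operation is FileSystem#setTimes; passing -1 for either timestamp leaves that value unchanged. A small sketch with hypothetical names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class SetTimesExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://namenode:8020"); // hypothetical address
        try (FileSystem fs = FileSystem.get(conf)) {
            long now = System.currentTimeMillis();
            // mtime and atime are millisecond epoch values; -1 here keeps the
            // existing access time while updating the modification time.
            fs.setTimes(new Path("/data/report.csv"), now, -1);
        } catch (AccessControlException e) {
            // Logged as a failed "setTimes" audit event by FSNamesystem.
            System.err.println("Permission denied: " + e.getMessage());
        }
    }
}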

Example 15 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From the class FSNamesystem, method createEncryptionZone:

/**
   * Create an encryption zone on directory src using the specified key.
   *
   * @param src     the path of a directory which will be the root of the
   *                encryption zone. The directory must be empty.
   * @param keyName name of a key which must be present in the configured
   *                KeyProvider.
   * @throws AccessControlException  if the caller is not the superuser.
   * @throws UnresolvedLinkException if the path can't be resolved.
   * @throws SafeModeException       if the Namenode is in safe mode.
   */
void createEncryptionZone(final String src, final String keyName, boolean logRetryCache) throws IOException, UnresolvedLinkException, SafeModeException, AccessControlException {
    final String operationName = "createEncryptionZone";
    try {
        Metadata metadata = FSDirEncryptionZoneOp.ensureKeyIsInitialized(dir, keyName, src);
        checkSuperuserPrivilege();
        FSPermissionChecker pc = getPermissionChecker();
        checkOperation(OperationCategory.WRITE);
        final HdfsFileStatus resultingStat;
        writeLock();
        try {
            checkSuperuserPrivilege();
            checkOperation(OperationCategory.WRITE);
            checkNameNodeSafeMode("Cannot create encryption zone on " + src);
            resultingStat = FSDirEncryptionZoneOp.createEncryptionZone(dir, src, pc, metadata.getCipher(), keyName, logRetryCache);
        } finally {
            writeUnlock(operationName);
        }
        getEditLog().logSync();
        logAuditEvent(true, operationName, src, null, resultingStat);
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    }
}
Also used: HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), Metadata (org.apache.hadoop.crypto.key.KeyProvider.Metadata), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
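
Clients do not call FSNamesystem directly; encryption zones are created through the HdfsAdmin client API, which requires superuser privilege just as the server-side checkSuperuserPrivilege calls show. A sketch assuming a hypothetical URI, directory, and key name (the key must already exist in the configured KeyProvider, and the directory must be empty):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.security.AccessControlException;

public class CreateEncryptionZoneExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // HdfsAdmin exposes HDFS-specific administrative operations;
        // the URI and key name here are hypothetical.
        HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
        try {
            // Marks /secure/zone as an encryption zone rooted at that
            // directory, encrypting all files created under it with "myKey".
            admin.createEncryptionZone(new Path("/secure/zone"), "myKey");
        } catch (AccessControlException e) {
            // Only the HDFS superuser may create encryption zones; the
            // NameNode audit-logs the failed "createEncryptionZone" attempt.
            System.err.println("Superuser privilege required: " + e.getMessage());
        }
    }
}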

Aggregations

Class (package): occurrences

AccessControlException (org.apache.hadoop.security.AccessControlException): 128
Test (org.junit.Test): 59
Path (org.apache.hadoop.fs.Path): 53
IOException (java.io.IOException): 52
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 35
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 32
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 22
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 21
FileSystem (org.apache.hadoop.fs.FileSystem): 19
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 14
Configuration (org.apache.hadoop.conf.Configuration): 11
FileNotFoundException (java.io.FileNotFoundException): 10
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 8
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 7
FileStatus (org.apache.hadoop.fs.FileStatus): 6
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 6
Text (org.apache.hadoop.io.Text): 5
InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken): 5
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 5
ArrayList (java.util.ArrayList): 4