
Example 46 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From class FSNamesystem, method removeDefaultAcl:

void removeDefaultAcl(final String src) throws IOException {
    final String operationName = "removeDefaultAcl";
    HdfsFileStatus auditStat = null;
    // Check the operation category before taking the lock, then re-check
    // under the write lock, since the NameNode's HA state can change while
    // the thread waits for the lock.
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
        auditStat = FSDirAclOp.removeDefaultAcl(dir, src);
    } catch (AccessControlException e) {
        // Record the denied attempt in the audit log, then rethrow to the caller.
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    // Sync the edit log outside the write lock, then audit the success.
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
}
Also used: HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
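
The audit pattern above is driven by a client call through the FileSystem API. As a rough sketch of the caller's side (the path here is hypothetical), a client without permission on the path sees the AccessControlException that removeDefaultAcl rethrows after logging the failed audit event:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class RemoveDefaultAclClient {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical directory; must already exist in HDFS.
        Path dir = new Path("/data/reports");
        try {
            // On HDFS this is routed to FSNamesystem.removeDefaultAcl
            // on the NameNode.
            fs.removeDefaultAcl(dir);
        } catch (AccessControlException e) {
            // The NameNode has already recorded a failed audit event
            // (logAuditEvent(false, ...)) before rethrowing.
            System.err.println("Permission denied: " + e.getMessage());
        }
    }
}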

Example 47 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From class FSNamesystem, method truncate:

/**
   * Truncate file to a lower length.
   * Truncate cannot be reverted / recovered from as it causes data loss.
   * Truncation at block boundary is atomic, otherwise it requires
   * block recovery to truncate the last block of the file.
   *
   * @return true if client does not need to wait for block recovery,
   *         false if client needs to wait for block recovery.
   */
boolean truncate(String src, long newLength, String clientName, String clientMachine, long mtime) throws IOException, UnresolvedLinkException {
    final String operationName = "truncate";
    requireEffectiveLayoutVersionForFeature(Feature.TRUNCATE);
    final FSDirTruncateOp.TruncateResult r;
    try {
        NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src={} newLength={}", src, newLength);
        if (newLength < 0) {
            throw new HadoopIllegalArgumentException("Cannot truncate to a negative file size: " + newLength + ".");
        }
        final FSPermissionChecker pc = getPermissionChecker();
        checkOperation(OperationCategory.WRITE);
        writeLock();
        BlocksMapUpdateInfo toRemoveBlocks = new BlocksMapUpdateInfo();
        try {
            checkOperation(OperationCategory.WRITE);
            checkNameNodeSafeMode("Cannot truncate for " + src);
            r = FSDirTruncateOp.truncate(this, src, newLength, clientName, clientMachine, mtime, toRemoveBlocks, pc);
        } finally {
            writeUnlock(operationName);
        }
        getEditLog().logSync();
        if (!toRemoveBlocks.getToDeleteList().isEmpty()) {
            removeBlocks(toRemoveBlocks);
            toRemoveBlocks.clear();
        }
        logAuditEvent(true, operationName, src, null, r.getFileStatus());
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    }
    return r.getResult();
}
Also used: BlocksMapUpdateInfo (org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo), HadoopIllegalArgumentException (org.apache.hadoop.HadoopIllegalArgumentException), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
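
On the client side, the boolean documented in the javadoc above surfaces through FileSystem.truncate: true means the truncation landed on a block boundary and completed immediately, false means the NameNode scheduled block recovery and the file stays busy until it finishes. A minimal sketch, with a hypothetical path and length; polling isFileClosed is one simple way a caller might wait, not the only one:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TruncateClient {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical path and target length.
        Path file = new Path("/data/events.log");
        long newLength = 4096;
        DistributedFileSystem dfs =
                (DistributedFileSystem) file.getFileSystem(conf);
        boolean done = dfs.truncate(file, newLength);
        if (!done) {
            // The new length fell inside a block, so block recovery is
            // in progress; wait until the lease is released and the
            // file is closed again before reusing it.
            while (!dfs.isFileClosed(file)) {
                Thread.sleep(1000);
            }
        }
    }
}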

Example 48 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From class FSNamesystem, method setTimes:

/**
   * Stores the modification and access time for this inode.
   * The access time is precise up to an hour. The transaction, if needed, is
   * written to the edits log but is not flushed.
   */
void setTimes(String src, long mtime, long atime) throws IOException {
    final String operationName = "setTimes";
    HdfsFileStatus auditStat;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot set times " + src);
        auditStat = FSDirAttrOp.setTimes(dir, src, mtime, atime);
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
}
Also used: HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
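
A caller reaches this method through FileSystem.setTimes, passing millisecond timestamps; a value of -1 for either argument leaves that time unchanged. A minimal sketch with a hypothetical path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class SetTimesClient {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/data/archive.bin"); // hypothetical path
        long now = System.currentTimeMillis();
        try {
            // Set the modification time to now; -1 leaves the
            // access time unchanged.
            fs.setTimes(file, now, -1);
        } catch (AccessControlException e) {
            // Mirrors the failed audit event logged by FSNamesystem.setTimes.
            System.err.println("setTimes denied: " + e.getMessage());
        }
    }
}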

Example 49 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From class FSNamesystem, method createEncryptionZone:

/**
   * Create an encryption zone on directory src using the specified key.
   *
   * @param src     the path of a directory which will be the root of the
   *                encryption zone. The directory must be empty.
   * @param keyName name of a key which must be present in the configured
   *                KeyProvider.
   * @throws AccessControlException  if the caller is not the superuser.
   * @throws UnresolvedLinkException if the path can't be resolved.
   * @throws SafeModeException       if the Namenode is in safe mode.
   */
void createEncryptionZone(final String src, final String keyName, boolean logRetryCache) throws IOException, UnresolvedLinkException, SafeModeException, AccessControlException {
    final String operationName = "createEncryptionZone";
    try {
        Metadata metadata = FSDirEncryptionZoneOp.ensureKeyIsInitialized(dir, keyName, src);
        checkSuperuserPrivilege();
        FSPermissionChecker pc = getPermissionChecker();
        checkOperation(OperationCategory.WRITE);
        final HdfsFileStatus resultingStat;
        writeLock();
        try {
            checkSuperuserPrivilege();
            checkOperation(OperationCategory.WRITE);
            checkNameNodeSafeMode("Cannot create encryption zone on " + src);
            resultingStat = FSDirEncryptionZoneOp.createEncryptionZone(dir, src, pc, metadata.getCipher(), keyName, logRetryCache);
        } finally {
            writeUnlock(operationName);
        }
        getEditLog().logSync();
        logAuditEvent(true, operationName, src, null, resultingStat);
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    }
}
Also used: HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), Metadata (org.apache.hadoop.crypto.key.KeyProvider.Metadata), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
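
Encryption zones are created through the HdfsAdmin client API rather than FileSystem directly, and the key must already exist in the configured KeyProvider (for example, created beforehand with the hadoop key create command). A hedged sketch, assuming a hypothetical key name and an empty target directory:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.security.AccessControlException;

public class CreateZoneClient {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        URI nameNodeUri = FileSystem.getDefaultUri(conf);
        HdfsAdmin admin = new HdfsAdmin(nameNodeUri, conf);
        Path zoneRoot = new Path("/secure"); // hypothetical; must be empty
        String keyName = "secureKey";        // hypothetical; must exist in the KeyProvider
        try {
            admin.createEncryptionZone(zoneRoot, keyName);
        } catch (AccessControlException e) {
            // Only the superuser may create zones, per the javadoc above.
            System.err.println("createEncryptionZone denied: " + e.getMessage());
        }
    }
}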

Example 50 with AccessControlException

Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.

From class FSNamesystem, method setAcl:

void setAcl(final String src, List<AclEntry> aclSpec) throws IOException {
    final String operationName = "setAcl";
    HdfsFileStatus auditStat = null;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot set ACL on " + src);
        auditStat = FSDirAclOp.setAcl(dir, src, aclSpec);
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
}
Also used: HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)
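
On the caller's side, the aclSpec is a full replacement ACL built with AclEntry.Builder; it must include the base user, group, and other entries for compatibility with permission bits. A minimal sketch with a hypothetical path and group name:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class SetAclClient {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path dir = new Path("/data/shared"); // hypothetical path
        // Base user/group/other entries plus one named group entry;
        // "analysts" is a hypothetical group.
        List<AclEntry> aclSpec = Arrays.asList(
            new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
                .setType(AclEntryType.USER).setPermission(FsAction.ALL).build(),
            new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
                .setType(AclEntryType.GROUP).setName("analysts")
                .setPermission(FsAction.READ_EXECUTE).build(),
            new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
                .setType(AclEntryType.GROUP).setPermission(FsAction.READ_EXECUTE).build(),
            new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
                .setType(AclEntryType.OTHER).setPermission(FsAction.NONE).build());
        // Replaces the entire ACL; throws AccessControlException if the
        // caller is neither the owner nor the superuser.
        fs.setAcl(dir, aclSpec);
    }
}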

Aggregations

AccessControlException (org.apache.hadoop.security.AccessControlException): 165 uses
Path (org.apache.hadoop.fs.Path): 72
IOException (java.io.IOException): 69
Test (org.junit.Test): 60
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 44
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 41
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 35
FileSystem (org.apache.hadoop.fs.FileSystem): 33
Configuration (org.apache.hadoop.conf.Configuration): 25
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 21
FileNotFoundException (java.io.FileNotFoundException): 19
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 14
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 11
HashMap (java.util.HashMap): 10
FileStatus (org.apache.hadoop.fs.FileStatus): 10
ClientResponse (com.sun.jersey.api.client.ClientResponse): 9
PrivilegedAction (java.security.PrivilegedAction): 9
ParentNotDirectoryException (org.apache.hadoop.fs.ParentNotDirectoryException): 9
RESTResponse (org.apache.ranger.admin.client.datatype.RESTResponse): 9