Search in sources:

Example 6 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From class FSNamesystem, method startFileInt.

/**
 * Internal implementation of the client "create file" operation.
 * <p>
 * Resolves and validates {@code src}, verifies replication and block size,
 * resolves encryption-zone key material when a KeyProvider is configured,
 * and then creates the file entry under the FSDirectory write lock.
 * The edit log is synced in the {@code finally} block, outside the
 * namesystem write lock, unless no edit could have been generated.
 *
 * @param src path of the file to create; must be a valid, non-reserved name
 * @param permissions permissions to apply to the new file
 * @param holder client lease holder name
 * @param clientMachine client host, used for replica placement and logging
 * @param flag create semantics (CREATE, OVERWRITE, ...)
 * @param createParent whether missing parent directories should be created
 * @param replication requested replication factor
 * @param blockSize requested block size; must be >= the configured minimum
 * @param supportedVersions crypto protocol versions the client supports
 * @param logRetryCache whether to record this call in the retry cache
 * @return status of the newly created file
 * @throws IOException on invalid arguments, safe mode, or directory errors
 */
private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, String holder, String clientMachine, EnumSet<CreateFlag> flag, boolean createParent, short replication, long blockSize, CryptoProtocolVersion[] supportedVersions, boolean logRetryCache) throws IOException {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
        // Guarded so the (potentially large) argument stringification only
        // happens when DEBUG logging is actually enabled.
        StringBuilder builder = new StringBuilder();
        builder.append("DIR* NameSystem.startFile: src=").append(src).append(", holder=").append(holder).append(", clientMachine=").append(clientMachine).append(", createParent=").append(createParent).append(", replication=").append(replication).append(", createFlag=").append(flag).append(", blockSize=").append(blockSize).append(", supportedVersions=").append(Arrays.toString(supportedVersions));
        NameNode.stateChangeLog.debug(builder.toString());
    }
    // Reject malformed paths and reserved names (except the .reserved/raw
    // and .reserved/.inodes forms, which are legal to create under).
    if (!DFSUtil.isValidName(src) || FSDirectory.isExactReservedName(src) || (FSDirectory.isReservedName(src) && !FSDirectory.isReservedRawName(src) && !FSDirectory.isReservedInodesName(src))) {
        throw new InvalidPathException(src);
    }
    FSPermissionChecker pc = getPermissionChecker();
    INodesInPath iip = null;
    // until we do something that might create edits
    boolean skipSync = true;
    HdfsFileStatus stat = null;
    BlocksMapUpdateInfo toRemoveBlocks = null;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        // Re-check under the lock: HA state may have changed since above.
        checkOperation(OperationCategory.WRITE);
        // BUGFIX: added the missing space so the message reads
        // "Cannot create file /a/b" instead of "Cannot create file/a/b".
        checkNameNodeSafeMode("Cannot create file " + src);
        iip = FSDirWriteFileOp.resolvePathForStartFile(dir, pc, src, flag, createParent);
        if (!FSDirErasureCodingOp.hasErasureCodingPolicy(this, iip)) {
            blockManager.verifyReplication(src, replication, clientMachine);
        }
        if (blockSize < minBlockSize) {
            throw new IOException("Specified block size is less than configured" + " minimum value (" + DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY + "): " + blockSize + " < " + minBlockSize);
        }
        FileEncryptionInfo feInfo = null;
        if (provider != null) {
            EncryptionKeyInfo ezInfo = FSDirEncryptionZoneOp.getEncryptionKeyInfo(this, iip, supportedVersions);
            // and/or EZ has not mutated
            if (ezInfo != null) {
                // Key generation may have dropped/reacquired state; re-verify
                // operation category and re-resolve the path before using it.
                checkOperation(OperationCategory.WRITE);
                iip = FSDirWriteFileOp.resolvePathForStartFile(dir, pc, iip.getPath(), flag, createParent);
                feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(dir, iip, ezInfo);
            }
        }
        // following might generate edits
        skipSync = false;
        toRemoveBlocks = new BlocksMapUpdateInfo();
        dir.writeLock();
        try {
            stat = FSDirWriteFileOp.startFile(this, iip, permissions, holder, clientMachine, flag, createParent, replication, blockSize, feInfo, toRemoveBlocks, logRetryCache);
        } catch (IOException e) {
            // A StandbyException means no edit was written; skip the sync.
            skipSync = e instanceof StandbyException;
            throw e;
        } finally {
            dir.writeUnlock();
        }
    } finally {
        writeUnlock("create");
        // They need to be sync'ed even when an exception was thrown.
        if (!skipSync) {
            getEditLog().logSync();
            if (toRemoveBlocks != null) {
                // Blocks of an overwritten file are removed after logSync so
                // block deletion never races ahead of the persisted edit.
                removeBlocks(toRemoveBlocks);
                toRemoveBlocks.clear();
            }
        }
    }
    return stat;
}
Also used : BlocksMapUpdateInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo) StandbyException(org.apache.hadoop.ipc.StandbyException) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) IOException(java.io.IOException) EncryptionKeyInfo(org.apache.hadoop.hdfs.server.namenode.FSDirEncryptionZoneOp.EncryptionKeyInfo) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) InvalidPathException(org.apache.hadoop.fs.InvalidPathException)

Example 7 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From class FSNamesystem, method removeDefaultAcl.

/**
 * Removes all default ACL entries from the inode at {@code src}.
 * The operation is audited on both success and access-denied failure,
 * and the resulting edit is synced outside the namesystem write lock.
 *
 * @param src path whose default ACL entries are removed
 * @throws IOException if the path cannot be resolved, the NameNode is in
 *         safe mode, or the caller lacks permission
 */
void removeDefaultAcl(final String src) throws IOException {
    final String operationName = "removeDefaultAcl";
    HdfsFileStatus auditStat = null;
    // Fail fast if this NameNode cannot currently serve writes.
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        // Re-check under the lock: HA state may have changed since above.
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
        auditStat = FSDirAclOp.removeDefaultAcl(dir, src);
    } catch (AccessControlException e) {
        // Audit the denied attempt before rethrowing to the caller.
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    // Sync the edit log outside the write lock to avoid blocking other ops.
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)

Example 8 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From class FSNamesystem, method setTimes.

/**
   * Stores the modification and access time for this inode.
   * The access time is precise up to an hour. The transaction, if needed, is
   * written to the edits log but is not flushed until after the write lock
   * is released (see the {@code logSync()} call below).
   *
   * @param src path of the inode to update
   * @param mtime new modification time, in milliseconds since the epoch
   * @param atime new access time, in milliseconds since the epoch
   * @throws IOException if the path cannot be resolved, the NameNode is in
   *         safe mode, or the caller lacks permission
   */
void setTimes(String src, long mtime, long atime) throws IOException {
    final String operationName = "setTimes";
    HdfsFileStatus auditStat;
    // Fail fast if this NameNode cannot currently serve writes.
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        // Re-check under the lock: HA state may have changed since above.
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot set times " + src);
        auditStat = FSDirAttrOp.setTimes(dir, src, mtime, atime);
    } catch (AccessControlException e) {
        // Audit the denied attempt before rethrowing to the caller.
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    // Sync the edit log outside the write lock to avoid blocking other ops.
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)

Example 9 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From class FSNamesystem, method createEncryptionZone.

/**
   * Create an encryption zone on directory src using the specified key.
   *
   * @param src     the path of a directory which will be the root of the
   *                encryption zone. The directory must be empty.
   * @param keyName name of a key which must be present in the configured
   *                KeyProvider.
   * @param logRetryCache whether to record this call in the retry cache
   * @throws AccessControlException  if the caller is not the superuser.
   * @throws UnresolvedLinkException if the path can't be resolved.
   * @throws SafeModeException       if the Namenode is in safe mode.
   */
void createEncryptionZone(final String src, final String keyName, boolean logRetryCache) throws IOException, UnresolvedLinkException, SafeModeException, AccessControlException {
    final String operationName = "createEncryptionZone";
    try {
        // Validate the key (may contact the KeyProvider) before taking the
        // namesystem write lock, so remote calls never happen under the lock.
        Metadata metadata = FSDirEncryptionZoneOp.ensureKeyIsInitialized(dir, keyName, src);
        checkSuperuserPrivilege();
        FSPermissionChecker pc = getPermissionChecker();
        checkOperation(OperationCategory.WRITE);
        final HdfsFileStatus resultingStat;
        writeLock();
        try {
            // Re-check privilege and HA state under the lock: both may have
            // changed while the key was being initialized above.
            checkSuperuserPrivilege();
            checkOperation(OperationCategory.WRITE);
            checkNameNodeSafeMode("Cannot create encryption zone on " + src);
            resultingStat = FSDirEncryptionZoneOp.createEncryptionZone(dir, src, pc, metadata.getCipher(), keyName, logRetryCache);
        } finally {
            writeUnlock(operationName);
        }
        // Sync the edit log outside the write lock, then audit the success.
        getEditLog().logSync();
        logAuditEvent(true, operationName, src, null, resultingStat);
    } catch (AccessControlException e) {
        // Audit the denied attempt before rethrowing to the caller.
        logAuditEvent(false, operationName, src);
        throw e;
    }
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Metadata(org.apache.hadoop.crypto.key.KeyProvider.Metadata) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)

Example 10 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From class FSNamesystem, method setAcl.

/**
 * Fully replaces the ACL of the inode at {@code src} with {@code aclSpec}.
 * The operation is audited on both success and access-denied failure,
 * and the resulting edit is synced outside the namesystem write lock.
 *
 * @param src     path whose ACL is replaced
 * @param aclSpec the complete new list of ACL entries to apply
 * @throws IOException if the path cannot be resolved, the NameNode is in
 *         safe mode, or the caller lacks permission
 */
void setAcl(final String src, List<AclEntry> aclSpec) throws IOException {
    final String operationName = "setAcl";
    HdfsFileStatus auditStat = null;
    // Fail fast if this NameNode cannot currently serve writes.
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        // Re-check under the lock: HA state may have changed since above.
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot set ACL on " + src);
        auditStat = FSDirAclOp.setAcl(dir, src, aclSpec);
    } catch (AccessControlException e) {
        // Audit the denied attempt before rethrowing to the caller.
        logAuditEvent(false, operationName, src);
        throw e;
    } finally {
        writeUnlock(operationName);
    }
    // Sync the edit log outside the write lock to avoid blocking other ops.
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, auditStat);
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)

Aggregations

HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)124 Test (org.junit.Test)51 FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle)34 IOException (java.io.IOException)28 InetSocketAddress (java.net.InetSocketAddress)28 XDR (org.apache.hadoop.oncrpc.XDR)28 AccessControlException (org.apache.hadoop.security.AccessControlException)26 Path (org.apache.hadoop.fs.Path)23 SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException)23 FileNotFoundException (java.io.FileNotFoundException)16 DFSClient (org.apache.hadoop.hdfs.DFSClient)11 DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing)11 FsPermission (org.apache.hadoop.fs.permission.FsPermission)10 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)9 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)9 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)8 SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3)8 FileStatus (org.apache.hadoop.fs.FileStatus)7 Matchers.anyString (org.mockito.Matchers.anyString)7 Configuration (org.apache.hadoop.conf.Configuration)6