
Example 71 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From the class JsonUtilClient, method toFileStatus.

/** Convert a JSON map to an HdfsFileStatus object. */
static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
    if (json == null) {
        return null;
    }
    final Map<?, ?> m = includesType
        ? (Map<?, ?>) json.get(FileStatus.class.getSimpleName()) : json;
    final String localName = (String) m.get("pathSuffix");
    final WebHdfsConstants.PathType type =
        WebHdfsConstants.PathType.valueOf((String) m.get("type"));
    final byte[] symlink = type != WebHdfsConstants.PathType.SYMLINK ? null
        : DFSUtilClient.string2Bytes((String) m.get("symlink"));
    final long len = ((Number) m.get("length")).longValue();
    final String owner = (String) m.get("owner");
    final String group = (String) m.get("group");
    final FsPermission permission = toFsPermission((String) m.get("permission"),
        (Boolean) m.get("aclBit"), (Boolean) m.get("encBit"));
    final long aTime = ((Number) m.get("accessTime")).longValue();
    final long mTime = ((Number) m.get("modificationTime")).longValue();
    final long blockSize = ((Number) m.get("blockSize")).longValue();
    final short replication = ((Number) m.get("replication")).shortValue();
    // Fields added in newer releases may be absent; fall back to defaults.
    final long fileId = m.containsKey("fileId")
        ? ((Number) m.get("fileId")).longValue()
        : HdfsConstants.GRANDFATHER_INODE_ID;
    final int childrenNum = getInt(m, "childrenNum", -1);
    final byte storagePolicy = m.containsKey("storagePolicy")
        ? (byte) ((Number) m.get("storagePolicy")).longValue()
        : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
    return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY,
        replication, blockSize, mTime, aTime, permission, owner, group,
        symlink, DFSUtilClient.string2Bytes(localName), fileId, childrenNum,
        null, storagePolicy, null);
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FsPermission(org.apache.hadoop.fs.permission.FsPermission)
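
As a quick check of the converter, the sketch below feeds a hand-built WebHDFS FILESTATUS payload through toFileStatus. This is a minimal sketch, not WebHdfsFileSystem's actual call path: it assumes Jackson is on the classpath for JSON parsing, every field value is made up, and because toFileStatus is package-private the class would have to live in the org.apache.hadoop.hdfs.web package alongside JsonUtilClient.

package org.apache.hadoop.hdfs.web;

import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class ToFileStatusSketch {
    public static void main(String[] args) throws Exception {
        // A hand-built FILESTATUS payload in the shape WebHDFS returns;
        // every value here is hypothetical.
        String json = "{\"FileStatus\":{"
            + "\"pathSuffix\":\"f1\",\"type\":\"FILE\",\"length\":1024,"
            + "\"owner\":\"hdfs\",\"group\":\"supergroup\",\"permission\":\"644\","
            + "\"accessTime\":0,\"modificationTime\":0,"
            + "\"blockSize\":134217728,\"replication\":3}}";
        Map<?, ?> map = new ObjectMapper().readValue(json, Map.class);
        // includesType=true because the payload is wrapped under "FileStatus".
        HdfsFileStatus status = JsonUtilClient.toFileStatus(map, true);
        System.out.println(status.getLen() + " bytes, dir=" + status.isDir());
    }
}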

Example 72 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From the class RpcProgramNfs3, method remove.

@VisibleForTesting
REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK);
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    REMOVE3Request request;
    try {
        request = REMOVE3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid REMOVE request");
        return new REMOVE3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle dirHandle = request.getHandle();
    String fileName = request.getName();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS REMOVE dir fileId: " + dirHandle.getFileId() + " fileName: " + fileName + " client: " + remoteAddress);
    }
    String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
    Nfs3FileAttributes preOpDirAttr = null;
    Nfs3FileAttributes postOpDirAttr = null;
    try {
        preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
        if (preOpDirAttr == null) {
            LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
            return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE);
        }
        WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr);
        if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
            return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
        }
        String fileIdPath = dirFileIdPath + "/" + fileName;
        HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath);
        if (fstat == null) {
            return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
        }
        if (fstat.isDir()) {
            return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, errWcc);
        }
        boolean result = dfsClient.delete(fileIdPath, false);
        WccData dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug);
        if (!result) {
            return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc);
        }
        return new REMOVE3Response(Nfs3Status.NFS3_OK, dirWcc);
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        // Try to return correct WccData
        if (postOpDirAttr == null) {
            try {
                postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
            } catch (IOException e1) {
                LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
            }
        }
        WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), postOpDirAttr);
        int status = mapErrorStatus(e);
        return new REMOVE3Response(status, dirWcc);
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) WccData(org.apache.hadoop.nfs.nfs3.response.WccData) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) IOException(java.io.IOException) REMOVE3Response(org.apache.hadoop.nfs.nfs3.response.REMOVE3Response) REMOVE3Request(org.apache.hadoop.nfs.nfs3.request.REMOVE3Request) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
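
The handler deserializes a directory handle followed by the name of the entry to remove. Below is a minimal sketch of assembling that request on the wire in the same order, using only calls that appear elsewhere in this section; the fileId and file name are hypothetical.

import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;

public class RemoveRequestSketch {
    public static void main(String[] args) {
        XDR xdr = new XDR();
        // Directory handle first, then the entry name, matching the order
        // REMOVE3Request.deserialize(xdr) reads them back.
        new FileHandle(16386L).serialize(xdr); // hypothetical directory fileId
        xdr.writeString("f1");                 // hypothetical file name
        // xdr.asReadOnlyWrap() can then be passed to RpcProgramNfs3#remove,
        // as the test in Example 74 does.
    }
}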

Example 73 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From the class RpcProgramNfs3, method rmdir.

@VisibleForTesting
RMDIR3Response rmdir(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK);
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    RMDIR3Request request;
    try {
        request = RMDIR3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid RMDIR request");
        return new RMDIR3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle dirHandle = request.getHandle();
    String fileName = request.getName();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS RMDIR dir fileId: " + dirHandle.getFileId() + " fileName: " + fileName + " client: " + remoteAddress);
    }
    String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
    Nfs3FileAttributes preOpDirAttr = null;
    Nfs3FileAttributes postOpDirAttr = null;
    try {
        preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
        if (preOpDirAttr == null) {
            LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
            return new RMDIR3Response(Nfs3Status.NFS3ERR_STALE);
        }
        WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr);
        if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
        }
        String fileIdPath = dirFileIdPath + "/" + fileName;
        HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath);
        if (fstat == null) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
        }
        if (!fstat.isDir()) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, errWcc);
        }
        if (fstat.getChildrenNum() > 0) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTEMPTY, errWcc);
        }
        boolean result = dfsClient.delete(fileIdPath, false);
        WccData dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug);
        if (!result) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc);
        }
        return new RMDIR3Response(Nfs3Status.NFS3_OK, dirWcc);
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        // Try to return correct WccData
        if (postOpDirAttr == null) {
            try {
                postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
            } catch (IOException e1) {
                LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
            }
        }
        WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), postOpDirAttr);
        int status = mapErrorStatus(e);
        return new RMDIR3Response(status, dirWcc);
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) WccData(org.apache.hadoop.nfs.nfs3.response.WccData) RMDIR3Response(org.apache.hadoop.nfs.nfs3.response.RMDIR3Response) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) RMDIR3Request(org.apache.hadoop.nfs.nfs3.request.RMDIR3Request) IOException(java.io.IOException) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
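
A test-style invocation of this handler, modeled on the remove test in Example 74 below; the nfsd, securityHandler, and dirId fixtures (and imports) are assumed to match that test, and the directory name is hypothetical.

// Assumes the same read-only-export fixture as Example 74, plus an
// existing but empty directory "subdir" under the test directory.
@Test(timeout = 60000)
public void testRmdirOnReadOnlyExport() throws Exception {
    XDR xdr = new XDR();
    new FileHandle(dirId).serialize(xdr); // directory handle
    xdr.writeString("subdir");            // hypothetical child directory
    RMDIR3Response response = nfsd.rmdir(xdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    // With "* ro" exports the access check fails before any delete happens.
    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
        response.getStatus());
}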

Example 74 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From the class TestClientAccessPrivilege, method testClientAccessPrivilegeForRemove.

@Test(timeout = 60000)
public void testClientAccessPrivilegeForRemove() throws Exception {
    // Configure read-only (ro) access for the NFS service
    config.set("dfs.nfs.exports.allowed.hosts", "* ro");
    // Start nfs
    Nfs3 nfs = new Nfs3(config);
    nfs.startServiceInternal(false);
    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
    // Create a remove request
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    long dirId = status.getFileId();
    XDR xdr_req = new XDR();
    FileHandle handle = new FileHandle(dirId);
    handle.serialize(xdr_req);
    xdr_req.writeString("f1");
    // Remove operation
    REMOVE3Response response = nfsd.remove(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    // Assert on return code
    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response.getStatus());
}
Also used : FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) InetSocketAddress(java.net.InetSocketAddress) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) XDR(org.apache.hadoop.oncrpc.XDR) REMOVE3Response(org.apache.hadoop.nfs.nfs3.response.REMOVE3Response) Test(org.junit.Test)
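
A hedged variant of the same test: with the export configured read-write instead of read-only, the identical request should pass the access check and, assuming the fixture creates a file f1 under testdir, succeed.

@Test(timeout = 60000)
public void testRemoveWithReadWriteExport() throws Exception {
    // Grant read-write access instead of read-only.
    config.set("dfs.nfs.exports.allowed.hosts", "* rw");
    Nfs3 nfs = new Nfs3(config);
    nfs.startServiceInternal(false);
    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
    // Build the same remove request as above.
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    long dirId = status.getFileId();
    XDR xdr_req = new XDR();
    new FileHandle(dirId).serialize(xdr_req);
    xdr_req.writeString("f1"); // assumes the fixture created testdir/f1
    REMOVE3Response response = nfsd.remove(xdr_req.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    // With write access granted, the delete should go through.
    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
        response.getStatus());
}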

Example 75 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

From the class RpcProgramMountd, method mnt.

@Override
public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
    if (hostsMatcher == null) {
        return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid, null);
    }
    AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
    if (accessPrivilege == AccessPrivilege.NONE) {
        return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid, null);
    }
    String path = xdr.readString();
    if (LOG.isDebugEnabled()) {
        LOG.debug("MOUNT MNT path: " + path + " client: " + client);
    }
    String host = client.getHostName();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Got host: " + host + " path: " + path);
    }
    if (!exports.contains(path)) {
        LOG.info("Path " + path + " is not shared.");
        MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
        return out;
    }
    FileHandle handle = null;
    try {
        HdfsFileStatus exFileStatus = dfsClient.getFileInfo(path);
        handle = new FileHandle(exFileStatus.getFileId());
    } catch (IOException e) {
        LOG.error("Can't get handle for export:" + path, e);
        MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
        return out;
    }
    assert (handle != null);
    LOG.info("Giving handle (fileId:" + handle.getFileId() + ") to client for export " + path);
    mounts.add(new MountEntry(host, path));
    MountResponse.writeMNTResponse(Nfs3Status.NFS3_OK, out, xid, handle.getContent());
    return out;
}
Also used : MountEntry(org.apache.hadoop.mount.MountEntry) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) IOException(java.io.IOException) AccessPrivilege(org.apache.hadoop.nfs.AccessPrivilege)
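
The heart of this handler is the path-to-handle translation: an NFS file handle simply wraps the HDFS inode id of the export root. A standalone sketch of that step follows; the NameNode URI and export path are assumptions. Unlike the production code above, it null-checks getFileInfo (mnt relies on the earlier exports.contains check and would throw a NullPointerException if the path disappeared in between).

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.nfs.nfs3.FileHandle;

public class MountHandleSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical NameNode address and export path.
        Configuration conf = new Configuration();
        DFSClient dfsClient =
            new DFSClient(URI.create("hdfs://localhost:8020"), conf);
        HdfsFileStatus st = dfsClient.getFileInfo("/export");
        if (st == null) {
            System.err.println("export path does not exist");
            return;
        }
        // The NFS file handle wraps the HDFS inode id.
        FileHandle handle = new FileHandle(st.getFileId());
        System.out.println("fileId=" + handle.getFileId());
    }
}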

Aggregations

HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 124
Test (org.junit.Test): 51
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 34
IOException (java.io.IOException): 28
InetSocketAddress (java.net.InetSocketAddress): 28
XDR (org.apache.hadoop.oncrpc.XDR): 28
AccessControlException (org.apache.hadoop.security.AccessControlException): 26
Path (org.apache.hadoop.fs.Path): 23
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 23
FileNotFoundException (java.io.FileNotFoundException): 16
DFSClient (org.apache.hadoop.hdfs.DFSClient): 11
DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing): 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 8
FileStatus (org.apache.hadoop.fs.FileStatus): 7
Matchers.anyString (org.mockito.Matchers.anyString): 7
Configuration (org.apache.hadoop.conf.Configuration): 6