Search in sources :

Example 1 with READDIR3Response

use of org.apache.hadoop.nfs.nfs3.response.READDIR3Response in project hadoop by apache.

The method readdir of the class RpcProgramNfs3.

/**
 * NFS3 READDIR operation: lists the entries of the directory identified by
 * the file handle in the request, resuming from the request's cookie.
 *
 * @param xdr             serialized READDIR3 request
 * @param securityHandler resolves the requesting user for DFSClient lookup
 * @param remoteAddress   client address, checked against export access rules
 * @return a READDIR3Response carrying either an NFS3 error status or the
 *         directory listing plus post-operation directory attributes
 */
public READDIR3Response readdir(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    // Default to OK; error paths below overwrite the status or return fresh
    // error responses.
    READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK);
    // READDIR only requires read access to the export.
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        // No DFS client could be obtained for this user: server-side fault.
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    READDIR3Request request;
    try {
        request = READDIR3Request.deserialize(xdr);
    } catch (IOException e) {
        // Malformed request body.
        LOG.error("Invalid READDIR request");
        return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle handle = request.getHandle();
    long cookie = request.getCookie();
    if (cookie < 0) {
        // Cookies are resume points handed out by us; a negative one can
        // never be valid.
        LOG.error("Invalid READDIR request, with negative cookie: " + cookie);
        return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    long count = request.getCount();
    if (count <= 0) {
        // Nothing was requested; reply OK with no directory list rather than
        // treating it as an error.
        LOG.info("Nonpositive count in invalid READDIR request: " + count);
        return new READDIR3Response(Nfs3Status.NFS3_OK);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS READDIR fileId: " + handle.getFileId() + " cookie: " + cookie + " count: " + count + " client: " + remoteAddress);
    }
    HdfsFileStatus dirStatus;
    DirectoryListing dlisting;
    Nfs3FileAttributes postOpAttr;
    long dotdotFileId = 0;
    try {
        String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
        dirStatus = dfsClient.getFileInfo(dirFileIdPath);
        if (dirStatus == null) {
            // The handle no longer resolves to an inode.
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
        }
        if (!dirStatus.isDir()) {
            LOG.error("Can't readdir for regular file, fileId: " + handle.getFileId());
            return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
        }
        // The directory's modification time serves as the cookie verifier:
        // a non-zero verifier from the client must match the current mtime,
        // otherwise the client's resume point may be invalid.
        long cookieVerf = request.getCookieVerf();
        if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
            if (aixCompatMode) {
                // The AIX NFS client misinterprets RFC-1813 and will repeatedly send
                // the same cookieverf value even across VFS-level readdir calls,
                // instead of getting a new cookieverf for every VFS-level readdir
                // call, and reusing the cookieverf only in the event that multiple
                // incremental NFS-level readdir calls must be made to fetch all of
                // the directory entries. This means that whenever a readdir call is
                // made by an AIX NFS client for a given directory, and that directory
                // is subsequently modified, thus changing its mtime, no later readdir
                // calls will succeed from AIX for that directory until the FS is
                // unmounted/remounted. See HDFS-6549 for more info.
                LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " + "mismatches.");
            } else {
                LOG.error("CookieVerf mismatch. request cookieVerf: " + cookieVerf + " dir cookieVerf: " + dirStatus.getModificationTime());
                return new READDIR3Response(Nfs3Status.NFS3ERR_BAD_COOKIE, Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug));
            }
        }
        if (cookie == 0) {
            // First call for this directory: get dotdot fileId so the ".."
            // entry can be synthesized below.
            String dotdotFileIdPath = dirFileIdPath + "/..";
            HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);
            if (dotdotStatus == null) {
                // This should not happen
                throw new IOException("Can't get path for handle path: " + dotdotFileIdPath);
            }
            dotdotFileId = dotdotStatus.getFileId();
        }
        // Get the list from the resume point. HDFS resumes listings by name,
        // so a non-zero cookie (a fileId) is converted to its inode-id path.
        byte[] startAfter;
        if (cookie == 0) {
            startAfter = HdfsFileStatus.EMPTY_NAME;
        } else {
            String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
            startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8"));
        }
        dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
        // Fetch directory attributes AFTER the listing so the reported
        // attributes are at least as fresh as the entries returned.
        postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
        if (postOpAttr == null) {
            LOG.error("Can't get path for fileId: " + handle.getFileId());
            return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
        }
    } catch (IOException e) {
        // Translate HDFS-side failures to the corresponding NFS3 status.
        LOG.warn("Exception ", e);
        int status = mapErrorStatus(e);
        return new READDIR3Response(status);
    }
    /**
     * Set up the dirents in the response. fileId is used as the cookie with one
     * exception. Linux client can either be stuck with "ls" command (on REHL)
     * or report "Too many levels of symbolic links" (Ubuntu).
     *
     * The problem is that, only two items returned, "." and ".." when the
     * namespace is empty. Both of them are "/" with the same cookie(root
     * fileId). Linux client doesn't think such a directory is a real directory.
     * Even though NFS protocol specifies cookie is an opaque data, Linux client
     * somehow doesn't like an empty dir returns same cookie for both "." and
     * "..".
     *
     * The workaround is to use 0 as the cookie for "." and always return "." as
     * the first entry in readdir/readdirplus response.
     */
    HdfsFileStatus[] fstatus = dlisting.getPartialListing();
    // Two slots of the requested count are reserved for the synthetic "."
    // and ".." entries. NOTE(review): the reservation is applied even when
    // resuming (cookie != 0) — presumably to keep reply sizing uniform;
    // confirm against the READDIRPLUS implementation.
    int n = (int) Math.min(fstatus.length, count - 2);
    // eof only if we consumed the whole partial listing and HDFS reports
    // no further entries.
    boolean eof = (n >= fstatus.length) && !dlisting.hasMore();
    Entry3[] entries;
    if (cookie == 0) {
        // Fresh listing: prepend "." (cookie 0, see workaround note above)
        // and ".." before the real entries.
        entries = new Entry3[n + 2];
        entries[0] = new READDIR3Response.Entry3(postOpAttr.getFileId(), ".", 0);
        entries[1] = new READDIR3Response.Entry3(dotdotFileId, "..", dotdotFileId);
        for (int i = 2; i < n + 2; i++) {
            entries[i] = new READDIR3Response.Entry3(fstatus[i - 2].getFileId(), fstatus[i - 2].getLocalName(), fstatus[i - 2].getFileId());
        }
    } else {
        // Resume from last readdirplus. If the cookie is "..", the result
        // list is up the directory content since HDFS uses name as resume point.
        entries = new Entry3[n];
        for (int i = 0; i < n; i++) {
            entries[i] = new READDIR3Response.Entry3(fstatus[i].getFileId(), fstatus[i].getLocalName(), fstatus[i].getFileId());
        }
    }
    DirList3 dirList = new READDIR3Response.DirList3(entries, eof);
    return new READDIR3Response(Nfs3Status.NFS3_OK, postOpAttr, dirStatus.getModificationTime(), dirList);
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) DirectoryListing(org.apache.hadoop.hdfs.protocol.DirectoryListing) Entry3(org.apache.hadoop.nfs.nfs3.response.READDIR3Response.Entry3) READDIR3Response(org.apache.hadoop.nfs.nfs3.response.READDIR3Response) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) IOException(java.io.IOException) DirList3(org.apache.hadoop.nfs.nfs3.response.READDIR3Response.DirList3) READDIR3Request(org.apache.hadoop.nfs.nfs3.request.READDIR3Request) Entry3(org.apache.hadoop.nfs.nfs3.response.READDIR3Response.Entry3) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus)

Example 2 with READDIR3Response

use of org.apache.hadoop.nfs.nfs3.response.READDIR3Response in project hadoop by apache.

The method testReaddirBasic of the class TestReaddir.

@Test
public void testReaddirBasic() throws IOException {
    // Look up the inode id of the test directory (/tmp).
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    long dirId = status.getFileId();

    // Serialize the READDIR request: handle, cookie, verifier, count.
    XDR request = new XDR();
    FileHandle handle = new FileHandle(dirId);
    handle.serialize(request);
    request.writeLongAsHyper(0); // cookie
    request.writeLongAsHyper(0); // verifier
    request.writeInt(100); // count

    READDIR3Response response = nfsd.readdir(request.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    List<Entry3> dirents = response.getDirList().getEntries();
    // Five entries, including the "." and ".." pseudo-entries.
    assertTrue(dirents.size() == 5);

    // Now resume the listing from f2's fileId as the cookie.
    status = nn.getRpcServer().getFileInfo(testdir + "/f2");
    long f2Id = status.getFileId();
    request = new XDR();
    handle = new FileHandle(dirId);
    handle.serialize(request);
    request.writeLongAsHyper(f2Id); // cookie = f2
    request.writeLongAsHyper(0); // verifier
    request.writeInt(100); // count

    response = nfsd.readdir(request.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    dirents = response.getDirList().getEntries();
    // Only the entry after f2 remains.
    assertTrue(dirents.size() == 1);
    Entry3 entry = dirents.get(0);
    assertTrue(entry.getName().equals("f3"));

    // Once the cookie file is deleted, the listing starts over, but without
    // the "." and ".." pseudo-entries.
    hdfs.delete(new Path(testdir + "/f2"), false);
    response = nfsd.readdir(request.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    dirents = response.getDirList().getEntries();
    assertTrue(dirents.size() == 2);
}
Also used : Path(org.apache.hadoop.fs.Path) Entry3(org.apache.hadoop.nfs.nfs3.response.READDIR3Response.Entry3) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) READDIR3Response(org.apache.hadoop.nfs.nfs3.response.READDIR3Response) InetSocketAddress(java.net.InetSocketAddress) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) XDR(org.apache.hadoop.oncrpc.XDR) Test(org.junit.Test)

Example 3 with READDIR3Response

use of org.apache.hadoop.nfs.nfs3.response.READDIR3Response in project hadoop by apache.

The method testReaddir of the class TestRpcProgramNfs3.

@Test(timeout = 60000)
public void testReaddir() throws Exception {
    // Build a READDIR request against the test directory's file handle.
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    FileHandle handle = new FileHandle(status.getFileId());
    XDR xdr_req = new XDR();
    new READDIR3Request(handle, 0, 0, 100).serialize(xdr_req);

    // An unprivileged user must be rejected with NFS3ERR_ACCES.
    READDIR3Response response1 = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus());

    // A privileged user succeeds.
    READDIR3Response response2 = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus());
}
Also used : READDIR3Request(org.apache.hadoop.nfs.nfs3.request.READDIR3Request) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) READDIR3Response(org.apache.hadoop.nfs.nfs3.response.READDIR3Response) InetSocketAddress(java.net.InetSocketAddress) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) XDR(org.apache.hadoop.oncrpc.XDR) Test(org.junit.Test)

Example 4 with READDIR3Response

use of org.apache.hadoop.nfs.nfs3.response.READDIR3Response in project hadoop by apache.

The method deserialize of the class READDIR3Response.

/**
 * Deserializes a READDIR3 reply from the given XDR stream.
 *
 * @param xdr stream positioned at the start of a READDIR3 reply body
 * @return the decoded response; the directory list is non-null only when
 *         the reply status is {@code NFS3_OK}
 */
public static READDIR3Response deserialize(XDR xdr) {
    int status = xdr.readInt();
    // Discard the post-op-attributes "follows" flag; the attributes are
    // decoded unconditionally right after it.
    xdr.readBoolean();
    Nfs3FileAttributes postOpDirAttr = Nfs3FileAttributes.deserialize(xdr);
    long cookieVerf = 0;
    ArrayList<Entry3> entries = new ArrayList<Entry3>();
    DirList3 dirList = null;
    // A cookie verifier and directory list are only present on success.
    if (status == Nfs3Status.NFS3_OK) {
        cookieVerf = xdr.readHyper();
        // Entries are a boolean-prefixed XDR list: true means another entry
        // follows, false terminates the list.
        while (xdr.readBoolean()) {
            // NOTE: "deserialzie" is the (misspelled) upstream Entry3 API name.
            entries.add(Entry3.deserialzie(xdr));
        }
        boolean eof = xdr.readBoolean();
        // toArray(new Entry3[0]) is the idiomatic replacement for manually
        // sizing an array and filling it via the two-step toArray(allEntries).
        dirList = new DirList3(entries.toArray(new Entry3[0]), eof);
    }
    return new READDIR3Response(status, postOpDirAttr, cookieVerf, dirList);
}
Also used : Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) ArrayList(java.util.ArrayList)

Aggregations

HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)3 FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle)3 READDIR3Response (org.apache.hadoop.nfs.nfs3.response.READDIR3Response)3 InetSocketAddress (java.net.InetSocketAddress)2 Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes)2 READDIR3Request (org.apache.hadoop.nfs.nfs3.request.READDIR3Request)2 Entry3 (org.apache.hadoop.nfs.nfs3.response.READDIR3Response.Entry3)2 XDR (org.apache.hadoop.oncrpc.XDR)2 Test (org.junit.Test)2 IOException (java.io.IOException)1 ArrayList (java.util.ArrayList)1 Path (org.apache.hadoop.fs.Path)1 DFSClient (org.apache.hadoop.hdfs.DFSClient)1 DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing)1 DirList3 (org.apache.hadoop.nfs.nfs3.response.READDIR3Response.DirList3)1