
Example 16 with VisibleForTesting

Use of com.google.common.annotations.VisibleForTesting in project hadoop by apache.

The class RpcProgramNfs3, method readdirplus.

@VisibleForTesting
READDIRPLUS3Response readdirplus(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_ACCES);
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_SERVERFAULT);
    }
    READDIRPLUS3Request request = null;
    try {
        request = READDIRPLUS3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid READDIRPLUS request");
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle handle = request.getHandle();
    long cookie = request.getCookie();
    if (cookie < 0) {
        LOG.error("Invalid READDIRPLUS request, with negative cookie: " + cookie);
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    long dirCount = request.getDirCount();
    if (dirCount <= 0) {
        LOG.info("Nonpositive dircount in invalid READDIRPLUS request: " + dirCount);
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    int maxCount = request.getMaxCount();
    if (maxCount <= 0) {
        LOG.info("Nonpositive maxcount in invalid READDIRPLUS request: " + maxCount);
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: " + cookie + " dirCount: " + dirCount + " maxCount: " + maxCount + " client: " + remoteAddress);
    }
    HdfsFileStatus dirStatus;
    DirectoryListing dlisting;
    Nfs3FileAttributes postOpDirAttr;
    long dotdotFileId = 0;
    HdfsFileStatus dotdotStatus = null;
    try {
        String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
        dirStatus = dfsClient.getFileInfo(dirFileIdPath);
        if (dirStatus == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
        }
        if (!dirStatus.isDir()) {
            LOG.error("Can't readdirplus for regular file, fileId: " + handle.getFileId());
            return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);
        }
        long cookieVerf = request.getCookieVerf();
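        // The cookie verifier is the directory's modification time, so a
        // mismatch means the directory changed after the client obtained
        // its cookies.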
        if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
            if (aixCompatMode) {
                // The AIX NFS client misinterprets RFC-1813 and will repeatedly send
                // the same cookieverf value even across VFS-level readdir calls,
                // instead of getting a new cookieverf for every VFS-level readdir
                // call. This means that whenever a readdir call is made by an AIX NFS
                // client for a given directory, and that directory is subsequently
                // modified, thus changing its mtime, no later readdir calls will
                // succeed for that directory from AIX until the FS is
                // unmounted/remounted. See HDFS-6549 for more info.
                LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " + "mismatches.");
            } else {
                LOG.error("cookieverf mismatch. request cookieverf: " + cookieVerf + " dir cookieverf: " + dirStatus.getModificationTime());
                return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_BAD_COOKIE, Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug), 0, null);
            }
        }
        if (cookie == 0) {
            // Get dotdot fileId
            String dotdotFileIdPath = dirFileIdPath + "/..";
            dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);
            if (dotdotStatus == null) {
                // This should not happen
                throw new IOException("Can't get path for handle path: " + dotdotFileIdPath);
            }
            dotdotFileId = dotdotStatus.getFileId();
        }
        // Get the list from the resume point
        byte[] startAfter;
        if (cookie == 0) {
            startAfter = HdfsFileStatus.EMPTY_NAME;
        } else {
            String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
            startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8"));
        }
        dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
        postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
        if (postOpDirAttr == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
        }
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        int status = mapErrorStatus(e);
        return new READDIRPLUS3Response(status);
    }
    // Set up the dirents in the response
    HdfsFileStatus[] fstatus = dlisting.getPartialListing();
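    // Cap the entry count at dirCount - 2, leaving room for the "." and
    // ".." entries that are added below for an initial listing (cookie == 0).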
    int n = (int) Math.min(fstatus.length, dirCount - 2);
    boolean eof = (n >= fstatus.length) && !dlisting.hasMore();
    READDIRPLUS3Response.EntryPlus3[] entries;
    if (cookie == 0) {
        entries = new READDIRPLUS3Response.EntryPlus3[n + 2];
        entries[0] = new READDIRPLUS3Response.EntryPlus3(postOpDirAttr.getFileId(), ".", 0, postOpDirAttr, new FileHandle(postOpDirAttr.getFileId()));
        entries[1] = new READDIRPLUS3Response.EntryPlus3(dotdotFileId, "..", dotdotFileId, Nfs3Utils.getNfs3FileAttrFromFileStatus(dotdotStatus, iug), new FileHandle(dotdotFileId));
        for (int i = 2; i < n + 2; i++) {
            long fileId = fstatus[i - 2].getFileId();
            FileHandle childHandle = new FileHandle(fileId);
            Nfs3FileAttributes attr;
            try {
                attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
            } catch (IOException e) {
                LOG.error("Can't get file attributes for fileId: " + fileId, e);
                continue;
            }
            entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId, fstatus[i - 2].getLocalName(), fileId, attr, childHandle);
        }
    } else {
        // Resume from the last readdirplus. If the cookie is "..", the
        // result list covers the whole directory content, since HDFS uses
        // the name as the resume point.
        entries = new READDIRPLUS3Response.EntryPlus3[n];
        for (int i = 0; i < n; i++) {
            long fileId = fstatus[i].getFileId();
            FileHandle childHandle = new FileHandle(fileId);
            Nfs3FileAttributes attr;
            try {
                attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
            } catch (IOException e) {
                LOG.error("Can't get file attributes for fileId: " + fileId, e);
                continue;
            }
            entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId, fstatus[i].getLocalName(), fileId, attr, childHandle);
        }
    }
    DirListPlus3 dirListPlus = new READDIRPLUS3Response.DirListPlus3(entries, eof);
    return new READDIRPLUS3Response(Nfs3Status.NFS3_OK, postOpDirAttr, dirStatus.getModificationTime(), dirListPlus);
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient) DirectoryListing(org.apache.hadoop.hdfs.protocol.DirectoryListing) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) READDIRPLUS3Response(org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response) IOException(java.io.IOException) READDIRPLUS3Request(org.apache.hadoop.nfs.nfs3.request.READDIRPLUS3Request) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) DirListPlus3(org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response.DirListPlus3) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
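The package-private visibility plus @VisibleForTesting is exactly what lets a unit test in the same package drive this handler directly, bypassing the RPC layer. Below is a minimal sketch of such a call, modeled on the pattern in Hadoop's TestRpcProgramNfs3. Here nfsd, securityHandler, and handle are assumed test fixtures (for example, backed by a MiniDFSCluster and a mocked SecurityHandler), and the request constructor's argument order (handle, cookie, cookieVerf, dirCount, maxCount) is assumed to mirror the deserialize order shown above.

import static org.junit.Assert.assertEquals;

import java.net.InetSocketAddress;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.request.READDIRPLUS3Request;
import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response;
import org.apache.hadoop.oncrpc.XDR;

// Build the wire-format request by hand, then call the handler directly.
XDR xdr = new XDR();
new READDIRPLUS3Request(handle, 0, 0, 100, 1000).serialize(xdr);
READDIRPLUS3Response resp = nfsd.readdirplus(xdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals(Nfs3Status.NFS3_OK, resp.getStatus());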

Example 17 with VisibleForTesting

Use of com.google.common.annotations.VisibleForTesting in project hadoop by apache.

The class RpcProgramNfs3, method read.

@VisibleForTesting
READ3Response read(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
    final String userName = securityHandler.getUser();
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(userName);
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    READ3Request request;
    try {
        request = READ3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid READ request");
        return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    long offset = request.getOffset();
    int count = request.getCount();
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset + " count: " + count + " client: " + remoteAddress);
    }
    Nfs3FileAttributes attrs;
    boolean eof;
    if (count == 0) {
        // Only do access check.
        try {
            // Don't read from cache. Client may not have read permission.
            attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle), iug);
        } catch (IOException e) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Get error accessing file, fileId: " + handle.getFileId(), e);
            }
            return new READ3Response(Nfs3Status.NFS3ERR_IO);
        }
        if (attrs == null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Can't get path for fileId: " + handle.getFileId());
            }
            return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
        }
        int access = Nfs3Utils.getAccessRightsForUserGroup(securityHandler.getUid(), securityHandler.getGid(), securityHandler.getAuxGids(), attrs);
        if ((access & Nfs3Constant.ACCESS3_READ) != 0) {
            eof = offset >= attrs.getSize();
            return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof, ByteBuffer.wrap(new byte[0]));
        } else {
            return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
        }
    }
    // In case there is buffered data for the same file, flush it. This can be
    // optimized later by reading from the cache.
    int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
    if (ret != Nfs3Status.NFS3_OK) {
        LOG.warn("commitBeforeRead didn't succeed with ret=" + ret + ". Read may not get most recent data.");
    }
    try {
        int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY, NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
        int buffSize = Math.min(rtmax, count);
        byte[] readbuffer = new byte[buffSize];
        int readCount = 0;
        /**
         * Retry exactly once because the DFSInputStream can be stale.
         */
        for (int i = 0; i < 1; ++i) {
            FSDataInputStream fis = clientCache.getDfsInputStream(userName, Nfs3Utils.getFileIdPath(handle));
            if (fis == null) {
                return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
            }
            try {
                readCount = fis.read(offset, readbuffer, 0, count);
                metrics.incrBytesRead(readCount);
            } catch (IOException e) {
                // TODO: A cleaner way is to throw a new type of exception
                // which requires incompatible changes.
                if (e.getMessage().equals("Stream closed")) {
                    clientCache.invalidateDfsInputStream(userName, Nfs3Utils.getFileIdPath(handle));
                    continue;
                } else {
                    throw e;
                }
            }
        }
        attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle), iug);
        if (readCount < count) {
            LOG.info("Partial read. Asked offset: " + offset + " count: " + count + " and read back: " + readCount + " file size: " + attrs.getSize());
        }
        // HDFS returns -1 for read beyond file size.
        if (readCount < 0) {
            readCount = 0;
        }
        eof = (offset + readCount) >= attrs.getSize();
        return new READ3Response(Nfs3Status.NFS3_OK, attrs, readCount, eof, ByteBuffer.wrap(readbuffer));
    } catch (IOException e) {
        LOG.warn("Read error: " + e.getClass() + " offset: " + offset + " count: " + count, e);
        int status = mapErrorStatus(e);
        return new READ3Response(status);
    }
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient) READ3Request(org.apache.hadoop.nfs.nfs3.request.READ3Request) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) IOException(java.io.IOException) READ3Response(org.apache.hadoop.nfs.nfs3.response.READ3Response) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
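The read path above relies on FSDataInputStream's positional-read overload, read(long position, byte[] buffer, int offset, int length), which reads at an absolute offset without moving the stream's current position and returns -1 for a read entirely beyond end of file; that is why the handler clamps a negative readCount to 0. A self-contained sketch of the same call, where the path /tmp/example is purely illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PositionalReadSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        try (FSDataInputStream in = fs.open(new Path("/tmp/example"))) {
            byte[] buf = new byte[4096];
            // Positional read: does not advance the stream's file pointer.
            int n = in.read(0L, buf, 0, buf.length);
            if (n < 0) {
                // -1 signals a read past EOF, as in the handler above.
                n = 0;
            }
            System.out.println("read " + n + " bytes");
        }
    }
}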

Example 18 with VisibleForTesting

Use of com.google.common.annotations.VisibleForTesting in project hadoop by apache.

The class RpcProgramNfs3, method getattr.

@VisibleForTesting
GETATTR3Response getattr(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK);
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    GETATTR3Request request;
    try {
        request = GETATTR3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid GETATTR request");
        response.setStatus(Nfs3Status.NFS3ERR_INVAL);
        return response;
    }
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("GETATTR for fileId: " + handle.getFileId() + " client: " + remoteAddress);
    }
    Nfs3FileAttributes attrs = null;
    try {
        attrs = writeManager.getFileAttr(dfsClient, handle, iug);
    } catch (RemoteException r) {
        LOG.warn("Exception ", r);
        IOException io = r.unwrapRemoteException();
        /**
         * AuthorizationException can be thrown if the user can't be proxy'ed.
         */
        if (io instanceof AuthorizationException) {
            return new GETATTR3Response(Nfs3Status.NFS3ERR_ACCES);
        } else {
            return new GETATTR3Response(Nfs3Status.NFS3ERR_IO);
        }
    } catch (IOException e) {
        LOG.info("Can't get file attribute, fileId=" + handle.getFileId(), e);
        int status = mapErrorStatus(e);
        response.setStatus(status);
        return response;
    }
    if (attrs == null) {
        LOG.error("Can't get path for fileId: " + handle.getFileId());
        response.setStatus(Nfs3Status.NFS3ERR_STALE);
        return response;
    }
    response.setPostOpAttr(attrs);
    return response;
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient) AuthorizationException(org.apache.hadoop.security.authorize.AuthorizationException) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) IOException(java.io.IOException) GETATTR3Response(org.apache.hadoop.nfs.nfs3.response.GETATTR3Response) RemoteException(org.apache.hadoop.ipc.RemoteException) GETATTR3Request(org.apache.hadoop.nfs.nfs3.request.GETATTR3Request) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
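RemoteException is how Hadoop RPC transports a server-side exception to the client; unwrapRemoteException() re-instantiates the original exception class locally so it can be matched with instanceof, as the catch block above does. A compact sketch of that classification in isolation (mapToNfsStatus is a hypothetical helper name, not part of RpcProgramNfs3):

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.security.authorize.AuthorizationException;

final class NfsErrorMapping {
    // Map a transported server-side exception to an NFS3 status code.
    static int mapToNfsStatus(RemoteException r) {
        IOException io = r.unwrapRemoteException();
        // A rejected proxy-user attempt surfaces as AuthorizationException.
        return (io instanceof AuthorizationException)
                ? Nfs3Status.NFS3ERR_ACCES
                : Nfs3Status.NFS3ERR_IO;
    }
}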

Example 19 with VisibleForTesting

Use of com.google.common.annotations.VisibleForTesting in project hadoop by apache.

The class RpcProgramNfs3, method symlink.

@VisibleForTesting
SYMLINK3Response symlink(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    SYMLINK3Response response = new SYMLINK3Response(Nfs3Status.NFS3_OK);
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    SYMLINK3Request request;
    try {
        request = SYMLINK3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid SYMLINK request");
        response.setStatus(Nfs3Status.NFS3ERR_INVAL);
        return response;
    }
    FileHandle dirHandle = request.getHandle();
    String name = request.getName();
    String symData = request.getSymData();
    String linkDirIdPath = Nfs3Utils.getFileIdPath(dirHandle);
    // Don't do any name check to source path, just leave it to HDFS
    String linkIdPath = linkDirIdPath + "/" + name;
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS SYMLINK, target: " + symData + " link: " + linkIdPath + " client: " + remoteAddress);
    }
    try {
        WccData dirWcc = response.getDirWcc();
        WccAttr preOpAttr = Nfs3Utils.getWccAttr(dfsClient, linkDirIdPath);
        dirWcc.setPreOpAttr(preOpAttr);
        dfsClient.createSymlink(symData, linkIdPath, false);
        // Setting the symlink's attributes is treated as changing the
        // attributes of the target file, so there is no need to set
        // attributes on the symlink here after it is created.
        HdfsFileStatus linkstat = dfsClient.getFileLinkInfo(linkIdPath);
        Nfs3FileAttributes objAttr = Nfs3Utils.getNfs3FileAttrFromFileStatus(linkstat, iug);
        dirWcc.setPostOpAttr(Nfs3Utils.getFileAttr(dfsClient, linkDirIdPath, iug));
        return new SYMLINK3Response(Nfs3Status.NFS3_OK, new FileHandle(objAttr.getFileId()), objAttr, dirWcc);
    } catch (IOException e) {
        LOG.warn("Exception: " + e);
        int status = mapErrorStatus(e);
        response.setStatus(status);
        return response;
    }
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient) WccData(org.apache.hadoop.nfs.nfs3.response.WccData) SYMLINK3Request(org.apache.hadoop.nfs.nfs3.request.SYMLINK3Request) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) WccAttr(org.apache.hadoop.nfs.nfs3.response.WccAttr) IOException(java.io.IOException) SYMLINK3Response(org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
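The try block above follows the NFSv3 weak cache consistency (WCC) pattern: capture the directory's compact pre-op attributes before the mutation and its full post-op attributes after, so the client can detect that its cached directory data is stale. A hedged sketch of that pattern in isolation, assuming dfsClient, linkDirIdPath, linkIdPath, symData, and iug are in scope as in the handler; the two-argument WccData(preOpAttr, postOpAttr) constructor and its null handling are assumptions here, since the handler itself reuses response.getDirWcc():

WccData dirWcc = new WccData(null, null); // pre/post attrs filled in below
dirWcc.setPreOpAttr(Nfs3Utils.getWccAttr(dfsClient, linkDirIdPath));
dfsClient.createSymlink(symData, linkIdPath, false); // the directory mutation
dirWcc.setPostOpAttr(Nfs3Utils.getFileAttr(dfsClient, linkDirIdPath, iug));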

Example 20 with VisibleForTesting

Use of com.google.common.annotations.VisibleForTesting in project hadoop by apache.

The class RpcProgramNfs3, method pathconf.

@VisibleForTesting
PATHCONF3Response pathconf(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    PATHCONF3Response response = new PATHCONF3Response(Nfs3Status.NFS3_OK);
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    PATHCONF3Request request;
    try {
        request = PATHCONF3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid PATHCONF request");
        return new PATHCONF3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle handle = request.getHandle();
    Nfs3FileAttributes attrs;
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS PATHCONF fileId: " + handle.getFileId() + " client: " + remoteAddress);
    }
    try {
        attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle), iug);
        if (attrs == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new PATHCONF3Response(Nfs3Status.NFS3ERR_STALE);
        }
        return new PATHCONF3Response(Nfs3Status.NFS3_OK, attrs, 0, HdfsServerConstants.MAX_PATH_LENGTH, true, false, false, true);
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        int status = mapErrorStatus(e);
        return new PATHCONF3Response(status);
    }
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient) PATHCONF3Request(org.apache.hadoop.nfs.nfs3.request.PATHCONF3Request) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) PATHCONF3Response(org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response) IOException(java.io.IOException) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
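The positional arguments in the success response correspond to the PATHCONF fields of RFC 1813. An annotated restatement of the return statement above, with the field meanings as comments; the parameter order is taken from the call as written, and the interpretation of each flag is an assumption based on the RFC field names:

return new PATHCONF3Response(Nfs3Status.NFS3_OK, attrs,
        0,                                    // linkmax: presumably 0 because HDFS has no hard links
        HdfsServerConstants.MAX_PATH_LENGTH,  // name_max
        true,                                 // no_trunc: overlong names are rejected, not truncated
        false,                                // chown_restricted
        false,                                // case_insensitive
        true);                                // case_preserving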

Aggregations

VisibleForTesting (com.google.common.annotations.VisibleForTesting): 760 usages
IOException (java.io.IOException): 128 usages
ArrayList (java.util.ArrayList): 52 usages
Path (java.nio.file.Path): 46 usages
Map (java.util.Map): 46 usages
File (java.io.File): 40 usages
HashMap (java.util.HashMap): 34 usages
Path (org.apache.hadoop.fs.Path): 30 usages
ImmutableList (com.google.common.collect.ImmutableList): 28 usages
Matcher (java.util.regex.Matcher): 26 usages
List (java.util.List): 24 usages
SourcePath (com.facebook.buck.rules.SourcePath): 20 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 20 usages
HashSet (java.util.HashSet): 19 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 19 usages
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 19 usages
DFSClient (org.apache.hadoop.hdfs.DFSClient): 18 usages
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 18 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 16 usages
CigarElement (htsjdk.samtools.CigarElement): 13 usages