Example 51 with FileHandle

Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.

From the class RpcProgramNfs3, method setattr.

@VisibleForTesting
SETATTR3Response setattr(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    SETATTR3Response response = new SETATTR3Response(Nfs3Status.NFS3_OK);
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    SETATTR3Request request;
    try {
        request = SETATTR3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid SETATTR request");
        response.setStatus(Nfs3Status.NFS3ERR_INVAL);
        return response;
    }
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS SETATTR fileId: " + handle.getFileId() + " client: " + remoteAddress);
    }
    if (request.getAttr().getUpdateFields().contains(SetAttrField.SIZE)) {
        LOG.error("Setting file size is not supported when setattr, fileId: " + handle.getFileId());
        response.setStatus(Nfs3Status.NFS3ERR_INVAL);
        return response;
    }
    String fileIdPath = Nfs3Utils.getFileIdPath(handle);
    Nfs3FileAttributes preOpAttr = null;
    try {
        preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
        if (preOpAttr == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            response.setStatus(Nfs3Status.NFS3ERR_STALE);
            return response;
        }
        WccAttr preOpWcc = Nfs3Utils.getWccAttr(preOpAttr);
        if (request.isCheck()) {
            if (!preOpAttr.getCtime().equals(request.getCtime())) {
                WccData wccData = new WccData(preOpWcc, preOpAttr);
                return new SETATTR3Response(Nfs3Status.NFS3ERR_NOT_SYNC, wccData);
            }
        }
        // check the write access privilege
        if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
            return new SETATTR3Response(Nfs3Status.NFS3ERR_ACCES, new WccData(preOpWcc, preOpAttr));
        }
        setattrInternal(dfsClient, fileIdPath, request.getAttr(), true);
        Nfs3FileAttributes postOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
        WccData wccData = new WccData(preOpWcc, postOpAttr);
        return new SETATTR3Response(Nfs3Status.NFS3_OK, wccData);
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        WccData wccData = null;
        try {
            wccData = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpAttr), dfsClient, fileIdPath, iug);
        } catch (IOException e1) {
            LOG.info("Can't get postOpAttr for fileIdPath: " + fileIdPath, e1);
        }
        int status = mapErrorStatus(e);
        return new SETATTR3Response(status, wccData);
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), WccData (org.apache.hadoop.nfs.nfs3.response.WccData), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), WccAttr (org.apache.hadoop.nfs.nfs3.response.WccAttr), SETATTR3Response (org.apache.hadoop.nfs.nfs3.response.SETATTR3Response), SETATTR3Request (org.apache.hadoop.nfs.nfs3.request.SETATTR3Request), IOException (java.io.IOException), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
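
A minimal usage sketch, in the style of Hadoop's NFS3 unit tests: serialize a SETATTR3Request into an XDR buffer and call the handler directly. Here nfsd (a configured RpcProgramNfs3 instance), handle (a valid FileHandle), and securityHandler (a mocked SecurityHandler) are assumed fixtures, and the SETATTR3Request constructor arguments are an assumption based on the request class named above.

XDR xdr = new XDR();
// Empty SetAttr3: no update fields are set, so SIZE stays unset as required.
SETATTR3Request req = new SETATTR3Request(handle, new SetAttr3(), false, null);
req.serialize(xdr);
SETATTR3Response response = nfsd.setattr(xdr.asReadOnlyWrap(), securityHandler,
    new InetSocketAddress("localhost", 1234));
// NFS3_OK is expected only when the client address has READ_WRITE privilege.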

Example 52 with FileHandle

Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.

From the class RpcProgramNfs3, method commit.

@VisibleForTesting
COMMIT3Response commit(XDR xdr, Channel channel, int xid, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK);
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    COMMIT3Request request;
    try {
        request = COMMIT3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid COMMIT request");
        response.setStatus(Nfs3Status.NFS3ERR_INVAL);
        return response;
    }
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS COMMIT fileId: " + handle.getFileId() + " offset=" + request.getOffset() + " count=" + request.getCount() + " client: " + remoteAddress);
    }
    String fileIdPath = Nfs3Utils.getFileIdPath(handle);
    Nfs3FileAttributes preOpAttr = null;
    try {
        preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
        if (preOpAttr == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new COMMIT3Response(Nfs3Status.NFS3ERR_STALE);
        }
        if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
            return new COMMIT3Response(Nfs3Status.NFS3ERR_ACCES, new WccData(Nfs3Utils.getWccAttr(preOpAttr), preOpAttr), Nfs3Constant.WRITE_COMMIT_VERF);
        }
        long commitOffset = (request.getCount() == 0) ? 0 : (request.getOffset() + request.getCount());
        // Insert commit as an async request
        writeManager.handleCommit(dfsClient, handle, commitOffset, channel, xid, preOpAttr);
        // Return null here: the COMMIT3Response is sent back asynchronously
        // over the channel by the write manager once the commit completes.
        return null;
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        Nfs3FileAttributes postOpAttr = null;
        try {
            postOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
        } catch (IOException e1) {
            LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId(), e1);
        }
        WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
        int status = mapErrorStatus(e);
        return new COMMIT3Response(status, fileWcc, Nfs3Constant.WRITE_COMMIT_VERF);
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), WccData (org.apache.hadoop.nfs.nfs3.response.WccData), COMMIT3Response (org.apache.hadoop.nfs.nfs3.response.COMMIT3Response), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), IOException (java.io.IOException), COMMIT3Request (org.apache.hadoop.nfs.nfs3.request.COMMIT3Request), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
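
A similar sketch for commit, again with nfsd, handle, and securityHandler as assumed fixtures, plus a Mockito-mocked Netty Channel; the COMMIT3Request(handle, offset, count) constructor is an assumption based on the request class named above.

XDR xdr = new XDR();
COMMIT3Request req = new COMMIT3Request(handle, 0, 5); // commit bytes [0, 5)
req.serialize(xdr);
Channel channel = Mockito.mock(Channel.class);
COMMIT3Response response = nfsd.commit(xdr.asReadOnlyWrap(), channel, 1,
    securityHandler, new InetSocketAddress("localhost", 1234));
// Unlike the other handlers, commit() returns null on the success path: the
// response is sent later over the channel, matched to the request by xid (1).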

Example 53 with FileHandle

Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.

From the class RpcProgramNfs3, method remove.

@VisibleForTesting
REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK);
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    REMOVE3Request request;
    try {
        request = REMOVE3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid REMOVE request");
        return new REMOVE3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle dirHandle = request.getHandle();
    String fileName = request.getName();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS REMOVE dir fileId: " + dirHandle.getFileId() + " fileName: " + fileName + " client: " + remoteAddress);
    }
    String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
    Nfs3FileAttributes preOpDirAttr = null;
    Nfs3FileAttributes postOpDirAttr = null;
    try {
        preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
        if (preOpDirAttr == null) {
            LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
            return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE);
        }
        WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr);
        if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
            return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
        }
        String fileIdPath = dirFileIdPath + "/" + fileName;
        HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath);
        if (fstat == null) {
            return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
        }
        if (fstat.isDir()) {
            return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, errWcc);
        }
        boolean result = dfsClient.delete(fileIdPath, false);
        WccData dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug);
        if (!result) {
            return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc);
        }
        return new REMOVE3Response(Nfs3Status.NFS3_OK, dirWcc);
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        // Try to return correct WccData
        if (postOpDirAttr == null) {
            try {
                postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
            } catch (IOException e1) {
                LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
            }
        }
        WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), postOpDirAttr);
        int status = mapErrorStatus(e);
        return new REMOVE3Response(status, dirWcc);
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), WccData (org.apache.hadoop.nfs.nfs3.response.WccData), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), IOException (java.io.IOException), REMOVE3Response (org.apache.hadoop.nfs.nfs3.response.REMOVE3Response), REMOVE3Request (org.apache.hadoop.nfs.nfs3.request.REMOVE3Request), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
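
A sketch for remove, with nfsd, dirHandle (the handle of the parent directory), and securityHandler as assumed fixtures; "f1" is a hypothetical file name, and the two-argument REMOVE3Request constructor is assumed from the request class named above.

XDR xdr = new XDR();
REMOVE3Request req = new REMOVE3Request(dirHandle, "f1");
req.serialize(xdr);
REMOVE3Response response = nfsd.remove(xdr.asReadOnlyWrap(), securityHandler,
    new InetSocketAddress("localhost", 1234));
// Expect NFS3ERR_NOENT if "f1" is absent, NFS3ERR_ISDIR if it is a directory,
// and NFS3_OK with the directory's WccData on success.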

Example 54 with FileHandle

Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.

From the class OpenFileCtxCache, method scan.

@VisibleForTesting
void scan(long streamTimeout) {
    ArrayList<OpenFileCtx> ctxToRemove = new ArrayList<OpenFileCtx>();
    Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet().iterator();
    if (LOG.isTraceEnabled()) {
        LOG.trace("openFileMap size:" + size());
    }
    while (it.hasNext()) {
        Entry<FileHandle, OpenFileCtx> pairs = it.next();
        FileHandle handle = pairs.getKey();
        OpenFileCtx ctx = pairs.getValue();
        if (!ctx.streamCleanup(handle.getFileId(), streamTimeout)) {
            continue;
        }
        // Check it again inside lock before removing
        synchronized (this) {
            OpenFileCtx ctx2 = openFileMap.get(handle);
            if (ctx2 != null) {
                if (ctx2.streamCleanup(handle.getFileId(), streamTimeout)) {
                    openFileMap.remove(handle);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("After remove stream " + handle.getFileId() + ", the stream number:" + size());
                    }
                    ctxToRemove.add(ctx2);
                }
            }
        }
    }
    // Invoke the cleanup outside the lock
    for (OpenFileCtx ofc : ctxToRemove) {
        ofc.cleanup();
    }
}
Also used: Entry (java.util.Map.Entry), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), ArrayList (java.util.ArrayList), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
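
A sketch of exercising scan() with a mocked stream context, loosely patterned on Hadoop's TestOpenFileCtxCache; the OpenFileCtxCache(NfsConfiguration, long) constructor, the put() call, and the NfsConfigKeys constant are assumptions based on that test.

OpenFileCtxCache cache = new OpenFileCtxCache(new NfsConfiguration(), 10 * 60 * 1000);
OpenFileCtx ctx = Mockito.mock(OpenFileCtx.class);
// streamCleanup() returning true marks the stream as expired; scan() checks it
// once outside the lock and re-checks under the lock before evicting.
Mockito.when(ctx.streamCleanup(Mockito.anyLong(), Mockito.anyLong()))
    .thenReturn(true);
cache.put(new FileHandle(1), ctx);
cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
// The entry is now evicted, and ctx.cleanup() has run outside the lock.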

Example 55 with FileHandle

Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.

From the class RpcProgramNfs3, method fsstat.

@VisibleForTesting
FSSTAT3Response fsstat(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK);
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    FSSTAT3Request request;
    try {
        request = FSSTAT3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid FSSTAT request");
        return new FSSTAT3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS FSSTAT fileId: " + handle.getFileId() + " client: " + remoteAddress);
    }
    try {
        FsStatus fsStatus = dfsClient.getDiskStatus();
        long totalBytes = fsStatus.getCapacity();
        long freeBytes = fsStatus.getRemaining();
        Nfs3FileAttributes attrs = writeManager.getFileAttr(dfsClient, handle, iug);
        if (attrs == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new FSSTAT3Response(Nfs3Status.NFS3ERR_STALE);
        }
        long maxFsObjects = config.getLong("dfs.max.objects", 0);
        if (maxFsObjects == 0) {
            // A value of zero in HDFS means there is no limit on the number of
            // objects the DFS supports. Use Integer.MAX_VALUE instead of
            // Long.MAX_VALUE so that 32-bit clients won't complain.
            maxFsObjects = Integer.MAX_VALUE;
        }
        return new FSSTAT3Response(Nfs3Status.NFS3_OK, attrs, totalBytes, freeBytes, freeBytes, maxFsObjects, maxFsObjects, maxFsObjects, 0);
    } catch (RemoteException r) {
        LOG.warn("Exception ", r);
        IOException io = r.unwrapRemoteException();
        // An AuthorizationException is thrown if the user can't be proxied.
        if (io instanceof AuthorizationException) {
            return new FSSTAT3Response(Nfs3Status.NFS3ERR_ACCES);
        } else {
            return new FSSTAT3Response(Nfs3Status.NFS3ERR_IO);
        }
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        int status = mapErrorStatus(e);
        return new FSSTAT3Response(status);
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), AuthorizationException (org.apache.hadoop.security.authorize.AuthorizationException), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), FSSTAT3Response (org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response), IOException (java.io.IOException), FSSTAT3Request (org.apache.hadoop.nfs.nfs3.request.FSSTAT3Request), RemoteException (org.apache.hadoop.ipc.RemoteException), FsStatus (org.apache.hadoop.fs.FsStatus), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
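
A sketch for fsstat; the handler only needs READ_ONLY access, so any permitted client address works. As above, nfsd, handle, and securityHandler are assumed fixtures, and the single-argument FSSTAT3Request constructor is assumed from the request class named above.

XDR xdr = new XDR();
FSSTAT3Request req = new FSSTAT3Request(handle);
req.serialize(xdr);
FSSTAT3Response response = nfsd.fsstat(xdr.asReadOnlyWrap(), securityHandler,
    new InetSocketAddress("localhost", 1234));
// On success the response carries capacity/remaining bytes from the DFSClient
// disk status and object-count limits derived from "dfs.max.objects".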

Aggregations

FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 79 uses
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 34 uses
XDR (org.apache.hadoop.oncrpc.XDR): 34 uses
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 31 uses
Test (org.junit.Test): 31 uses
InetSocketAddress (java.net.InetSocketAddress): 28 uses
DFSClient (org.apache.hadoop.hdfs.DFSClient): 26 uses
IOException (java.io.IOException): 24 uses
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 19 uses
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 12 uses
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 11 uses
WRITE3Request (org.apache.hadoop.nfs.nfs3.request.WRITE3Request): 8 uses
NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 7 uses
HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 6 uses
CREATE3Request (org.apache.hadoop.nfs.nfs3.request.CREATE3Request): 6 uses
READ3Request (org.apache.hadoop.nfs.nfs3.request.READ3Request): 5 uses
CREATE3Response (org.apache.hadoop.nfs.nfs3.response.CREATE3Response): 5 uses
READ3Response (org.apache.hadoop.nfs.nfs3.response.READ3Response): 5 uses
WRITE3Response (org.apache.hadoop.nfs.nfs3.response.WRITE3Response): 5 uses
VerifierNone (org.apache.hadoop.oncrpc.security.VerifierNone): 5 uses