
Example 41 with Nfs3FileAttributes

Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache.

From the class RpcProgramNfs3, method commit.

@VisibleForTesting
COMMIT3Response commit(XDR xdr, Channel channel, int xid, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK);
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    COMMIT3Request request;
    try {
        request = COMMIT3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid COMMIT request");
        response.setStatus(Nfs3Status.NFS3ERR_INVAL);
        return response;
    }
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS COMMIT fileId: " + handle.getFileId() + " offset=" + request.getOffset() + " count=" + request.getCount() + " client: " + remoteAddress);
    }
    String fileIdPath = Nfs3Utils.getFileIdPath(handle);
    Nfs3FileAttributes preOpAttr = null;
    try {
        preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
        if (preOpAttr == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new COMMIT3Response(Nfs3Status.NFS3ERR_STALE);
        }
        if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
            return new COMMIT3Response(Nfs3Status.NFS3ERR_ACCES, new WccData(Nfs3Utils.getWccAttr(preOpAttr), preOpAttr), Nfs3Constant.WRITE_COMMIT_VERF);
        }
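        // A count of zero asks to commit all pending data; WriteManager
        // interprets a commitOffset of zero as commit-everything.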
        long commitOffset = (request.getCount() == 0) ? 0 : (request.getOffset() + request.getCount());
        // Insert commit as an async request
        writeManager.handleCommit(dfsClient, handle, commitOffset, channel, xid, preOpAttr);
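        // No response is returned on this path: the COMMIT reply is sent
        // over the channel asynchronously once the commit completes.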
        return null;
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        Nfs3FileAttributes postOpAttr = null;
        try {
            postOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
        } catch (IOException e1) {
            LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId(), e1);
        }
        WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
        int status = mapErrorStatus(e);
        return new COMMIT3Response(status, fileWcc, Nfs3Constant.WRITE_COMMIT_VERF);
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) WccData(org.apache.hadoop.nfs.nfs3.response.WccData) COMMIT3Response(org.apache.hadoop.nfs.nfs3.response.COMMIT3Response) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) IOException(java.io.IOException) COMMIT3Request(org.apache.hadoop.nfs.nfs3.request.COMMIT3Request) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
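
For reference, a minimal sketch of driving this handler directly, in the style of Hadoop's NFS gateway tests: the request is serialized into an XDR buffer and passed straight to commit(). The nfsd (RpcProgramNfs3), ch (Channel), handle (FileHandle), and securityHandler objects, plus the java.net.InetSocketAddress import, are assumed test setup, not part of the example above.

// Sketch under assumed test setup: nfsd, ch, handle, securityHandler.
XDR xdr_req = new XDR();
COMMIT3Request req = new COMMIT3Request(handle, 0, 5);
req.serialize(xdr_req);
COMMIT3Response resp = nfsd.commit(xdr_req.asReadOnlyWrap(), ch, 1,
        securityHandler, new InetSocketAddress("localhost", 1234));
// resp may be null: the success path queues the commit and lets the
// WriteManager send the reply over the channel asynchronously.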

Example 42 with Nfs3FileAttributes

Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache.

From the class RpcProgramNfs3, method remove.

@VisibleForTesting
REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK);
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    REMOVE3Request request;
    try {
        request = REMOVE3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid REMOVE request");
        return new REMOVE3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle dirHandle = request.getHandle();
    String fileName = request.getName();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS REMOVE dir fileId: " + dirHandle.getFileId() + " fileName: " + fileName + " client: " + remoteAddress);
    }
    String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
    Nfs3FileAttributes preOpDirAttr = null;
    Nfs3FileAttributes postOpDirAttr = null;
    try {
        preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
        if (preOpDirAttr == null) {
            LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
            return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE);
        }
        WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr);
        if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
            return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
        }
        String fileIdPath = dirFileIdPath + "/" + fileName;
        HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath);
        if (fstat == null) {
            return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
        }
        if (fstat.isDir()) {
            return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, errWcc);
        }
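        // Non-recursive delete; fstat confirmed above that this is a
        // regular file, not a directory.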
        boolean result = dfsClient.delete(fileIdPath, false);
        WccData dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug);
        if (!result) {
            return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc);
        }
        return new REMOVE3Response(Nfs3Status.NFS3_OK, dirWcc);
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        // Try to return correct WccData
        if (postOpDirAttr == null) {
            try {
                postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
            } catch (IOException e1) {
                LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
            }
        }
        WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), postOpDirAttr);
        int status = mapErrorStatus(e);
        return new REMOVE3Response(status, dirWcc);
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) WccData(org.apache.hadoop.nfs.nfs3.response.WccData) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) IOException(java.io.IOException) REMOVE3Response(org.apache.hadoop.nfs.nfs3.response.REMOVE3Response) REMOVE3Request(org.apache.hadoop.nfs.nfs3.request.REMOVE3Request) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
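
A similar hedged sketch for remove(), again assuming the nfsd, dirHandle, and securityHandler objects from a test setup:

XDR xdr_req = new XDR();
REMOVE3Request req = new REMOVE3Request(dirHandle, "f1");
req.serialize(xdr_req);
REMOVE3Response resp = nfsd.remove(xdr_req.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
// Expect NFS3_OK; NFS3ERR_ISDIR if "f1" names a directory, and
// NFS3ERR_NOENT if it does not exist.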

Example 43 with Nfs3FileAttributes

Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache.

From the class OpenFileCtx, method processPerfectOverWrite.

/**
   * Honor two kinds of overwrite: (1) applications such as touch that write
   * the same content back to update the mtime, and (2) a client that somehow
   * sends the same write again in a different RPC.
   */
private WRITE3Response processPerfectOverWrite(DFSClient dfsClient, long offset, int count, WriteStableHow stableHow, byte[] data, String path, WccData wccData, IdMappingServiceProvider iug) {
    WRITE3Response response;
    // Read the content back
    byte[] readbuffer = new byte[count];
    int readCount = 0;
    FSDataInputStream fis = null;
    try {
        // Sync file data and length to avoid partial read failure
        fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    } catch (ClosedChannelException closedException) {
        LOG.info("The FSDataOutputStream has been closed. " + "Continue processing the perfect overwrite.");
    } catch (IOException e) {
        LOG.info("hsync failed when processing possible perfect overwrite, path=" + path + " error: " + e);
        return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    }
    try {
        fis = dfsClient.createWrappedInputStream(dfsClient.open(path));
        readCount = fis.read(offset, readbuffer, 0, count);
        if (readCount < count) {
            LOG.error("Can't read back " + count + " bytes, partial read size: " + readCount);
            return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
        }
    } catch (IOException e) {
        LOG.info("Read failed when processing possible perfect overwrite, path=" + path, e);
        return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    } finally {
        IOUtils.cleanup(LOG, fis);
    }
    // Compare with the request
    Comparator comparator = new Comparator();
    if (comparator.compare(readbuffer, 0, readCount, data, 0, count) != 0) {
        LOG.info("Perfect overwrite has different content");
        response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    } else {
        LOG.info("Perfect overwrite has same content," + " updating the mtime, then return success");
        Nfs3FileAttributes postOpAttr = null;
        try {
            dfsClient.setTimes(path, Time.monotonicNow(), -1);
            postOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug);
        } catch (IOException e) {
            LOG.info("Got error when processing perfect overwrite, path=" + path + " error: " + e);
            return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
        }
        wccData.setPostOpAttr(postOpAttr);
        response = new WRITE3Response(Nfs3Status.NFS3_OK, wccData, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    }
    return response;
}
Also used : ClosedChannelException(java.nio.channels.ClosedChannelException) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) IOException(java.io.IOException) WRITE3Response(org.apache.hadoop.nfs.nfs3.response.WRITE3Response) Comparator(org.apache.hadoop.io.BytesWritable.Comparator)
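
The content check above treats a write as a "perfect overwrite" only when the bytes read back match the request payload exactly. A standalone sketch of that kind of raw byte-range comparison, using WritableComparator.compareBytes (chosen here for illustration; the method above uses BytesWritable.Comparator, which treats its arguments as serialized BytesWritable buffers):

// Sketch: lexicographic comparison of two byte ranges; 0 means identical.
byte[] readBack = "same content".getBytes(StandardCharsets.UTF_8);
byte[] requested = "same content".getBytes(StandardCharsets.UTF_8);
boolean identical = WritableComparator.compareBytes(
        readBack, 0, readBack.length, requested, 0, requested.length) == 0;
// identical == true, so the overwrite would only update the mtime.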

Example 44 with Nfs3FileAttributes

Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache.

From the class RpcProgramNfs3, method fsstat.

@VisibleForTesting
FSSTAT3Response fsstat(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK);
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    FSSTAT3Request request;
    try {
        request = FSSTAT3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid FSSTAT request");
        return new FSSTAT3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS FSSTAT fileId: " + handle.getFileId() + " client: " + remoteAddress);
    }
    try {
        FsStatus fsStatus = dfsClient.getDiskStatus();
        long totalBytes = fsStatus.getCapacity();
        long freeBytes = fsStatus.getRemaining();
        Nfs3FileAttributes attrs = writeManager.getFileAttr(dfsClient, handle, iug);
        if (attrs == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new FSSTAT3Response(Nfs3Status.NFS3ERR_STALE);
        }
        long maxFsObjects = config.getLong("dfs.max.objects", 0);
        if (maxFsObjects == 0) {
            // A value of zero in HDFS indicates no limit to the number
            // of objects that dfs supports. Using Integer.MAX_VALUE instead of
            // Long.MAX_VALUE so 32bit client won't complain.
            maxFsObjects = Integer.MAX_VALUE;
        }
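        // freeBytes fills both the fbytes and abytes fields; maxFsObjects
        // fills tfiles, ffiles, and afiles; invarsec is 0.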
        return new FSSTAT3Response(Nfs3Status.NFS3_OK, attrs, totalBytes, freeBytes, freeBytes, maxFsObjects, maxFsObjects, maxFsObjects, 0);
    } catch (RemoteException r) {
        LOG.warn("Exception ", r);
        IOException io = r.unwrapRemoteException();
        /*
         * AuthorizationException can be thrown if the user can't be proxied.
         */
        if (io instanceof AuthorizationException) {
            return new FSSTAT3Response(Nfs3Status.NFS3ERR_ACCES);
        } else {
            return new FSSTAT3Response(Nfs3Status.NFS3ERR_IO);
        }
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        int status = mapErrorStatus(e);
        return new FSSTAT3Response(status);
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) AuthorizationException(org.apache.hadoop.security.authorize.AuthorizationException) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) FSSTAT3Response(org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response) IOException(java.io.IOException) FSSTAT3Request(org.apache.hadoop.nfs.nfs3.request.FSSTAT3Request) RemoteException(org.apache.hadoop.ipc.RemoteException) FsStatus(org.apache.hadoop.fs.FsStatus) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
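
A hedged sketch of calling fsstat() directly, with nfsd, handle, and securityHandler assumed from a test setup:

XDR xdr_req = new XDR();
new FSSTAT3Request(handle).serialize(xdr_req);
FSSTAT3Response resp = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
// On NFS3_OK the response carries the capacity and remaining bytes from
// DFSClient.getDiskStatus() plus the object-count limits computed above.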

Example 45 with Nfs3FileAttributes

Use of org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes in project hadoop by apache.

From the class RpcProgramNfs3, method rmdir.

@VisibleForTesting
RMDIR3Response rmdir(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK);
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    RMDIR3Request request;
    try {
        request = RMDIR3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid RMDIR request");
        return new RMDIR3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle dirHandle = request.getHandle();
    String fileName = request.getName();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS RMDIR dir fileId: " + dirHandle.getFileId() + " fileName: " + fileName + " client: " + remoteAddress);
    }
    String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
    Nfs3FileAttributes preOpDirAttr = null;
    Nfs3FileAttributes postOpDirAttr = null;
    try {
        preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
        if (preOpDirAttr == null) {
            LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
            return new RMDIR3Response(Nfs3Status.NFS3ERR_STALE);
        }
        WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr);
        if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
        }
        String fileIdPath = dirFileIdPath + "/" + fileName;
        HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath);
        if (fstat == null) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
        }
        if (!fstat.isDir()) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, errWcc);
        }
        if (fstat.getChildrenNum() > 0) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTEMPTY, errWcc);
        }
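        // Non-recursive delete is safe here: the directory was verified
        // empty just above.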
        boolean result = dfsClient.delete(fileIdPath, false);
        WccData dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug);
        if (!result) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc);
        }
        return new RMDIR3Response(Nfs3Status.NFS3_OK, dirWcc);
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        // Try to return correct WccData
        if (postOpDirAttr == null) {
            try {
                postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
            } catch (IOException e1) {
                LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
            }
        }
        WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), postOpDirAttr);
        int status = mapErrorStatus(e);
        return new RMDIR3Response(status, dirWcc);
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) WccData(org.apache.hadoop.nfs.nfs3.response.WccData) RMDIR3Response(org.apache.hadoop.nfs.nfs3.response.RMDIR3Response) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) RMDIR3Request(org.apache.hadoop.nfs.nfs3.request.RMDIR3Request) IOException(java.io.IOException) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
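
And the matching hedged sketch for rmdir(), with nfsd, dirHandle, and securityHandler assumed:

XDR xdr_req = new XDR();
RMDIR3Request req = new RMDIR3Request(dirHandle, "dir1");
req.serialize(xdr_req);
RMDIR3Response resp = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
// Expect NFS3ERR_NOTEMPTY while "dir1" still has children; NFS3_OK once
// it is empty.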

Aggregations

Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 50 usages
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 30 usages
DFSClient (org.apache.hadoop.hdfs.DFSClient): 27 usages
IOException (java.io.IOException): 22 usages
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 18 usages
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 11 usages
HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 10 usages
Test (org.junit.Test): 9 usages
NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 8 usages
ShellBasedIdMapping (org.apache.hadoop.security.ShellBasedIdMapping): 8 usages
COMMIT_STATUS (org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS): 6 usages
CommitCtx (org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx): 5 usages
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 5 usages
RemoteException (org.apache.hadoop.ipc.RemoteException): 4 usages
Channel (org.jboss.netty.channel.Channel): 4 usages
ArrayList (java.util.ArrayList): 2 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 2 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 2 usages
DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing): 2 usages
NfsTime (org.apache.hadoop.nfs.NfsTime): 2 usages