
Example 21 with WccData

Use of org.apache.hadoop.nfs.nfs3.response.WccData in project hadoop by apache.

The class OpenFileCtx, method processCommits.

private void processCommits(long offset) {
    Preconditions.checkState(offset > 0);
    long flushedOffset = getFlushedOffset();
    Entry<Long, CommitCtx> entry = pendingCommits.firstEntry();
    if (entry == null || entry.getValue().offset > flushedOffset) {
        return;
    }
    // Now do sync for the ready commits
    int status = Nfs3Status.NFS3ERR_IO;
    try {
        // Sync file data and length
        fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        status = Nfs3Status.NFS3_OK;
    } catch (ClosedChannelException cce) {
        if (!pendingWrites.isEmpty()) {
            LOG.error("Can't sync for fileId: " + latestAttr.getFileId() + ". Channel closed with writes pending.", cce);
        }
        status = Nfs3Status.NFS3ERR_IO;
    } catch (IOException e) {
        LOG.error("Got stream error during data sync: ", e);
        // Do nothing. Stream will be closed eventually by StreamMonitor.
        status = Nfs3Status.NFS3ERR_IO;
    }
    // Update latestAttr
    try {
        latestAttr = Nfs3Utils.getFileAttr(client, Nfs3Utils.getFileIdPath(latestAttr.getFileId()), iug);
    } catch (IOException e) {
        LOG.error("Can't get new file attr, fileId: " + latestAttr.getFileId(), e);
        status = Nfs3Status.NFS3ERR_IO;
    }
    if (latestAttr.getSize() != offset) {
        LOG.error("After sync, the expect file size: " + offset + ", however actual file size is: " + latestAttr.getSize());
        status = Nfs3Status.NFS3ERR_IO;
    }
    WccData wccData = new WccData(Nfs3Utils.getWccAttr(latestAttr), latestAttr);
    // Send response for the ready commits
    while (entry != null && entry.getValue().offset <= flushedOffset) {
        pendingCommits.remove(entry.getKey());
        CommitCtx commit = entry.getValue();
        COMMIT3Response response = new COMMIT3Response(status, wccData, Nfs3Constant.WRITE_COMMIT_VERF);
        RpcProgramNfs3.metrics.addCommit(Nfs3Utils.getElapsedTime(commit.startTime));
        Nfs3Utils.writeChannelCommit(commit.getChannel(), response.serialize(new XDR(), commit.getXid(), new VerifierNone()), commit.getXid());
        if (LOG.isDebugEnabled()) {
            LOG.debug("FileId: " + latestAttr.getFileId() + " Service time: " + Nfs3Utils.getElapsedTime(commit.startTime) + "ns. Sent response for commit: " + commit);
        }
        entry = pendingCommits.firstEntry();
    }
}
Also used: WccData (org.apache.hadoop.nfs.nfs3.response.WccData), ClosedChannelException (java.nio.channels.ClosedChannelException), COMMIT3Response (org.apache.hadoop.nfs.nfs3.response.COMMIT3Response), XDR (org.apache.hadoop.oncrpc.XDR), VerifierNone (org.apache.hadoop.oncrpc.security.VerifierNone), AtomicLong (java.util.concurrent.atomic.AtomicLong), IOException (java.io.IOException)
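Worth isolating the pattern this handler relies on: WccData carries NFS3 weak cache consistency data, pairing a slim pre-operation snapshot (WccAttr: size, mtime, ctime) with full post-operation attributes. A minimal sketch of the construction, assuming the no-arg Nfs3FileAttributes constructor (default attributes) as a stand-in for a real Nfs3Utils.getFileAttr() result so it runs without a cluster:

import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.response.WccAttr;
import org.apache.hadoop.nfs.nfs3.response.WccData;

public class WccDataSketch {
    public static void main(String[] args) {
        // Stand-in for Nfs3Utils.getFileAttr(client, fileIdPath, iug).
        Nfs3FileAttributes latestAttr = new Nfs3FileAttributes();
        // The pre-op side keeps only size/mtime/ctime; cheap to snapshot.
        WccAttr preOpAttr = Nfs3Utils.getWccAttr(latestAttr);
        // processCommits() pairs the snapshot with the same attributes,
        // since the sync changed nothing a client could have cached.
        WccData wccData = new WccData(preOpAttr, latestAttr);
        System.out.println("post-op size: " + wccData.getPostOpAttr().getSize());
    }
}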

Example 22 with WccData

Use of org.apache.hadoop.nfs.nfs3.response.WccData in project hadoop by apache.

The class OpenFileCtx, method processPerfectOverWrite.

/**
   * Honor two kinds of overwrite: 1) support applications like touch, which
   * write the same content back to change the mtime, and 2) a client that
   * somehow sends the same write again in a different RPC.
   */
private WRITE3Response processPerfectOverWrite(DFSClient dfsClient, long offset, int count, WriteStableHow stableHow, byte[] data, String path, WccData wccData, IdMappingServiceProvider iug) {
    WRITE3Response response;
    // Read the content back
    byte[] readbuffer = new byte[count];
    int readCount = 0;
    FSDataInputStream fis = null;
    try {
        // Sync file data and length to avoid partial read failure
        fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    } catch (ClosedChannelException closedException) {
        LOG.info("The FSDataOutputStream has been closed. " + "Continue processing the perfect overwrite.");
    } catch (IOException e) {
        LOG.info("hsync failed when processing possible perfect overwrite, path=" + path + " error: " + e);
        return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    }
    try {
        fis = dfsClient.createWrappedInputStream(dfsClient.open(path));
        readCount = fis.read(offset, readbuffer, 0, count);
        if (readCount < count) {
            LOG.error("Can't read back " + count + " bytes, partial read size: " + readCount);
            return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
        }
    } catch (IOException e) {
        LOG.info("Read failed when processing possible perfect overwrite, path=" + path, e);
        return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    } finally {
        IOUtils.cleanup(LOG, fis);
    }
    // Compare with the request
    Comparator comparator = new Comparator();
    if (comparator.compare(readbuffer, 0, readCount, data, 0, count) != 0) {
        LOG.info("Perfect overwrite has different content");
        response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    } else {
        LOG.info("Perfect overwrite has same content," + " updating the mtime, then return success");
        Nfs3FileAttributes postOpAttr = null;
        try {
            dfsClient.setTimes(path, Time.monotonicNow(), -1);
            postOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug);
        } catch (IOException e) {
            LOG.info("Got error when processing perfect overwrite, path=" + path + " error: " + e);
            return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
        }
        wccData.setPostOpAttr(postOpAttr);
        response = new WRITE3Response(Nfs3Status.NFS3_OK, wccData, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    }
    return response;
}
Also used: ClosedChannelException (java.nio.channels.ClosedChannelException), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), IOException (java.io.IOException), WRITE3Response (org.apache.hadoop.nfs.nfs3.response.WRITE3Response), Comparator (org.apache.hadoop.io.BytesWritable.Comparator)
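The "perfect overwrite" test above ultimately reduces to a lexicographic raw-byte comparison of the read-back data against the request payload. A minimal sketch of that step in isolation, using WritableComparator.compareBytes, the primitive that BytesWritable.Comparator builds on, so no serialized-length framing gets involved:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.io.WritableComparator;

public class OverwriteCompareSketch {
    public static void main(String[] args) {
        byte[] readBack = "same payload".getBytes(StandardCharsets.UTF_8);
        byte[] request = "same payload".getBytes(StandardCharsets.UTF_8);
        // 0 means the ranges match byte for byte: a perfect overwrite.
        int cmp = WritableComparator.compareBytes(
                readBack, 0, readBack.length, request, 0, request.length);
        System.out.println(cmp == 0 ? "perfect overwrite" : "different content");
    }
}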

Example 23 with WccData

Use of org.apache.hadoop.nfs.nfs3.response.WccData in project hadoop by apache.

The class RpcProgramNfs3, method rmdir.

@VisibleForTesting
RMDIR3Response rmdir(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK);
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    RMDIR3Request request;
    try {
        request = RMDIR3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid RMDIR request");
        return new RMDIR3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle dirHandle = request.getHandle();
    String fileName = request.getName();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS RMDIR dir fileId: " + dirHandle.getFileId() + " fileName: " + fileName + " client: " + remoteAddress);
    }
    String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
    Nfs3FileAttributes preOpDirAttr = null;
    Nfs3FileAttributes postOpDirAttr = null;
    try {
        preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
        if (preOpDirAttr == null) {
            LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
            return new RMDIR3Response(Nfs3Status.NFS3ERR_STALE);
        }
        WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr);
        if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
        }
        String fileIdPath = dirFileIdPath + "/" + fileName;
        HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath);
        if (fstat == null) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
        }
        if (!fstat.isDir()) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, errWcc);
        }
        if (fstat.getChildrenNum() > 0) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTEMPTY, errWcc);
        }
        boolean result = dfsClient.delete(fileIdPath, false);
        WccData dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug);
        if (!result) {
            return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc);
        }
        return new RMDIR3Response(Nfs3Status.NFS3_OK, dirWcc);
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        // Try to return correct WccData
        if (postOpDirAttr == null) {
            try {
                postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
            } catch (IOException e1) {
                LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
            }
        }
        WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), postOpDirAttr);
        int status = mapErrorStatus(e);
        return new RMDIR3Response(status, dirWcc);
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), WccData (org.apache.hadoop.nfs.nfs3.response.WccData), RMDIR3Response (org.apache.hadoop.nfs.nfs3.response.RMDIR3Response), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), RMDIR3Request (org.apache.hadoop.nfs.nfs3.request.RMDIR3Request), IOException (java.io.IOException), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
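Note how rmdir snapshots preOpDirAttr before any mutation, so that every early return (ACCES, NOENT, NOTDIR, NOTEMPTY) can still ship valid weak cache consistency data. A hypothetical condensation of that failure path; the deny() helper is illustrative, not part of the Hadoop API:

import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response;
import org.apache.hadoop.nfs.nfs3.response.WccData;

public class RmdirErrorSketch {
    // Every refusal reuses the same pre-op snapshot on both sides of the
    // WccData, telling the client the directory was not changed.
    static RMDIR3Response deny(int status, Nfs3FileAttributes preOpDirAttr) {
        WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr);
        return new RMDIR3Response(status, errWcc);
    }

    public static void main(String[] args) {
        // Default attributes stand in for a real getFileAttr() result.
        RMDIR3Response resp = deny(Nfs3Status.NFS3ERR_NOTEMPTY, new Nfs3FileAttributes());
        System.out.println("status: " + resp.getStatus());
    }
}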

Example 24 with WccData

Use of org.apache.hadoop.nfs.nfs3.response.WccData in project hadoop by apache.

The class RpcProgramNfs3, method mkdir.

@VisibleForTesting
MKDIR3Response mkdir(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    MKDIR3Response response = new MKDIR3Response(Nfs3Status.NFS3_OK);
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    MKDIR3Request request;
    try {
        request = MKDIR3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid MKDIR request");
        return new MKDIR3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle dirHandle = request.getHandle();
    String fileName = request.getName();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS MKDIR dirId: " + dirHandle.getFileId() + " filename: " + fileName + " client: " + remoteAddress);
    }
    if (request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)) {
        LOG.error("Setting file size is not supported when mkdir: " + fileName + " in dirHandle" + dirHandle);
        return new MKDIR3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
    Nfs3FileAttributes preOpDirAttr = null;
    Nfs3FileAttributes postOpDirAttr = null;
    Nfs3FileAttributes postOpObjAttr = null;
    FileHandle objFileHandle = null;
    try {
        preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
        if (preOpDirAttr == null) {
            LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
            return new MKDIR3Response(Nfs3Status.NFS3ERR_STALE);
        }
        if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
            return new MKDIR3Response(Nfs3Status.NFS3ERR_ACCES, null, preOpDirAttr, new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr));
        }
        final String fileIdPath = dirFileIdPath + "/" + fileName;
        SetAttr3 setAttr3 = request.getObjAttr();
        FsPermission permission = setAttr3.getUpdateFields().contains(SetAttrField.MODE) ? new FsPermission((short) setAttr3.getMode()) : FsPermission.getDefault().applyUMask(umask);
        if (!dfsClient.mkdirs(fileIdPath, permission, false)) {
            WccData dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug);
            return new MKDIR3Response(Nfs3Status.NFS3ERR_IO, null, null, dirWcc);
        }
        // Set group if it's not specified in the request.
        if (!setAttr3.getUpdateFields().contains(SetAttrField.GID)) {
            setAttr3.getUpdateFields().add(SetAttrField.GID);
            setAttr3.setGid(securityHandler.getGid());
        }
        setattrInternal(dfsClient, fileIdPath, setAttr3, false);
        postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
        objFileHandle = new FileHandle(postOpObjAttr.getFileId());
        WccData dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug);
        return new MKDIR3Response(Nfs3Status.NFS3_OK, new FileHandle(postOpObjAttr.getFileId()), postOpObjAttr, dirWcc);
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        // Try to return correct WccData
        if (postOpDirAttr == null) {
            try {
                postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
            } catch (IOException e1) {
                LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e);
            }
        }
        WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), postOpDirAttr);
        int status = mapErrorStatus(e);
        return new MKDIR3Response(status, objFileHandle, postOpObjAttr, dirWcc);
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), WccData (org.apache.hadoop.nfs.nfs3.response.WccData), SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), IOException (java.io.IOException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), MKDIR3Response (org.apache.hadoop.nfs.nfs3.response.MKDIR3Response), MKDIR3Request (org.apache.hadoop.nfs.nfs3.request.MKDIR3Request), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
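The permission expression in mkdir deserves a closer look: an explicit MODE in the SETATTR3 payload wins outright; otherwise the server default (0777) is filtered through the export's umask. A standalone sketch of just that decision, with an illustrative 022 umask and client mode in place of the configured values:

import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirPermissionSketch {
    public static void main(String[] args) {
        FsPermission umask = new FsPermission((short) 0022); // illustrative umask
        boolean modeRequested = false; // stands in for setAttr3.getUpdateFields().contains(SetAttrField.MODE)
        short requestedMode = 0755;    // illustrative client-supplied mode
        FsPermission permission = modeRequested
                ? new FsPermission(requestedMode)
                : FsPermission.getDefault().applyUMask(umask);
        System.out.println("effective permission: " + permission); // rwxr-xr-x
    }
}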

Aggregations

Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 17
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 17
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 16
IOException (java.io.IOException): 15
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 9
DFSClient (org.apache.hadoop.hdfs.DFSClient): 9
WRITE3Response (org.apache.hadoop.nfs.nfs3.response.WRITE3Response): 8
XDR (org.apache.hadoop.oncrpc.XDR): 8
VerifierNone (org.apache.hadoop.oncrpc.security.VerifierNone): 8
WccAttr (org.apache.hadoop.nfs.nfs3.response.WccAttr): 6
WriteStableHow (org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow): 5
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 3
COMMIT3Response (org.apache.hadoop.nfs.nfs3.response.COMMIT3Response): 3
ClosedChannelException (java.nio.channels.ClosedChannelException): 2
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 2
HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 2
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 2
File (java.io.File): 1
RandomAccessFile (java.io.RandomAccessFile): 1
AtomicLong (java.util.concurrent.atomic.AtomicLong): 1