Example 1 with COMMIT3Response

Use of org.apache.hadoop.nfs.nfs3.response.COMMIT3Response in project hadoop by apache.

The class WriteManager, method handleCommit:

void handleCommit(DFSClient dfsClient, FileHandle fileHandle, long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
    long startTime = System.nanoTime();
    int status;
    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
    if (openFileCtx == null) {
        LOG.info("No opened stream for fileId: " + fileHandle.getFileId() + " commitOffset=" + commitOffset + ". Return success in this case.");
        status = Nfs3Status.NFS3_OK;
    } else {
        COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset, channel, xid, preOpAttr, false);
        switch(ret) {
            case COMMIT_FINISHED:
            case COMMIT_INACTIVE_CTX:
                status = Nfs3Status.NFS3_OK;
                break;
            case COMMIT_INACTIVE_WITH_PENDING_WRITE:
            case COMMIT_ERROR:
                status = Nfs3Status.NFS3ERR_IO;
                break;
            case COMMIT_WAIT:
                // Do nothing. Commit is async now.
                return;
            case COMMIT_SPECIAL_WAIT:
                status = Nfs3Status.NFS3ERR_JUKEBOX;
                break;
            case COMMIT_SPECIAL_SUCCESS:
                status = Nfs3Status.NFS3_OK;
                break;
            default:
                LOG.error("Should not get commit return code: " + ret.name());
                throw new RuntimeException("Should not get commit return code: " + ret.name());
        }
    }
    // Send out the response
    Nfs3FileAttributes postOpAttr = null;
    try {
        postOpAttr = getFileAttr(dfsClient, new FileHandle(preOpAttr.getFileId()), iug);
    } catch (IOException e1) {
        LOG.info("Can't get postOpAttr for fileId: " + preOpAttr.getFileId(), e1);
    }
    WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
    COMMIT3Response response = new COMMIT3Response(status, fileWcc, Nfs3Constant.WRITE_COMMIT_VERF);
    RpcProgramNfs3.metrics.addCommit(Nfs3Utils.getElapsedTime(startTime));
    Nfs3Utils.writeChannelCommit(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
}
Also used : WccData(org.apache.hadoop.nfs.nfs3.response.WccData) COMMIT3Response(org.apache.hadoop.nfs.nfs3.response.COMMIT3Response) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) XDR(org.apache.hadoop.oncrpc.XDR) VerifierNone(org.apache.hadoop.oncrpc.security.VerifierNone) IOException(java.io.IOException) COMMIT_STATUS(org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS)
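
A minimal, self-contained sketch of the reply-building step at the end of handleCommit, assuming only the COMMIT3Response constructor and serialize signature already shown above; the class and helper names (CommitReplySketch, buildCommitReply) are illustrative and not part of the Hadoop sources.

import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response;
import org.apache.hadoop.nfs.nfs3.response.WccData;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone;

public class CommitReplySketch {

    // Build the XDR reply body for a COMMIT, mirroring the tail of handleCommit above:
    // the status computed from COMMIT_STATUS, the pre/post-op attributes in wcc, and the
    // xid that ties the reply back to the original RPC call.
    static XDR buildCommitReply(int status, WccData wcc, int xid) {
        COMMIT3Response response = new COMMIT3Response(status, wcc, Nfs3Constant.WRITE_COMMIT_VERF);
        return response.serialize(new XDR(), xid, new VerifierNone());
    }
}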

Example 2 with COMMIT3Response

Use of org.apache.hadoop.nfs.nfs3.response.COMMIT3Response in project hadoop by apache.

The class RpcProgramNfs3, method commit:

@VisibleForTesting
COMMIT3Response commit(XDR xdr, Channel channel, int xid, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK);
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    COMMIT3Request request;
    try {
        request = COMMIT3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid COMMIT request");
        response.setStatus(Nfs3Status.NFS3ERR_INVAL);
        return response;
    }
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS COMMIT fileId: " + handle.getFileId() + " offset=" + request.getOffset() + " count=" + request.getCount() + " client: " + remoteAddress);
    }
    String fileIdPath = Nfs3Utils.getFileIdPath(handle);
    Nfs3FileAttributes preOpAttr = null;
    try {
        preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
        if (preOpAttr == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new COMMIT3Response(Nfs3Status.NFS3ERR_STALE);
        }
        if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
            return new COMMIT3Response(Nfs3Status.NFS3ERR_ACCES, new WccData(Nfs3Utils.getWccAttr(preOpAttr), preOpAttr), Nfs3Constant.WRITE_COMMIT_VERF);
        }
        long commitOffset = (request.getCount() == 0) ? 0 : (request.getOffset() + request.getCount());
        // Insert commit as an async request
        writeManager.handleCommit(dfsClient, handle, commitOffset, channel, xid, preOpAttr);
        return null;
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        Nfs3FileAttributes postOpAttr = null;
        try {
            postOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
        } catch (IOException e1) {
            LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId(), e1);
        }
        WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
        int status = mapErrorStatus(e);
        return new COMMIT3Response(status, fileWcc, Nfs3Constant.WRITE_COMMIT_VERF);
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) WccData(org.apache.hadoop.nfs.nfs3.response.WccData) COMMIT3Response(org.apache.hadoop.nfs.nfs3.response.COMMIT3Response) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) IOException(java.io.IOException) COMMIT3Request(org.apache.hadoop.nfs.nfs3.request.COMMIT3Request) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
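
One detail worth calling out is the commitOffset computation: per the code above, a count of 0 means the client wants everything written so far committed (passed downstream as offset 0), otherwise data up to offset + count must be flushed. A tiny stand-alone sketch of that arithmetic, with an illustrative class and method name not taken from the Hadoop sources:

public class CommitOffsetSketch {

    // Mirrors the commitOffset calculation in RpcProgramNfs3.commit above:
    // count == 0 means "commit the whole file", signalled downstream as 0;
    // otherwise commit everything up to offset + count.
    static long commitOffset(long offset, int count) {
        return (count == 0) ? 0 : offset + count;
    }

    public static void main(String[] args) {
        System.out.println(commitOffset(4096, 8192)); // 12288
        System.out.println(commitOffset(4096, 0));    // 0, i.e. flush all pending writes
    }
}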

Example 3 with COMMIT3Response

Use of org.apache.hadoop.nfs.nfs3.response.COMMIT3Response in project hadoop by apache.

The class OpenFileCtx, method processCommits:

private void processCommits(long offset) {
    Preconditions.checkState(offset > 0);
    long flushedOffset = getFlushedOffset();
    Entry<Long, CommitCtx> entry = pendingCommits.firstEntry();
    if (entry == null || entry.getValue().offset > flushedOffset) {
        return;
    }
    // Now do sync for the ready commits
    int status = Nfs3Status.NFS3ERR_IO;
    try {
        // Sync file data and length
        fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        status = Nfs3Status.NFS3_OK;
    } catch (ClosedChannelException cce) {
        if (!pendingWrites.isEmpty()) {
            LOG.error("Can't sync for fileId: " + latestAttr.getFileId() + ". Channel closed with writes pending.", cce);
        }
        status = Nfs3Status.NFS3ERR_IO;
    } catch (IOException e) {
        LOG.error("Got stream error during data sync: ", e);
        // Do nothing. Stream will be closed eventually by StreamMonitor.
        status = Nfs3Status.NFS3ERR_IO;
    }
    // Update latestAttr
    try {
        latestAttr = Nfs3Utils.getFileAttr(client, Nfs3Utils.getFileIdPath(latestAttr.getFileId()), iug);
    } catch (IOException e) {
        LOG.error("Can't get new file attr, fileId: " + latestAttr.getFileId(), e);
        status = Nfs3Status.NFS3ERR_IO;
    }
    if (latestAttr.getSize() != offset) {
        LOG.error("After sync, the expect file size: " + offset + ", however actual file size is: " + latestAttr.getSize());
        status = Nfs3Status.NFS3ERR_IO;
    }
    WccData wccData = new WccData(Nfs3Utils.getWccAttr(latestAttr), latestAttr);
    // Send response for the ready commits
    while (entry != null && entry.getValue().offset <= flushedOffset) {
        pendingCommits.remove(entry.getKey());
        CommitCtx commit = entry.getValue();
        COMMIT3Response response = new COMMIT3Response(status, wccData, Nfs3Constant.WRITE_COMMIT_VERF);
        RpcProgramNfs3.metrics.addCommit(Nfs3Utils.getElapsedTime(commit.startTime));
        Nfs3Utils.writeChannelCommit(commit.getChannel(), response.serialize(new XDR(), commit.getXid(), new VerifierNone()), commit.getXid());
        if (LOG.isDebugEnabled()) {
            LOG.debug("FileId: " + latestAttr.getFileId() + " Service time: " + Nfs3Utils.getElapsedTime(commit.startTime) + "ns. Sent response for commit: " + commit);
        }
        entry = pendingCommits.firstEntry();
    }
}
Also used : WccData(org.apache.hadoop.nfs.nfs3.response.WccData) ClosedChannelException(java.nio.channels.ClosedChannelException) COMMIT3Response(org.apache.hadoop.nfs.nfs3.response.COMMIT3Response) XDR(org.apache.hadoop.oncrpc.XDR) VerifierNone(org.apache.hadoop.oncrpc.security.VerifierNone) AtomicLong(java.util.concurrent.atomic.AtomicLong) IOException(java.io.IOException)
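
The while loop at the end of processCommits is the heart of deferred COMMIT handling: every queued commit whose offset is already covered by the flushed offset is removed and answered with the same status and WccData. A simplified, self-contained sketch of just that queue-draining pattern; PendingCommit is an illustrative stand-in for OpenFileCtx.CommitCtx, not the real class.

import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;

public class PendingCommitDrainSketch {

    // Illustrative stand-in for CommitCtx: only the fields the drain loop needs.
    static final class PendingCommit {
        final long offset;
        final int xid;
        PendingCommit(long offset, int xid) { this.offset = offset; this.xid = xid; }
    }

    // Keyed by commit offset, as in OpenFileCtx.pendingCommits.
    private final ConcurrentSkipListMap<Long, PendingCommit> pendingCommits = new ConcurrentSkipListMap<>();

    void queue(long offset, int xid) {
        pendingCommits.put(offset, new PendingCommit(offset, xid));
    }

    // Drain every queued commit whose offset is already covered by flushedOffset,
    // mirroring the while loop at the end of processCommits above.
    void drainUpTo(long flushedOffset) {
        Map.Entry<Long, PendingCommit> entry = pendingCommits.firstEntry();
        while (entry != null && entry.getValue().offset <= flushedOffset) {
            pendingCommits.remove(entry.getKey());
            PendingCommit commit = entry.getValue();
            System.out.println("replying to xid " + commit.xid + " for offset " + commit.offset);
            entry = pendingCommits.firstEntry();
        }
    }
}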

Example 4 with COMMIT3Response

Use of org.apache.hadoop.nfs.nfs3.response.COMMIT3Response in project hadoop by apache.

The class TestRpcProgramNfs3, method testCommit:

@Test(timeout = 60000)
public void testCommit() throws Exception {
    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
    long dirId = status.getFileId();
    FileHandle handle = new FileHandle(dirId);
    XDR xdr_req = new XDR();
    COMMIT3Request req = new COMMIT3Request(handle, 0, 5);
    req.serialize(xdr_req);
    Channel ch = Mockito.mock(Channel.class);
    // Attempt by an unprivileged user should fail.
    COMMIT3Response response1 = nfsd.commit(xdr_req.asReadOnlyWrap(), ch, 1, securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus());
    // Attempt by a privileged user should pass.
    COMMIT3Response response2 = nfsd.commit(xdr_req.asReadOnlyWrap(), ch, 1, securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect COMMIT3Response:", null, response2);
}
Also used : COMMIT3Response(org.apache.hadoop.nfs.nfs3.response.COMMIT3Response) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) InetSocketAddress(java.net.InetSocketAddress) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) XDR(org.apache.hadoop.oncrpc.XDR) Channel(org.jboss.netty.channel.Channel) COMMIT3Request(org.apache.hadoop.nfs.nfs3.request.COMMIT3Request) Test(org.junit.Test)
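
Both test methods drive the server through the same XDR round trip that RpcProgramNfs3.commit performs on a live request: serialize a COMMIT3Request into an XDR buffer, then hand it over as a read-only wrap. A minimal sketch of that round trip in isolation, using only the request API shown in these examples (the fileId value 12345 is arbitrary):

import java.io.IOException;

import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.request.COMMIT3Request;
import org.apache.hadoop.oncrpc.XDR;

public class CommitRequestRoundTripSketch {

    public static void main(String[] args) throws IOException {
        // Serialize a COMMIT for 5 bytes starting at offset 0 of an arbitrary fileId.
        FileHandle handle = new FileHandle(12345L);
        XDR xdr = new XDR();
        new COMMIT3Request(handle, 0, 5).serialize(xdr);

        // Deserialize it back, as RpcProgramNfs3.commit does with the incoming call.
        COMMIT3Request decoded = COMMIT3Request.deserialize(xdr.asReadOnlyWrap());
        System.out.println("offset=" + decoded.getOffset() + " count=" + decoded.getCount());
    }
}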

Example 5 with COMMIT3Response

Use of org.apache.hadoop.nfs.nfs3.response.COMMIT3Response in project hadoop by apache.

The class TestRpcProgramNfs3, method commit:

private void commit(String fileName, int len) throws Exception {
    final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
    final long dirId = status.getFileId();
    final FileHandle handle = new FileHandle(dirId);
    final XDR xdr_req = new XDR();
    final COMMIT3Request req = new COMMIT3Request(handle, 0, len);
    req.serialize(xdr_req);
    Channel ch = Mockito.mock(Channel.class);
    COMMIT3Response response2 = nfsd.commit(xdr_req.asReadOnlyWrap(), ch, 1, securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect COMMIT3Response:", null, response2);
}
Also used : COMMIT3Response(org.apache.hadoop.nfs.nfs3.response.COMMIT3Response) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) InetSocketAddress(java.net.InetSocketAddress) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) XDR(org.apache.hadoop.oncrpc.XDR) Channel(org.jboss.netty.channel.Channel) COMMIT3Request(org.apache.hadoop.nfs.nfs3.request.COMMIT3Request)
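
In both test helpers the expected COMMIT3Response is null: commit() builds and returns a response itself only on the immediate error paths, and returns null once the request has been handed to WriteManager.handleCommit, which replies asynchronously over the channel. A hypothetical one-liner (not from the Hadoop sources) making that caller-side convention explicit:

import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response;

public class CommitDispatchSketch {

    // Hypothetical helper: a null COMMIT3Response from commit() means the reply will be
    // written to the Netty channel later by the asynchronous commit path, so the caller
    // must not serialize a second response for the same xid.
    static boolean replyAlreadyQueued(COMMIT3Response response) {
        return response == null;
    }
}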

Aggregations

COMMIT3Response (org.apache.hadoop.nfs.nfs3.response.COMMIT3Response): 5
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 4
XDR (org.apache.hadoop.oncrpc.XDR): 4
IOException (java.io.IOException): 3
COMMIT3Request (org.apache.hadoop.nfs.nfs3.request.COMMIT3Request): 3
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 3
InetSocketAddress (java.net.InetSocketAddress): 2
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 2
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 2
VerifierNone (org.apache.hadoop.oncrpc.security.VerifierNone): 2
Channel (org.jboss.netty.channel.Channel): 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
ClosedChannelException (java.nio.channels.ClosedChannelException): 1
AtomicLong (java.util.concurrent.atomic.AtomicLong): 1
DFSClient (org.apache.hadoop.hdfs.DFSClient): 1
COMMIT_STATUS (org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS): 1
Test (org.junit.Test): 1