
Example 6 with WRITE3Request

Use of org.apache.hadoop.nfs.nfs3.request.WRITE3Request in project hadoop by apache.

In class OpenFileCtx, method processOverWrite.

/** Process an overwrite write request */
private void processOverWrite(DFSClient dfsClient, WRITE3Request request, Channel channel, int xid, IdMappingServiceProvider iug) {
    WccData wccData = new WccData(latestAttr.getWccAttr(), null);
    long offset = request.getOffset();
    int count = request.getCount();
    WriteStableHow stableHow = request.getStableHow();
    WRITE3Response response;
    long cachedOffset = nextOffset.get();
    if (offset + count > cachedOffset) {
        LOG.warn("Treat this jumbo write as a real random write, no support.");
        response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL, wccData, 0, WriteStableHow.UNSTABLE, Nfs3Constant.WRITE_COMMIT_VERF);
    } else {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Process perfectOverWrite");
        }
        // TODO: let executor handle perfect overwrite
        response = processPerfectOverWrite(dfsClient, offset, count, stableHow, request.getData().array(), Nfs3Utils.getFileIdPath(request.getHandle()), wccData, iug);
    }
    updateLastAccessTime();
    Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
}
Also used: WccData(org.apache.hadoop.nfs.nfs3.response.WccData), WriteStableHow(org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow), XDR(org.apache.hadoop.oncrpc.XDR), VerifierNone(org.apache.hadoop.oncrpc.security.VerifierNone), WRITE3Response(org.apache.hadoop.nfs.nfs3.response.WRITE3Response)
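
For context, the WRITE3Request consumed by processOverWrite can be built directly. A minimal sketch, using only the constructor and getters that appear in the other examples on this page; the payload and the empty FileHandle are placeholders:

import java.nio.ByteBuffer;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;

public class Write3RequestSketch {
    public static void main(String[] args) {
        byte[] data = { 1, 2, 3, 4 };
        // A four-byte UNSTABLE write starting at offset 0; the same constructor
        // is used in the test examples further down this page.
        WRITE3Request request = new WRITE3Request(new FileHandle(), 0, data.length, WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
        // processOverWrite reads exactly these fields before comparing the write
        // range against the file's cached next offset.
        System.out.println(request.getOffset() + " " + request.getCount() + " " + request.getStableHow());
    }
}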

Example 7 with WRITE3Request

Use of org.apache.hadoop.nfs.nfs3.request.WRITE3Request in project hadoop by apache.

In class OpenFileCtx, method receivedNewWriteInternal.

private void receivedNewWriteInternal(DFSClient dfsClient, WRITE3Request request, Channel channel, int xid, AsyncDataService asyncDataService, IdMappingServiceProvider iug) {
    WriteStableHow stableHow = request.getStableHow();
    WccAttr preOpAttr = latestAttr.getWccAttr();
    int count = request.getCount();
    WriteCtx writeCtx = addWritesToCache(request, channel, xid);
    if (writeCtx == null) {
        // offset < nextOffset
        processOverWrite(dfsClient, request, channel, xid, iug);
    } else {
        // The write is added to pendingWrites.
        // Check and start writing back if necessary
        boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx);
        if (!startWriting) {
            // offset > nextOffset. check if we need to dump data
            waitForDump();
            // for unstable non-sequential write
            if (stableHow != WriteStableHow.UNSTABLE) {
                LOG.info("Have to change stable write to unstable write: " + request.getStableHow());
                stableHow = WriteStableHow.UNSTABLE;
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("UNSTABLE write request, send response for offset: " + writeCtx.getOffset());
            }
            WccData fileWcc = new WccData(preOpAttr, latestAttr);
            WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
            RpcProgramNfs3.metrics.addWrite(Nfs3Utils.getElapsedTime(writeCtx.startTime));
            Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
            writeCtx.setReplied(true);
        }
    }
}
Also used: WccData(org.apache.hadoop.nfs.nfs3.response.WccData), WriteStableHow(org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow), XDR(org.apache.hadoop.oncrpc.XDR), VerifierNone(org.apache.hadoop.oncrpc.security.VerifierNone), WccAttr(org.apache.hadoop.nfs.nfs3.response.WccAttr), WRITE3Response(org.apache.hadoop.nfs.nfs3.response.WRITE3Response)
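
Taken together with Example 6, this method is classifying each write against the file's cached next offset. A minimal, library-free sketch of that decision, following the comments in the code above; the enum and helper names are invented for illustration and are not part of Hadoop's API:

// Illustrative only: condenses the offset checks made around addWritesToCache
// and processOverWrite; not an actual Hadoop class.
enum WriteKind { SEQUENTIAL, OVERWRITE, NON_SEQUENTIAL }

final class WriteClassifier {
    static WriteKind classify(long offset, int count, long cachedNextOffset) {
        if (offset < cachedNextOffset) {
            // processOverWrite path; rejected with NFS3ERR_INVAL when offset + count
            // still reaches past cachedNextOffset (the "jumbo" case in Example 6)
            return WriteKind.OVERWRITE;
        } else if (offset == cachedNextOffset) {
            // added to pendingWrites and written back in order
            return WriteKind.SEQUENTIAL;
        } else {
            // offset > cachedNextOffset: cached as a non-sequential write and
            // acknowledged immediately, downgraded to UNSTABLE if necessary
            return WriteKind.NON_SEQUENTIAL;
        }
    }
}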

Example 8 with WRITE3Request

Use of org.apache.hadoop.nfs.nfs3.request.WRITE3Request in project hadoop by apache.

In class OpenFileCtx, method receivedNewWrite.

public void receivedNewWrite(DFSClient dfsClient, WRITE3Request request, Channel channel, int xid, AsyncDataService asyncDataService, IdMappingServiceProvider iug) {
    if (!activeState) {
        LOG.info("OpenFileCtx is inactive, fileId: " + request.getHandle().getFileId());
        WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO, fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
        Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
    } else {
        // Update the write time first
        updateLastAccessTime();
        // Handle repeated write requests (same xid or not).
        // If already replied, send reply again. If not replied, drop the
        // repeated request.
        WriteCtx existantWriteCtx = checkRepeatedWriteRequest(request, channel, xid);
        if (existantWriteCtx != null) {
            if (!existantWriteCtx.getReplied()) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Repeated write request which hasn't been served: xid=" + xid + ", drop it.");
                }
            } else {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Repeated write request which is already served: xid=" + xid + ", resend response.");
                }
                WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
                WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc, request.getCount(), request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
                Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
            }
        } else {
            // not a repeated write request
            receivedNewWriteInternal(dfsClient, request, channel, xid, asyncDataService, iug);
        }
    }
}
Also used: WccData(org.apache.hadoop.nfs.nfs3.response.WccData), XDR(org.apache.hadoop.oncrpc.XDR), VerifierNone(org.apache.hadoop.oncrpc.security.VerifierNone), WRITE3Response(org.apache.hadoop.nfs.nfs3.response.WRITE3Response)
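
The repeated-request branch above behaves like a duplicate-request cache: an already-served duplicate gets its reply resent, while an unserved duplicate is simply dropped. A minimal, library-free sketch of that idea, keyed by xid for brevity (the comment in the code notes that OpenFileCtx detects repeats whether or not the xid matches); the class and method names are invented for this example:

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch only; not Hadoop's implementation.
final class RepeatedWriteTracker {
    enum Action { PROCESS_NEW, DROP_DUPLICATE, RESEND_REPLY }

    // xid -> whether a reply has already been sent for that request
    private final Map<Integer, Boolean> repliedByXid = new HashMap<>();

    synchronized Action onWrite(int xid) {
        Boolean replied = repliedByXid.get(xid);
        if (replied == null) {
            repliedByXid.put(xid, Boolean.FALSE);  // first sighting: handle the write
            return Action.PROCESS_NEW;
        }
        // Duplicate: resend the reply if one was sent, otherwise drop the request.
        return replied ? Action.RESEND_REPLY : Action.DROP_DUPLICATE;
    }

    synchronized void markReplied(int xid) {
        repliedByXid.put(xid, Boolean.TRUE);
    }
}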

Example 9 with WRITE3Request

Use of org.apache.hadoop.nfs.nfs3.request.WRITE3Request in project hadoop by apache.

In class TestOutOfOrderWrite, method write.

static XDR write(FileHandle handle, int xid, long offset, int count, byte[] data) {
    XDR request = new XDR();
    RpcCall.getInstance(xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION, Nfs3Constant.NFSPROC3.CREATE.getValue(), new CredentialsNone(), new VerifierNone()).write(request);
    WRITE3Request write1 = new WRITE3Request(handle, offset, count, WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
    write1.serialize(request);
    return request;
}
Also used: XDR(org.apache.hadoop.oncrpc.XDR), VerifierNone(org.apache.hadoop.oncrpc.security.VerifierNone), WRITE3Request(org.apache.hadoop.nfs.nfs3.request.WRITE3Request), CredentialsNone(org.apache.hadoop.oncrpc.security.CredentialsNone)
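
A plausible use of this helper is to produce WRITE requests whose offsets deliberately arrive out of order, which is what TestOutOfOrderWrite exercises. A sketch that would sit next to the helper in the same class; the xids, offsets, and payload size are made up for illustration:

// Sketch only: three WRITE requests for consecutive 128-byte chunks, returned
// in a shuffled order so the gateway sees the middle chunk first.
static XDR[] buildShuffledWrites(FileHandle handle) {
    byte[] chunk = new byte[128];
    return new XDR[] {
        write(handle, 2, 128, chunk.length, chunk), // middle chunk first
        write(handle, 3, 256, chunk.length, chunk), // tail chunk second
        write(handle, 1, 0, chunk.length, chunk)    // head chunk last
    };
}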

Example 10 with WRITE3Request

Use of org.apache.hadoop.nfs.nfs3.request.WRITE3Request in project hadoop by apache.

In class TestWrites, method testAlterWriteRequest.

@Test
public void testAlterWriteRequest() throws IOException {
    int len = 20;
    byte[] data = new byte[len];
    ByteBuffer buffer = ByteBuffer.wrap(data);
    for (int i = 0; i < len; i++) {
        buffer.put((byte) i);
    }
    buffer.flip();
    int originalCount = buffer.array().length;
    WRITE3Request request = new WRITE3Request(new FileHandle(), 0, data.length, WriteStableHow.UNSTABLE, buffer);
    WriteCtx writeCtx1 = new WriteCtx(request.getHandle(), request.getOffset(), request.getCount(), WriteCtx.INVALID_ORIGINAL_COUNT, request.getStableHow(), request.getData(), null, 1, false, WriteCtx.DataState.NO_DUMP);
    Assert.assertTrue(writeCtx1.getData().array().length == originalCount);
    // Now change the write request
    OpenFileCtx.alterWriteRequest(request, 12);
    WriteCtx writeCtx2 = new WriteCtx(request.getHandle(), request.getOffset(), request.getCount(), originalCount, request.getStableHow(), request.getData(), null, 2, false, WriteCtx.DataState.NO_DUMP);
    ByteBuffer appendedData = writeCtx2.getData();
    int position = appendedData.position();
    int limit = appendedData.limit();
    Assert.assertTrue(position == 12);
    Assert.assertTrue(limit - position == 8);
    Assert.assertTrue(appendedData.get(position) == (byte) 12);
    Assert.assertTrue(appendedData.get(position + 1) == (byte) 13);
    Assert.assertTrue(appendedData.get(position + 2) == (byte) 14);
    Assert.assertTrue(appendedData.get(position + 7) == (byte) 19);
    // Test current file write offset is at boundaries
    buffer.position(0);
    request = new WRITE3Request(new FileHandle(), 0, data.length, WriteStableHow.UNSTABLE, buffer);
    OpenFileCtx.alterWriteRequest(request, 1);
    WriteCtx writeCtx3 = new WriteCtx(request.getHandle(), request.getOffset(), request.getCount(), originalCount, request.getStableHow(), request.getData(), null, 2, false, WriteCtx.DataState.NO_DUMP);
    appendedData = writeCtx3.getData();
    position = appendedData.position();
    limit = appendedData.limit();
    Assert.assertTrue(position == 1);
    Assert.assertTrue(limit - position == 19);
    Assert.assertTrue(appendedData.get(position) == (byte) 1);
    Assert.assertTrue(appendedData.get(position + 18) == (byte) 19);
    // Reset buffer position before test another boundary
    buffer.position(0);
    request = new WRITE3Request(new FileHandle(), 0, data.length, WriteStableHow.UNSTABLE, buffer);
    OpenFileCtx.alterWriteRequest(request, 19);
    WriteCtx writeCtx4 = new WriteCtx(request.getHandle(), request.getOffset(), request.getCount(), originalCount, request.getStableHow(), request.getData(), null, 2, false, WriteCtx.DataState.NO_DUMP);
    appendedData = writeCtx4.getData();
    position = appendedData.position();
    limit = appendedData.limit();
    Assert.assertTrue(position == 19);
    Assert.assertTrue(limit - position == 1);
    Assert.assertTrue(appendedData.get(position) == (byte) 19);
}
Also used: FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle), WRITE3Request(org.apache.hadoop.nfs.nfs3.request.WRITE3Request), ByteBuffer(java.nio.ByteBuffer), Test(org.junit.Test)
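
The assertions above imply that alterWriteRequest trims the front of the request's buffer so that it starts at the offset already written: after trimming 12 bytes of a 20-byte buffer, 8 bytes remain and the first readable byte is 12. A JDK-only sketch that reproduces those numbers with a plain ByteBuffer (this mirrors the observable effect checked by the test, not Hadoop's internal code):

import java.nio.ByteBuffer;

public class TrimSketch {
    public static void main(String[] args) {
        byte[] data = new byte[20];
        for (int i = 0; i < data.length; i++) {
            data[i] = (byte) i;
        }
        ByteBuffer buffer = ByteBuffer.wrap(data);
        // Skip the first 12 bytes, as alterWriteRequest(request, 12) does above.
        buffer.position(12);
        System.out.println(buffer.remaining());            // 8
        System.out.println(buffer.get(buffer.position())); // 12
    }
}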

Aggregations

XDR (org.apache.hadoop.oncrpc.XDR): 10 uses
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 9 uses
WRITE3Request (org.apache.hadoop.nfs.nfs3.request.WRITE3Request): 8 uses
WRITE3Response (org.apache.hadoop.nfs.nfs3.response.WRITE3Response): 7 uses
InetSocketAddress (java.net.InetSocketAddress): 5 uses
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 5 uses
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 5 uses
VerifierNone (org.apache.hadoop.oncrpc.security.VerifierNone): 5 uses
Test (org.junit.Test): 5 uses
DFSClient (org.apache.hadoop.hdfs.DFSClient): 4 uses
WriteStableHow (org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow): 4 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3 uses
NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 3 uses
CREATE3Request (org.apache.hadoop.nfs.nfs3.request.CREATE3Request): 3 uses
READ3Request (org.apache.hadoop.nfs.nfs3.request.READ3Request): 3 uses
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 3 uses
CREATE3Response (org.apache.hadoop.nfs.nfs3.response.CREATE3Response): 3 uses
READ3Response (org.apache.hadoop.nfs.nfs3.response.READ3Response): 3 uses
SecurityHandler (org.apache.hadoop.oncrpc.security.SecurityHandler): 3 uses
IOException (java.io.IOException): 2 uses