Example 1 with XDR

Use of org.apache.hadoop.oncrpc.XDR in the Apache Hadoop project.

Class TestWrites, method testOverlappingWrites.

@Test
public void testOverlappingWrites() throws IOException, InterruptedException {
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    RpcProgramNfs3 nfsd;
    final int bufSize = 32;
    SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
    Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
    String currentUser = System.getProperty("user.name");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
    // Use an ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    try {
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        Nfs3 nfs3 = new Nfs3(config);
        nfs3.startServiceInternal(false);
        nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
        DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config), config);
        HdfsFileStatus status = dfsClient.getFileInfo("/");
        FileHandle rootHandle = new FileHandle(status.getFileId());
        CREATE3Request createReq = new CREATE3Request(rootHandle, "overlapping-writes" + System.currentTimeMillis(), Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
        XDR createXdr = new XDR();
        createReq.serialize(createXdr);
        CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
        FileHandle handle = createRsp.getObjHandle();
        byte[] buffer = new byte[bufSize];
        for (int i = 0; i < bufSize; i++) {
            buffer[i] = (byte) i;
        }
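        // {offset, length} pairs: the writes overlap one another and together
        // cover all 32 bytes of the buffer, exercising the overlap handling.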
        int[][] ranges = new int[][] { { 0, 10 }, { 5, 7 }, { 5, 5 }, { 10, 6 }, { 18, 6 }, { 20, 6 }, { 28, 4 }, { 16, 2 }, { 25, 4 } };
        for (int i = 0; i < ranges.length; i++) {
            int[] x = ranges[i];
            byte[] tbuffer = new byte[x[1]];
            for (int j = 0; j < x[1]; j++) {
                tbuffer[j] = buffer[x[0] + j];
            }
            WRITE3Request writeReq = new WRITE3Request(handle, (long) x[0], x[1], WriteStableHow.UNSTABLE, ByteBuffer.wrap(tbuffer));
            XDR writeXdr = new XDR();
            writeReq.serialize(writeXdr);
            nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
        }
        waitWrite(nfsd, handle, 60000);
        READ3Request readReq = new READ3Request(handle, 0, bufSize);
        XDR readXdr = new XDR();
        readReq.serialize(readXdr);
        READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
        assertTrue(Arrays.equals(buffer, readRsp.getData().array()));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) SecurityHandler(org.apache.hadoop.oncrpc.security.SecurityHandler) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) InetSocketAddress(java.net.InetSocketAddress) XDR(org.apache.hadoop.oncrpc.XDR) WRITE3Request(org.apache.hadoop.nfs.nfs3.request.WRITE3Request) NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration) SetAttr3(org.apache.hadoop.nfs.nfs3.request.SetAttr3) READ3Request(org.apache.hadoop.nfs.nfs3.request.READ3Request) CREATE3Response(org.apache.hadoop.nfs.nfs3.response.CREATE3Response) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) CREATE3Request(org.apache.hadoop.nfs.nfs3.request.CREATE3Request) READ3Response(org.apache.hadoop.nfs.nfs3.response.READ3Response) Test(org.junit.Test)
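
These TestWrites examples repeat one XDR pattern: build an NFS3 request object, let it serialize itself into a fresh XDR buffer, then hand a read-only wrap of that buffer to the RpcProgramNfs3 handler together with a SecurityHandler and a client address. The sketch below isolates that pattern; the helper name dispatchCreate and the hard-coded localhost:1234 client address are illustrative only, not part of the Hadoop API.

import java.net.InetSocketAddress;

import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.SecurityHandler;

class XdrDispatchSketch {
    // Hypothetical helper: serialize a CREATE3 request into XDR and invoke the handler.
    static CREATE3Response dispatchCreate(RpcProgramNfs3 nfsd, SecurityHandler sec,
            FileHandle dirHandle, String name) {
        CREATE3Request req = new CREATE3Request(dirHandle, name,
                Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
        XDR xdr = new XDR();    // empty buffer to serialize into
        req.serialize(xdr);     // the request encodes itself into the buffer
        // The handler consumes a read-only view of the encoded bytes.
        return nfsd.create(xdr.asReadOnlyWrap(), sec,
                new InetSocketAddress("localhost", 1234));
    }
}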

Example 2 with XDR

Use of org.apache.hadoop.oncrpc.XDR in the Apache Hadoop project.

Class TestWrites, method testOOOWrites.

@Test
public void testOOOWrites() throws IOException, InterruptedException {
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    RpcProgramNfs3 nfsd;
    final int bufSize = 32;
    final int numOOO = 3;
    SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
    Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
    String currentUser = System.getProperty("user.name");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
    // Use an ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    try {
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        Nfs3 nfs3 = new Nfs3(config);
        nfs3.startServiceInternal(false);
        nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
        DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config), config);
        HdfsFileStatus status = dfsClient.getFileInfo("/");
        FileHandle rootHandle = new FileHandle(status.getFileId());
        CREATE3Request createReq = new CREATE3Request(rootHandle, "out-of-order-write" + System.currentTimeMillis(), Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
        XDR createXdr = new XDR();
        createReq.serialize(createXdr);
        CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
        FileHandle handle = createRsp.getObjHandle();
        byte[][] oooBuf = new byte[numOOO][bufSize];
        for (int i = 0; i < numOOO; i++) {
            Arrays.fill(oooBuf[i], (byte) i);
        }
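        // Issue the writes in reverse offset order: buffer i goes at
        // offset (numOOO - 1 - i) * bufSize, so the last buffer lands first.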
        for (int i = 0; i < numOOO; i++) {
            final long offset = (numOOO - 1 - i) * bufSize;
            WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize, WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
            XDR writeXdr = new XDR();
            writeReq.serialize(writeXdr);
            nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
        }
        waitWrite(nfsd, handle, 60000);
        READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
        XDR readXdr = new XDR();
        readReq.serialize(readXdr);
        READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
        assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) SecurityHandler(org.apache.hadoop.oncrpc.security.SecurityHandler) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) InetSocketAddress(java.net.InetSocketAddress) XDR(org.apache.hadoop.oncrpc.XDR) WRITE3Request(org.apache.hadoop.nfs.nfs3.request.WRITE3Request) NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration) SetAttr3(org.apache.hadoop.nfs.nfs3.request.SetAttr3) READ3Request(org.apache.hadoop.nfs.nfs3.request.READ3Request) CREATE3Response(org.apache.hadoop.nfs.nfs3.response.CREATE3Response) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) CREATE3Request(org.apache.hadoop.nfs.nfs3.request.CREATE3Request) READ3Response(org.apache.hadoop.nfs.nfs3.response.READ3Response) Test(org.junit.Test)
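
The test reads back only the middle block, but each out-of-order block could be verified the same way: issue a READ3Request at the block's offset and compare the returned bytes against the pattern written by Arrays.fill. A hedged sketch follows; readMatches is a hypothetical helper, not an existing method in TestWrites.

import java.net.InetSocketAddress;
import java.util.Arrays;

import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.request.READ3Request;
import org.apache.hadoop.nfs.nfs3.response.READ3Response;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.SecurityHandler;

class ReadBackSketch {
    // Hypothetical helper: read `count` bytes at `offset` and compare with `expected`.
    static boolean readMatches(RpcProgramNfs3 nfsd, SecurityHandler sec,
            FileHandle handle, long offset, int count, byte[] expected, int serverPort) {
        READ3Request readReq = new READ3Request(handle, offset, count);
        XDR readXdr = new XDR();
        readReq.serialize(readXdr);
        READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), sec,
                new InetSocketAddress("localhost", serverPort));
        return Arrays.equals(expected, readRsp.getData().array());
    }
}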

Example 3 with XDR

Use of org.apache.hadoop.oncrpc.XDR in the Apache Hadoop project.

Class WriteManager, method handleWrite.

void handleWrite(DFSClient dfsClient, WRITE3Request request, Channel channel, int xid, Nfs3FileAttributes preOpAttr) throws IOException {
    int count = request.getCount();
    byte[] data = request.getData().array();
    if (data.length < count) {
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
        Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
        return;
    }
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("handleWrite " + request);
    }
    // Check if there is a stream to write
    FileHandle fileHandle = request.getHandle();
    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
    if (openFileCtx == null) {
        LOG.info("No opened stream for fileId: " + fileHandle.getFileId());
        String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle.getFileId());
        HdfsDataOutputStream fos = null;
        Nfs3FileAttributes latestAttr = null;
        try {
            int bufferSize = config.getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
            fos = dfsClient.append(fileIdPath, bufferSize, EnumSet.of(CreateFlag.APPEND), null, null);
            latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
        } catch (RemoteException e) {
            IOException io = e.unwrapRemoteException();
            if (io instanceof AlreadyBeingCreatedException) {
                LOG.warn("Can't append file: " + fileIdPath + ". Possibly the file is being closed. Drop the request: " + request + ", wait for the client to retry...");
                return;
            }
            throw e;
        } catch (IOException e) {
            LOG.error("Can't append to file: " + fileIdPath, e);
            if (fos != null) {
                fos.close();
            }
            WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), preOpAttr);
            WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO, fileWcc, count, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
            Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
            return;
        }
        // Add open stream
        String writeDumpDir = config.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY, NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT);
        openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/" + fileHandle.getFileId(), dfsClient, iug, aixCompatMode, config);
        if (!addOpenFileStream(fileHandle, openFileCtx)) {
            LOG.info("Can't add new stream. Close it. Tell client to retry.");
            try {
                fos.close();
            } catch (IOException e) {
                LOG.error("Can't close stream for fileId: " + handle.getFileId(), e);
            }
            // Notify client to retry
            WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
            WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_JUKEBOX, fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
            Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
            return;
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Opened stream for appending file: " + fileHandle.getFileId());
        }
    }
    // Add write into the async job queue
    openFileCtx.receivedNewWrite(dfsClient, request, channel, xid, asyncDataService, iug);
    return;
}
Also used : WccData(org.apache.hadoop.nfs.nfs3.response.WccData) AlreadyBeingCreatedException(org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) XDR(org.apache.hadoop.oncrpc.XDR) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) IOException(java.io.IOException) VerifierNone(org.apache.hadoop.oncrpc.security.VerifierNone) HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream) WRITE3Response(org.apache.hadoop.nfs.nfs3.response.WRITE3Response) RemoteException(org.apache.hadoop.ipc.RemoteException)
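
The reply side of handleWrite mirrors the request side: a WRITE3Response is serialized into a fresh XDR together with the RPC xid and a VerifierNone, and the resulting buffer is what Nfs3Utils.writeChannel pushes back over the channel. The sketch below isolates that encoding step; the helper names are hypothetical, and the nested location Nfs3Constant.WriteStableHow is an assumption based on how the stable-how value is used in these examples.

import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
import org.apache.hadoop.nfs.nfs3.response.WccData;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone;

class WriteReplySketch {
    // Hypothetical helper: encode a bare NFS3ERR_INVAL reply for the given RPC xid.
    static XDR encodeInvalReply(int xid) {
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
        // serialize(...) appends the response to a fresh XDR, tagged with the xid
        // and a null verifier, ready for Nfs3Utils.writeChannel(channel, xdr, xid).
        return response.serialize(new XDR(), xid, new VerifierNone());
    }

    // Hypothetical helper: encode an NFS3ERR_IO reply that also carries wcc data,
    // mirroring the IOException branch in handleWrite above.
    static XDR encodeIoErrorReply(WccData fileWcc, int count,
            Nfs3Constant.WriteStableHow stableHow, int xid) {
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO, fileWcc,
                count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
        return response.serialize(new XDR(), xid, new VerifierNone());
    }
}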

Example 4 with XDR

Use of org.apache.hadoop.oncrpc.XDR in the Apache Hadoop project.

Class WriteManager, method handleCommit.

void handleCommit(DFSClient dfsClient, FileHandle fileHandle, long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) {
    long startTime = System.nanoTime();
    int status;
    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
    if (openFileCtx == null) {
        LOG.info("No opened stream for fileId: " + fileHandle.getFileId() + " commitOffset=" + commitOffset + ". Return success in this case.");
        status = Nfs3Status.NFS3_OK;
    } else {
        COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset, channel, xid, preOpAttr, false);
        switch(ret) {
            case COMMIT_FINISHED:
            case COMMIT_INACTIVE_CTX:
                status = Nfs3Status.NFS3_OK;
                break;
            case COMMIT_INACTIVE_WITH_PENDING_WRITE:
            case COMMIT_ERROR:
                status = Nfs3Status.NFS3ERR_IO;
                break;
            case COMMIT_WAIT:
                // Do nothing. Commit is async now.
                return;
            case COMMIT_SPECIAL_WAIT:
                status = Nfs3Status.NFS3ERR_JUKEBOX;
                break;
            case COMMIT_SPECIAL_SUCCESS:
                status = Nfs3Status.NFS3_OK;
                break;
            default:
                LOG.error("Should not get commit return code: " + ret.name());
                throw new RuntimeException("Should not get commit return code: " + ret.name());
        }
    }
    // Send out the response
    Nfs3FileAttributes postOpAttr = null;
    try {
        postOpAttr = getFileAttr(dfsClient, new FileHandle(preOpAttr.getFileId()), iug);
    } catch (IOException e1) {
        LOG.info("Can't get postOpAttr for fileId: " + preOpAttr.getFileId(), e1);
    }
    WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
    COMMIT3Response response = new COMMIT3Response(status, fileWcc, Nfs3Constant.WRITE_COMMIT_VERF);
    RpcProgramNfs3.metrics.addCommit(Nfs3Utils.getElapsedTime(startTime));
    Nfs3Utils.writeChannelCommit(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid);
}
Also used : WccData(org.apache.hadoop.nfs.nfs3.response.WccData) COMMIT3Response(org.apache.hadoop.nfs.nfs3.response.COMMIT3Response) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) XDR(org.apache.hadoop.oncrpc.XDR) VerifierNone(org.apache.hadoop.oncrpc.security.VerifierNone) IOException(java.io.IOException) COMMIT_STATUS(org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS)
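
The switch in handleCommit is essentially a mapping from the open-file context's internal COMMIT_STATUS to an NFS3 status code, with COMMIT_WAIT short-circuiting because the commit completes asynchronously. Pulled out as a standalone helper it would look roughly like this; it is a sketch of the same mapping, not an existing Hadoop method.

import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;

class CommitStatusMappingSketch {
    // Sketch of the status mapping used by handleCommit; not an existing Hadoop API.
    static int toNfs3Status(COMMIT_STATUS ret) {
        switch (ret) {
            case COMMIT_FINISHED:
            case COMMIT_INACTIVE_CTX:
            case COMMIT_SPECIAL_SUCCESS:
                return Nfs3Status.NFS3_OK;
            case COMMIT_INACTIVE_WITH_PENDING_WRITE:
            case COMMIT_ERROR:
                return Nfs3Status.NFS3ERR_IO;
            case COMMIT_SPECIAL_WAIT:
                return Nfs3Status.NFS3ERR_JUKEBOX;
            case COMMIT_WAIT:
                // handleCommit returns early for COMMIT_WAIT (async commit), so this
                // value never reaches the response mapping in the original code.
                throw new IllegalStateException("COMMIT_WAIT is handled asynchronously");
            default:
                throw new RuntimeException("Should not get commit return code: " + ret.name());
        }
    }
}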

Example 5 with XDR

Use of org.apache.hadoop.oncrpc.XDR in the Apache Hadoop project.

Class TestMountd, method testStart.

@Test
public void testStart() throws IOException {
    // Start minicluster
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();
    // Use an ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    // 1s
    int newTimeoutMillis = 1000;
    // Set the new portmap rpc timeout values and check
    config.setInt(NfsConfigKeys.NFS_UDP_CLIENT_PORTMAP_TIMEOUT_MILLIS_KEY, newTimeoutMillis);
    assertTrue(config.getInt(NfsConfigKeys.NFS_UDP_CLIENT_PORTMAP_TIMEOUT_MILLIS_KEY, 0) == newTimeoutMillis);
    // Start nfs
    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd().getRpcProgram();
    mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
    assertTrue(mountd.getPortmapUdpTimeoutMillis() == newTimeoutMillis);
    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    nfsd.nullProcedure();
    assertTrue(nfsd.getPortmapUdpTimeoutMillis() == newTimeoutMillis);
    cluster.shutdown();
}
Also used : RpcProgramMountd(org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) RpcProgramNfs3(org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3) XDR(org.apache.hadoop.oncrpc.XDR) NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration) RpcProgramNfs3(org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3) Nfs3(org.apache.hadoop.hdfs.nfs.nfs3.Nfs3) Test(org.junit.Test)
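
Beyond carrying serialized request and response objects, an XDR buffer can be written and read directly with its primitive accessors. The round trip below is a sketch only: the method names writeInt, writeLongAsHyper, readInt, and readHyper are assumed from common org.apache.hadoop.oncrpc.XDR usage and should be checked against the Hadoop version at hand; asReadOnlyWrap is the same call the examples above use to hand encoded bytes to a handler.

import org.apache.hadoop.oncrpc.XDR;

class XdrRoundTripSketch {
    // Minimal encode/decode round trip on a raw XDR buffer (method names assumed).
    static void roundTrip() {
        XDR out = new XDR();
        out.writeInt(42);                    // 4-byte XDR integer
        out.writeLongAsHyper(1234L);         // 8-byte XDR hyper

        XDR in = out.asReadOnlyWrap();       // read-only view over the encoded bytes
        int decodedInt = in.readInt();       // 42
        long decodedHyper = in.readHyper();  // 1234
    }
}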

Aggregations

Types co-occurring with org.apache.hadoop.oncrpc.XDR across the matched sources, with usage counts:

XDR (org.apache.hadoop.oncrpc.XDR): 53
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 33
Test (org.junit.Test): 32
InetSocketAddress (java.net.InetSocketAddress): 30
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 28
VerifierNone (org.apache.hadoop.oncrpc.security.VerifierNone): 15
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 9
WRITE3Response (org.apache.hadoop.nfs.nfs3.response.WRITE3Response): 8
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 8
WRITE3Request (org.apache.hadoop.nfs.nfs3.request.WRITE3Request): 6
IOException (java.io.IOException): 5
NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 5
CREATE3Request (org.apache.hadoop.nfs.nfs3.request.CREATE3Request): 5
READ3Request (org.apache.hadoop.nfs.nfs3.request.READ3Request): 5
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 4
CREATE3Response (org.apache.hadoop.nfs.nfs3.response.CREATE3Response): 4
READ3Response (org.apache.hadoop.nfs.nfs3.response.READ3Response): 4
RpcCall (org.apache.hadoop.oncrpc.RpcCall): 4
CredentialsNone (org.apache.hadoop.oncrpc.security.CredentialsNone): 4
Path (org.apache.hadoop.fs.Path): 3