Search in sources :

Example 1 with READLINK3Request

use of org.apache.hadoop.nfs.nfs3.request.READLINK3Request in project hadoop by apache.

From the class TestRpcProgramNfs3, the method testReadlink:

@Test(timeout = 60000)
public void testReadlink() throws Exception {
    // Set up: create a symlink "fubar" -> "bar" inside the test directory.
    HdfsFileStatus dirStatus = nn.getRpcServer().getFileInfo(testdir);
    FileHandle dirHandle = new FileHandle(dirStatus.getFileId());
    XDR symlinkXdr = new XDR();
    new SYMLINK3Request(dirHandle, "fubar", new SetAttr3(), "bar").serialize(symlinkXdr);
    SYMLINK3Response symlinkResponse = nfsd.symlink(symlinkXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, symlinkResponse.getStatus());
    // Serialize a READLINK request targeting the symlink just created.
    XDR readlinkXdr = new XDR();
    new READLINK3Request(symlinkResponse.getObjFileHandle()).serialize(readlinkXdr);
    // An unprivileged caller must be rejected with NFS3ERR_ACCES.
    READLINK3Response denied = nfsd.readlink(readlinkXdr.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
    // A privileged caller succeeds.
    READLINK3Response allowed = nfsd.readlink(readlinkXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, allowed.getStatus());
}
Also used : SYMLINK3Request(org.apache.hadoop.nfs.nfs3.request.SYMLINK3Request) SetAttr3(org.apache.hadoop.nfs.nfs3.request.SetAttr3) READLINK3Request(org.apache.hadoop.nfs.nfs3.request.READLINK3Request) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) InetSocketAddress(java.net.InetSocketAddress) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) XDR(org.apache.hadoop.oncrpc.XDR) READLINK3Response(org.apache.hadoop.nfs.nfs3.response.READLINK3Response) SYMLINK3Response(org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response) Test(org.junit.Test)

Example 2 with READLINK3Request

use of org.apache.hadoop.nfs.nfs3.request.READLINK3Request in project hadoop by apache.

From the class RpcProgramNfs3, the method readlink:

@VisibleForTesting
/**
 * Handles the NFSv3 READLINK procedure: resolves the target path stored in a
 * symbolic link identified by the file handle in {@code xdr}.
 *
 * @param xdr serialized READLINK3 request
 * @param securityHandler identifies the calling user for DFSClient lookup
 * @param remoteAddress client address, used for the access-privilege check
 * @return a READLINK3Response carrying the link target on success, or an
 *         NFS3 error status (ACCES, SERVERFAULT, INVAL, STALE, IO) on failure
 */
@VisibleForTesting
READLINK3Response readlink(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    READLINK3Response response = new READLINK3Response(Nfs3Status.NFS3_OK);
    // READLINK only needs read access; reject clients without it.
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    READLINK3Request request;
    try {
        request = READLINK3Request.deserialize(xdr);
    } catch (IOException e) {
        // Log the cause so malformed requests can be diagnosed (the original
        // dropped the exception here).
        LOG.error("Invalid READLINK request", e);
        return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS READLINK fileId: " + handle.getFileId() + " client: " + remoteAddress);
    }
    String fileIdPath = Nfs3Utils.getFileIdPath(handle);
    try {
        String target = dfsClient.getLinkTarget(fileIdPath);
        Nfs3FileAttributes postOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
        if (postOpAttr == null) {
            // No attributes means the handle no longer maps to a live file.
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new READLINK3Response(Nfs3Status.NFS3ERR_STALE);
        }
        if (postOpAttr.getType() != NfsFileType.NFSLNK.toValue()) {
            LOG.error("Not a symlink, fileId: " + handle.getFileId());
            return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL);
        }
        if (target == null) {
            LOG.error("Symlink target should not be null, fileId: " + handle.getFileId());
            return new READLINK3Response(Nfs3Status.NFS3ERR_SERVERFAULT);
        }
        // Encode once; the original recomputed getBytes() up to three times.
        byte[] targetBytes = target.getBytes(Charset.forName("UTF-8"));
        int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY, NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
        // Per RFC 1813 the reply must fit in the negotiated read transfer size.
        if (rtmax < targetBytes.length) {
            LOG.error("Link size: " + targetBytes.length + " is larger than max transfer size: " + rtmax);
            return new READLINK3Response(Nfs3Status.NFS3ERR_IO, postOpAttr, new byte[0]);
        }
        return new READLINK3Response(Nfs3Status.NFS3_OK, postOpAttr, targetBytes);
    } catch (IOException e) {
        LOG.warn("Readlink error: " + e.getClass(), e);
        int status = mapErrorStatus(e);
        return new READLINK3Response(status);
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) READLINK3Request(org.apache.hadoop.nfs.nfs3.request.READLINK3Request) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) READLINK3Response(org.apache.hadoop.nfs.nfs3.response.READLINK3Response) IOException(java.io.IOException) VisibleForTesting(com.google.common.annotations.VisibleForTesting)

Aggregations

FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle)2 READLINK3Request (org.apache.hadoop.nfs.nfs3.request.READLINK3Request)2 READLINK3Response (org.apache.hadoop.nfs.nfs3.response.READLINK3Response)2 VisibleForTesting (com.google.common.annotations.VisibleForTesting)1 IOException (java.io.IOException)1 InetSocketAddress (java.net.InetSocketAddress)1 DFSClient (org.apache.hadoop.hdfs.DFSClient)1 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)1 Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes)1 SYMLINK3Request (org.apache.hadoop.nfs.nfs3.request.SYMLINK3Request)1 SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3)1 SYMLINK3Response (org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response)1 XDR (org.apache.hadoop.oncrpc.XDR)1 Test (org.junit.Test)1