Use of org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response in project hadoop by apache.
The class RpcProgramNfs3, method pathconf.
@VisibleForTesting
PATHCONF3Response pathconf(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  PATHCONF3Response response = new PATHCONF3Response(Nfs3Status.NFS3_OK);
  // Reject clients that do not have at least read-only access.
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    response.setStatus(Nfs3Status.NFS3ERR_ACCES);
    return response;
  }
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  PATHCONF3Request request;
  try {
    request = PATHCONF3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid PATHCONF request");
    return new PATHCONF3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle handle = request.getHandle();
  Nfs3FileAttributes attrs;
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS PATHCONF fileId: " + handle.getFileId() + " client: "
        + remoteAddress);
  }
  try {
    attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
        iug);
    if (attrs == null) {
      // The handle no longer maps to a file; report a stale handle.
      LOG.info("Can't get path for fileId: " + handle.getFileId());
      return new PATHCONF3Response(Nfs3Status.NFS3ERR_STALE);
    }
    return new PATHCONF3Response(Nfs3Status.NFS3_OK, attrs, 0,
        HdfsServerConstants.MAX_PATH_LENGTH, true, false, false, true);
  } catch (IOException e) {
    LOG.warn("Exception ", e);
    int status = mapErrorStatus(e);
    return new PATHCONF3Response(status);
  }
}
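The eight positional arguments in the NFS3_OK constructor call above are easy to misread. As a minimal sketch (the local variable names are illustrative, not from the Hadoop source), here is the same success response with each argument labeled:

  // Hypothetical, labeled version of the success response built above.
  int linkMax = 0;                                    // HDFS does not support hard links
  int nameMax = HdfsServerConstants.MAX_PATH_LENGTH;  // HDFS path length limit
  boolean noTrunc = true;           // over-long names are rejected, not truncated
  boolean chownRestricted = false;  // chown is not restricted to the superuser
  boolean caseInsensitive = false;  // HDFS names are case sensitive
  boolean casePreserving = true;    // HDFS preserves the case of names
  return new PATHCONF3Response(Nfs3Status.NFS3_OK, attrs, linkMax, nameMax,
      noTrunc, chownRestricted, caseInsensitive, casePreserving);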
Use of org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response in project hadoop by apache.
The class TestRpcProgramNfs3, method testPathconf.
@Test(timeout = 60000)
public void testPathconf() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  PATHCONF3Request req = new PATHCONF3Request(handle);
  req.serialize(xdr_req);
  // An attempt by an unprivileged user should fail.
  PATHCONF3Response response1 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());
  // An attempt by a privileged user should pass.
  PATHCONF3Response response2 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
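Because both serialize and deserialize for the request appear in these excerpts, a round trip through XDR can be checked directly. A minimal sketch using only the request and handle APIs shown above:

  // Round-trip sketch: encoding a PATHCONF3Request and decoding it again
  // should recover the original file handle.
  FileHandle original = new FileHandle(dirId);
  XDR buf = new XDR();
  new PATHCONF3Request(original).serialize(buf);
  PATHCONF3Request decoded = PATHCONF3Request.deserialize(buf.asReadOnlyWrap());
  assertEquals(original.getFileId(), decoded.getHandle().getFileId());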
Use of org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response in project hadoop by apache.
The class PATHCONF3Response, method deserialize.
public static PATHCONF3Response deserialize(XDR xdr) {
  int status = xdr.readInt();
  // The post-op attribute presence flag is read and discarded; the
  // attributes themselves are always deserialized next.
  xdr.readBoolean();
  Nfs3FileAttributes objPostOpAttr = Nfs3FileAttributes.deserialize(xdr);
  int linkMax = 0;
  int nameMax = 0;
  boolean noTrunc = false;
  boolean chownRestricted = false;
  boolean caseInsensitive = false;
  boolean casePreserving = false;
  // The six pathconf fields are only present on the wire when the
  // call succeeded.
  if (status == Nfs3Status.NFS3_OK) {
    linkMax = xdr.readInt();
    nameMax = xdr.readInt();
    noTrunc = xdr.readBoolean();
    chownRestricted = xdr.readBoolean();
    caseInsensitive = xdr.readBoolean();
    casePreserving = xdr.readBoolean();
  }
  return new PATHCONF3Response(status, objPostOpAttr, linkMax, nameMax,
      noTrunc, chownRestricted, caseInsensitive, casePreserving);
}
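The deserializer fixes the body layout: status, a presence flag plus post-op attributes, then the six pathconf fields only on NFS3_OK. A mirror-image encoder can be sketched from that layout alone; this is inferred from deserialize above, not copied from Hadoop's actual PATHCONF3Response.serialize (which also handles the RPC reply framing):

  // Sketch of the inverse of deserialize(), writing the same body layout
  // it consumes. writePathconfBody is a hypothetical helper, not part of
  // the Hadoop API; it assumes Nfs3FileAttributes.serialize(XDR), the
  // counterpart of the deserialize call used above.
  static XDR writePathconfBody(XDR out, int status, Nfs3FileAttributes attrs,
      int linkMax, int nameMax, boolean noTrunc, boolean chownRestricted,
      boolean caseInsensitive, boolean casePreserving) {
    out.writeInt(status);
    out.writeBoolean(true);  // post-op attributes follow
    attrs.serialize(out);
    if (status == Nfs3Status.NFS3_OK) {
      out.writeInt(linkMax);
      out.writeInt(nameMax);
      out.writeBoolean(noTrunc);
      out.writeBoolean(chownRestricted);
      out.writeBoolean(caseInsensitive);
      out.writeBoolean(casePreserving);
    }
    return out;
  }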