Use of org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response in project hadoop by apache.
The class FSSTAT3Response, method deserialize:
public static FSSTAT3Response deserialize(XDR xdr) {
  int status = xdr.readInt();
  // Consume the post_op_attr "attributes follow" flag; the attributes
  // themselves are deserialized unconditionally on the next line.
  xdr.readBoolean();
  Nfs3FileAttributes postOpAttr = Nfs3FileAttributes.deserialize(xdr);
  long tbytes = 0;
  long fbytes = 0;
  long abytes = 0;
  long tfiles = 0;
  long ffiles = 0;
  long afiles = 0;
  int invarsec = 0;
  // The statistics fields are only present on the wire when the call
  // succeeded (NFS3_OK); otherwise they keep their zero defaults.
  if (status == Nfs3Status.NFS3_OK) {
    tbytes = xdr.readHyper();
    fbytes = xdr.readHyper();
    abytes = xdr.readHyper();
    tfiles = xdr.readHyper();
    ffiles = xdr.readHyper();
    afiles = xdr.readHyper();
    invarsec = xdr.readInt();
  }
  return new FSSTAT3Response(status, postOpAttr, tbytes, fbytes, abytes,
      tfiles, ffiles, afiles, invarsec);
}
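A minimal round-trip sketch (not from the Hadoop sources) that builds a reply buffer by hand and then decodes it with the method above; it assumes XDR's write-side counterparts writeInt, writeBoolean, and writeLongAsHyper to the read calls, and a default-constructed Nfs3FileAttributes:
// Encode a synthetic NFS3_OK FSSTAT reply in the layout deserialize() reads.
XDR xdr = new XDR();
xdr.writeInt(Nfs3Status.NFS3_OK);          // status
xdr.writeBoolean(true);                    // post_op_attr "attributes follow"
new Nfs3FileAttributes().serialize(xdr);   // default (empty) attributes
xdr.writeLongAsHyper(1024L * 1024 * 1024); // tbytes: 1 GiB total
xdr.writeLongAsHyper(512L * 1024 * 1024);  // fbytes: 512 MiB free
xdr.writeLongAsHyper(512L * 1024 * 1024);  // abytes: 512 MiB available
xdr.writeLongAsHyper(1000);                // tfiles
xdr.writeLongAsHyper(500);                 // ffiles
xdr.writeLongAsHyper(500);                 // afiles
xdr.writeInt(0);                           // invarsec
// Decode it back and check the status field survived the round trip.
FSSTAT3Response decoded = FSSTAT3Response.deserialize(xdr.asReadOnlyWrap());
assert decoded.getStatus() == Nfs3Status.NFS3_OK;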
Use of org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response in project hadoop by apache.
The class TestRpcProgramNfs3, method testFsstat:
@Test(timeout = 60000)
public void testFsstat() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  FSSTAT3Request req = new FSSTAT3Request(handle);
  req.serialize(xdr_req);
  // Attempt by an unprivileged user should fail.
  FSSTAT3Response response1 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());
  // Attempt by a privileged user should pass.
  FSSTAT3Response response2 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
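A hypothetical follow-on check, not part of the Hadoop test suite: a handle whose fileId does not resolve should exercise the server-side branch below where getFileAttr() returns null and NFS3ERR_STALE is returned. That an arbitrary unused id takes exactly that branch (rather than surfacing as a different mapped error) is an assumption here:
// Hypothetical: an id presumed not to exist in the test cluster.
FileHandle staleHandle = new FileHandle(Long.MAX_VALUE);
XDR staleXdr = new XDR();
new FSSTAT3Request(staleHandle).serialize(staleXdr);
FSSTAT3Response response3 = nfsd.fsstat(staleXdr.asReadOnlyWrap(),
    securityHandler, new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_STALE,
    response3.getStatus());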
Use of org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response in project hadoop by apache.
The class RpcProgramNfs3, method fsstat:
@VisibleForTesting
FSSTAT3Response fsstat(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK);
  // FSSTAT is a read-only operation, so READ_ONLY access is sufficient.
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    response.setStatus(Nfs3Status.NFS3ERR_ACCES);
    return response;
  }
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  FSSTAT3Request request;
  try {
    request = FSSTAT3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid FSSTAT request");
    return new FSSTAT3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle handle = request.getHandle();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS FSSTAT fileId: " + handle.getFileId() + " client: "
        + remoteAddress);
  }
  try {
    FsStatus fsStatus = dfsClient.getDiskStatus();
    long totalBytes = fsStatus.getCapacity();
    long freeBytes = fsStatus.getRemaining();
    Nfs3FileAttributes attrs = writeManager.getFileAttr(dfsClient, handle, iug);
    if (attrs == null) {
      LOG.info("Can't get path for fileId: " + handle.getFileId());
      return new FSSTAT3Response(Nfs3Status.NFS3ERR_STALE);
    }
    long maxFsObjects = config.getLong("dfs.max.objects", 0);
    if (maxFsObjects == 0) {
      // A value of zero in HDFS indicates no limit to the number
      // of objects that dfs supports. Using Integer.MAX_VALUE instead of
      // Long.MAX_VALUE so a 32-bit client won't complain.
      maxFsObjects = Integer.MAX_VALUE;
    }
    // HDFS reports a single remaining-space figure and a single object
    // limit, so freeBytes stands in for both fbytes and abytes, and
    // maxFsObjects for tfiles, ffiles, and afiles.
    return new FSSTAT3Response(Nfs3Status.NFS3_OK, attrs, totalBytes,
        freeBytes, freeBytes, maxFsObjects, maxFsObjects, maxFsObjects, 0);
  } catch (RemoteException r) {
    LOG.warn("Exception ", r);
    IOException io = r.unwrapRemoteException();
    // AuthorizationException can be thrown if the user can't be proxied.
    if (io instanceof AuthorizationException) {
      return new FSSTAT3Response(Nfs3Status.NFS3ERR_ACCES);
    } else {
      return new FSSTAT3Response(Nfs3Status.NFS3ERR_IO);
    }
  } catch (IOException e) {
    LOG.warn("Exception ", e);
    int status = mapErrorStatus(e);
    return new FSSTAT3Response(status);
  }
}
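For reference, the success response above populates the FSSTAT3resok fields of RFC 1813 in constructor order; this restates the same call with the field semantics spelled out in comments:
return new FSSTAT3Response(Nfs3Status.NFS3_OK, attrs,
    totalBytes,   // tbytes:   total size of the filesystem, in bytes
    freeBytes,    // fbytes:   free bytes on the filesystem
    freeBytes,    // abytes:   bytes available to the calling user
    maxFsObjects, // tfiles:   total file slots on the filesystem
    maxFsObjects, // ffiles:   free file slots
    maxFsObjects, // afiles:   file slots available to the calling user
    0);           // invarsec: seconds the fs is expected to stay unchanged;
                  //           0 means it may change at any time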