Use of org.apache.hadoop.nfs.NfsTime in project hadoop by apache.
From the class FSINFO3Response, method deserialize:
public static FSINFO3Response deserialize(XDR xdr) {
  int status = xdr.readInt();
  xdr.readBoolean(); // discard the post_op_attr "attributes follow" flag
  Nfs3FileAttributes postOpObjAttr = Nfs3FileAttributes.deserialize(xdr);
  int rtmax = 0;
  int rtpref = 0;
  int rtmult = 0;
  int wtmax = 0;
  int wtpref = 0;
  int wtmult = 0;
  int dtpref = 0;
  long maxFileSize = 0;
  NfsTime timeDelta = null;
  int properties = 0;
  if (status == Nfs3Status.NFS3_OK) {
    // The FSINFO3resok body is only present on success.
    rtmax = xdr.readInt();
    rtpref = xdr.readInt();
    rtmult = xdr.readInt();
    wtmax = xdr.readInt();
    wtpref = xdr.readInt();
    wtmult = xdr.readInt();
    dtpref = xdr.readInt();
    maxFileSize = xdr.readHyper();
    timeDelta = NfsTime.deserialize(xdr);
    properties = xdr.readInt();
  }
  return new FSINFO3Response(status, postOpObjAttr, rtmax, rtpref, rtmult,
      wtmax, wtpref, wtmult, dtpref, maxFileSize, timeDelta, properties);
}
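The field order read above mirrors the FSINFO3 reply layout from RFC 1813. A minimal round-trip sketch follows; it assumes the XDR writer methods writeInt, writeBoolean, writeLongAsHyper, and asReadOnlyWrap(), plus the serialize counterparts on Nfs3FileAttributes and NfsTime, and all sizes are illustrative:
import org.apache.hadoop.nfs.NfsTime;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response;
import org.apache.hadoop.oncrpc.XDR;

public class Fsinfo3RoundTrip {
  public static void main(String[] args) {
    // Write the fields in the order deserialize expects, then read them back.
    XDR xdr = new XDR();
    xdr.writeInt(Nfs3Status.NFS3_OK);
    xdr.writeBoolean(true);                    // post_op_attr: attributes follow
    new Nfs3FileAttributes().serialize(xdr);   // default (empty) attributes
    xdr.writeInt(64 * 1024);                   // rtmax (illustrative)
    xdr.writeInt(64 * 1024);                   // rtpref
    xdr.writeInt(1);                           // rtmult
    xdr.writeInt(64 * 1024);                   // wtmax
    xdr.writeInt(64 * 1024);                   // wtpref
    xdr.writeInt(1);                           // wtmult
    xdr.writeInt(64 * 1024);                   // dtpref
    xdr.writeLongAsHyper(Long.MAX_VALUE);      // maxfilesize
    new NfsTime(1).serialize(xdr);             // time_delta
    xdr.writeInt(0);                           // properties
    FSINFO3Response resp = FSINFO3Response.deserialize(xdr.asReadOnlyWrap());
    System.out.println(resp.getStatus() == Nfs3Status.NFS3_OK);
  }
}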
Use of org.apache.hadoop.nfs.NfsTime in project hadoop by apache.
From the class RpcProgramNfs3, method fsinfo:
@VisibleForTesting
FSINFO3Response fsinfo(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  FSINFO3Response response = new FSINFO3Response(Nfs3Status.NFS3_OK);
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    response.setStatus(Nfs3Status.NFS3ERR_ACCES);
    return response;
  }
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  FSINFO3Request request;
  try {
    request = FSINFO3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid FSINFO request");
    return new FSINFO3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle handle = request.getHandle();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS FSINFO fileId: " + handle.getFileId() + " client: "
        + remoteAddress);
  }
  try {
    // The advertised transfer limits come straight from the gateway config.
    int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY,
        NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
    int wtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_WRITE_TRANSFER_SIZE_KEY,
        NfsConfigKeys.DFS_NFS_MAX_WRITE_TRANSFER_SIZE_DEFAULT);
    int dtpref = config.getInt(
        NfsConfigKeys.DFS_NFS_MAX_READDIR_TRANSFER_SIZE_KEY,
        NfsConfigKeys.DFS_NFS_MAX_READDIR_TRANSFER_SIZE_DEFAULT);
    Nfs3FileAttributes attrs = Nfs3Utils.getFileAttr(dfsClient,
        Nfs3Utils.getFileIdPath(handle), iug);
    if (attrs == null) {
      LOG.info("Can't get path for fileId: " + handle.getFileId());
      return new FSINFO3Response(Nfs3Status.NFS3ERR_STALE);
    }
    int fsProperty = Nfs3Constant.FSF3_CANSETTIME
        | Nfs3Constant.FSF3_HOMOGENEOUS;
    // Preferred sizes equal the maxima, the multiples are 1, and the time
    // granularity (time_delta) is 1 millisecond.
    return new FSINFO3Response(Nfs3Status.NFS3_OK, attrs, rtmax, rtmax, 1,
        wtmax, wtmax, 1, dtpref, Long.MAX_VALUE, new NfsTime(1), fsProperty);
  } catch (IOException e) {
    LOG.warn("Exception", e);
    int status = mapErrorStatus(e);
    return new FSINFO3Response(status);
  }
}
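Because fsinfo derives its advertised limits purely from configuration, the reply can be tuned before the gateway starts. A minimal sketch using the same NfsConfigKeys referenced above; NfsConfiguration and the chosen values are illustrative assumptions:
import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;

public class FsinfoTuning {
  public static void main(String[] args) {
    NfsConfiguration config = new NfsConfiguration();
    // Advertise 1 MB read/write transfers and 64 KB READDIR transfers.
    config.setInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY, 1024 * 1024);
    config.setInt(NfsConfigKeys.DFS_NFS_MAX_WRITE_TRANSFER_SIZE_KEY, 1024 * 1024);
    config.setInt(NfsConfigKeys.DFS_NFS_MAX_READDIR_TRANSFER_SIZE_KEY, 64 * 1024);
  }
}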
Use of org.apache.hadoop.nfs.NfsTime in project hadoop by apache.
From the class Nfs3Utils, method getWccAttr:
public static WccAttr getWccAttr(DFSClient client, String fileIdPath)
    throws IOException {
  HdfsFileStatus fstat = getFileStatus(client, fileIdPath);
  if (fstat == null) {
    return null;
  }
  long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum())
      : fstat.getLen();
  // HDFS does not track a separate change time (ctime), so the modification
  // time is reported for both the mtime and ctime fields.
  return new WccAttr(size, new NfsTime(fstat.getModificationTime()),
      new NfsTime(fstat.getModificationTime()));
}
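Both NfsTime arguments are built from an epoch-millisecond timestamp, while an NFSv3 nfstime3 value carries seconds plus nanoseconds. A hedged sketch of that split; the variable names and sample value are illustrative:
public class NfsTimeSplit {
  public static void main(String[] args) {
    long millis = 1_500_000_123L;                     // hypothetical mtime in ms
    int seconds = (int) (millis / 1000);              // whole seconds
    int nseconds = (int) (millis % 1000) * 1_000_000; // remainder as nanoseconds
    System.out.println(seconds + " s + " + nseconds + " ns");
  }
}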