Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class RpcProgramNfs3, method write.
@VisibleForTesting
WRITE3Response write(XDR xdr, Channel channel, int xid,
    SecurityHandler securityHandler, SocketAddress remoteAddress) {
  WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK);
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  WRITE3Request request;
  try {
    request = WRITE3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid WRITE request");
    return new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  long offset = request.getOffset();
  int count = request.getCount();
  WriteStableHow stableHow = request.getStableHow();
  byte[] data = request.getData().array();
  if (data.length < count) {
    LOG.error("Invalid argument, data size is less than count in request");
    return new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle handle = request.getHandle();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS WRITE fileId: " + handle.getFileId() + " offset: " + offset
        + " length: " + count + " stableHow: " + stableHow.getValue()
        + " xid: " + xid + " client: " + remoteAddress);
  }
  Nfs3FileAttributes preOpAttr = null;
  try {
    preOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
    if (preOpAttr == null) {
      LOG.error("Can't get path for fileId: " + handle.getFileId());
      return new WRITE3Response(Nfs3Status.NFS3ERR_STALE);
    }
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
      return new WRITE3Response(Nfs3Status.NFS3ERR_ACCES,
          new WccData(Nfs3Utils.getWccAttr(preOpAttr), preOpAttr), 0,
          stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("requested offset=" + offset + " and current filesize="
          + preOpAttr.getSize());
    }
    writeManager.handleWrite(dfsClient, request, channel, xid, preOpAttr);
  } catch (IOException e) {
    LOG.info("Error writing to fileId " + handle.getFileId() + " at offset "
        + offset + " and length " + data.length, e);
    // Try to return WccData
    Nfs3FileAttributes postOpAttr = null;
    try {
      postOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
    } catch (IOException e1) {
      LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId(), e1);
    }
    WccAttr attr = preOpAttr == null ? null : Nfs3Utils.getWccAttr(preOpAttr);
    WccData fileWcc = new WccData(attr, postOpAttr);
    int status = mapErrorStatus(e);
    return new WRITE3Response(status, fileWcc, 0, request.getStableHow(),
        Nfs3Constant.WRITE_COMMIT_VERF);
  }
  // On the success path the reply is sent asynchronously: WriteManager queues
  // the write and responds over the channel once it is processed, so
  // returning null tells the RPC layer not to send a response here.
  return null;
}
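For context, a caller drives this handler by serializing a WRITE3Request (from org.apache.hadoop.nfs.nfs3.request) into an XDR buffer, the same way Hadoop's own tests do. The following is a minimal sketch modeled on TestRpcProgramNfs3; nfsd (the RpcProgramNfs3 instance), handle, and securityHandler are assumed test fixtures, not defined here.

// A minimal sketch of driving write(), modeled on Hadoop's NFS3 tests.
byte[] buffer = new byte[10];
for (int i = 0; i < buffer.length; i++) {
  buffer[i] = (byte) i;
}
WRITE3Request writeReq = new WRITE3Request(handle, 0, buffer.length,
    WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
XDR xdrReq = new XDR();
writeReq.serialize(xdrReq);
// A null channel is acceptable in a direct invocation. Note that a
// successful write returns null, since the actual WRITE3Response is sent
// asynchronously by the WriteManager.
WRITE3Response response = nfsd.write(xdrReq.asReadOnlyWrap(), null, 1,
    securityHandler, new InetSocketAddress("localhost", 1234));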
Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class RpcProgramNfs3, method lookup.
@VisibleForTesting
LOOKUP3Response lookup(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  LOOKUP3Response response = new LOOKUP3Response(Nfs3Status.NFS3_OK);
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    response.setStatus(Nfs3Status.NFS3ERR_ACCES);
    return response;
  }
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  LOOKUP3Request request;
  try {
    request = LOOKUP3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid LOOKUP request");
    return new LOOKUP3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle dirHandle = request.getHandle();
  String fileName = request.getName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS LOOKUP dir fileId: " + dirHandle.getFileId() + " name: "
        + fileName + " client: " + remoteAddress);
  }
  try {
    String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
    Nfs3FileAttributes postOpObjAttr =
        writeManager.getFileAttr(dfsClient, dirHandle, fileName);
    if (postOpObjAttr == null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("NFS LOOKUP fileId: " + dirHandle.getFileId() + " name: "
            + fileName + " does not exist");
      }
      Nfs3FileAttributes postOpDirAttr =
          Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
      return new LOOKUP3Response(Nfs3Status.NFS3ERR_NOENT, null, null,
          postOpDirAttr);
    }
    Nfs3FileAttributes postOpDirAttr =
        Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
    if (postOpDirAttr == null) {
      LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
      return new LOOKUP3Response(Nfs3Status.NFS3ERR_STALE);
    }
    FileHandle fileHandle = new FileHandle(postOpObjAttr.getFileId());
    return new LOOKUP3Response(Nfs3Status.NFS3_OK, fileHandle, postOpObjAttr,
        postOpDirAttr);
  } catch (IOException e) {
    LOG.warn("Exception ", e);
    int status = mapErrorStatus(e);
    return new LOOKUP3Response(status);
  }
}
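A lookup call is driven the same way. The sketch below is modeled on Hadoop's tests; nfsd, handle (the parent directory handle), and securityHandler are assumed fixtures, and "bar" is a hypothetical child name.

// A minimal sketch of driving lookup(), modeled on Hadoop's NFS3 tests.
LOOKUP3Request lookupReq = new LOOKUP3Request(handle, "bar");
XDR xdrReq = new XDR();
lookupReq.serialize(xdrReq);
LOOKUP3Response response = nfsd.lookup(xdrReq.asReadOnlyWrap(),
    securityHandler, new InetSocketAddress("localhost", 1234));
// On NFS3_OK the response carries the child's file handle plus post-op
// attributes for both the child and the parent directory.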
Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class RpcProgramNfs3, method readdir.
public READDIR3Response readdir(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK);
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    response.setStatus(Nfs3Status.NFS3ERR_ACCES);
    return response;
  }
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  READDIR3Request request;
  try {
    request = READDIR3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid READDIR request");
    return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle handle = request.getHandle();
  long cookie = request.getCookie();
  if (cookie < 0) {
    LOG.error("Invalid READDIR request, with negative cookie: " + cookie);
    return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  long count = request.getCount();
  if (count <= 0) {
    LOG.info("Nonpositive count in invalid READDIR request: " + count);
    return new READDIR3Response(Nfs3Status.NFS3_OK);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS READDIR fileId: " + handle.getFileId() + " cookie: "
        + cookie + " count: " + count + " client: " + remoteAddress);
  }
  HdfsFileStatus dirStatus;
  DirectoryListing dlisting;
  Nfs3FileAttributes postOpAttr;
  long dotdotFileId = 0;
  try {
    String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
    dirStatus = dfsClient.getFileInfo(dirFileIdPath);
    if (dirStatus == null) {
      LOG.info("Can't get path for fileId: " + handle.getFileId());
      return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
    }
    if (!dirStatus.isDir()) {
      LOG.error("Can't readdir for regular file, fileId: "
          + handle.getFileId());
      return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
    }
    long cookieVerf = request.getCookieVerf();
    if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
      if (aixCompatMode) {
        // The AIX NFS client misinterprets RFC-1813 and will repeatedly send
        // the same cookieverf value even across VFS-level readdir calls,
        // instead of getting a new cookieverf for every VFS-level readdir
        // call, and reusing the cookieverf only in the event that multiple
        // incremental NFS-level readdir calls must be made to fetch all of
        // the directory entries. This means that whenever a readdir call is
        // made by an AIX NFS client for a given directory, and that directory
        // is subsequently modified, thus changing its mtime, no later readdir
        // calls will succeed from AIX for that directory until the FS is
        // unmounted/remounted. See HDFS-6549 for more info.
        LOG.warn("AIX compatibility mode enabled, ignoring cookieverf mismatches.");
      } else {
        LOG.error("CookieVerf mismatch. request cookieVerf: " + cookieVerf
            + " dir cookieVerf: " + dirStatus.getModificationTime());
        return new READDIR3Response(Nfs3Status.NFS3ERR_BAD_COOKIE,
            Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug));
      }
    }
    if (cookie == 0) {
      // Get dotdot fileId
      String dotdotFileIdPath = dirFileIdPath + "/..";
      HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);
      if (dotdotStatus == null) {
        // This should not happen
        throw new IOException("Can't get path for handle path: "
            + dotdotFileIdPath);
      }
      dotdotFileId = dotdotStatus.getFileId();
    }
    // Get the list from the resume point
    byte[] startAfter;
    if (cookie == 0) {
      startAfter = HdfsFileStatus.EMPTY_NAME;
    } else {
      String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
      startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8"));
    }
    dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
    postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
    if (postOpAttr == null) {
      LOG.error("Can't get path for fileId: " + handle.getFileId());
      return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
    }
  } catch (IOException e) {
    LOG.warn("Exception ", e);
    int status = mapErrorStatus(e);
    return new READDIR3Response(status);
  }
  /**
   * Set up the dirents in the response. fileId is used as the cookie, with
   * one exception: the Linux client can either get stuck in the "ls" command
   * (on RHEL) or report "Too many levels of symbolic links" (Ubuntu).
   *
   * The problem is that only two items, "." and "..", are returned when the
   * namespace is empty. Both of them are "/" with the same cookie (the root
   * fileId), and the Linux client doesn't treat such a directory as a real
   * directory. Even though the NFS protocol specifies that the cookie is
   * opaque data, the Linux client doesn't like an empty dir returning the
   * same cookie for both "." and "..".
   *
   * The workaround is to use 0 as the cookie for "." and always return "."
   * as the first entry in the readdir/readdirplus response.
   */
  HdfsFileStatus[] fstatus = dlisting.getPartialListing();
  int n = (int) Math.min(fstatus.length, count - 2);
  boolean eof = (n >= fstatus.length) && !dlisting.hasMore();
  Entry3[] entries;
  if (cookie == 0) {
    entries = new Entry3[n + 2];
    entries[0] = new READDIR3Response.Entry3(postOpAttr.getFileId(), ".", 0);
    entries[1] = new READDIR3Response.Entry3(dotdotFileId, "..", dotdotFileId);
    for (int i = 2; i < n + 2; i++) {
      entries[i] = new READDIR3Response.Entry3(fstatus[i - 2].getFileId(),
          fstatus[i - 2].getLocalName(), fstatus[i - 2].getFileId());
    }
  } else {
    // Resume from the last readdir. If the cookie is "..", the returned list
    // covers the whole directory content, since HDFS uses the entry name as
    // the resume point.
    entries = new Entry3[n];
    for (int i = 0; i < n; i++) {
      entries[i] = new READDIR3Response.Entry3(fstatus[i].getFileId(),
          fstatus[i].getLocalName(), fstatus[i].getFileId());
    }
  }
  DirList3 dirList = new READDIR3Response.DirList3(entries, eof);
  return new READDIR3Response(Nfs3Status.NFS3_OK, postOpAttr,
      dirStatus.getModificationTime(), dirList);
}
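A first readdir on a directory passes cookie 0 and cookieverf 0; follow-up calls pass back the cookie of the last entry received plus the directory's verifier. A minimal sketch modeled on Hadoop's tests, with nfsd, handle, and securityHandler assumed fixtures:

// A minimal sketch of driving readdir(), modeled on Hadoop's NFS3 tests.
// cookie == 0 and cookieverf == 0 request the directory from the start.
READDIR3Request readdirReq = new READDIR3Request(handle, 0, 0, 100);
XDR xdrReq = new XDR();
readdirReq.serialize(xdrReq);
READDIR3Response response = nfsd.readdir(xdrReq.asReadOnlyWrap(),
    securityHandler, new InetSocketAddress("localhost", 1234));
// Per the workaround above, the first two entries are "." (cookie 0)
// and "..".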
Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class RpcProgramNfs3, method readdirplus.
@VisibleForTesting
READDIRPLUS3Response readdirplus(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_ACCES);
  }
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_SERVERFAULT);
  }
  READDIRPLUS3Request request = null;
  try {
    request = READDIRPLUS3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid READDIRPLUS request");
    return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle handle = request.getHandle();
  long cookie = request.getCookie();
  if (cookie < 0) {
    LOG.error("Invalid READDIRPLUS request, with negative cookie: " + cookie);
    return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  long dirCount = request.getDirCount();
  if (dirCount <= 0) {
    LOG.info("Nonpositive dircount in invalid READDIRPLUS request: "
        + dirCount);
    return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  int maxCount = request.getMaxCount();
  if (maxCount <= 0) {
    LOG.info("Nonpositive maxcount in invalid READDIRPLUS request: "
        + maxCount);
    return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: "
        + cookie + " dirCount: " + dirCount + " maxCount: " + maxCount
        + " client: " + remoteAddress);
  }
  HdfsFileStatus dirStatus;
  DirectoryListing dlisting;
  Nfs3FileAttributes postOpDirAttr;
  long dotdotFileId = 0;
  HdfsFileStatus dotdotStatus = null;
  try {
    String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
    dirStatus = dfsClient.getFileInfo(dirFileIdPath);
    if (dirStatus == null) {
      LOG.info("Can't get path for fileId: " + handle.getFileId());
      return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
    }
    if (!dirStatus.isDir()) {
      LOG.error("Can't readdirplus for regular file, fileId: "
          + handle.getFileId());
      return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);
    }
    long cookieVerf = request.getCookieVerf();
    if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
      if (aixCompatMode) {
        // The AIX NFS client misinterprets RFC-1813 and will repeatedly send
        // the same cookieverf value even across VFS-level readdir calls,
        // instead of getting a new cookieverf for every VFS-level readdir
        // call. This means that whenever a readdir call is made by an AIX NFS
        // client for a given directory, and that directory is subsequently
        // modified, thus changing its mtime, no later readdir calls will
        // succeed for that directory from AIX until the FS is
        // unmounted/remounted. See HDFS-6549 for more info.
        LOG.warn("AIX compatibility mode enabled, ignoring cookieverf mismatches.");
      } else {
        LOG.error("cookieverf mismatch. request cookieverf: " + cookieVerf
            + " dir cookieverf: " + dirStatus.getModificationTime());
        return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_BAD_COOKIE,
            Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug), 0, null);
      }
    }
    if (cookie == 0) {
      // Get dotdot fileId
      String dotdotFileIdPath = dirFileIdPath + "/..";
      dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);
      if (dotdotStatus == null) {
        // This should not happen
        throw new IOException("Can't get path for handle path: "
            + dotdotFileIdPath);
      }
      dotdotFileId = dotdotStatus.getFileId();
    }
    // Get the list from the resume point
    byte[] startAfter;
    if (cookie == 0) {
      startAfter = HdfsFileStatus.EMPTY_NAME;
    } else {
      String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
      startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8"));
    }
    dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
    postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
    if (postOpDirAttr == null) {
      LOG.info("Can't get path for fileId: " + handle.getFileId());
      return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
    }
  } catch (IOException e) {
    LOG.warn("Exception ", e);
    int status = mapErrorStatus(e);
    return new READDIRPLUS3Response(status);
  }
  // Set up the dirents in the response
  HdfsFileStatus[] fstatus = dlisting.getPartialListing();
  int n = (int) Math.min(fstatus.length, dirCount - 2);
  boolean eof = (n >= fstatus.length) && !dlisting.hasMore();
  READDIRPLUS3Response.EntryPlus3[] entries;
  if (cookie == 0) {
    entries = new READDIRPLUS3Response.EntryPlus3[n + 2];
    entries[0] = new READDIRPLUS3Response.EntryPlus3(
        postOpDirAttr.getFileId(), ".", 0, postOpDirAttr,
        new FileHandle(postOpDirAttr.getFileId()));
    entries[1] = new READDIRPLUS3Response.EntryPlus3(dotdotFileId, "..",
        dotdotFileId,
        Nfs3Utils.getNfs3FileAttrFromFileStatus(dotdotStatus, iug),
        new FileHandle(dotdotFileId));
    for (int i = 2; i < n + 2; i++) {
      long fileId = fstatus[i - 2].getFileId();
      FileHandle childHandle = new FileHandle(fileId);
      Nfs3FileAttributes attr;
      try {
        attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
      } catch (IOException e) {
        LOG.error("Can't get file attributes for fileId: " + fileId, e);
        continue;
      }
      entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId,
          fstatus[i - 2].getLocalName(), fileId, attr, childHandle);
    }
  } else {
    // Resume from the last readdirplus. If the cookie is "..", the returned
    // list covers the whole directory content, since HDFS uses the entry
    // name as the resume point.
    entries = new READDIRPLUS3Response.EntryPlus3[n];
    for (int i = 0; i < n; i++) {
      long fileId = fstatus[i].getFileId();
      FileHandle childHandle = new FileHandle(fileId);
      Nfs3FileAttributes attr;
      try {
        attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
      } catch (IOException e) {
        LOG.error("Can't get file attributes for fileId: " + fileId, e);
        continue;
      }
      entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId,
          fstatus[i].getLocalName(), fileId, attr, childHandle);
    }
  }
  DirListPlus3 dirListPlus =
      new READDIRPLUS3Response.DirListPlus3(entries, eof);
  return new READDIRPLUS3Response(Nfs3Status.NFS3_OK, postOpDirAttr,
      dirStatus.getModificationTime(), dirListPlus);
}
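readdirplus differs from readdir in that each entry also carries post-op attributes and a file handle, bounded by the two size limits. A minimal sketch modeled on Hadoop's tests; nfsd, handle, and securityHandler are assumed fixtures, and the two count values are illustrative:

// A minimal sketch of driving readdirplus(), modeled on Hadoop's NFS3
// tests. dirCount bounds the space consumed by names and cookies; maxCount
// bounds the full reply, including attributes and handles.
READDIRPLUS3Request readdirplusReq =
    new READDIRPLUS3Request(handle, 0, 0, 100, 1000);
XDR xdrReq = new XDR();
readdirplusReq.serialize(xdrReq);
READDIRPLUS3Response response = nfsd.readdirplus(xdrReq.asReadOnlyWrap(),
    securityHandler, new InetSocketAddress("localhost", 1234));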
Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class RpcProgramNfs3, method read.
@VisibleForTesting
READ3Response read(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
  final String userName = securityHandler.getUser();
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    response.setStatus(Nfs3Status.NFS3ERR_ACCES);
    return response;
  }
  DFSClient dfsClient = clientCache.getDfsClient(userName);
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  READ3Request request;
  try {
    request = READ3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid READ request");
    return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  long offset = request.getOffset();
  int count = request.getCount();
  FileHandle handle = request.getHandle();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
        + " count: " + count + " client: " + remoteAddress);
  }
  Nfs3FileAttributes attrs;
  boolean eof;
  if (count == 0) {
    // Only do access check.
    try {
      // Don't read from cache. Client may not have read permission.
      attrs = Nfs3Utils.getFileAttr(dfsClient,
          Nfs3Utils.getFileIdPath(handle), iug);
    } catch (IOException e) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Get error accessing file, fileId: " + handle.getFileId(),
            e);
      }
      return new READ3Response(Nfs3Status.NFS3ERR_IO);
    }
    if (attrs == null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Can't get path for fileId: " + handle.getFileId());
      }
      return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
    }
    int access = Nfs3Utils.getAccessRightsForUserGroup(
        securityHandler.getUid(), securityHandler.getGid(),
        securityHandler.getAuxGids(), attrs);
    if ((access & Nfs3Constant.ACCESS3_READ) != 0) {
      eof = offset >= attrs.getSize();
      return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof,
          ByteBuffer.wrap(new byte[0]));
    } else {
      return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
    }
  }
  // In case there is buffered data for the same file, flush it. This can be
  // optimized later by reading from the cache.
  int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
  if (ret != Nfs3Status.NFS3_OK) {
    LOG.warn("commitBeforeRead didn't succeed with ret=" + ret
        + ". Read may not get most recent data.");
  }
  try {
    int rtmax = config.getInt(
        NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY,
        NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
    int buffSize = Math.min(rtmax, count);
    byte[] readbuffer = new byte[buffSize];
    int readCount = 0;
    /*
     * Retry exactly once because the DFSInputStream can be stale.
     */
    for (int i = 0; i < 1; ++i) {
      FSDataInputStream fis = clientCache.getDfsInputStream(userName,
          Nfs3Utils.getFileIdPath(handle));
      if (fis == null) {
        return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
      }
      try {
        readCount = fis.read(offset, readbuffer, 0, count);
        metrics.incrBytesRead(readCount);
      } catch (IOException e) {
        // TODO: A cleaner way is to throw a new type of exception
        // which requires incompatible changes.
        if (e.getMessage().equals("Stream closed")) {
          clientCache.invalidateDfsInputStream(userName,
              Nfs3Utils.getFileIdPath(handle));
          continue;
        } else {
          throw e;
        }
      }
    }
    attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
        iug);
    if (readCount < count) {
      LOG.info("Partial read. Asked offset: " + offset + " count: " + count
          + " and read back: " + readCount + " file size: "
          + attrs.getSize());
    }
    // HDFS returns -1 for read beyond file size.
    if (readCount < 0) {
      readCount = 0;
    }
    eof = (offset + readCount) >= attrs.getSize();
    return new READ3Response(Nfs3Status.NFS3_OK, attrs, readCount, eof,
        ByteBuffer.wrap(readbuffer));
  } catch (IOException e) {
    LOG.warn("Read error: " + e.getClass() + " offset: " + offset
        + " count: " + count, e);
    int status = mapErrorStatus(e);
    return new READ3Response(status);
  }
}
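Driving read() follows the same request/serialize pattern. The sketch below is modeled on Hadoop's tests, again with nfsd, handle, and securityHandler as assumed fixtures.

// A minimal sketch of driving read(), modeled on Hadoop's NFS3 tests.
// Reads 5 bytes starting at offset 0 of the file behind 'handle'.
READ3Request readReq = new READ3Request(handle, 0, 5);
XDR xdrReq = new XDR();
readReq.serialize(xdrReq);
READ3Response response = nfsd.read(xdrReq.asReadOnlyWrap(),
    securityHandler, new InetSocketAddress("localhost", 1234));
// On NFS3_OK the bytes are in response.getData(); eof is set when the read
// reached the end of the file.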