Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class RpcProgramNfs3, method readdirplus.
@VisibleForTesting
READDIRPLUS3Response readdirplus(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_ACCES);
}
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
if (dfsClient == null) {
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_SERVERFAULT);
}
READDIRPLUS3Request request = null;
try {
request = READDIRPLUS3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid READDIRPLUS request");
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
}
FileHandle handle = request.getHandle();
long cookie = request.getCookie();
if (cookie < 0) {
LOG.error("Invalid READDIRPLUS request, with negative cookie: " + cookie);
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
}
long dirCount = request.getDirCount();
if (dirCount <= 0) {
LOG.info("Nonpositive dircount in invalid READDIRPLUS request: " + dirCount);
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
}
int maxCount = request.getMaxCount();
if (maxCount <= 0) {
LOG.info("Nonpositive maxcount in invalid READDIRPLUS request: " + maxCount);
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
}
if (LOG.isDebugEnabled()) {
LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: " + cookie + " dirCount: " + dirCount + " maxCount: " + maxCount + " client: " + remoteAddress);
}
HdfsFileStatus dirStatus;
DirectoryListing dlisting;
Nfs3FileAttributes postOpDirAttr;
long dotdotFileId = 0;
HdfsFileStatus dotdotStatus = null;
try {
String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
dirStatus = dfsClient.getFileInfo(dirFileIdPath);
if (dirStatus == null) {
LOG.info("Can't get path for fileId: " + handle.getFileId());
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
}
if (!dirStatus.isDir()) {
LOG.error("Can't readdirplus for regular file, fileId: " + handle.getFileId());
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);
}
long cookieVerf = request.getCookieVerf();
if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
if (aixCompatMode) {
// The AIX NFS client misinterprets RFC-1813 and will repeatedly send
// the same cookieverf value even across VFS-level readdir calls,
// instead of getting a new cookieverf for every VFS-level readdir
// call. This means that whenever a readdir call is made by an AIX NFS
// client for a given directory, and that directory is subsequently
// modified, thus changing its mtime, no later readdir calls will
// succeed for that directory from AIX until the FS is
// unmounted/remounted. See HDFS-6549 for more info.
LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " + "mismatches.");
} else {
LOG.error("cookieverf mismatch. request cookieverf: " + cookieVerf + " dir cookieverf: " + dirStatus.getModificationTime());
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_BAD_COOKIE, Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug), 0, null);
}
}
if (cookie == 0) {
// Get dotdot fileId
String dotdotFileIdPath = dirFileIdPath + "/..";
dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);
if (dotdotStatus == null) {
// This should not happen
throw new IOException("Can't get path for handle path: " + dotdotFileIdPath);
}
dotdotFileId = dotdotStatus.getFileId();
}
// Get the list from the resume point
byte[] startAfter;
if (cookie == 0) {
startAfter = HdfsFileStatus.EMPTY_NAME;
} else {
String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8"));
}
dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
if (postOpDirAttr == null) {
LOG.info("Can't get path for fileId: " + handle.getFileId());
return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
}
} catch (IOException e) {
LOG.warn("Exception ", e);
int status = mapErrorStatus(e);
return new READDIRPLUS3Response(status);
}
// Set up the dirents in the response
HdfsFileStatus[] fstatus = dlisting.getPartialListing();
int n = (int) Math.min(fstatus.length, dirCount - 2);
boolean eof = (n >= fstatus.length) && !dlisting.hasMore();
READDIRPLUS3Response.EntryPlus3[] entries;
if (cookie == 0) {
entries = new READDIRPLUS3Response.EntryPlus3[n + 2];
entries[0] = new READDIRPLUS3Response.EntryPlus3(postOpDirAttr.getFileId(), ".", 0, postOpDirAttr, new FileHandle(postOpDirAttr.getFileId()));
entries[1] = new READDIRPLUS3Response.EntryPlus3(dotdotFileId, "..", dotdotFileId, Nfs3Utils.getNfs3FileAttrFromFileStatus(dotdotStatus, iug), new FileHandle(dotdotFileId));
for (int i = 2; i < n + 2; i++) {
long fileId = fstatus[i - 2].getFileId();
FileHandle childHandle = new FileHandle(fileId);
Nfs3FileAttributes attr;
try {
attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
} catch (IOException e) {
LOG.error("Can't get file attributes for fileId: " + fileId, e);
continue;
}
entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId, fstatus[i - 2].getLocalName(), fileId, attr, childHandle);
}
} else {
// Resume from the last readdirplus. If the cookie is "..", the result
// list is the whole directory content, since HDFS uses the name as the resume point.
entries = new READDIRPLUS3Response.EntryPlus3[n];
for (int i = 0; i < n; i++) {
long fileId = fstatus[i].getFileId();
FileHandle childHandle = new FileHandle(fileId);
Nfs3FileAttributes attr;
try {
attr = writeManager.getFileAttr(dfsClient, childHandle, iug);
} catch (IOException e) {
LOG.error("Can't get file attributes for fileId: " + fileId, e);
continue;
}
entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId, fstatus[i].getLocalName(), fileId, attr, childHandle);
}
}
DirListPlus3 dirListPlus = new READDIRPLUS3Response.DirListPlus3(entries, eof);
return new READDIRPLUS3Response(Nfs3Status.NFS3_OK, postOpDirAttr, dirStatus.getModificationTime(), dirListPlus);
}
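For reference, readdirplus can be exercised the same way TestReaddir below exercises readdir: serialize the directory handle, cookie, cookie verifier, dircount and maxcount into an XDR buffer and hand it to the RPC program. The following is a minimal sketch, not taken from the Hadoop tests; it assumes nfsd, dfsClient and securityHandler fixtures like those in the tests below, and it assumes the request field order is handle, cookie, cookieverf, dircount, maxcount.
// Sketch only: drive READDIRPLUS against the root directory.
// nfsd, dfsClient and securityHandler are assumed to be set up as in the tests below.
HdfsFileStatus rootStatus = dfsClient.getFileInfo("/");
FileHandle rootHandle = new FileHandle(rootStatus.getFileId());
XDR xdr_req = new XDR();
rootHandle.serialize(xdr_req);
// cookie: 0 starts from the beginning, so "." and ".." are included
xdr_req.writeLongAsHyper(0);
// cookieverf: 0 bypasses the mtime-based verifier check above
xdr_req.writeLongAsHyper(0);
// dircount: byte budget for names only
xdr_req.writeInt(100);
// maxcount: byte budget for the whole reply
xdr_req.writeInt(1000);
READDIRPLUS3Response rsp = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
assertTrue(rsp.getStatus() == Nfs3Status.NFS3_OK);
With cookie 0 and a successful call, the first two entries of the returned listing are "." and "..", matching the branch above that prepends them before the partial listing.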
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class TestWrites, method testOverlappingWrites.
@Test
public void testOverlappingWrites() throws IOException, InterruptedException {
NfsConfiguration config = new NfsConfiguration();
MiniDFSCluster cluster = null;
RpcProgramNfs3 nfsd;
final int bufSize = 32;
SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
String currentUser = System.getProperty("user.name");
config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
ProxyUsers.refreshSuperUserGroupsConfiguration(config);
// Use ephemeral ports in case tests are running in parallel
config.setInt("nfs3.mountd.port", 0);
config.setInt("nfs3.server.port", 0);
try {
cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
cluster.waitActive();
Nfs3 nfs3 = new Nfs3(config);
nfs3.startServiceInternal(false);
nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config), config);
HdfsFileStatus status = dfsClient.getFileInfo("/");
FileHandle rootHandle = new FileHandle(status.getFileId());
CREATE3Request createReq = new CREATE3Request(rootHandle, "overlapping-writes" + System.currentTimeMillis(), Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
XDR createXdr = new XDR();
createReq.serialize(createXdr);
CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
FileHandle handle = createRsp.getObjHandle();
byte[] buffer = new byte[bufSize];
for (int i = 0; i < bufSize; i++) {
buffer[i] = (byte) i;
}
int[][] ranges = new int[][] { { 0, 10 }, { 5, 7 }, { 5, 5 }, { 10, 6 }, { 18, 6 }, { 20, 6 }, { 28, 4 }, { 16, 2 }, { 25, 4 } };
for (int i = 0; i < ranges.length; i++) {
int[] x = ranges[i];
byte[] tbuffer = new byte[x[1]];
for (int j = 0; j < x[1]; j++) {
tbuffer[j] = buffer[x[0] + j];
}
WRITE3Request writeReq = new WRITE3Request(handle, (long) x[0], x[1], WriteStableHow.UNSTABLE, ByteBuffer.wrap(tbuffer));
XDR writeXdr = new XDR();
writeReq.serialize(writeXdr);
nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
}
waitWrite(nfsd, handle, 60000);
READ3Request readReq = new READ3Request(handle, 0, bufSize);
XDR readXdr = new XDR();
readReq.serialize(readXdr);
READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
assertTrue(Arrays.equals(buffer, readRsp.getData().array()));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class TestWrites, method testOOOWrites.
@Test
public void testOOOWrites() throws IOException, InterruptedException {
NfsConfiguration config = new NfsConfiguration();
MiniDFSCluster cluster = null;
RpcProgramNfs3 nfsd;
final int bufSize = 32;
final int numOOO = 3;
SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
String currentUser = System.getProperty("user.name");
config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
ProxyUsers.refreshSuperUserGroupsConfiguration(config);
// Use ephemeral ports in case tests are running in parallel
config.setInt("nfs3.mountd.port", 0);
config.setInt("nfs3.server.port", 0);
try {
cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
cluster.waitActive();
Nfs3 nfs3 = new Nfs3(config);
nfs3.startServiceInternal(false);
nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config), config);
HdfsFileStatus status = dfsClient.getFileInfo("/");
FileHandle rootHandle = new FileHandle(status.getFileId());
CREATE3Request createReq = new CREATE3Request(rootHandle, "out-of-order-write" + System.currentTimeMillis(), Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
XDR createXdr = new XDR();
createReq.serialize(createXdr);
CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
FileHandle handle = createRsp.getObjHandle();
byte[][] oooBuf = new byte[numOOO][bufSize];
for (int i = 0; i < numOOO; i++) {
Arrays.fill(oooBuf[i], (byte) i);
}
for (int i = 0; i < numOOO; i++) {
final long offset = (numOOO - 1 - i) * bufSize;
WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize, WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
XDR writeXdr = new XDR();
writeReq.serialize(writeXdr);
nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
}
waitWrite(nfsd, handle, 60000);
READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
XDR readXdr = new XDR();
readReq.serialize(readXdr);
READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class RpcProgramNfs3, method symlink.
@VisibleForTesting
SYMLINK3Response symlink(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
SYMLINK3Response response = new SYMLINK3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
return response;
}
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
if (dfsClient == null) {
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
return response;
}
SYMLINK3Request request;
try {
request = SYMLINK3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid SYMLINK request");
response.setStatus(Nfs3Status.NFS3ERR_INVAL);
return response;
}
FileHandle dirHandle = request.getHandle();
String name = request.getName();
String symData = request.getSymData();
String linkDirIdPath = Nfs3Utils.getFileIdPath(dirHandle);
// Don't do any name check on the source path, just leave it to HDFS
String linkIdPath = linkDirIdPath + "/" + name;
if (LOG.isDebugEnabled()) {
LOG.debug("NFS SYMLINK, target: " + symData + " link: " + linkIdPath + " client: " + remoteAddress);
}
try {
WccData dirWcc = response.getDirWcc();
WccAttr preOpAttr = Nfs3Utils.getWccAttr(dfsClient, linkDirIdPath);
dirWcc.setPreOpAttr(preOpAttr);
dfsClient.createSymlink(symData, linkIdPath, false);
// Setting the symlink attr is considered as changing the attr of the target
// file, so there is no need to set the symlink attr here after it's created.
HdfsFileStatus linkstat = dfsClient.getFileLinkInfo(linkIdPath);
Nfs3FileAttributes objAttr = Nfs3Utils.getNfs3FileAttrFromFileStatus(linkstat, iug);
dirWcc.setPostOpAttr(Nfs3Utils.getFileAttr(dfsClient, linkDirIdPath, iug));
return new SYMLINK3Response(Nfs3Status.NFS3_OK, new FileHandle(objAttr.getFileId()), objAttr, dirWcc);
} catch (IOException e) {
LOG.warn("Exception: " + e);
int status = mapErrorStatus(e);
response.setStatus(status);
return response;
}
}
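A test-style invocation of symlink follows the same pattern as the CREATE3Request usage in the tests above: build the request, serialize it to XDR, and pass it to the program. This is a hedged sketch, assuming a SYMLINK3Request(handle, name, attrs, symData) constructor and the nfsd/securityHandler fixtures from the tests; the names "link1" and "targetPath" are placeholders.
// Sketch only: create a symlink named "link1" under the directory identified by
// dirHandle, pointing at "targetPath". dirHandle, nfsd and securityHandler are
// assumed fixtures; the constructor argument order is an assumption.
SYMLINK3Request symReq = new SYMLINK3Request(dirHandle, "link1", new SetAttr3(), "targetPath");
XDR symXdr = new XDR();
symReq.serialize(symXdr);
SYMLINK3Response symRsp = nfsd.symlink(symXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
assertTrue(symRsp.getStatus() == Nfs3Status.NFS3_OK);
// On success, the response carries the new file handle, the link's attributes and
// the directory's pre/post-op attributes, as set in the method above.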
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.
The class TestReaddir, method testReaddirBasic.
@Test
public void testReaddirBasic() throws IOException {
// Get inodeId of /tmp
HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
long dirId = status.getFileId();
// Create related part of the XDR request
XDR xdr_req = new XDR();
FileHandle handle = new FileHandle(dirId);
handle.serialize(xdr_req);
// cookie
xdr_req.writeLongAsHyper(0);
// verifier
xdr_req.writeLongAsHyper(0);
// count
xdr_req.writeInt(100);
READDIR3Response response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
List<Entry3> dirents = response.getDirList().getEntries();
// including dot and dotdot
assertTrue(dirents.size() == 5);
// Test start listing from f2
status = nn.getRpcServer().getFileInfo(testdir + "/f2");
long f2Id = status.getFileId();
// Create related part of the XDR request
xdr_req = new XDR();
handle = new FileHandle(dirId);
handle.serialize(xdr_req);
// cookie
xdr_req.writeLongAsHyper(f2Id);
// verifier
xdr_req.writeLongAsHyper(0);
// count
xdr_req.writeInt(100);
response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
dirents = response.getDirList().getEntries();
assertTrue(dirents.size() == 1);
Entry3 entry = dirents.get(0);
assertTrue(entry.getName().equals("f3"));
// When the file the cookie refers to is deleted, the listing starts over, not including dot and dotdot
hdfs.delete(new Path(testdir + "/f2"), false);
response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
dirents = response.getDirList().getEntries();
// No dot, dotdot
assertTrue(dirents.size() == 2);
}