Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class TestOpenFileCtxCache, method testScan.
@Test
public void testScan() throws IOException, InterruptedException {
  NfsConfiguration conf = new NfsConfiguration();
  // Only two entries will be in the cache
  conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2);
  DFSClient dfsClient = Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr = new Nfs3FileAttributes();
  HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
  Mockito.when(fos.getPos()).thenReturn((long) 0);
  OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
  OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
  OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
  OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath",
      dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
  OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);
  // Test cleaning expired entry
  boolean ret = cache.put(new FileHandle(1), context1);
  assertTrue(ret);
  ret = cache.put(new FileHandle(2), context2);
  assertTrue(ret);
  Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT + 1);
  cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
  assertTrue(cache.size() == 0);
  // Test cleaning inactive entry
  ret = cache.put(new FileHandle(3), context3);
  assertTrue(ret);
  ret = cache.put(new FileHandle(4), context4);
  assertTrue(ret);
  context3.setActiveStatusForTest(false);
  cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_DEFAULT);
  assertTrue(cache.size() == 1);
  assertTrue(cache.get(new FileHandle(3)) == null);
  assertTrue(cache.get(new FileHandle(4)) != null);
}
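The cache lookups above work because FileHandle is keyed by the wrapped HDFS file id: two handles constructed from the same id compare equal, so cache.get(new FileHandle(3)) finds an entry that was stored under a different FileHandle instance. A minimal sketch of that assumption (value-based equals is implied by the test's behavior, not spelled out in this snippet):

  // Demo of the value equality the test relies on: a fresh FileHandle
  // built from the same file id can look up a previously cached entry.
  FileHandle stored = new FileHandle(3);
  FileHandle lookup = new FileHandle(3);
  assertTrue(stored.equals(lookup)); // same file id, equal handles
  assertTrue(stored.hashCode() == lookup.hashCode());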
Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class TestReaddir, method testReaddirBasic.
@Test
public void testReaddirBasic() throws IOException {
  // Get the inode id of /tmp
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  // Create the relevant part of the XDR request
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  handle.serialize(xdr_req);
  // cookie
  xdr_req.writeLongAsHyper(0);
  // verifier
  xdr_req.writeLongAsHyper(0);
  // count
  xdr_req.writeInt(100);
  READDIR3Response response = nfsd.readdir(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  List<Entry3> dirents = response.getDirList().getEntries();
  // including dot and dotdot
  assertTrue(dirents.size() == 5);
  // Test starting the listing from f2
  status = nn.getRpcServer().getFileInfo(testdir + "/f2");
  long f2Id = status.getFileId();
  // Create the relevant part of the XDR request
  xdr_req = new XDR();
  handle = new FileHandle(dirId);
  handle.serialize(xdr_req);
  // cookie
  xdr_req.writeLongAsHyper(f2Id);
  // verifier
  xdr_req.writeLongAsHyper(0);
  // count
  xdr_req.writeInt(100);
  response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler,
      new InetSocketAddress("localhost", 1234));
  dirents = response.getDirList().getEntries();
  assertTrue(dirents.size() == 1);
  Entry3 entry = dirents.get(0);
  assertTrue(entry.getName().equals("f3"));
  // When the file the cookie refers to is deleted, the listing starts
  // over, not including dot and dotdot
  hdfs.delete(new Path(testdir + "/f2"), false);
  response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler,
      new InetSocketAddress("localhost", 1234));
  dirents = response.getDirList().getEntries();
  // No dot, dotdot
  assertTrue(dirents.size() == 2);
}
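Both requests above write the same wire layout by hand: the serialized directory handle, an 8-byte cookie, an 8-byte cookie verifier, and a 4-byte count capping the reply size. As a minimal sketch (a hypothetical helper, not part of TestReaddir, using only the XDR and FileHandle calls shown above), the construction could be factored as:

  // Hypothetical helper: builds the READDIR3 request body used above.
  private static XDR buildReaddirRequest(long dirId, long cookie, int count) {
    XDR xdr = new XDR();
    new FileHandle(dirId).serialize(xdr); // directory file handle
    xdr.writeLongAsHyper(cookie); // 0 for the first call, or the id of the
                                  // last entry seen to resume the listing
    xdr.writeLongAsHyper(0);      // cookie verifier, 0 on the first call
    xdr.writeInt(count);          // maximum reply size in bytes
    return xdr;
  }

With this helper, the second request above becomes buildReaddirRequest(dirId, f2Id, 100).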
Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class TestReaddir, method testReaddirPlus.
// Test readdirplus
@Test
public void testReaddirPlus() throws IOException {
  // Get the inode id of /tmp
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  // Create the relevant part of the XDR request
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  handle.serialize(xdr_req);
  // cookie
  xdr_req.writeLongAsHyper(0);
  // verifier
  xdr_req.writeLongAsHyper(0);
  // dirCount
  xdr_req.writeInt(100);
  // maxCount
  xdr_req.writeInt(1000);
  READDIRPLUS3Response responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  List<EntryPlus3> direntPlus = responsePlus.getDirListPlus().getEntries();
  // including dot and dotdot
  assertTrue(direntPlus.size() == 5);
  // Test starting the listing from f2
  status = nn.getRpcServer().getFileInfo(testdir + "/f2");
  long f2Id = status.getFileId();
  // Create the relevant part of the XDR request
  xdr_req = new XDR();
  handle = new FileHandle(dirId);
  handle.serialize(xdr_req);
  // cookie
  xdr_req.writeLongAsHyper(f2Id);
  // verifier
  xdr_req.writeLongAsHyper(0);
  // dirCount
  xdr_req.writeInt(100);
  // maxCount
  xdr_req.writeInt(1000);
  responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler,
      new InetSocketAddress("localhost", 1234));
  direntPlus = responsePlus.getDirListPlus().getEntries();
  assertTrue(direntPlus.size() == 1);
  EntryPlus3 entryPlus = direntPlus.get(0);
  assertTrue(entryPlus.getName().equals("f3"));
  // When the file the cookie refers to is deleted, the listing starts
  // over, not including dot and dotdot
  hdfs.delete(new Path(testdir + "/f2"), false);
  responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler,
      new InetSocketAddress("localhost", 1234));
  direntPlus = responsePlus.getDirListPlus().getEntries();
  // No dot, dotdot
  assertTrue(direntPlus.size() == 2);
}
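READDIRPLUS uses the same request prefix (handle, cookie, verifier) but ends with two counts instead of one: dirCount limits the bytes spent on directory entries, while maxCount caps the total reply. A sketch in the same hypothetical-helper style as above:

  // Hypothetical helper: builds the READDIRPLUS3 request body used above.
  private static XDR buildReaddirplusRequest(long dirId, long cookie,
      int dirCount, int maxCount) {
    XDR xdr = new XDR();
    new FileHandle(dirId).serialize(xdr); // directory file handle
    xdr.writeLongAsHyper(cookie); // cookie
    xdr.writeLongAsHyper(0);      // cookie verifier
    xdr.writeInt(dirCount);       // byte limit for directory information
    xdr.writeInt(maxCount);       // byte limit for the whole reply
    return xdr;
  }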
Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class TestRpcProgramNfs3, method testReaddirplus.
@Test(timeout = 60000)
public void testReaddirplus() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  READDIRPLUS3Request req = new READDIRPLUS3Request(handle, 0, 0, 3, 2);
  req.serialize(xdr_req);
  // Attempt by an unprivileged user should fail.
  READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());
  // Attempt by a privileged user should pass.
  READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
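Unlike the TestReaddir methods above, this test builds the request through the typed READDIRPLUS3Request class rather than writing raw XDR fields. Assuming its constructor arguments are (handle, cookie, cookieVerf, dirCount, maxCount), matching the field order hand-serialized in testReaddirPlus, the request above corresponds to this raw encoding:

  // Raw-XDR equivalent of READDIRPLUS3Request(handle, 0, 0, 3, 2), under
  // the assumed argument order (handle, cookie, cookieVerf, dirCount,
  // maxCount). Uses the handle variable from the test above.
  XDR raw = new XDR();
  handle.serialize(raw);
  raw.writeLongAsHyper(0); // cookie
  raw.writeLongAsHyper(0); // cookie verifier
  raw.writeInt(3);         // dirCount
  raw.writeInt(2);         // maxCount

The test only checks the status codes, so the very small dirCount and maxCount values do not matter here.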
Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.
The class TestRpcProgramNfs3, method testCreate.
@Test(timeout = 60000)
public void testCreate() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  CREATE3Request req = new CREATE3Request(handle, "fubar",
      Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
  req.serialize(xdr_req);
  // Attempt by an unprivileged user should fail.
  CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());
  // Attempt by a privileged user should pass.
  CREATE3Response response2 = nfsd.create(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
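CREATE_UNCHECKED is one of the three NFSv3 create modes from RFC 1813: unchecked creation succeeds even if the file already exists, guarded creation fails with NFS3ERR_EXIST, and exclusive creation uses a client verifier for idempotent retries. As a hedged variant of the test above (assuming Nfs3Constant also defines CREATE_GUARDED, which is not shown in this snippet), a guarded retry of the same name would be expected to fail:

  // Hypothetical follow-up: after the successful unchecked create above,
  // a guarded create of the same name should report that it exists.
  // Nfs3Constant.CREATE_GUARDED is assumed, not shown in this snippet.
  XDR guardedXdr = new XDR();
  new CREATE3Request(handle, "fubar", Nfs3Constant.CREATE_GUARDED,
      new SetAttr3(), 0).serialize(guardedXdr);
  CREATE3Response guardedResp = nfsd.create(guardedXdr.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_EXIST,
      guardedResp.getStatus());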