Example 16 with FileHandle

Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.

The class TestOpenFileCtxCache, method testScan.

@Test
public void testScan() throws IOException, InterruptedException {
    NfsConfiguration conf = new NfsConfiguration();
    // Only two entries will be in the cache
    conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2);
    DFSClient dfsClient = Mockito.mock(DFSClient.class);
    Nfs3FileAttributes attr = new Nfs3FileAttributes();
    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
    Mockito.when(fos.getPos()).thenReturn((long) 0);
    OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
    OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
    OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
    OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
    OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);
    // Test cleaning expired entry
    boolean ret = cache.put(new FileHandle(1), context1);
    assertTrue(ret);
    ret = cache.put(new FileHandle(2), context2);
    assertTrue(ret);
    Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT + 1);
    cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
    assertTrue(cache.size() == 0);
    // Test cleaning inactive entry
    ret = cache.put(new FileHandle(3), context3);
    assertTrue(ret);
    ret = cache.put(new FileHandle(4), context4);
    assertTrue(ret);
    context3.setActiveStatusForTest(false);
    cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_DEFAULT);
    assertTrue(cache.size() == 1);
    assertTrue(cache.get(new FileHandle(3)) == null);
    assertTrue(cache.get(new FileHandle(4)) != null);
}
Also used : DFSClient (org.apache.hadoop.hdfs.DFSClient), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration), ShellBasedIdMapping (org.apache.hadoop.security.ShellBasedIdMapping), HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream), Test (org.junit.Test)
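
Note that the test stores entries under one FileHandle instance and looks them up later through a freshly constructed handle with the same id (new FileHandle(3), new FileHandle(4)). That pattern only works if FileHandle compares by value; a minimal sketch of that assumption:

import org.apache.hadoop.nfs.nfs3.FileHandle;

public class FileHandleKeyDemo {
    public static void main(String[] args) {
        // Two handles built from the same file id. The put/get pattern in
        // testScan above relies on these being equal as map keys.
        FileHandle a = new FileHandle(4);
        FileHandle b = new FileHandle(4);
        System.out.println(a.equals(b)); // expected: true
        System.out.println(a.hashCode() == b.hashCode()); // expected: true
    }
}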

Example 17 with FileHandle

Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.

The class TestReaddir, method testReaddirBasic.

@Test
public void testReaddirBasic() throws IOException {
    // Get inodeId of /tmp
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    long dirId = status.getFileId();
    // Create related part of the XDR request
    XDR xdr_req = new XDR();
    FileHandle handle = new FileHandle(dirId);
    handle.serialize(xdr_req);
    // cookie
    xdr_req.writeLongAsHyper(0);
    // verifier
    xdr_req.writeLongAsHyper(0);
    // count
    xdr_req.writeInt(100);
    READDIR3Response response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    List<Entry3> dirents = response.getDirList().getEntries();
    // including dot, dotdot
    assertTrue(dirents.size() == 5);
    // Test start listing from f2
    status = nn.getRpcServer().getFileInfo(testdir + "/f2");
    long f2Id = status.getFileId();
    // Create related part of the XDR request
    xdr_req = new XDR();
    handle = new FileHandle(dirId);
    handle.serialize(xdr_req);
    // cookie
    xdr_req.writeLongAsHyper(f2Id);
    // verifier
    xdr_req.writeLongAsHyper(0);
    // count
    xdr_req.writeInt(100);
    response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    dirents = response.getDirList().getEntries();
    assertTrue(dirents.size() == 1);
    Entry3 entry = dirents.get(0);
    assertTrue(entry.getName().equals("f3"));
    // When the file the cookie refers to is deleted, the listing starts over, not including dot, dotdot
    hdfs.delete(new Path(testdir + "/f2"), false);
    response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    dirents = response.getDirList().getEntries();
    // No dot, dotdot
    assertTrue(dirents.size() == 2);
}
Also used : Path (org.apache.hadoop.fs.Path), Entry3 (org.apache.hadoop.nfs.nfs3.response.READDIR3Response.Entry3), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), READDIR3Response (org.apache.hadoop.nfs.nfs3.response.READDIR3Response), InetSocketAddress (java.net.InetSocketAddress), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), XDR (org.apache.hadoop.oncrpc.XDR), Test (org.junit.Test)
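
The three raw writes after the serialized handle follow the READDIR3 request layout: an 8-byte cookie, an 8-byte cookie verifier, and a 4-byte count. A hypothetical helper (not part of Hadoop) that packs the same fields:

import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;

// Hypothetical convenience class; the field order mirrors the
// hand-written writes in testReaddirBasic above.
final class ReaddirXdr {
    static XDR build(long dirId, long cookie, long verifier, int count) {
        XDR xdr = new XDR();
        new FileHandle(dirId).serialize(xdr); // directory handle
        xdr.writeLongAsHyper(cookie);   // resume point; 0 lists from the top
        xdr.writeLongAsHyper(verifier); // cookie verifier; 0 on the first call
        xdr.writeInt(count);            // maximum reply size
        return xdr;
    }
}

With this helper, the second listing in the test is simply ReaddirXdr.build(dirId, f2Id, 0, 100).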

Example 18 with FileHandle

Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.

The class TestReaddir, method testReaddirPlus.

@Test
// Test readdirplus
public void testReaddirPlus() throws IOException {
    // Get inodeId of /tmp
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    long dirId = status.getFileId();
    // Create related part of the XDR request
    XDR xdr_req = new XDR();
    FileHandle handle = new FileHandle(dirId);
    handle.serialize(xdr_req);
    // cookie
    xdr_req.writeLongAsHyper(0);
    // verifier
    xdr_req.writeLongAsHyper(0);
    // dirCount
    xdr_req.writeInt(100);
    // maxCount
    xdr_req.writeInt(1000);
    READDIRPLUS3Response responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    List<EntryPlus3> direntPlus = responsePlus.getDirListPlus().getEntries();
    // including dot, dotdot
    assertTrue(direntPlus.size() == 5);
    // Test start listing from f2
    status = nn.getRpcServer().getFileInfo(testdir + "/f2");
    long f2Id = status.getFileId();
    // Create related part of the XDR request
    xdr_req = new XDR();
    handle = new FileHandle(dirId);
    handle.serialize(xdr_req);
    // cookie
    xdr_req.writeLongAsHyper(f2Id);
    // verifier
    xdr_req.writeLongAsHyper(0);
    // dirCount
    xdr_req.writeInt(100);
    // maxCount
    xdr_req.writeInt(1000);
    responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    direntPlus = responsePlus.getDirListPlus().getEntries();
    assertTrue(direntPlus.size() == 1);
    EntryPlus3 entryPlus = direntPlus.get(0);
    assertTrue(entryPlus.getName().equals("f3"));
    // When the file the cookie refers to is deleted, the listing starts over, not including dot, dotdot
    hdfs.delete(new Path(testdir + "/f2"), false);
    responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    direntPlus = responsePlus.getDirListPlus().getEntries();
    // No dot, dotdot
    assertTrue(direntPlus.size() == 2);
}
Also used : Path (org.apache.hadoop.fs.Path), EntryPlus3 (org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response.EntryPlus3), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), InetSocketAddress (java.net.InetSocketAddress), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), XDR (org.apache.hadoop.oncrpc.XDR), READDIRPLUS3Response (org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response), Test (org.junit.Test)
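
Instead of hand-writing the four trailing fields (cookie, verifier, dirCount, maxCount), the same bytes can be produced with the typed READDIRPLUS3Request wrapper that Example 19 below uses; a sketch, assuming the constructor argument order shown there:

import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.request.READDIRPLUS3Request;
import org.apache.hadoop.oncrpc.XDR;

final class ReaddirPlusXdr {
    // Packs handle, cookie, verifier, dirCount and maxCount in one call,
    // matching the raw writes in testReaddirPlus above.
    static XDR build(long dirId, long cookie) {
        XDR xdr = new XDR();
        READDIRPLUS3Request req =
            new READDIRPLUS3Request(new FileHandle(dirId), cookie, 0, 100, 1000);
        req.serialize(xdr);
        return xdr;
    }
}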

Example 19 with FileHandle

Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.

The class TestRpcProgramNfs3, method testReaddirplus.

@Test(timeout = 60000)
public void testReaddirplus() throws Exception {
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    long dirId = status.getFileId();
    FileHandle handle = new FileHandle(dirId);
    XDR xdr_req = new XDR();
    READDIRPLUS3Request req = new READDIRPLUS3Request(handle, 0, 0, 3, 2);
    req.serialize(xdr_req);
    // Attempt by an unprivileged user should fail.
    READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus());
    // Attempt by a privileged user should pass.
    READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus());
}
Also used : FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), InetSocketAddress (java.net.InetSocketAddress), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), XDR (org.apache.hadoop.oncrpc.XDR), READDIRPLUS3Response (org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response), READDIRPLUS3Request (org.apache.hadoop.nfs.nfs3.request.READDIRPLUS3Request), Test (org.junit.Test)
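
On the successful path the entries can be read back the same way Example 18 does; a short sketch that checks the status before touching the list:

import java.util.List;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response;
import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response.EntryPlus3;

final class ReaddirPlusEntries {
    // Only inspect the directory list on NFS3_OK; an error reply such as
    // NFS3ERR_ACCES carries no meaningful entry list.
    static void printNames(READDIRPLUS3Response response) {
        if (response.getStatus() != Nfs3Status.NFS3_OK) {
            return;
        }
        List<EntryPlus3> entries = response.getDirListPlus().getEntries();
        for (EntryPlus3 entry : entries) {
            System.out.println(entry.getName());
        }
    }
}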

Example 20 with FileHandle

Use of org.apache.hadoop.nfs.nfs3.FileHandle in project hadoop by apache.

The class TestRpcProgramNfs3, method testCreate.

@Test(timeout = 60000)
public void testCreate() throws Exception {
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    long dirId = status.getFileId();
    XDR xdr_req = new XDR();
    FileHandle handle = new FileHandle(dirId);
    CREATE3Request req = new CREATE3Request(handle, "fubar", Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    req.serialize(xdr_req);
    // Attempt by an unprivileged user should fail.
    CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus());
    // Attempt by a privileged user should pass.
    CREATE3Response response2 = nfsd.create(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus());
}
Also used : SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3), CREATE3Response (org.apache.hadoop.nfs.nfs3.response.CREATE3Response), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), InetSocketAddress (java.net.InetSocketAddress), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), XDR (org.apache.hadoop.oncrpc.XDR), CREATE3Request (org.apache.hadoop.nfs.nfs3.request.CREATE3Request), Test (org.junit.Test)
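
The test passes Nfs3Constant.CREATE_UNCHECKED, which does not fail if "fubar" already exists. A sketch of the guarded variant, assuming Nfs3Constant also defines CREATE_GUARDED (the standard NFSv3 create mode under which a duplicate name yields NFS3ERR_EXIST):

import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
import org.apache.hadoop.oncrpc.XDR;

final class GuardedCreateXdr {
    // Same shape as the request in testCreate, but with the guarded
    // create mode so the server rejects an existing name.
    static XDR build(long dirId) {
        XDR xdr = new XDR();
        CREATE3Request req = new CREATE3Request(new FileHandle(dirId),
            "fubar", Nfs3Constant.CREATE_GUARDED, new SetAttr3(), 0);
        req.serialize(xdr);
        return xdr;
    }
}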

Aggregations

FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 79
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 34
XDR (org.apache.hadoop.oncrpc.XDR): 34
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 31
Test (org.junit.Test): 31
InetSocketAddress (java.net.InetSocketAddress): 28
DFSClient (org.apache.hadoop.hdfs.DFSClient): 26
IOException (java.io.IOException): 24
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 19
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 12
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 11
WRITE3Request (org.apache.hadoop.nfs.nfs3.request.WRITE3Request): 8
NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 7
HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 6
CREATE3Request (org.apache.hadoop.nfs.nfs3.request.CREATE3Request): 6
READ3Request (org.apache.hadoop.nfs.nfs3.request.READ3Request): 5
CREATE3Response (org.apache.hadoop.nfs.nfs3.response.CREATE3Response): 5
READ3Response (org.apache.hadoop.nfs.nfs3.response.READ3Response): 5
WRITE3Response (org.apache.hadoop.nfs.nfs3.response.WRITE3Response): 5
VerifierNone (org.apache.hadoop.oncrpc.security.VerifierNone): 5