
Example 86 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class TestRpcProgramNfs3 method testLookup.

@Test(timeout = 60000)
public void testLookup() throws Exception {
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    long dirId = status.getFileId();
    FileHandle handle = new FileHandle(dirId);
    LOOKUP3Request lookupReq = new LOOKUP3Request(handle, "bar");
    XDR xdr_req = new XDR();
    lookupReq.serialize(xdr_req);
    // Attempt by an unprivileged user should fail.
    LOOKUP3Response response1 = nfsd.lookup(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response1.getStatus());
    // Attempt by a privileged user should pass.
    LOOKUP3Response response2 = nfsd.lookup(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, response2.getStatus());
}
Also used : FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) InetSocketAddress(java.net.InetSocketAddress) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) LOOKUP3Request(org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request) XDR(org.apache.hadoop.oncrpc.XDR) LOOKUP3Response(org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response) Test(org.junit.Test)

Example 87 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class TestRpcProgramNfs3 method createFileUsingNfs.

private void createFileUsingNfs(String fileName, byte[] buffer) throws Exception {
    DFSTestUtil.createFile(hdfs, new Path(fileName), 0, (short) 1, 0);
    final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
    final long dirId = status.getFileId();
    final FileHandle handle = new FileHandle(dirId);
    final WRITE3Request writeReq = new WRITE3Request(handle, 0, buffer.length, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
    final XDR xdr_req = new XDR();
    writeReq.serialize(xdr_req);
    final WRITE3Response response = nfsd.write(xdr_req.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect response: ", null, response);
}
Also used : Path(org.apache.hadoop.fs.Path) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) InetSocketAddress(java.net.InetSocketAddress) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) XDR(org.apache.hadoop.oncrpc.XDR) WRITE3Request(org.apache.hadoop.nfs.nfs3.request.WRITE3Request) WRITE3Response(org.apache.hadoop.nfs.nfs3.response.WRITE3Response)
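
The read-back counterpart of this helper follows the same shape. A minimal sketch, assuming the READ3Request and READ3Response classes from the same org.apache.hadoop.nfs.nfs3 request/response packages, a read() entry point on the nfsd fixture, and the test fields (nn, nfsd, securityHandler) used in the examples above; the helper name readFileUsingNfs and the exact call signature are assumptions, not taken from this listing:

private void readFileUsingNfs(String fileName, int len) throws Exception {
    // Resolve the file to an NFS3 handle via its HDFS file id, as in the examples above.
    final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
    final FileHandle handle = new FileHandle(status.getFileId());
    // Serialize a READ3 request for the first len bytes into an XDR buffer.
    final READ3Request readReq = new READ3Request(handle, 0, len);
    final XDR xdr_req = new XDR();
    readReq.serialize(xdr_req);
    // Issue the read as a privileged user and check the NFS3 status only.
    final READ3Response response = nfsd.read(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, response.getStatus());
}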

Example 88 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class TestRpcProgramNfs3 method testSetattr.

@Test(timeout = 60000)
public void testSetattr() throws Exception {
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    long dirId = status.getFileId();
    XDR xdr_req = new XDR();
    FileHandle handle = new FileHandle(dirId);
    SetAttr3 symAttr = new SetAttr3(0, 1, 0, 0, null, null, EnumSet.of(SetAttrField.UID));
    SETATTR3Request req = new SETATTR3Request(handle, symAttr, false, null);
    req.serialize(xdr_req);
    // Attempt by an unprivileged user should fail.
    SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response1.getStatus());
    // Attempt by a privileged user should pass.
    SETATTR3Response response2 = nfsd.setattr(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, response2.getStatus());
}
Also used : SetAttr3(org.apache.hadoop.nfs.nfs3.request.SetAttr3) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) InetSocketAddress(java.net.InetSocketAddress) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) XDR(org.apache.hadoop.oncrpc.XDR) SETATTR3Request(org.apache.hadoop.nfs.nfs3.request.SETATTR3Request) SETATTR3Response(org.apache.hadoop.nfs.nfs3.response.SETATTR3Response) Test(org.junit.Test)

Example 89 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class TestRpcProgramNfs3 method testWrite.

@Test(timeout = 60000)
public void testWrite() throws Exception {
    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
    long dirId = status.getFileId();
    FileHandle handle = new FileHandle(dirId);
    byte[] buffer = new byte[10];
    for (int i = 0; i < 10; i++) {
        buffer[i] = (byte) i;
    }
    WRITE3Request writeReq = new WRITE3Request(handle, 0, 10, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
    XDR xdr_req = new XDR();
    writeReq.serialize(xdr_req);
    // Attempt by an unprivileged user should fail.
    WRITE3Response response1 = nfsd.write(xdr_req.asReadOnlyWrap(), null, 1, securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus());
    // Attempt by a privileged user should pass.
    WRITE3Response response2 = nfsd.write(xdr_req.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect response:", null, response2);
}
Also used : FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) InetSocketAddress(java.net.InetSocketAddress) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) XDR(org.apache.hadoop.oncrpc.XDR) WRITE3Request(org.apache.hadoop.nfs.nfs3.request.WRITE3Request) WRITE3Response(org.apache.hadoop.nfs.nfs3.response.WRITE3Response) Test(org.junit.Test)

Example 90 with HdfsFileStatus

use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by apache.

the class TestRpcProgramNfs3 method commit.

private void commit(String fileName, int len) throws Exception {
    final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
    final long dirId = status.getFileId();
    final FileHandle handle = new FileHandle(dirId);
    final XDR xdr_req = new XDR();
    final COMMIT3Request req = new COMMIT3Request(handle, 0, len);
    req.serialize(xdr_req);
    Channel ch = Mockito.mock(Channel.class);
    COMMIT3Response response2 = nfsd.commit(xdr_req.asReadOnlyWrap(), ch, 1, securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect COMMIT3Response:", null, response2);
}
Also used : COMMIT3Response(org.apache.hadoop.nfs.nfs3.response.COMMIT3Response) FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle) InetSocketAddress(java.net.InetSocketAddress) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) XDR(org.apache.hadoop.oncrpc.XDR) Channel(org.jboss.netty.channel.Channel) COMMIT3Request(org.apache.hadoop.nfs.nfs3.request.COMMIT3Request)
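
All five examples share the same setup: resolve a path to an HdfsFileStatus through the NameNode RPC server, turn its file id into an NFS3 FileHandle, and serialize a request into an XDR buffer before invoking the RPC program. A minimal sketch of that shared step, using only the calls already shown above; the helper name serializeLookup is hypothetical:

private XDR serializeLookup(String dirPath, String name) throws Exception {
    // Path -> HdfsFileStatus -> file id, as in each example above.
    final HdfsFileStatus status = nn.getRpcServer().getFileInfo(dirPath);
    // The NFS3 file handle is built from the HDFS file id.
    final FileHandle handle = new FileHandle(status.getFileId());
    // Serialize a LOOKUP3 request for a child entry; the result is ready to
    // pass to nfsd.lookup(xdr.asReadOnlyWrap(), securityHandler, clientAddress).
    final LOOKUP3Request req = new LOOKUP3Request(handle, name);
    final XDR xdr = new XDR();
    req.serialize(xdr);
    return xdr;
}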

Aggregations

HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 124 usages
Test (org.junit.Test): 51 usages
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 34 usages
IOException (java.io.IOException): 28 usages
InetSocketAddress (java.net.InetSocketAddress): 28 usages
XDR (org.apache.hadoop.oncrpc.XDR): 28 usages
AccessControlException (org.apache.hadoop.security.AccessControlException): 26 usages
Path (org.apache.hadoop.fs.Path): 23 usages
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 23 usages
FileNotFoundException (java.io.FileNotFoundException): 16 usages
DFSClient (org.apache.hadoop.hdfs.DFSClient): 11 usages
DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing): 11 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8 usages
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 8 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 7 usages
Matchers.anyString (org.mockito.Matchers.anyString): 7 usages
Configuration (org.apache.hadoop.conf.Configuration): 6 usages