Example 1 with NfsConfiguration

Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in the Apache Hadoop project.

Class Mountd, method main.

public static void main(String[] args) throws IOException {
    NfsConfiguration config = new NfsConfiguration();
    // null registration socket; 'true' allows connections from insecure ports.
    Mountd mountd = new Mountd(config, null, true);
    // 'true' registers the MOUNT program with the local portmapper.
    mountd.start(true);
}
Also used: NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration)
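
A note on the configuration keys used throughout these examples: the tests set legacy property names such as "nfs3.server.port" and later read values back through NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, which suggests that NfsConfiguration registers deprecated-key mappings on top of Hadoop's Configuration. A minimal sketch under that assumption (the mapping itself is assumed here, not shown by these snippets):

import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;

public class NfsConfigSketch {
    public static void main(String[] args) {
        NfsConfiguration config = new NfsConfiguration();
        // Legacy key, as set by the tests below; assumed to map to
        // NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY via key deprecation.
        config.setInt("nfs3.server.port", 0);
        // Reading through the current key should observe the same value
        // if the deprecation mapping holds.
        int port = config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
                NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT);
        System.out.println("Effective NFS server port: " + port);
    }
}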

Example 2 with NfsConfiguration

Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in the Apache Hadoop project.

Class TestWrites, method testCheckCommitAixCompatMode.

@Test
public void testCheckCommitAixCompatMode() throws IOException {
    DFSClient dfsClient = Mockito.mock(DFSClient.class);
    Nfs3FileAttributes attr = new Nfs3FileAttributes();
    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
    NfsConfiguration conf = new NfsConfiguration();
    conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
    // Enable AIX compatibility mode.
    OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()), true, conf);
    // Test fall-through to pendingWrites check in the event that commitOffset
    // is greater than the number of bytes we've so far flushed.
    Mockito.when(fos.getPos()).thenReturn(2L);
    // checkCommitInternal(commitOffset, channel, xid, preOpAttr, fromRead)
    COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
    Assert.assertEquals(COMMIT_STATUS.COMMIT_FINISHED, status);
    // Test the case when we actually have received more bytes than we're trying
    // to commit.
    ctx.getPendingWritesForTest().put(new OffsetRange(0, 10), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
    Mockito.when(fos.getPos()).thenReturn(10L);
    ctx.setNextOffsetForTest(10L);
    status = ctx.checkCommitInternal(5, null, 1, attr, false);
    Assert.assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status);
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient), Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration), ShellBasedIdMapping(org.apache.hadoop.security.ShellBasedIdMapping), HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream), COMMIT_STATUS(org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS), Test(org.junit.Test)
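
The two assertions above pin down the AIX-compatibility behavior: a commit offset beyond the flushed position reports COMMIT_FINISHED when nothing is pending, while a commit offset at or below the flushed position syncs normally. A hypothetical helper mirroring only those two cases (the real decision lives in OpenFileCtx.checkCommitInternal and covers more states):

// Hypothetical sketch, inferred solely from the two assertions above.
static COMMIT_STATUS aixCommitSketch(long commitOffset, long flushed,
        boolean hasPendingWrites) {
    if (commitOffset <= flushed) {
        // Enough bytes already flushed: sync the stream (second assertion).
        return COMMIT_STATUS.COMMIT_DO_SYNC;
    }
    if (!hasPendingWrites) {
        // Commit offset beyond the flushed position with nothing pending:
        // AIX compatibility mode reports COMMIT_FINISHED (first assertion).
        return COMMIT_STATUS.COMMIT_FINISHED;
    }
    // Other combinations are not exercised by this test.
    throw new UnsupportedOperationException("not covered by the test");
}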

Example 3 with NfsConfiguration

Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in the Apache Hadoop project.

Class TestWrites, method testOverlappingWrites.

@Test
public void testOverlappingWrites() throws IOException, InterruptedException {
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    RpcProgramNfs3 nfsd;
    final int bufSize = 32;
    SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
    Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
    String currentUser = System.getProperty("user.name");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
    // Use an ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    try {
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        Nfs3 nfs3 = new Nfs3(config);
        nfs3.startServiceInternal(false);
        nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
        DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config), config);
        HdfsFileStatus status = dfsClient.getFileInfo("/");
        FileHandle rootHandle = new FileHandle(status.getFileId());
        CREATE3Request createReq = new CREATE3Request(rootHandle, "overlapping-writes" + System.currentTimeMillis(), Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
        XDR createXdr = new XDR();
        createReq.serialize(createXdr);
        CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
        FileHandle handle = createRsp.getObjHandle();
        byte[] buffer = new byte[bufSize];
        for (int i = 0; i < bufSize; i++) {
            buffer[i] = (byte) i;
        }
        int[][] ranges = new int[][] { { 0, 10 }, { 5, 7 }, { 5, 5 }, { 10, 6 }, { 18, 6 }, { 20, 6 }, { 28, 4 }, { 16, 2 }, { 25, 4 } };
        for (int i = 0; i < ranges.length; i++) {
            int[] x = ranges[i];
            byte[] tbuffer = new byte[x[1]];
            for (int j = 0; j < x[1]; j++) {
                tbuffer[j] = buffer[x[0] + j];
            }
            WRITE3Request writeReq = new WRITE3Request(handle, (long) x[0], x[1], WriteStableHow.UNSTABLE, ByteBuffer.wrap(tbuffer));
            XDR writeXdr = new XDR();
            writeReq.serialize(writeXdr);
            nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
        }
        waitWrite(nfsd, handle, 60000);
        READ3Request readReq = new READ3Request(handle, 0, bufSize);
        XDR readXdr = new XDR();
        readReq.serialize(readXdr);
        READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
        assertTrue(Arrays.equals(buffer, readRsp.getData().array()));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient), SecurityHandler(org.apache.hadoop.oncrpc.security.SecurityHandler), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle), InetSocketAddress(java.net.InetSocketAddress), XDR(org.apache.hadoop.oncrpc.XDR), WRITE3Request(org.apache.hadoop.nfs.nfs3.request.WRITE3Request), NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration), SetAttr3(org.apache.hadoop.nfs.nfs3.request.SetAttr3), READ3Request(org.apache.hadoop.nfs.nfs3.request.READ3Request), CREATE3Response(org.apache.hadoop.nfs.nfs3.response.CREATE3Response), HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus), CREATE3Request(org.apache.hadoop.nfs.nfs3.request.CREATE3Request), READ3Response(org.apache.hadoop.nfs.nfs3.response.READ3Response), Test(org.junit.Test)
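
Why the final assertion holds: the nine ranges jointly cover every byte of [0, bufSize), and each write copies the original bytes at its own offset, so the read-back must equal buffer regardless of how the overlapping writes are ordered or coalesced. A standalone check of that range arithmetic (illustrative only, not part of the test):

public static void main(String[] args) {
    boolean[] covered = new boolean[32];
    int[][] ranges = { { 0, 10 }, { 5, 7 }, { 5, 5 }, { 10, 6 }, { 18, 6 },
            { 20, 6 }, { 28, 4 }, { 16, 2 }, { 25, 4 } };
    for (int[] r : ranges) {
        for (int j = 0; j < r[1]; j++) {
            covered[r[0] + j] = true; // mark each byte written by this range
        }
    }
    for (boolean c : covered) {
        if (!c) throw new AssertionError("gap in coverage");
    }
    System.out.println("all 32 bytes covered");
}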

Example 4 with NfsConfiguration

Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in the Apache Hadoop project.

Class TestWrites, method testCheckCommitFromReadLargeFileUpload.

// Validate all the commit-check return codes (OpenFileCtx.COMMIT_STATUS)
// with the large file upload option enabled.
@Test
public void testCheckCommitFromReadLargeFileUpload() throws IOException {
    DFSClient dfsClient = Mockito.mock(DFSClient.class);
    Nfs3FileAttributes attr = new Nfs3FileAttributes();
    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
    Mockito.when(fos.getPos()).thenReturn(0L);
    NfsConfiguration config = new NfsConfiguration();
    config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, true);
    OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(config), false, config);
    // fake handle for "/dumpFilePath"
    FileHandle h = new FileHandle(1);
    COMMIT_STATUS ret;
    WriteManager wm = new WriteManager(new ShellBasedIdMapping(config), config, false);
    assertTrue(wm.addOpenFileStream(h, ctx));
    // Test inactive open file context
    ctx.setActiveStatusForTest(false);
    Channel ch = Mockito.mock(Channel.class);
    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret);
    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0));
    ctx.getPendingWritesForTest().put(new OffsetRange(10, 15), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE, ret);
    assertEquals(Nfs3Status.NFS3ERR_IO, wm.commitBeforeRead(dfsClient, h, 0));
    // Test request with non zero commit offset
    ctx.setActiveStatusForTest(true);
    Mockito.when(fos.getPos()).thenReturn(6L);
    ctx.setNextOffsetForTest(10L);
    COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false);
    assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status);
    // Do_SYNC state will be updated to FINISHED after data sync
    ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret);
    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 5));
    // Test request with sequential writes
    status = ctx.checkCommitInternal(9, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_SPECIAL_WAIT, status);
    ret = ctx.checkCommit(dfsClient, 9, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_SPECIAL_WAIT, ret);
    assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 9));
    // Test request with non-sequential writes
    ConcurrentNavigableMap<Long, CommitCtx> commits = ctx.getPendingCommitsForTest();
    assertEquals(0, commits.size());
    ret = ctx.checkCommit(dfsClient, 16, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS, ret);
    // commit triggered by read doesn't wait
    assertEquals(0, commits.size());
    assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 16));
    // Test request with zero commit offset
    // There is one pending write [10,15]
    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_SPECIAL_WAIT, ret);
    assertEquals(0, commits.size());
    assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0));
    // Empty pending writes
    ctx.getPendingWritesForTest().remove(new OffsetRange(10, 15));
    ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true);
    assertEquals(COMMIT_STATUS.COMMIT_SPECIAL_WAIT, ret);
    assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0));
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient), CommitCtx(org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx), FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle), Nfs3FileAttributes(org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), Channel(org.jboss.netty.channel.Channel), NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration), COMMIT_STATUS(org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS), ShellBasedIdMapping(org.apache.hadoop.security.ShellBasedIdMapping), HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream), Test(org.junit.Test)
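
From the assertions alone, commitBeforeRead appears to translate commit results into NFS3 status codes roughly as sketched below. This switch is inferred from this test's expectations, not taken from the WriteManager source:

// Inferred from the assertions in this test; not the actual WriteManager code.
static int toNfs3StatusSketch(COMMIT_STATUS s) {
    switch (s) {
        case COMMIT_FINISHED:
        case COMMIT_SPECIAL_SUCCESS:
        case COMMIT_INACTIVE_CTX:
            return Nfs3Status.NFS3_OK;         // safe for the read to proceed
        case COMMIT_INACTIVE_WITH_PENDING_WRITE:
            return Nfs3Status.NFS3ERR_IO;      // pending data can never be flushed
        case COMMIT_SPECIAL_WAIT:
            return Nfs3Status.NFS3ERR_JUKEBOX; // ask the client to retry later
        default:
            return Nfs3Status.NFS3ERR_IO;
    }
}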

Example 5 with NfsConfiguration

Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in the Apache Hadoop project.

Class TestWrites, method testOOOWrites.

@Test
public void testOOOWrites() throws IOException, InterruptedException {
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    RpcProgramNfs3 nfsd;
    final int bufSize = 32;
    final int numOOO = 3;
    SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
    Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
    String currentUser = System.getProperty("user.name");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
    // Use an ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    try {
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        Nfs3 nfs3 = new Nfs3(config);
        nfs3.startServiceInternal(false);
        nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
        DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config), config);
        HdfsFileStatus status = dfsClient.getFileInfo("/");
        FileHandle rootHandle = new FileHandle(status.getFileId());
        CREATE3Request createReq = new CREATE3Request(rootHandle, "out-of-order-write" + System.currentTimeMillis(), Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
        XDR createXdr = new XDR();
        createReq.serialize(createXdr);
        CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
        FileHandle handle = createRsp.getObjHandle();
        byte[][] oooBuf = new byte[numOOO][bufSize];
        for (int i = 0; i < numOOO; i++) {
            Arrays.fill(oooBuf[i], (byte) i);
        }
        for (int i = 0; i < numOOO; i++) {
            final long offset = (numOOO - 1 - i) * bufSize;
            WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize, WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
            XDR writeXdr = new XDR();
            writeReq.serialize(writeXdr);
            nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
        }
        waitWrite(nfsd, handle, 60000);
        READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
        XDR readXdr = new XDR();
        readReq.serialize(readXdr);
        READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
        assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient), SecurityHandler(org.apache.hadoop.oncrpc.security.SecurityHandler), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), FileHandle(org.apache.hadoop.nfs.nfs3.FileHandle), InetSocketAddress(java.net.InetSocketAddress), XDR(org.apache.hadoop.oncrpc.XDR), WRITE3Request(org.apache.hadoop.nfs.nfs3.request.WRITE3Request), NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration), SetAttr3(org.apache.hadoop.nfs.nfs3.request.SetAttr3), READ3Request(org.apache.hadoop.nfs.nfs3.request.READ3Request), CREATE3Response(org.apache.hadoop.nfs.nfs3.response.CREATE3Response), HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus), CREATE3Request(org.apache.hadoop.nfs.nfs3.request.CREATE3Request), READ3Response(org.apache.hadoop.nfs.nfs3.response.READ3Response), Test(org.junit.Test)
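
The read at offset bufSize returns oooBuf[1] because the loop writes buffer i at offset (numOOO - 1 - i) * bufSize, placing the blocks in reverse order. A short trace of that arithmetic (illustrative only):

// Offsets produced by the write loop above (numOOO = 3, bufSize = 32):
//   i = 0 -> (3 - 1 - 0) * 32 = 64
//   i = 1 -> (3 - 1 - 1) * 32 = 32
//   i = 2 -> (3 - 1 - 2) * 32 = 0
// Hence the READ3Request at offset 32 must return the bytes of oooBuf[1].
for (int i = 0; i < 3; i++) {
    System.out.println("oooBuf[" + i + "] -> offset " + (3 - 1 - i) * 32);
}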

Aggregations

NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 21 usages
Test (org.junit.Test): 17 usages
DFSClient (org.apache.hadoop.hdfs.DFSClient): 12 usages
HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 8 usages
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 8 usages
ShellBasedIdMapping (org.apache.hadoop.security.ShellBasedIdMapping): 8 usages
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 7 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 5 usages
COMMIT_STATUS (org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS): 5 usages
CommitCtx (org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx): 5 usages
XDR (org.apache.hadoop.oncrpc.XDR): 5 usages
InetSocketAddress (java.net.InetSocketAddress): 4 usages
Channel (org.jboss.netty.channel.Channel): 4 usages
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 3 usages
CREATE3Request (org.apache.hadoop.nfs.nfs3.request.CREATE3Request): 3 usages
READ3Request (org.apache.hadoop.nfs.nfs3.request.READ3Request): 3 usages
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 3 usages
WRITE3Request (org.apache.hadoop.nfs.nfs3.request.WRITE3Request): 3 usages
CREATE3Response (org.apache.hadoop.nfs.nfs3.response.CREATE3Response): 3 usages
READ3Response (org.apache.hadoop.nfs.nfs3.response.READ3Response): 3 usages