Example 86 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestNameNodeRespectsBindHostKeys, method testLifelineRpcBindHostKey.

@Test(timeout = 300000)
public void testLifelineRpcBindHostKey() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    LOG.info("Testing without " + DFS_NAMENODE_LIFELINE_RPC_BIND_HOST_KEY);
    conf.set(DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
    // NN should not bind the wildcard address by default.
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        String address = getLifelineRpcServerAddress(cluster);
        assertThat("Bind address not expected to be wildcard by default.", address, not("/" + WILDCARD_ADDRESS));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }
    LOG.info("Testing with " + DFS_NAMENODE_LIFELINE_RPC_BIND_HOST_KEY);
    // Tell NN to bind the wildcard address.
    conf.set(DFS_NAMENODE_LIFELINE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
    // Verify that NN binds wildcard address now.
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        String address = getLifelineRpcServerAddress(cluster);
        assertThat("Bind address " + address + " is not wildcard.", address, is("/" + WILDCARD_ADDRESS));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Test (org.junit.Test)
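
All of the examples on this page repeat the same try/finally bookkeeping around the cluster. Assuming a Hadoop version in which MiniDFSCluster implements AutoCloseable (so that close() delegates to shutdown()), the first scenario above could be written with try-with-resources instead; a minimal sketch under that assumption:

try (MiniDFSCluster cluster =
    new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
    // close() shuts the cluster down when the block exits, even on failure
    cluster.waitActive();
    String address = getLifelineRpcServerAddress(cluster);
    assertThat("Bind address not expected to be wildcard by default.", address, not("/" + WILDCARD_ADDRESS));
}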

Example 87 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestWrites, method testOverlappingWrites.

@Test
public void testOverlappingWrites() throws IOException, InterruptedException {
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    RpcProgramNfs3 nfsd;
    final int bufSize = 32;
    SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
    Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
    String currentUser = System.getProperty("user.name");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
    // Use an ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    try {
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        Nfs3 nfs3 = new Nfs3(config);
        nfs3.startServiceInternal(false);
        nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
        DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config), config);
        HdfsFileStatus status = dfsClient.getFileInfo("/");
        FileHandle rootHandle = new FileHandle(status.getFileId());
        CREATE3Request createReq = new CREATE3Request(rootHandle, "overlapping-writes" + System.currentTimeMillis(), Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
        XDR createXdr = new XDR();
        createReq.serialize(createXdr);
        CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
        FileHandle handle = createRsp.getObjHandle();
        byte[] buffer = new byte[bufSize];
        for (int i = 0; i < bufSize; i++) {
            buffer[i] = (byte) i;
        }
        int[][] ranges = new int[][] { { 0, 10 }, { 5, 7 }, { 5, 5 }, { 10, 6 }, { 18, 6 }, { 20, 6 }, { 28, 4 }, { 16, 2 }, { 25, 4 } };
        for (int i = 0; i < ranges.length; i++) {
            int[] x = ranges[i];
            byte[] tbuffer = new byte[x[1]];
            for (int j = 0; j < x[1]; j++) {
                tbuffer[j] = buffer[x[0] + j];
            }
            WRITE3Request writeReq = new WRITE3Request(handle, (long) x[0], x[1], WriteStableHow.UNSTABLE, ByteBuffer.wrap(tbuffer));
            XDR writeXdr = new XDR();
            writeReq.serialize(writeXdr);
            nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
        }
        waitWrite(nfsd, handle, 60000);
        READ3Request readReq = new READ3Request(handle, 0, bufSize);
        XDR readXdr = new XDR();
        readReq.serialize(readXdr);
        READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
        assertTrue(Arrays.equals(buffer, readRsp.getData().array()));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), SecurityHandler (org.apache.hadoop.oncrpc.security.SecurityHandler), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), InetSocketAddress (java.net.InetSocketAddress), XDR (org.apache.hadoop.oncrpc.XDR), WRITE3Request (org.apache.hadoop.nfs.nfs3.request.WRITE3Request), NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration), SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3), READ3Request (org.apache.hadoop.nfs.nfs3.request.READ3Request), CREATE3Response (org.apache.hadoop.nfs.nfs3.response.CREATE3Response), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), CREATE3Request (org.apache.hadoop.nfs.nfs3.request.CREATE3Request), READ3Response (org.apache.hadoop.nfs.nfs3.response.READ3Response), Test (org.junit.Test)
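
The nine {offset, length} pairs are not arbitrary: together they cover every byte of the 32-byte buffer, which is what makes the final comparison of the read-back data against the whole buffer valid. A standalone sketch (not part of the test) that verifies this coverage property:

public class RangeCoverageCheck {
    public static void main(String[] args) {
        final int bufSize = 32;
        // same {offset, length} ranges as the test above
        int[][] ranges = { { 0, 10 }, { 5, 7 }, { 5, 5 }, { 10, 6 }, { 18, 6 }, { 20, 6 }, { 28, 4 }, { 16, 2 }, { 25, 4 } };
        boolean[] covered = new boolean[bufSize];
        for (int[] r : ranges) {
            // mark every byte touched by this write
            for (int off = r[0]; off < r[0] + r[1]; off++) {
                covered[off] = true;
            }
        }
        for (int i = 0; i < bufSize; i++) {
            if (!covered[i]) {
                throw new AssertionError("byte " + i + " is never written");
            }
        }
        System.out.println("all " + bufSize + " bytes are written at least once");
    }
}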

Example 88 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestWrites, method testOOOWrites.

@Test
public void testOOOWrites() throws IOException, InterruptedException {
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    RpcProgramNfs3 nfsd;
    final int bufSize = 32;
    final int numOOO = 3;
    SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
    Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
    String currentUser = System.getProperty("user.name");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
    // Use an ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    try {
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        Nfs3 nfs3 = new Nfs3(config);
        nfs3.startServiceInternal(false);
        nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
        DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config), config);
        HdfsFileStatus status = dfsClient.getFileInfo("/");
        FileHandle rootHandle = new FileHandle(status.getFileId());
        CREATE3Request createReq = new CREATE3Request(rootHandle, "out-of-order-write" + System.currentTimeMillis(), Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
        XDR createXdr = new XDR();
        createReq.serialize(createXdr);
        CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
        FileHandle handle = createRsp.getObjHandle();
        byte[][] oooBuf = new byte[numOOO][bufSize];
        for (int i = 0; i < numOOO; i++) {
            Arrays.fill(oooBuf[i], (byte) i);
        }
        for (int i = 0; i < numOOO; i++) {
            final long offset = (numOOO - 1 - i) * bufSize;
            WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize, WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
            XDR writeXdr = new XDR();
            writeReq.serialize(writeXdr);
            nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
        }
        waitWrite(nfsd, handle, 60000);
        READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
        XDR readXdr = new XDR();
        readReq.serialize(readXdr);
        READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
        assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), SecurityHandler (org.apache.hadoop.oncrpc.security.SecurityHandler), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), InetSocketAddress (java.net.InetSocketAddress), XDR (org.apache.hadoop.oncrpc.XDR), WRITE3Request (org.apache.hadoop.nfs.nfs3.request.WRITE3Request), NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration), SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3), READ3Request (org.apache.hadoop.nfs.nfs3.request.READ3Request), CREATE3Response (org.apache.hadoop.nfs.nfs3.response.CREATE3Response), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), CREATE3Request (org.apache.hadoop.nfs.nfs3.request.CREATE3Request), READ3Response (org.apache.hadoop.nfs.nfs3.response.READ3Response), Test (org.junit.Test)
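
The offset arithmetic is what makes this an out-of-order test: with numOOO = 3 and bufSize = 32, iteration i writes the buffer filled with byte value i at offset (numOOO - 1 - i) * bufSize, so the writes arrive back-to-front at offsets 64, 32, and 0. Reading [32, 64) should therefore return oooBuf[1]. A standalone sketch of that arithmetic:

public class OooOffsets {
    public static void main(String[] args) {
        final int numOOO = 3;
        final int bufSize = 32;
        for (int i = 0; i < numOOO; i++) {
            // same expression as the test: later iterations land earlier in the file
            long offset = (numOOO - 1 - i) * bufSize;
            System.out.println("write #" + i + " (fill byte " + i + ") -> offset " + offset);
        }
    }
}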

Example 89 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestMountd, method testStart.

@Test
public void testStart() throws IOException {
    // Start minicluster
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();
    // Use an ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    int newTimeoutMillis = 1000; // 1s
    // Set the new portmap rpc timeout values and check
    config.setInt(NfsConfigKeys.NFS_UDP_CLIENT_PORTMAP_TIMEOUT_MILLIS_KEY, newTimeoutMillis);
    assertTrue(config.getInt(NfsConfigKeys.NFS_UDP_CLIENT_PORTMAP_TIMEOUT_MILLIS_KEY, 0) == newTimeoutMillis);
    // Start nfs
    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd().getRpcProgram();
    mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
    assertTrue(mountd.getPortmapUdpTimeoutMillis() == newTimeoutMillis);
    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    nfsd.nullProcedure();
    assertTrue(nfsd.getPortmapUdpTimeoutMillis() == newTimeoutMillis);
    cluster.shutdown();
}
Also used: RpcProgramMountd (org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), RpcProgramNfs3 (org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3), XDR (org.apache.hadoop.oncrpc.XDR), NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration), Nfs3 (org.apache.hadoop.hdfs.nfs.nfs3.Nfs3), Test (org.junit.Test)
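
Note that unlike the other examples on this page, testStart shuts the cluster down only on the success path; if any assertion fails, the minicluster is leaked. A sketch of the same test wrapped in the try/finally pattern the surrounding examples use:

MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
try {
    cluster.waitActive();
    // ... remainder of the test body above, unchanged ...
} finally {
    cluster.shutdown();
}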

Example 90 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestExportsTable, method testExportPoint.

@Test
public void testExportPoint() throws IOException {
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    String exportPoint = "/myexport1";
    config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
    // Use an ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    try {
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        // Start nfs
        final Nfs3 nfsServer = new Nfs3(config);
        nfsServer.startServiceInternal(false);
        Mountd mountd = nfsServer.getMountd();
        RpcProgramMountd rpcMount = (RpcProgramMountd) mountd.getRpcProgram();
        assertTrue(rpcMount.getExports().size() == 1);
        String exportInMountd = rpcMount.getExports().get(0);
        assertTrue(exportInMountd.equals(exportPoint));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: RpcProgramMountd (org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Mountd (org.apache.hadoop.hdfs.nfs.mount.Mountd), NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration), Test (org.junit.Test)
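
A side note on assertion style across these examples: assertTrue(x == y) and assertTrue(a.equals(b)) report only that the condition was false. The org.junit.Assert API already imported here offers assertEquals (and assertArrayEquals for the Arrays.equals checks in the TestWrites examples), which reports expected versus actual values on failure. For instance, the export checks above could read:

assertEquals(1, rpcMount.getExports().size());
assertEquals(exportPoint, rpcMount.getExports().get(0));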

Aggregations

MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 507 usages
Test (org.junit.Test): 429 usages
Configuration (org.apache.hadoop.conf.Configuration): 403 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 312 usages
Path (org.apache.hadoop.fs.Path): 290 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 211 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 183 usages
IOException (java.io.IOException): 107 usages
File (java.io.File): 83 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 64 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 53 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 35 usages
RandomAccessFile (java.io.RandomAccessFile): 33 usages
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33 usages
URI (java.net.URI): 31 usages
ArrayList (java.util.ArrayList): 29 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 28 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 26 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 25 usages
HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest): 24 usages