Example 11 with NfsConfiguration

use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in project hadoop by apache.

the class TestRpcProgramNfs3 method testDeprecatedKeys.

@Test
public void testDeprecatedKeys() {
    NfsConfiguration conf = new NfsConfiguration();
    conf.setInt("nfs3.server.port", 998);
    assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, 0) == 998);
    conf.setInt("nfs3.mountd.port", 999);
    assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY, 0) == 999);
    conf.set("dfs.nfs.exports.allowed.hosts", "host1");
    assertTrue(conf.get(CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY).equals("host1"));
    conf.setInt("dfs.nfs.exports.cache.expirytime.millis", 1000);
    assertTrue(conf.getInt(Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY, 0) == 1000);
    conf.setInt("hadoop.nfs.userupdate.milly", 10);
    assertTrue(conf.getInt(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY, 0) == 10);
    conf.set("dfs.nfs3.dump.dir", "/nfs/tmp");
    assertTrue(conf.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY).equals("/nfs/tmp"));
    conf.setBoolean("dfs.nfs3.enableDump", false);
    assertTrue(conf.getBoolean(NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY, true) == false);
    conf.setInt("dfs.nfs3.max.open.files", 500);
    assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 0) == 500);
    conf.setInt("dfs.nfs3.stream.timeout", 6000);
    assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY, 0) == 6000);
    conf.set("dfs.nfs3.export.point", "/dir1");
    assertTrue(conf.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY).equals("/dir1"));
}
Also used : NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration) Test(org.junit.Test)
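
The deprecated-to-current key mapping this test exercises is registered when NfsConfiguration is loaded. Below is a minimal sketch of that mechanism using Hadoop's public Configuration.addDeprecations API; the delta list is illustrative, not a copy of NfsConfiguration's actual list:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.DeprecationDelta;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;

public class ExampleNfsConfiguration extends Configuration {
    static {
        // Map legacy key names to their replacements; after this, reads and
        // writes through either name resolve to the same value.
        Configuration.addDeprecations(new DeprecationDelta[] {
            new DeprecationDelta("nfs3.server.port", NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY),
            new DeprecationDelta("nfs3.mountd.port", NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY)
        });
    }
}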

Example 12 with NfsConfiguration

use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in project hadoop by apache.

the class TestExportsTable method testExportPoint.

@Test
public void testExportPoint() throws IOException {
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    String exportPoint = "/myexport1";
    config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
    // Use ephemeral ports in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    try {
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        // Start the NFS gateway
        final Nfs3 nfsServer = new Nfs3(config);
        nfsServer.startServiceInternal(false);
        Mountd mountd = nfsServer.getMountd();
        RpcProgramMountd rpcMount = (RpcProgramMountd) mountd.getRpcProgram();
        assertTrue(rpcMount.getExports().size() == 1);
        String exportInMountd = rpcMount.getExports().get(0);
        assertTrue(exportInMountd.equals(exportPoint));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : RpcProgramMountd(org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Mountd(org.apache.hadoop.hdfs.nfs.mount.Mountd) RpcProgramMountd(org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd) NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration) Test(org.junit.Test)
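
One detail worth noting: the test configures the export with setStrings even though the gateway serves a single export point. As a hedged aside, assuming the documented default export of "/", the configured value can be read back with a plain get:

// Sketch: read the export point back, falling back to the HDFS root.
// NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT is assumed to be "/".
String export = config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
        NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT);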

Example 13 with NfsConfiguration

use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in project hadoop by apache.

the class TestOutOfOrderWrite method main.

public static void main(String[] args) throws InterruptedException {
    Arrays.fill(data1, (byte) 7);
    Arrays.fill(data2, (byte) 8);
    Arrays.fill(data3, (byte) 9);
    // NFS3 Create request
    NfsConfiguration conf = new NfsConfiguration();
    WriteClient client = new WriteClient("localhost", conf.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT), create(), false);
    client.run();
    while (handle == null) {
        Thread.sleep(1000);
        System.out.println("handle is still null...");
    }
    LOG.info("Send write1 request");
    XDR writeReq;
    // The three 1000-byte chunks are sent highest offset first (2000, 1000,
    // then 0), forcing the NFS gateway to buffer and reorder the writes.
    writeReq = write(handle, 0x8000005c, 2000, 1000, data3);
    Nfs3Utils.writeChannel(channel, writeReq, 1);
    writeReq = write(handle, 0x8000005d, 1000, 1000, data2);
    Nfs3Utils.writeChannel(channel, writeReq, 2);
    writeReq = write(handle, 0x8000005e, 0, 1000, data1);
    Nfs3Utils.writeChannel(channel, writeReq, 3);
// TODO: convert to Junit test, and validate result automatically
}
Also used : XDR(org.apache.hadoop.oncrpc.XDR) NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration)
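
The write(...) helper is defined elsewhere in TestOutOfOrderWrite. As a rough sketch of what building such a request involves (buildWriteBody is a hypothetical name, and the ONC RPC call header that precedes the body is elided), an NFSv3 WRITE body can be serialized to XDR like this:

import java.nio.ByteBuffer;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
import org.apache.hadoop.oncrpc.XDR;

// Hypothetical helper: serializes only the WRITE3 body, not the RPC header.
static XDR buildWriteBody(FileHandle handle, long offset, int count, byte[] data) {
    XDR xdr = new XDR();
    WRITE3Request req = new WRITE3Request(handle, offset, count,
            WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
    req.serialize(xdr); // write the request fields in XDR wire order
    return xdr;
}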

Example 14 with NfsConfiguration

use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in project hadoop by apache.

the class TestDFSClientCache method testGetUserGroupInformationSecure.

@Test
public void testGetUserGroupInformationSecure() throws IOException {
    String userName = "user1";
    String currentUser = "test-user";
    NfsConfiguration conf = new NfsConfiguration();
    UserGroupInformation currentUserUgi = UserGroupInformation.createRemoteUser(currentUser);
    currentUserUgi.setAuthenticationMethod(KERBEROS);
    UserGroupInformation.setLoginUser(currentUserUgi);
    DFSClientCache cache = new DFSClientCache(conf);
    UserGroupInformation ugiResult = cache.getUserGroupInformation(userName, currentUserUgi);
    assertThat(ugiResult.getUserName(), is(userName));
    assertThat(ugiResult.getRealUser(), is(currentUserUgi));
    assertThat(ugiResult.getAuthenticationMethod(), is(UserGroupInformation.AuthenticationMethod.PROXY));
}
Also used : NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
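
What the three assertions verify is Hadoop's proxy-user mechanism: the gateway's Kerberos login identity impersonates the remote NFS user. A minimal sketch of the underlying public API, with illustrative user names:

import org.apache.hadoop.security.UserGroupInformation;

// The gateway's login user acts on behalf of the remote NFS user.
UserGroupInformation realUser = UserGroupInformation.createRemoteUser("test-user");
UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser("user1", realUser);
// createProxyUser tags the result with AuthenticationMethod.PROXY,
// which is what the test asserts on the cache's answer.
assert proxyUgi.getAuthenticationMethod()
        == UserGroupInformation.AuthenticationMethod.PROXY;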

Example 15 with NfsConfiguration

use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in project hadoop by apache.

the class TestDFSClientCache method testGetUserGroupInformation.

@Test
public void testGetUserGroupInformation() throws IOException {
    String userName = "user1";
    String currentUser = "currentUser";
    UserGroupInformation currentUserUgi = UserGroupInformation.createUserForTesting(currentUser, new String[0]);
    NfsConfiguration conf = new NfsConfiguration();
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost");
    DFSClientCache cache = new DFSClientCache(conf);
    UserGroupInformation ugiResult = cache.getUserGroupInformation(userName, currentUserUgi);
    assertThat(ugiResult.getUserName(), is(userName));
    assertThat(ugiResult.getRealUser(), is(currentUserUgi));
    assertThat(ugiResult.getAuthenticationMethod(), is(UserGroupInformation.AuthenticationMethod.PROXY));
}
Also used : NfsConfiguration(org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
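
Beyond UGI resolution, the point of DFSClientCache is that it hands back one DFSClient per user, so repeated NFS requests reuse the same client and its connections. A hedged usage sketch, assuming the Hadoop 2.x-era API in which getDfsClient takes only the user name:

// Sketch: lookups for the same user should return the cached instance.
DFSClientCache cache = new DFSClientCache(conf);
DFSClient c1 = cache.getDfsClient("user1");
DFSClient c2 = cache.getDfsClient("user1");
assertTrue(c1 == c2); // served from the cache, not re-created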

Aggregations

NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 21 uses
Test (org.junit.Test): 17 uses
DFSClient (org.apache.hadoop.hdfs.DFSClient): 12 uses
HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 8 uses
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 8 uses
ShellBasedIdMapping (org.apache.hadoop.security.ShellBasedIdMapping): 8 uses
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 7 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 5 uses
COMMIT_STATUS (org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS): 5 uses
CommitCtx (org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx): 5 uses
XDR (org.apache.hadoop.oncrpc.XDR): 5 uses
InetSocketAddress (java.net.InetSocketAddress): 4 uses
Channel (org.jboss.netty.channel.Channel): 4 uses
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 3 uses
CREATE3Request (org.apache.hadoop.nfs.nfs3.request.CREATE3Request): 3 uses
READ3Request (org.apache.hadoop.nfs.nfs3.request.READ3Request): 3 uses
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 3 uses
WRITE3Request (org.apache.hadoop.nfs.nfs3.request.WRITE3Request): 3 uses
CREATE3Response (org.apache.hadoop.nfs.nfs3.response.CREATE3Response): 3 uses
READ3Response (org.apache.hadoop.nfs.nfs3.response.READ3Response): 3 uses