Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in project hadoop by apache.
The class TestRpcProgramNfs3, method testDeprecatedKeys.
@Test
public void testDeprecatedKeys() {
  NfsConfiguration conf = new NfsConfiguration();
  conf.setInt("nfs3.server.port", 998);
  assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, 0) == 998);
  conf.setInt("nfs3.mountd.port", 999);
  assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY, 0) == 999);
  conf.set("dfs.nfs.exports.allowed.hosts", "host1");
  assertTrue(conf.get(CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY).equals("host1"));
  conf.setInt("dfs.nfs.exports.cache.expirytime.millis", 1000);
  assertTrue(conf.getInt(Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY, 0) == 1000);
  conf.setInt("hadoop.nfs.userupdate.milly", 10);
  assertTrue(conf.getInt(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY, 0) == 10);
  conf.set("dfs.nfs3.dump.dir", "/nfs/tmp");
  assertTrue(conf.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY).equals("/nfs/tmp"));
  conf.setBoolean("dfs.nfs3.enableDump", false);
  assertTrue(conf.getBoolean(NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY, true) == false);
  conf.setInt("dfs.nfs3.max.open.files", 500);
  assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 0) == 500);
  conf.setInt("dfs.nfs3.stream.timeout", 6000);
  assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY, 0) == 6000);
  conf.set("dfs.nfs3.export.point", "/dir1");
  assertTrue(conf.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY).equals("/dir1"));
}
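The translation tested above works because NfsConfiguration registers each legacy key against its current replacement when the class loads. A minimal sketch of that pattern, using Hadoop's standard Configuration.addDeprecations API; this approximates, rather than quotes, NfsConfiguration's actual static initializer, and the real key list is longer:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.DeprecationDelta;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;

public class NfsConfigurationSketch extends HdfsConfiguration {
  static {
    // Map each deprecated key to its canonical NfsConfigKeys name so old
    // configs keep working: a value set under the legacy key is visible
    // when the new key is read, which is exactly what the test asserts.
    Configuration.addDeprecations(new DeprecationDelta[] {
        new DeprecationDelta("nfs3.server.port",
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY),
        new DeprecationDelta("nfs3.mountd.port",
            NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY),
        new DeprecationDelta("dfs.nfs3.export.point",
            NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY)
    });
  }
}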
Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in project hadoop by apache.
The class TestExportsTable, method testExportPoint.
@Test
public void testExportPoint() throws IOException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  String exportPoint = "/myexport1";
  config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
  // Use an ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();
    // Start nfs
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);
    Mountd mountd = nfsServer.getMountd();
    RpcProgramMountd rpcMount = (RpcProgramMountd) mountd.getRpcProgram();
    assertTrue(rpcMount.getExports().size() == 1);
    String exportInMountd = rpcMount.getExports().get(0);
    assertTrue(exportInMountd.equals(exportPoint));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
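Because of the deprecation mapping shown earlier, the same setup can also be written against the canonical NfsConfigKeys constants instead of the legacy string keys; a minimal sketch using only constants that appear in the snippets above:

NfsConfiguration config = new NfsConfiguration();
config.set(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, "/myexport1");
// Port 0 asks the OS for an ephemeral port, so parallel test runs
// cannot collide on a fixed mountd or nfs3 server port.
config.setInt(NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY, 0);
config.setInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, 0);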
Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in project hadoop by apache.
The class TestOutOfOrderWrite, method main.
public static void main(String[] args) throws InterruptedException {
  Arrays.fill(data1, (byte) 7);
  Arrays.fill(data2, (byte) 8);
  Arrays.fill(data3, (byte) 9);
  // NFS3 Create request
  NfsConfiguration conf = new NfsConfiguration();
  WriteClient client = new WriteClient("localhost",
      conf.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
          NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT),
      create(), false);
  client.run();
  while (handle == null) {
    Thread.sleep(1000);
    System.out.println("handle is still null...");
  }
  LOG.info("Send write1 request");
  XDR writeReq;
  writeReq = write(handle, 0x8000005c, 2000, 1000, data3);
  Nfs3Utils.writeChannel(channel, writeReq, 1);
  writeReq = write(handle, 0x8000005d, 1000, 1000, data2);
  Nfs3Utils.writeChannel(channel, writeReq, 2);
  writeReq = write(handle, 0x8000005e, 0, 1000, data1);
  Nfs3Utils.writeChannel(channel, writeReq, 3);
  // TODO: convert to JUnit test, and validate result automatically
}
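The three WRITE requests above cover offsets [2000, 3000), [1000, 2000) and [0, 1000) but are sent highest-offset first, so a correct server must buffer them until the gap at offset 0 is filled. An illustrative sketch of the reordering a server-side write manager has to perform; the TreeMap below is hypothetical, not Hadoop's actual WriteManager, and data1–data3 are assumed to be the 1000-byte buffers filled above:

import java.util.Map;
import java.util.TreeMap;

// Hypothetical illustration: pending writes keyed by file offset.
TreeMap<Long, byte[]> pending = new TreeMap<>();
pending.put(2000L, data3); // arrives first
pending.put(1000L, data2); // arrives second
pending.put(0L, data1);    // arrives last, fills the gap at offset 0
long nextOffset = 0;
for (Map.Entry<Long, byte[]> entry : pending.entrySet()) {
  // Contiguous once sorted: each write starts where the previous one ended.
  assert entry.getKey() == nextOffset;
  nextOffset += entry.getValue().length;
}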
Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in project hadoop by apache.
The class TestDFSClientCache, method testGetUserGroupInformationSecure.
@Test
public void testGetUserGroupInformationSecure() throws IOException {
  String userName = "user1";
  String currentUser = "test-user";
  NfsConfiguration conf = new NfsConfiguration();
  UserGroupInformation currentUserUgi = UserGroupInformation.createRemoteUser(currentUser);
  currentUserUgi.setAuthenticationMethod(KERBEROS);
  UserGroupInformation.setLoginUser(currentUserUgi);
  DFSClientCache cache = new DFSClientCache(conf);
  UserGroupInformation ugiResult = cache.getUserGroupInformation(userName, currentUserUgi);
  assertThat(ugiResult.getUserName(), is(userName));
  assertThat(ugiResult.getRealUser(), is(currentUserUgi));
  assertThat(ugiResult.getAuthenticationMethod(),
      is(UserGroupInformation.AuthenticationMethod.PROXY));
}
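DFSClientCache.getUserGroupInformation returns the effective user wrapped as a proxy of the real (here Kerberos-authenticated) login user, which is what the three assertions check. A minimal sketch of the equivalent direct call, using the standard UserGroupInformation API:

UserGroupInformation realUser = UserGroupInformation.createRemoteUser("test-user");
// A proxy user carries AuthenticationMethod.PROXY and keeps a reference
// to the real user that authenticated on its behalf.
UserGroupInformation proxy = UserGroupInformation.createProxyUser("user1", realUser);
assertThat(proxy.getUserName(), is("user1"));
assertThat(proxy.getRealUser(), is(realUser));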
Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in project hadoop by apache.
The class TestDFSClientCache, method testGetUserGroupInformation.
@Test
public void testGetUserGroupInformation() throws IOException {
  String userName = "user1";
  String currentUser = "currentUser";
  UserGroupInformation currentUserUgi =
      UserGroupInformation.createUserForTesting(currentUser, new String[0]);
  NfsConfiguration conf = new NfsConfiguration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost");
  DFSClientCache cache = new DFSClientCache(conf);
  UserGroupInformation ugiResult = cache.getUserGroupInformation(userName, currentUserUgi);
  assertThat(ugiResult.getUserName(), is(userName));
  assertThat(ugiResult.getRealUser(), is(currentUserUgi));
  assertThat(ugiResult.getAuthenticationMethod(),
      is(UserGroupInformation.AuthenticationMethod.PROXY));
}
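Once the proxy UGI exists, the cache can build a per-user DFSClient inside that user's security context. A hedged sketch of that pattern, assuming the DFSClient(URI, Configuration) constructor and the fs.defaultFS value set above; this approximates, rather than quotes, DFSClientCache's client factory:

final URI nameNodeUri = URI.create(conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
DFSClient client = ugiResult.doAs(new PrivilegedExceptionAction<DFSClient>() {
  @Override
  public DFSClient run() throws IOException {
    // Runs with the proxy user's credentials, so the NameNode sees
    // "user1" acting through the real login user.
    return new DFSClient(nameNodeUri, conf);
  }
});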