Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
The class TestNameNodeRespectsBindHostKeys, method testLifelineRpcBindHostKey.
@Test(timeout = 300000)
public void testLifelineRpcBindHostKey() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  LOG.info("Testing without " + DFS_NAMENODE_LIFELINE_RPC_BIND_HOST_KEY);
  conf.set(DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
  // NN should not bind the wildcard address by default.
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    String address = getLifelineRpcServerAddress(cluster);
    assertThat("Bind address not expected to be wildcard by default.",
        address, not("/" + WILDCARD_ADDRESS));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }
  LOG.info("Testing with " + DFS_NAMENODE_LIFELINE_RPC_BIND_HOST_KEY);
  // Tell NN to bind the wildcard address.
  conf.set(DFS_NAMENODE_LIFELINE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
  // Verify that NN binds wildcard address now.
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    String address = getLifelineRpcServerAddress(cluster);
    assertThat("Bind address " + address + " is not wildcard.",
        address, is("/" + WILDCARD_ADDRESS));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
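All five examples below share the same MiniDFSCluster lifecycle: build the cluster with MiniDFSCluster.Builder, wait for it to become active, exercise it, and shut it down in a finally block. Here is a minimal, self-contained sketch of that pattern; the class name and the /demo.txt path are illustrative, not taken from the Hadoop tests.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsLifecycle {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
      // One DataNode is enough to create a file; the bind-host test above
      // uses numDataNodes(0) because it only probes the NameNode's RPC server.
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path p = new Path("/demo.txt");
      fs.create(p).close();
      System.out.println("exists: " + fs.exists(p));
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}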
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
The class TestWrites, method testOverlappingWrites.
@Test
public void testOverlappingWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser())
      .thenReturn(System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(currentUser), "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  // Use an ephemeral port in case tests are running in parallel.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();
    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config), config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());
    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "overlapping-writes" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();
    byte[] buffer = new byte[bufSize];
    for (int i = 0; i < bufSize; i++) {
      buffer[i] = (byte) i;
    }
    // The {offset, length} pairs overlap one another and together cover the
    // whole 32-byte file, so the final read can compare against buffer.
    int[][] ranges = new int[][] { { 0, 10 }, { 5, 7 }, { 5, 5 }, { 10, 6 },
        { 18, 6 }, { 20, 6 }, { 28, 4 }, { 16, 2 }, { 25, 4 } };
    for (int i = 0; i < ranges.length; i++) {
      int[] x = ranges[i];
      byte[] tbuffer = new byte[x[1]];
      for (int j = 0; j < x[1]; j++) {
        tbuffer[j] = buffer[x[0] + j];
      }
      WRITE3Request writeReq = new WRITE3Request(handle, (long) x[0], x[1],
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(tbuffer));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }
    waitWrite(nfsd, handle, 60000);
    READ3Request readReq = new READ3Request(handle, 0, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler,
        new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(buffer, readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
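The closing assertion only holds because the nine {offset, length} pairs collectively touch every byte in the 32-byte file. A quick, self-contained check of that coverage property (class name and message text are illustrative):

public class RangeCoverage {
  public static void main(String[] args) {
    final int bufSize = 32;
    int[][] ranges = { { 0, 10 }, { 5, 7 }, { 5, 5 }, { 10, 6 }, { 18, 6 },
        { 20, 6 }, { 28, 4 }, { 16, 2 }, { 25, 4 } };
    boolean[] covered = new boolean[bufSize];
    for (int[] r : ranges) {
      // Mark bytes [offset, offset + length) as written.
      for (int i = r[0]; i < r[0] + r[1]; i++) {
        covered[i] = true;
      }
    }
    for (int i = 0; i < bufSize; i++) {
      if (!covered[i]) {
        throw new AssertionError("byte " + i + " is never written");
      }
    }
    System.out.println("all " + bufSize + " bytes covered");
  }
}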
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
The class TestWrites, method testOOOWrites.
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser())
      .thenReturn(System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(currentUser), "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  // Use an ephemeral port in case tests are running in parallel.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();
    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config), config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());
    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();
    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }
    // Issue the writes in descending offset order: buffer i (filled with
    // byte value i) lands at offset (numOOO - 1 - i) * bufSize.
    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }
    waitWrite(nfsd, handle, 60000);
    // The second block of the file (offset bufSize) must contain oooBuf[1].
    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler,
        new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
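The offset arithmetic is what makes the final read deterministic: buffer i goes to offset (numOOO - 1 - i) * bufSize, so the gateway receives offsets 64, 32, 0 in that order, and the read at offset 32 must return oooBuf[1]. A tiny illustration of the mapping (class name is illustrative):

public class OooOffsets {
  public static void main(String[] args) {
    final int bufSize = 32;
    final int numOOO = 3;
    for (int i = 0; i < numOOO; i++) {
      long offset = (long) (numOOO - 1 - i) * bufSize;
      System.out.println("write #" + i + " -> offset " + offset);
    }
    // Prints: write #0 -> offset 64, write #1 -> offset 32, write #2 -> offset 0
  }
}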
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
The class TestMountd, method testStart.
@Test
public void testStart() throws IOException {
  // Start minicluster
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  // Use an ephemeral port in case tests are running in parallel.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  // Set the new portmap rpc timeout values and check.
  int newTimeoutMillis = 1000; // 1s
  config.setInt(NfsConfigKeys.NFS_UDP_CLIENT_PORTMAP_TIMEOUT_MILLIS_KEY,
      newTimeoutMillis);
  assertTrue(config.getInt(NfsConfigKeys.NFS_UDP_CLIENT_PORTMAP_TIMEOUT_MILLIS_KEY,
      0) == newTimeoutMillis);
  // Start nfs
  Nfs3 nfs3 = new Nfs3(config);
  nfs3.startServiceInternal(false);
  RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd().getRpcProgram();
  mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
  assertTrue(mountd.getPortmapUdpTimeoutMillis() == newTimeoutMillis);
  RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
  nfsd.nullProcedure();
  assertTrue(nfsd.getPortmapUdpTimeoutMillis() == newTimeoutMillis);
  cluster.shutdown();
}
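Unlike the other examples, testStart only shuts the cluster down on its last line, so a failed assertion leaks the MiniDFSCluster. A minimal sketch of the guard the other tests use, applied to this one; everything elided is unchanged from the test above, and the package name for NfsConfiguration is assumed from the Hadoop source tree.

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;

public class GuardedStart {
  public static void main(String[] args) throws Exception {
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
      cluster.waitActive();
      // ... start Nfs3 and check the portmap timeouts, as in testStart ...
    } finally {
      if (cluster != null) {
        cluster.shutdown(); // runs even if an assertion throws
      }
    }
  }
}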
Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.
The class TestExportsTable, method testExportPoint.
@Test
public void testExportPoint() throws IOException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  String exportPoint = "/myexport1";
  config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
  // Use an ephemeral port in case tests are running in parallel.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();
    // Start nfs
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);
    Mountd mountd = nfsServer.getMountd();
    RpcProgramMountd rpcMount = (RpcProgramMountd) mountd.getRpcProgram();
    assertTrue(rpcMount.getExports().size() == 1);
    String exportInMountd = rpcMount.getExports().get(0);
    assertTrue(exportInMountd.equals(exportPoint));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
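Every NFS example above sets nfs3.mountd.port and nfs3.server.port to 0, which asks the operating system for a free ephemeral port so parallel test runs do not collide on a fixed port. The same mechanism in plain Java, independent of Hadoop:

import java.io.IOException;
import java.net.ServerSocket;

public class EphemeralPort {
  public static void main(String[] args) throws IOException {
    // Binding to port 0 lets the OS assign any free port.
    try (ServerSocket socket = new ServerSocket(0)) {
      System.out.println("OS picked port " + socket.getLocalPort());
    }
  }
}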