Use of org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties in project hadoop by apache.
Example from the class TestDFSAddressConfig, method testDFSAddressConfig.
@Test
public void testDFSAddressConfig() throws IOException {
Configuration conf = new HdfsConfiguration();
/*-------------------------------------------------------------------------
* By default, the DataNode socket address should be localhost (127.0.0.1).
*------------------------------------------------------------------------*/
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
ArrayList<DataNode> dns = cluster.getDataNodes();
DataNode dn = dns.get(0);
String selfSocketAddr = dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
/*-------------------------------------------------------------------------
* Shut down the datanodes, reconfigure, and bring them back up.
* Even if told to use the configuration properties for dfs.datanode,
* MiniDFSCluster.startDataNodes() should use localhost as the default if
* the dfs.datanode properties are not set.
*------------------------------------------------------------------------*/
for (int i = 0; i < dns.size(); i++) {
DataNodeProperties dnp = cluster.stopDataNode(i);
assertNotNull("Should have been able to stop simulated datanode", dnp);
}
conf.unset(DFS_DATANODE_ADDRESS_KEY);
conf.unset(DFS_DATANODE_HTTP_ADDRESS_KEY);
conf.unset(DFS_DATANODE_IPC_ADDRESS_KEY);
cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR, null, null, null, false, true);
dns = cluster.getDataNodes();
dn = dns.get(0);
selfSocketAddr = dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
// assert that default self socket address is 127.0.0.1
assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
/*-------------------------------------------------------------------------
* Shut down the datanodes, reconfigure, and bring them back up.
* This time, modify the dfs.datanode properties and make sure that they
* are used to configure sockets by MiniDFSCluster.startDataNodes().
*------------------------------------------------------------------------*/
for (int i = 0; i < dns.size(); i++) {
DataNodeProperties dnp = cluster.stopDataNode(i);
assertNotNull("Should have been able to stop simulated datanode", dnp);
}
conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR, null, null, null, false, true);
dns = cluster.getDataNodes();
dn = dns.get(0);
selfSocketAddr = dn.getXferAddress().toString();
System.out.println("DN Self Socket Addr == " + selfSocketAddr);
// assert that default self socket address is 0.0.0.0
assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
cluster.shutdown();
}
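The two stop/reconfigure/restart/assert cycles above follow the same shape, so the pattern can be captured in one helper. The sketch below is a minimal illustration, not part of the Hadoop test: it reuses only the MiniDFSCluster, DataNode and StartupOption calls already shown above, and the helper name restartSingleDataNodeWith is hypothetical.
// Hypothetical helper: stop all datanodes, restart one with the given conf,
// and assert that its transfer address contains the expected host prefix.
private static void restartSingleDataNodeWith(MiniDFSCluster cluster,
    Configuration conf, String expectedHostPrefix) throws IOException {
  // Stop datanodes highest-index first so the remaining indices stay valid.
  for (int i = cluster.getDataNodes().size() - 1; i >= 0; i--) {
    assertNotNull("Should have been able to stop datanode " + i,
        cluster.stopDataNode(i));
  }
  // Bring a single datanode back up with the (possibly modified) configuration.
  cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
      null, null, null, false, true);
  DataNode dn = cluster.getDataNodes().get(0);
  // The xfer address renders as "/host:port", so a prefix check is enough.
  assertTrue(dn.getXferAddress().toString().contains(expectedHostPrefix));
}
With such a helper, the body of the test reduces to unsetting or setting the three dfs.datanode address keys and then calling, for example, restartSingleDataNodeWith(cluster, conf, "/127.0.0.1:") or restartSingleDataNodeWith(cluster, conf, "/0.0.0.0:").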
Use of org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties in project hadoop by apache.
Example from the class TestDFSClientExcludedNodes, method testExcludedNodesForgiveness.
@Test(timeout = 60000)
public void testExcludedNodesForgiveness() throws IOException {
// Forgive nodes in under 2.5s for this test case.
conf.setLong(HdfsClientConfigKeys.Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY, 2500);
// We'll be using a 512-byte block size just for tests,
// so make sure the checksum chunk size matches it.
conf.setInt("io.bytes.per.checksum", 512);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
Path filePath = new Path("/testForgivingExcludedNodes");
// 256-byte data chunk for writes
byte[] bytes = new byte[256];
for (int index = 0; index < bytes.length; index++) {
bytes[index] = '0';
}
// File with a 512-byte block size
FSDataOutputStream out = fs.create(filePath, true, 4096, (short) 3, 512);
// Write a full block to all 3 DNs (2 x 256 bytes).
out.write(bytes);
out.write(bytes);
out.hflush();
// Remove two DNs, to put them into the exclude list.
DataNodeProperties two = cluster.stopDataNode(2);
DataNodeProperties one = cluster.stopDataNode(1);
// Write another block.
// At this point, we have two nodes already in excluded list.
out.write(bytes);
out.write(bytes);
out.hflush();
// Bring back the stopped DNs, since they will only be forgiven
// after this previous block write.
Assert.assertEquals(true, cluster.restartDataNode(one, true));
Assert.assertEquals(true, cluster.restartDataNode(two, true));
cluster.waitActive();
// Sleep for 5s, so the excluded nodes expire from the excludes list
// (i.e. they are forgiven after the configured wait period).
// [Sleeping just in case the restart of the DNs completed in under 5s,
// because otherwise we'd end up quickly excluding them again.]
ThreadUtil.sleepAtLeastIgnoreInterrupts(5000);
// Terminate the last good DN, to verify that we do not end up in a
// single-DN-available scenario because the other two were not
// forgiven by now.
cluster.stopDataNode(0);
try {
// Attempt writing another block, which should still succeed
// because the previous two DNs should have been forgiven by now,
// while the last good DN gets added to the excludes list this time.
out.write(bytes);
out.hflush();
out.close();
} catch (Exception e) {
fail("Excluded DataNodes should be forgiven after a while and " + "not cause file writing exception of: '" + e.getMessage() + "'");
}
}
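The essential setup the test relies on is the short exclude-cache expiry plus hflush-ed writes of half-block chunks. Below is a minimal, self-contained sketch of that setup, assuming the same HdfsClientConfigKeys constant and MiniDFSCluster builder used above; the path name /forgivenessDemo and the use of java.util.Arrays.fill are illustrative additions.
// Sketch: configure a short exclude-node cache expiry and write one flushed
// 512-byte block as two 256-byte chunks, mirroring the setup of the test above.
Configuration conf = new HdfsConfiguration();
// Excluded datanodes are retried ("forgiven") after 2.5 seconds.
conf.setLong(HdfsClientConfigKeys.Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY, 2500);
// Keep the checksum chunk consistent with the tiny 512-byte block size.
conf.setInt("io.bytes.per.checksum", 512);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
  FileSystem fs = cluster.getFileSystem();
  byte[] chunk = new byte[256];
  java.util.Arrays.fill(chunk, (byte) '0');
  // File created with a 512-byte block size and replication factor 3.
  try (FSDataOutputStream out =
      fs.create(new Path("/forgivenessDemo"), true, 4096, (short) 3, 512)) {
    out.write(chunk);
    out.write(chunk);
    out.hflush(); // one full block pushed to all three datanodes
  }
} finally {
  cluster.shutdown();
}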
Use of org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties in project hadoop by apache.
Example from the class TestFileAppend, method testMultiAppend2.
/**
* Old replica of the block should not be accepted as valid for append/read
*/
@Test
public void testMultiAppend2() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "false");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
DistributedFileSystem fs = null;
final String hello = "hello\n";
try {
fs = cluster.getFileSystem();
Path path = new Path("/test");
FSDataOutputStream out = fs.create(path);
out.writeBytes(hello);
out.close();
// stop one datanode
DataNodeProperties dnProp = cluster.stopDataNode(0);
String dnAddress = dnProp.datanode.getXferAddress().toString();
if (dnAddress.startsWith("/")) {
dnAddress = dnAddress.substring(1);
}
// append again to bump genstamps
for (int i = 0; i < 2; i++) {
out = fs.append(path, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
out.writeBytes(hello);
out.close();
}
// re-open the file and put the last block into the under-construction state
out = fs.append(path, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
cluster.restartDataNode(dnProp, true);
// wait till the block report comes
Thread.sleep(2000);
out.writeBytes(hello);
out.close();
// check the block locations
LocatedBlocks blocks = fs.getClient().getLocatedBlocks(path.toString(), 0L);
// since we appended to the file 3 times, there should be 4 blocks
assertEquals(4, blocks.getLocatedBlocks().size());
for (LocatedBlock block : blocks.getLocatedBlocks()) {
assertEquals(hello.length(), block.getBlockSize());
}
StringBuilder sb = new StringBuilder();
for (int i = 0; i < 4; i++) {
sb.append(hello);
}
final byte[] content = sb.toString().getBytes();
AppendTestUtil.checkFullFile(fs, path, content.length, content, "Read /test");
// restart namenode to make sure the editlog can be properly applied
cluster.restartNameNode(true);
cluster.waitActive();
AppendTestUtil.checkFullFile(fs, path, content.length, content, "Read /test");
blocks = fs.getClient().getLocatedBlocks(path.toString(), 0L);
// since we appended to the file 3 times, there should be 4 blocks
assertEquals(4, blocks.getLocatedBlocks().size());
for (LocatedBlock block : blocks.getLocatedBlocks()) {
assertEquals(hello.length(), block.getBlockSize());
}
} finally {
IOUtils.closeStream(fs);
cluster.shutdown();
}
}
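The block-count and block-size check runs twice in the test, once before and once after the namenode restart. A small helper could hold that assertion; this is only a sketch built from the same fs.getClient().getLocatedBlocks() call used above, and the name verifyAppendedBlocks is hypothetical.
// Hypothetical helper: each append with CreateFlag.NEW_BLOCK starts a fresh block,
// so after N writes of "hello\n" the file should hold N blocks of that exact size.
private static void verifyAppendedBlocks(DistributedFileSystem fs, Path path,
    int expectedBlocks, String line) throws IOException {
  LocatedBlocks blocks = fs.getClient().getLocatedBlocks(path.toString(), 0L);
  assertEquals(expectedBlocks, blocks.getLocatedBlocks().size());
  for (LocatedBlock block : blocks.getLocatedBlocks()) {
    assertEquals(line.length(), block.getBlockSize());
  }
}
In the test above this would be called as verifyAppendedBlocks(fs, path, 4, hello), both before and after cluster.restartNameNode(true).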
Use of org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties in project hadoop by apache.
Example from the class TestFileAppend, method testFailedAppendBlockRejection.
/**
* Old replica of the block should not be accepted as valid for append/read
*/
@Test
public void testFailedAppendBlockRejection() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "false");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
DistributedFileSystem fs = null;
try {
fs = cluster.getFileSystem();
Path path = new Path("/test");
FSDataOutputStream out = fs.create(path);
out.writeBytes("hello\n");
out.close();
// stop one datanode
DataNodeProperties dnProp = cluster.stopDataNode(0);
String dnAddress = dnProp.datanode.getXferAddress().toString();
if (dnAddress.startsWith("/")) {
dnAddress = dnAddress.substring(1);
}
// append again to bump genstamps
for (int i = 0; i < 2; i++) {
out = fs.append(path);
out.writeBytes("helloagain\n");
out.close();
}
// re-open the file and put the last block into the under-construction state
out = fs.append(path);
cluster.restartDataNode(dnProp, true);
// wait till the block report comes
Thread.sleep(2000);
// check the block locations; they should not contain the restarted datanode
BlockLocation[] locations = fs.getFileBlockLocations(path, 0, Long.MAX_VALUE);
String[] names = locations[0].getNames();
for (String node : names) {
if (node.equals(dnAddress)) {
fail("Failed append should not be present in latest block locations.");
}
}
out.close();
} finally {
IOUtils.closeStream(fs);
cluster.shutdown();
}
}
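The final check, that the restarted datanode no longer appears in the block's locations, could also be phrased as a helper. This is a sketch using only the fs.getFileBlockLocations() call from the test, with the hypothetical name assertNodeNotInBlockLocations.
// Hypothetical helper: fail if the given datanode transfer address (host:port,
// without the leading '/') still shows up in the first block's locations.
private static void assertNodeNotInBlockLocations(FileSystem fs, Path path,
    String dnAddress) throws IOException {
  BlockLocation[] locations = fs.getFileBlockLocations(path, 0, Long.MAX_VALUE);
  for (String node : locations[0].getNames()) {
    if (node.equals(dnAddress)) {
      fail("Failed append should not be present in latest block locations.");
    }
  }
}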