Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.
Class TestDatanodeReport, method testDatanodeReportWithUpgradeDomain.
/**
 * This test verifies that the upgrade domain is set according to the JSON hosts file.
 */
@Test
public void testDatanodeReportWithUpgradeDomain() throws Exception {
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
      CombinedHostFileManager.class, HostConfigManager.class);
  HostsFileWriter hostsFileWriter = new HostsFileWriter();
  hostsFileWriter.initialize(conf, "temp/datanodeReport");
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  final DFSClient client = cluster.getFileSystem().dfs;
  final String ud1 = "ud1";
  final String ud2 = "ud2";
  try {
    // Wait until the cluster is up.
    cluster.waitActive();
    DatanodeAdminProperties datanode = new DatanodeAdminProperties();
    datanode.setHostName(
        cluster.getDataNodes().get(0).getDatanodeId().getHostName());
    datanode.setUpgradeDomain(ud1);
    hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
    client.refreshNodes();
    DatanodeInfo[] all = client.datanodeReport(DatanodeReportType.ALL);
    assertEquals(all[0].getUpgradeDomain(), ud1);
    datanode.setUpgradeDomain(null);
    hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
    client.refreshNodes();
    all = client.datanodeReport(DatanodeReportType.ALL);
    assertEquals(all[0].getUpgradeDomain(), null);
    datanode.setUpgradeDomain(ud2);
    hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
    client.refreshNodes();
    all = client.datanodeReport(DatanodeReportType.ALL);
    assertEquals(all[0].getUpgradeDomain(), ud2);
  } finally {
    cluster.shutdown();
  }
}
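The set-domain, rewrite-include-file, refresh, re-query cycle above repeats three times. A minimal helper sketch, built only from the HostsFileWriter and DFSClient calls already shown (the helper name refreshAndGetUpgradeDomain is hypothetical, and the same imports as the test are assumed), could factor it out:

  // Hypothetical helper (not part of the test above): rewrite the JSON
  // include file with the given upgrade domain, ask the namenode to re-read
  // it, and return the upgrade domain reported for the single datanode.
  private static String refreshAndGetUpgradeDomain(HostsFileWriter writer,
      DFSClient client, DatanodeAdminProperties datanode, String upgradeDomain)
      throws Exception {
    datanode.setUpgradeDomain(upgradeDomain);
    writer.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
    client.refreshNodes();
    DatanodeInfo[] all = client.datanodeReport(DatanodeReportType.ALL);
    return all[0].getUpgradeDomain();
  }

Each step of the test then reduces to a single assertion, for example assertEquals(ud1, refreshAndGetUpgradeDomain(hostsFileWriter, client, datanode, ud1)).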
Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.
Class TestStartup, method testNNRestart.
/**
 * This test verifies that the hosts include list may contain host names.
 * After the namenode restarts, the datanodes that are still alive should
 * have no trouble registering again.
 */
@Test
public void testNNRestart() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  // Heartbeat interval in seconds.
  int HEARTBEAT_INTERVAL = 1;
  HostsFileWriter hostsFileWriter = new HostsFileWriter();
  hostsFileWriter.initialize(config, "work-dir/restartnn");
  byte[] b = { 127, 0, 0, 1 };
  InetAddress inetAddress = InetAddress.getByAddress(b);
  hostsFileWriter.initIncludeHosts(new String[] { inetAddress.getHostName() });
  int numDatanodes = 1;
  try {
    cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(numDatanodes).setupHostsFile(true).build();
    cluster.waitActive();
    cluster.restartNameNode();
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    assertNotNull(nn);
    assertTrue(cluster.isDataNodeUp());
    DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    for (int i = 0; i < 5 && info.length != numDatanodes; i++) {
      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
      info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    }
    assertEquals("Number of live nodes should be " + numDatanodes,
        numDatanodes, info.length);
  } catch (IOException e) {
    fail(StringUtils.stringifyException(e));
    throw e;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    hostsFileWriter.cleanup();
  }
}
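The sleep-and-recheck loop for live datanodes is a recurring pattern in these tests. A minimal sketch of a reusable helper, built only from the NamenodeProtocols calls used above (the name waitForLiveNodes and its parameters are hypothetical), might look like:

  // Hypothetical polling helper: re-query the live-datanode report until it
  // reaches the expected size or the check budget is exhausted.
  private static DatanodeInfo[] waitForLiveNodes(NamenodeProtocols nn,
      int expected, int maxChecks, long sleepMs)
      throws IOException, InterruptedException {
    DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    for (int i = 0; i < maxChecks && info.length != expected; i++) {
      Thread.sleep(sleepMs);
      info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    }
    return info;
  }

With such a helper, the loop in the test collapses to info = waitForLiveNodes(nn, numDatanodes, 5, HEARTBEAT_INTERVAL * 1000L) followed by the same assertion.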
Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.
Class AdminStatesBaseTest, method setup.
@Before
public void setup() throws IOException {
  // Set up the hosts/exclude files.
  hostsFileWriter = new HostsFileWriter();
  conf = new HdfsConfiguration();
  if (useCombinedHostFileManager) {
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
        CombinedHostFileManager.class, HostConfigManager.class);
  }
  // Setup conf
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 200);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
  conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
      BLOCKREPORT_INTERVAL_MSEC);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
      NAMENODE_REPLICATION_INTERVAL);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  hostsFileWriter.initialize(conf, "temp/admin");
}
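setup() writes include/exclude files to disk under temp/admin, so a matching cleanup is needed once each test finishes. A minimal sketch, assuming the base class pairs this @Before hook with an @After hook that can see the same hostsFileWriter field (and that org.junit.After is imported alongside org.junit.Before):

  // Hypothetical companion teardown: release the hosts/exclude files written
  // by setup(). Whether AdminStatesBaseTest does exactly this is an
  // assumption; HostsFileWriter.cleanup() is the call used elsewhere on this
  // page to remove the files.
  @After
  public void teardown() throws IOException {
    hostsFileWriter.cleanup();
  }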
Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.
Class TestBlocksWithNotEnoughRacks, method testNodeDecomissionRespectsRackPolicy.
/*
 * Test that rack policy is still respected when blocks are replicated
 * due to node decommissioning.
 */
@Test
public void testNodeDecomissionRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");
  HostsFileWriter hostsFileWriter = new HostsFileWriter();
  hostsFileWriter.initialize(conf, "temp/decommission");
  // Four datanodes on two racks.
  String[] racks = { "/rack1", "/rack1", "/rack2", "/rack2" };
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  try {
    // Create a file with one block.
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    // Decommission one of the hosts with the block. This should cause
    // the block to be replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation[] locs = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    hostsFileWriter.initExcludeHost(name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);
    // Check that the block still has sufficient replicas across racks.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
    hostsFileWriter.cleanup();
  }
}
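The exclude-and-wait sequence in the middle of the test is the way these tests decommission a datanode through HostsFileWriter. A minimal helper sketch using only the calls shown above (the name decommissionHost is hypothetical):

  // Hypothetical helper: add the datanode to the exclude file, make the
  // namenode re-read the host lists, and block until decommission completes.
  private static void decommissionHost(HostsFileWriter writer, FSNamesystem ns,
      Configuration conf, FileSystem fs, String name) throws Exception {
    writer.initExcludeHost(name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);
  }

The same three-step sequence appears again in the over-replication test below, so such a helper would serve both.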
Use of org.apache.hadoop.hdfs.util.HostsFileWriter in project hadoop by apache.
Class TestBlocksWithNotEnoughRacks, method testNodeDecomissionWithOverreplicationRespectsRackPolicy.
/*
 * Test that rack policy is still respected when blocks are replicated
 * due to node decommissioning, when the blocks are over-replicated.
 */
@Test
public void testNodeDecomissionWithOverreplicationRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 5;
  final Path filePath = new Path("/testFile");
  HostsFileWriter hostsFileWriter = new HostsFileWriter();
  hostsFileWriter.initialize(conf, "temp/decommission");
  // All hosts are on two racks; only one host is on /rack2.
  String[] racks = { "/rack1", "/rack2", "/rack1", "/rack1", "/rack1" };
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  try {
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    // Lower the replication factor so the block is over-replicated.
    REPLICATION_FACTOR = 2;
    fs.setReplication(filePath, REPLICATION_FACTOR);
    // Decommission one of the hosts holding the block that is not the lone
    // host on /rack2 (if we decommissioned that host it would be impossible
    // to respect the rack policy).
    BlockLocation[] locs = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    for (String top : locs[0].getTopologyPaths()) {
      if (!top.startsWith("/rack2")) {
        // Strip the rack prefix; this relies on /rack1 and /rack2 having
        // the same length.
        String name = top.substring("/rack1".length() + 1);
        hostsFileWriter.initExcludeHost(name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        break;
      }
    }
    // Check that the block still has sufficient replicas across racks,
    // i.e. the lone replica on /rack2 was not removed.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
    hostsFileWriter.cleanup();
  }
}
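The substring("/rack1".length() + 1) call above only works because /rack1 and /rack2 happen to have the same length. A small sketch of a length-independent alternative (hypothetical helper name; assumes topology paths of the form rack/host:port, as produced by the racks defined in this test):

  // Hypothetical alternative: strip everything up to and including the last
  // '/' so the extraction does not depend on rack names sharing a length.
  private static String datanodeNameFromTopologyPath(String topologyPath) {
    return topologyPath.substring(topologyPath.lastIndexOf('/') + 1);
  }

In the loop above, name = datanodeNameFromTopologyPath(top) would then replace the hard-coded prefix length.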