Use of org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties in project hadoop by apache.
The class TestUpgradeDomainBlockPlacementPolicy, method refreshDatanodeAdminProperties2.
/**
 * Define admin properties for these datanodes as follows:
 * dn0's upgrade domain is ud5.
 * dn1's upgrade domain is ud2.
 * dn2's upgrade domain is ud3.
 * dn3's upgrade domain is ud1.
 * dn4's upgrade domain is ud2.
 * dn5's upgrade domain is ud4.
 * dn2 and dn3 are decommissioned.
 * Given that dn0, dn1 and dn2 are on rack1 and dn3, dn4 and dn5 are on
 * rack2, any block's replicas should then be on either
 * {dn0, dn1, dn5} or {dn0, dn4, dn5}.
 */
private void refreshDatanodeAdminProperties2() throws IOException {
  DatanodeAdminProperties[] datanodes =
      new DatanodeAdminProperties[hosts.length];
  for (int i = 0; i < hosts.length; i++) {
    datanodes[i] = new DatanodeAdminProperties();
    DatanodeID datanodeID = cluster.getDataNodes().get(i).getDatanodeId();
    datanodes[i].setHostName(datanodeID.getHostName());
    datanodes[i].setPort(datanodeID.getXferPort());
    datanodes[i].setUpgradeDomain(upgradeDomains[i]);
  }
  // Decommission dn2 and dn3, matching the Javadoc above.
  datanodes[2].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
  datanodes[3].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
  hostsFileWriter.initIncludeHosts(datanodes);
  cluster.getFileSystem().refreshNodes();
  // dn0 and dn5 appear in both valid placements, so every replica set
  // must include them.
  expectedDatanodeIDs.clear();
  expectedDatanodeIDs.add(cluster.getDataNodes().get(0).getDatanodeId());
  expectedDatanodeIDs.add(cluster.getDataNodes().get(5).getDatanodeId());
}
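The test's actual assertion code is not shown on this page. A minimal sketch of how the expected placement could be verified, assuming JUnit's assertTrue and Hadoop's DFSTestUtil test helper; the method name, file path, file size, and replication factor below are made up for illustration:

// Sketch: write a replicated file, then check that every replica of every
// block lands on one of the expected datanodes (dn0 and dn5 here, since
// they appear in both valid placements described in the Javadoc above).
private void verifyPlacement() throws Exception {
  Path path = new Path("/upgrade-domain-test");  // hypothetical path
  DFSTestUtil.createFile(cluster.getFileSystem(), path, 1024L, (short) 3, 0L);
  LocatedBlocks blocks = cluster.getFileSystem().getClient()
      .getLocatedBlocks(path.toString(), 0, 1024);
  for (LocatedBlock block : blocks.getLocatedBlocks()) {
    for (DatanodeInfo replica : block.getLocations()) {
      assertTrue(expectedDatanodeIDs.contains(replica));
    }
  }
}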
Use of org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties in project hadoop by apache.
The class TestCombinedHostsFileReader, method testEmptyCombinedHostsFileReader.
/*
 * Test reading an empty JSON config file.
 */
@Test
public void testEmptyCombinedHostsFileReader() throws Exception {
  FileWriter hosts = new FileWriter(NEW_FILE);
  hosts.write("");
  hosts.close();
  Set<DatanodeAdminProperties> all =
      CombinedHostsFileReader.readFile(NEW_FILE.getAbsolutePath());
  assertEquals(0, all.size());
}
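A natural companion test, not part of this page's source, would round-trip a single entry. A hedged sketch: the JSON field names (hostName, port, upgradeDomain) are inferred from the bean's setters shown elsewhere on this page, not confirmed against its serialization annotations:

@Test
public void testSingleEntryCombinedHostsFileReader() throws Exception {
  FileWriter hosts = new FileWriter(NEW_FILE);
  // One root-level JSON object per entry; field names assumed to follow
  // the bean's setters (setHostName -> "hostName", and so on).
  hosts.write("{\"hostName\": \"host1\", \"port\": 50010,"
      + " \"upgradeDomain\": \"ud0\"}\n");
  hosts.close();
  Set<DatanodeAdminProperties> all =
      CombinedHostsFileReader.readFile(NEW_FILE.getAbsolutePath());
  assertEquals(1, all.size());
  assertEquals("ud0", all.iterator().next().getUpgradeDomain());
}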
Use of org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties in project hadoop by apache.
The class CombinedHostsFileReader, method readFile.
/**
 * Deserialize a set of DatanodeAdminProperties from a JSON file.
 * @param hostsFile the input JSON file to read from.
 * @return the set of DatanodeAdminProperties read from the file.
 * @throws IOException if the file cannot be read or parsed.
 */
public static Set<DatanodeAdminProperties> readFile(final String hostsFile)
    throws IOException {
  HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
  try (Reader input =
      new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
    // readValues(...) iterates over a sequence of root-level JSON values,
    // so the file holds one DatanodeAdminProperties object per entry.
    Iterator<DatanodeAdminProperties> iterator =
        READER.readValues(JSON_FACTORY.createParser(input));
    while (iterator.hasNext()) {
      DatanodeAdminProperties properties = iterator.next();
      allDNs.add(properties);
    }
  }
  return allDNs;
}
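Because READER.readValues(...) consumes a stream of root-level JSON values from the parser, the hosts file in this version is a sequence of JSON objects, one per datanode, rather than a single JSON array. A usage sketch of readFile with a made-up file path; getHostName, getPort, and getAdminState are the getters matching the setters used earlier on this page:

// Sketch: load the combined hosts file and report decommissioned nodes.
// "/etc/hadoop/dn-admin.json" is a hypothetical path.
Set<DatanodeAdminProperties> dns =
    CombinedHostsFileReader.readFile("/etc/hadoop/dn-admin.json");
for (DatanodeAdminProperties dn : dns) {
  if (dn.getAdminState() == DatanodeInfo.AdminStates.DECOMMISSIONED) {
    System.out.println(dn.getHostName() + ":" + dn.getPort());
  }
}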
Use of org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties in project hadoop by apache.
The class CombinedHostFileManager, method refresh.
private void refresh(final String hostsFile) throws IOException {
  HostProperties hostProps = new HostProperties();
  Set<DatanodeAdminProperties> all =
      CombinedHostsFileReader.readFile(hostsFile);
  for (DatanodeAdminProperties properties : all) {
    InetSocketAddress addr = parseEntry(hostsFile,
        properties.getHostName(), properties.getPort());
    // Entries that fail to parse or resolve are skipped rather than
    // failing the whole refresh.
    if (addr != null) {
      hostProps.add(addr.getAddress(), properties);
    }
  }
  refresh(hostProps);
}
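parseEntry itself is not shown on this page. A plausible shape, inferred only from how refresh treats its null return (the entry is silently skipped), might look like the following sketch; the real implementation's validation and logging details may differ, and LOG is the class's assumed logger:

// Hedged sketch of parseEntry: resolve host:port into a socket address and
// return null for unresolvable hosts so that refresh() can skip the entry.
private InetSocketAddress parseEntry(String fn, String hostName, int port) {
  InetSocketAddress addr = new InetSocketAddress(hostName, port);
  if (addr.isUnresolved()) {
    LOG.warn("Failed to resolve " + hostName + " in " + fn);
    return null;
  }
  return addr;
}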