Search in sources :

Example 6 with DatanodeAdminProperties

Uses of org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties in the Apache Hadoop project.

From the class TestUpgradeDomainBlockPlacementPolicy, method refreshDatanodeAdminProperties2.

/**
   * Assign admin properties to the datanodes as follows.
   * dn0's upgrade domain is ud5.
   * dn1's upgrade domain is ud2.
   * dn2's upgrade domain is ud3.
   * dn3's upgrade domain is ud1.
   * dn4's upgrade domain is ud2.
   * dn5's upgrade domain is ud4.
   * dn2 and dn3 are decommissioned.
   * Given dn0, dn1 and dn2 are on rack1 and dn3, dn4 and dn5 are on
   * rack2, any block's replicas should be on either
   * {dn0, dn1, dn5} or {dn0, dn4, dn5}. dn0 and dn5 appear in both
   * candidate sets, so only those two are recorded as expected.
   */
private void refreshDatanodeAdminProperties2() throws IOException {
    final int numHosts = hosts.length;
    DatanodeAdminProperties[] datanodes = new DatanodeAdminProperties[numHosts];
    for (int idx = 0; idx < numHosts; idx++) {
        DatanodeID dnId = cluster.getDataNodes().get(idx).getDatanodeId();
        DatanodeAdminProperties props = new DatanodeAdminProperties();
        props.setHostName(dnId.getHostName());
        props.setPort(dnId.getXferPort());
        props.setUpgradeDomain(upgradeDomains[idx]);
        datanodes[idx] = props;
    }
    // Mark dn2 and dn3 as decommissioned before pushing the include file.
    datanodes[2].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
    datanodes[3].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
    hostsFileWriter.initIncludeHosts(datanodes);
    cluster.getFileSystem().refreshNodes();
    // dn0 and dn5 are common to both valid replica placements.
    expectedDatanodeIDs.clear();
    expectedDatanodeIDs.add(cluster.getDataNodes().get(0).getDatanodeId());
    expectedDatanodeIDs.add(cluster.getDataNodes().get(5).getDatanodeId());
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeAdminProperties(org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties)

Example 7 with DatanodeAdminProperties

Uses of org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties in the Apache Hadoop project.

From the class TestCombinedHostsFileReader, method testEmptyCombinedHostsFileReader.

/*
   * Test that reading an empty json hosts file yields an empty set.
   */
@Test
public void testEmptyCombinedHostsFileReader() throws Exception {
    // try-with-resources guarantees the writer is closed (and the file is
    // flushed to disk) even if write() throws; the original leaked the
    // FileWriter on a write failure.
    try (FileWriter hosts = new FileWriter(NEW_FILE)) {
        hosts.write("");
    }
    Set<DatanodeAdminProperties> all =
        CombinedHostsFileReader.readFile(NEW_FILE.getAbsolutePath());
    assertEquals(0, all.size());
}
Also used : FileWriter(java.io.FileWriter) DatanodeAdminProperties(org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties) Test(org.junit.Test)

Example 8 with DatanodeAdminProperties

Uses of org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties in the Apache Hadoop project.

From the class CombinedHostsFileReader, method readFile.

/**
   * Deserialize a set of DatanodeAdminProperties from a json file.
   * @param hostsFile the input json file to read from.
   * @return the set of DatanodeAdminProperties parsed from the file.
   * @throws IOException if the file cannot be opened or parsed.
   */
public static Set<DatanodeAdminProperties> readFile(final String hostsFile) throws IOException {
    final HashSet<DatanodeAdminProperties> result = new HashSet<>();
    // try-with-resources closes the underlying stream on all paths.
    try (Reader input = new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
        // READER / JSON_FACTORY are shared Jackson helpers declared on this class.
        Iterator<DatanodeAdminProperties> entries =
            READER.readValues(JSON_FACTORY.createParser(input));
        entries.forEachRemaining(result::add);
    }
    return result;
}
Also used : InputStreamReader(java.io.InputStreamReader) Reader(java.io.Reader) InputStreamReader(java.io.InputStreamReader) ObjectReader(com.fasterxml.jackson.databind.ObjectReader) DatanodeAdminProperties(org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties) FileInputStream(java.io.FileInputStream) HashSet(java.util.HashSet)

Example 9 with DatanodeAdminProperties

Uses of org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties in the Apache Hadoop project.

From the class CombinedHostFileManager, method refresh.

/**
 * Reload datanode admin properties from the given combined hosts file and
 * apply them to this manager.
 *
 * @param hostsFile path of the json hosts file to load.
 * @throws IOException if the file cannot be read or parsed.
 */
private void refresh(final String hostsFile) throws IOException {
    final HostProperties newProps = new HostProperties();
    for (final DatanodeAdminProperties entry : CombinedHostsFileReader.readFile(hostsFile)) {
        final InetSocketAddress address =
            parseEntry(hostsFile, entry.getHostName(), entry.getPort());
        // parseEntry returned null — the entry could not be resolved; skip it.
        if (address != null) {
            newProps.add(address.getAddress(), entry);
        }
    }
    refresh(newProps);
}
Also used : InetSocketAddress(java.net.InetSocketAddress) DatanodeAdminProperties(org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties)

Aggregations

DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties)9 HashSet (java.util.HashSet)3 DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID)3 HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter)2 Test (org.junit.Test)2 ObjectReader (com.fasterxml.jackson.databind.ObjectReader)1 FileInputStream (java.io.FileInputStream)1 FileWriter (java.io.FileWriter)1 InputStreamReader (java.io.InputStreamReader)1 Reader (java.io.Reader)1 InetSocketAddress (java.net.InetSocketAddress)1 Map (java.util.Map)1 Path (org.apache.hadoop.fs.Path)1 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)1 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)1 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)1 Matchers.anyString (org.mockito.Matchers.anyString)1