Example 1 with DatanodeAdminProperties

Use of org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties in the Apache Hadoop project.

From the class HostsFileWriter, method initOutOfServiceHosts.

public void initOutOfServiceHosts(List<String> decommissionHostNameAndPorts, Map<String, Long> maintenanceHosts) throws IOException {
    StringBuilder excludeHosts = new StringBuilder();
    if (isLegacyHostsFile) {
        // The legacy exclude file is a plain host list and cannot carry
        // maintenance state, so reject maintenance requests up front.
        if (maintenanceHosts != null && !maintenanceHosts.isEmpty()) {
            throw new UnsupportedOperationException("maintenance is not supported by the legacy hosts file");
        }
        for (String hostNameAndPort : decommissionHostNameAndPorts) {
            excludeHosts.append(hostNameAndPort).append("\n");
        }
        DFSTestUtil.writeFile(localFileSys, excludeFile, excludeHosts.toString());
    } else {
        // Combined (JSON) format: each host becomes a DatanodeAdminProperties
        // entry with an explicit admin state.
        HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
        if (decommissionHostNameAndPorts != null) {
            for (String hostNameAndPort : decommissionHostNameAndPorts) {
                DatanodeAdminProperties dn = new DatanodeAdminProperties();
                String[] hostAndPort = hostNameAndPort.split(":");
                dn.setHostName(hostAndPort[0]);
                dn.setPort(Integer.parseInt(hostAndPort[1]));
                dn.setAdminState(AdminStates.DECOMMISSIONED);
                allDNs.add(dn);
            }
        }
        // Maintenance hosts additionally carry an expiration timestamp.
        if (maintenanceHosts != null) {
            for (Map.Entry<String, Long> hostEntry : maintenanceHosts.entrySet()) {
                DatanodeAdminProperties dn = new DatanodeAdminProperties();
                String[] hostAndPort = hostEntry.getKey().split(":");
                dn.setHostName(hostAndPort[0]);
                dn.setPort(Integer.parseInt(hostAndPort[1]));
                dn.setAdminState(AdminStates.IN_MAINTENANCE);
                dn.setMaintenanceExpireTimeInMS(hostEntry.getValue());
                allDNs.add(dn);
            }
        }
        CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
    }
}
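
A minimal usage sketch for the method above (host names, port, and expiry are placeholders; it assumes a test with an initialized HostsFileWriter in combined mode and a running MiniDFSCluster, as in the later examples):

// Decommission one datanode and put another into maintenance for an hour.
// "host1.example.com:9866" and "host2.example.com:9866" are hypothetical.
List<String> decommission = Arrays.asList("host1.example.com:9866");
Map<String, Long> maintenance = new HashMap<>();
maintenance.put("host2.example.com:9866", System.currentTimeMillis() + 3600000L);
hostsFileWriter.initOutOfServiceHosts(decommission, maintenance);
// Ask the namenode to re-read the hosts configuration.
cluster.getFileSystem().refreshNodes();
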
Also used: DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties), Map (java.util.Map), HashSet (java.util.HashSet)

Example 2 with DatanodeAdminProperties

Use of org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties in the Apache Hadoop project.

From the class HostsFileWriter, method initIncludeHosts.

public void initIncludeHosts(String[] hostNameAndPorts) throws IOException {
    StringBuilder includeHosts = new StringBuilder();
    if (isLegacyHostsFile) {
        for (String hostNameAndPort : hostNameAndPorts) {
            includeHosts.append(hostNameAndPort).append("\n");
        }
        DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
    } else {
        // Combined (JSON) format: entries without an explicit admin state
        // default to NORMAL, i.e. the datanode is simply included.
        HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
        for (String hostNameAndPort : hostNameAndPorts) {
            String[] hostAndPort = hostNameAndPort.split(":");
            DatanodeAdminProperties dn = new DatanodeAdminProperties();
            dn.setHostName(hostAndPort[0]);
            dn.setPort(Integer.parseInt(hostAndPort[1]));
            allDNs.add(dn);
        }
        CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
    }
}
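
A corresponding usage sketch (placeholder host:port strings; same assumed test fixtures as above):

// Register two datanodes in the include list, then refresh the namenode.
hostsFileWriter.initIncludeHosts(new String[] {
    "host1.example.com:9866", "host2.example.com:9866" });
cluster.getFileSystem().refreshNodes();
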
Also used: DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties), HashSet (java.util.HashSet)

Example 3 with DatanodeAdminProperties

Use of org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties in the Apache Hadoop project.

From the class TestFsck, method testUpgradeDomain.

private void testUpgradeDomain(boolean defineUpgradeDomain, boolean displayUpgradeDomain) throws Exception {
    final short replFactor = 1;
    final short numDN = 1;
    final long blockSize = 512;
    final long fileSize = 1024;
    final String upgradeDomain = "ud1";
    final String[] racks = { "/rack1" };
    final String[] hosts = { "127.0.0.1" };
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
    if (defineUpgradeDomain) {
        conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY, CombinedHostFileManager.class, HostConfigManager.class);
        hostsFileWriter.initialize(conf, "temp/fsckupgradedomain");
    }
    DistributedFileSystem dfs;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDN).hosts(hosts).racks(racks).build();
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    // Configure the upgrade domain on the datanode
    if (defineUpgradeDomain) {
        DatanodeAdminProperties dnProp = new DatanodeAdminProperties();
        DatanodeID datanodeID = cluster.getDataNodes().get(0).getDatanodeId();
        dnProp.setHostName(datanodeID.getHostName());
        dnProp.setPort(datanodeID.getXferPort());
        dnProp.setUpgradeDomain(upgradeDomain);
        hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { dnProp });
        cluster.getFileSystem().refreshNodes();
    }
    // create files
    final String testFile = "/testfile";
    final Path path = new Path(testFile);
    DFSTestUtil.createFile(dfs, path, fileSize, replFactor, 1000L);
    DFSTestUtil.waitReplication(dfs, path, replFactor);
    try {
        String fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks", displayUpgradeDomain ? "-upgradedomains" : "-locations");
        assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
        String udValue = defineUpgradeDomain ? upgradeDomain : NamenodeFsck.UNDEFINED;
        assertEquals(displayUpgradeDomain, fsckOut.contains("(ud=" + udValue + ")"));
    } finally {
        if (defineUpgradeDomain) {
            hostsFileWriter.cleanup();
        }
    }
}
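
This helper is private and parameterized, so TestFsck presumably drives it from small @Test wrappers covering the flag combinations; a sketch of what such wrappers could look like (the actual test names may differ):

@Test
public void testFsckUpgradeDomainDefinedAndDisplayed() throws Exception {
    // Upgrade domain configured and -upgradedomains passed to fsck.
    testUpgradeDomain(true, true);
}

@Test
public void testFsckUpgradeDomainUndefined() throws Exception {
    // No upgrade domain configured; fsck should report it as undefined.
    testUpgradeDomain(false, true);
}
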
Also used: Path (org.apache.hadoop.fs.Path), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties)

Example 4 with DatanodeAdminProperties

Use of org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties in the Apache Hadoop project.

From the class TestDatanodeReport, method testDatanodeReportWithUpgradeDomain.

/**
   * This test verifies upgrade domain is set according to the JSON host file.
   */
@Test
public void testDatanodeReportWithUpgradeDomain() throws Exception {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY, CombinedHostFileManager.class, HostConfigManager.class);
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/datanodeReport");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    final DFSClient client = cluster.getFileSystem().dfs;
    final String ud1 = "ud1";
    final String ud2 = "ud2";
    try {
        // wait until the cluster is up
        cluster.waitActive();
        DatanodeAdminProperties datanode = new DatanodeAdminProperties();
        datanode.setHostName(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
        datanode.setUpgradeDomain(ud1);
        hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
        client.refreshNodes();
        DatanodeInfo[] all = client.datanodeReport(DatanodeReportType.ALL);
        assertEquals(ud1, all[0].getUpgradeDomain());
        datanode.setUpgradeDomain(null);
        hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
        client.refreshNodes();
        all = client.datanodeReport(DatanodeReportType.ALL);
        assertNull(all[0].getUpgradeDomain());
        datanode.setUpgradeDomain(ud2);
        hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
        client.refreshNodes();
        all = client.datanodeReport(DatanodeReportType.ALL);
        assertEquals(ud2, all[0].getUpgradeDomain());
    } finally {
        cluster.shutdown();
    }
}
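
The same include-file mechanism can combine an upgrade domain with an admin state in a single entry; a hypothetical variation on the pattern above (host name, port, and domain are placeholders):

DatanodeAdminProperties dn = new DatanodeAdminProperties();
dn.setHostName("host1.example.com");  // placeholder host
dn.setPort(9866);                     // illustrative xfer port
dn.setUpgradeDomain("ud3");
dn.setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { dn });
client.refreshNodes();
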
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties), Test (org.junit.Test)

Example 5 with DatanodeAdminProperties

Use of org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties in the Apache Hadoop project.

From the class TestUpgradeDomainBlockPlacementPolicy, method refreshDatanodeAdminProperties.

/**
   * Define admin properties for these datanodes as follows.
   * dn0's upgrade domain is ud5.
   * dn1's upgrade domain is ud2.
   * dn2's upgrade domain is ud3.
   * dn3's upgrade domain is ud1.
   * dn4's upgrade domain is ud2.
   * dn5's upgrade domain is ud4.
   * dn0 and dn5 are decommissioned.
   * Given dn0, dn1 and dn2 are on rack1 and dn3, dn4 and dn5 are on
   * rack2. Then any block's replicas should be on either
   * {dn1, dn2, dn3} or {dn2, dn3, dn4}.
   */
private void refreshDatanodeAdminProperties() throws IOException {
    DatanodeAdminProperties[] datanodes = new DatanodeAdminProperties[hosts.length];
    for (int i = 0; i < hosts.length; i++) {
        datanodes[i] = new DatanodeAdminProperties();
        DatanodeID datanodeID = cluster.getDataNodes().get(i).getDatanodeId();
        datanodes[i].setHostName(datanodeID.getHostName());
        datanodes[i].setPort(datanodeID.getXferPort());
        datanodes[i].setUpgradeDomain(upgradeDomains[i]);
    }
    datanodes[0].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
    datanodes[5].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
    hostsFileWriter.initIncludeHosts(datanodes);
    cluster.getFileSystem().refreshNodes();
    expectedDatanodeIDs.clear();
    // dn2 and dn3 are the intersection of the two valid replica sets, so
    // every block must place a replica on each of them.
    expectedDatanodeIDs.add(cluster.getDataNodes().get(2).getDatanodeId());
    expectedDatanodeIDs.add(cluster.getDataNodes().get(3).getDatanodeId());
}
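
The helper assumes class-level fixtures matching the javadoc: six hosts, with upgradeDomains[i] holding dn i's domain. A sketch of what that mapping could look like (the actual field initialization lives elsewhere in the test class):

// dn0..dn5 in order, matching the javadoc above.
private static final String[] upgradeDomains =
    { "ud5", "ud2", "ud3", "ud1", "ud2", "ud4" };
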
Also used: DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties)

Aggregations

DatanodeAdminProperties (org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties): 9 uses
HashSet (java.util.HashSet): 3 uses
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 3 uses
HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter): 2 uses
Test (org.junit.Test): 2 uses
ObjectReader (com.fasterxml.jackson.databind.ObjectReader): 1 use
FileInputStream (java.io.FileInputStream): 1 use
FileWriter (java.io.FileWriter): 1 use
InputStreamReader (java.io.InputStreamReader): 1 use
Reader (java.io.Reader): 1 use
InetSocketAddress (java.net.InetSocketAddress): 1 use
Map (java.util.Map): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1 use
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1 use
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1 use
Matchers.anyString (org.mockito.Matchers.anyString): 1 use