Example 31 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class FSNamesystem method getEnteringMaintenanceNodes.

/**
   * Returned information is a JSON representation of map with host name of
   * nodes entering maintenance as the key and value as a map of various node
   * attributes to its values.
   */
// NameNodeMXBean
@Override
public String getEnteringMaintenanceNodes() {
    final Map<String, Map<String, Object>> nodesMap = new HashMap<String, Map<String, Object>>();
    final List<DatanodeDescriptor> enteringMaintenanceNodeList = blockManager.getDatanodeManager().getEnteringMaintenanceNodes();
    for (DatanodeDescriptor node : enteringMaintenanceNodeList) {
        Map<String, Object> attrMap = ImmutableMap.<String, Object>builder()
            .put("xferaddr", node.getXferAddr())
            .put("underReplicatedBlocks",
                node.getLeavingServiceStatus().getUnderReplicatedBlocks())
            .put("maintenanceOnlyReplicas",
                node.getLeavingServiceStatus().getOutOfServiceOnlyReplicas())
            .put("underReplicateInOpenFiles",
                node.getLeavingServiceStatus().getUnderReplicatedInOpenFiles())
            .build();
        nodesMap.put(node.getHostName() + ":" + node.getXferPort(), attrMap);
    }
    return JSON.toString(nodesMap);
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) HashMap(java.util.HashMap) Map(java.util.Map) TreeMap(java.util.TreeMap) ImmutableMap(com.google.common.collect.ImmutableMap)
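
The getter above serializes the map with Jetty's JSON utility, so a client can read the attribute back over JMX and parse it the same way. Below is a minimal sketch (not from the Hadoop sources) assuming it runs inside the NameNode's JVM, as the test in Example 34 does; a remote client would go through a JMXConnector instead. The attribute name follows from the NameNodeMXBean getter name, and the JSON class shown is Jetty's; use whichever Jetty package your Hadoop build bundles.

import java.lang.management.ManagementFactory;
import java.util.Map;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.eclipse.jetty.util.ajax.JSON;

public class EnteringMaintenanceReader {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name =
            new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        // Attribute name derived from NameNodeMXBean#getEnteringMaintenanceNodes.
        String json =
            (String) mbs.getAttribute(name, "EnteringMaintenanceNodes");
        Map<String, Map<String, Object>> nodes =
            (Map<String, Map<String, Object>>) JSON.parse(json);
        for (Map.Entry<String, Map<String, Object>> e : nodes.entrySet()) {
            System.out.println(e.getKey() + " -> "
                + e.getValue().get("underReplicatedBlocks"));
        }
    }
}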

Example 32 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class TestDFSNetworkTopology method testChooseRandomWithStorageType.

@Test
public void testChooseRandomWithStorageType() throws Exception {
    Node n;
    DatanodeDescriptor dd;
    // test that chooseRandom can return nodes of the desired storage type,
    // with no excludes
    Set<String> diskUnderL1 = Sets.newHashSet("host2", "host4", "host5", "host6");
    Set<String> archiveUnderL1 = Sets.newHashSet("host1", "host3");
    Set<String> ramdiskUnderL1 = Sets.newHashSet("host7");
    Set<String> ssdUnderL1 = Sets.newHashSet("host8");
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType("/l1", null, null, StorageType.DISK);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(diskUnderL1.contains(dd.getHostName()));
        n = CLUSTER.chooseRandomWithStorageType("/l1", null, null, StorageType.RAM_DISK);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(ramdiskUnderL1.contains(dd.getHostName()));
        n = CLUSTER.chooseRandomWithStorageType("/l1", null, null, StorageType.ARCHIVE);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(archiveUnderL1.contains(dd.getHostName()));
        n = CLUSTER.chooseRandomWithStorageType("/l1", null, null, StorageType.SSD);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(ssdUnderL1.contains(dd.getHostName()));
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) Node(org.apache.hadoop.net.Node) Test(org.junit.Test)
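
The loop above draws ten times per storage type to exercise the randomness. A helper like the following (an illustration one could drop into the test class, not part of it) tallies how often each host is returned, which is handy for eyeballing the distribution; it assumes a DFSNetworkTopology set up like the test's CLUSTER.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.net.Node;

static Map<String, Integer> tallyChoices(DFSNetworkTopology topology,
        String scope, StorageType type, int trials) {
    Map<String, Integer> counts = new HashMap<>();
    for (int i = 0; i < trials; i++) {
        Node n = topology.chooseRandomWithStorageType(scope, null, null, type);
        if (n instanceof DatanodeDescriptor) {
            // Count each chosen host (requires Java 8).
            counts.merge(((DatanodeDescriptor) n).getHostName(), 1,
                Integer::sum);
        }
    }
    return counts;
}

// e.g. tallyChoices(CLUSTER, "/l1", StorageType.DISK, 1000) should spread
// its counts across host2, host4, host5 and host6.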

Example 33 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class TestDFSNetworkTopology method testChooseRandomWithStorageTypeWrapper.

/**
   * Tests the wrapper method, which takes a single scope argument: if the
   * scope starts with a ~, it is treated as an excluded scope and the search
   * always starts from the root; otherwise it is the search scope.
   * @throws Exception throws exception.
   */
@Test
public void testChooseRandomWithStorageTypeWrapper() throws Exception {
    Node n;
    DatanodeDescriptor dd;
    n = CLUSTER.chooseRandomWithStorageType("/l2/d3/r4", null, null, StorageType.ARCHIVE);
    HashSet<Node> excluded = new HashSet<>();
    // exclude the host on r4 (since there is only one host, no randomness here)
    excluded.add(n);
    // search with the given scope as the desired scope
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType("/l2/d3", null, StorageType.ARCHIVE);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host12") || dd.getHostName().equals("host13"));
    }
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType("/l2/d3", excluded, StorageType.ARCHIVE);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host13"));
    }
    // so if we exclude /l2/d4/r1, it should always be either host7 or host10
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType("~/l2/d4", null, StorageType.RAM_DISK);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host7") || dd.getHostName().equals("host10"));
    }
    // similar to above, except that we also exclude host10 here, so it
    // should always be host7
    n = CLUSTER.chooseRandomWithStorageType("/l2/d3/r2", null, null, StorageType.RAM_DISK);
    // add host10 to exclude
    excluded.add(n);
    for (int i = 0; i < 10; i++) {
        n = CLUSTER.chooseRandomWithStorageType("~/l2/d4", excluded, StorageType.RAM_DISK);
        assertTrue(n instanceof DatanodeDescriptor);
        dd = (DatanodeDescriptor) n;
        assertTrue(dd.getHostName().equals("host7"));
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) Node(org.apache.hadoop.net.Node) HashSet(java.util.HashSet) Test(org.junit.Test)
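
For reference, the wrapper behavior the Javadoc describes can be sketched roughly as follows (an approximation for illustration, not necessarily the exact DFSNetworkTopology code): a leading ~ turns the argument into an excluded scope and the search starts from the root.

import java.util.Collection;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

static Node chooseWithScopeConvention(DFSNetworkTopology topology,
        String scope, Collection<Node> excluded, StorageType type) {
    if (scope.startsWith("~")) {
        // Excluded scope: search from the root, skipping the subtree after "~".
        return topology.chooseRandomWithStorageType(
            NodeBase.ROOT, scope.substring(1), excluded, type);
    }
    // Plain scope: search within the subtree, with no excluded scope.
    return topology.chooseRandomWithStorageType(scope, null, excluded, type);
}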

Example 34 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class TestNameNodeMXBean method testNameNodeMXBeanInfo.

@SuppressWarnings({ "unchecked" })
@Test
public void testNameNodeMXBeanInfo() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        cluster.waitActive();
        // Set upgrade domain on the first DN.
        String upgradeDomain = "abcd";
        DatanodeManager dm = cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager();
        DatanodeDescriptor dd = dm.getDatanode(cluster.getDataNodes().get(0).getDatanodeId());
        dd.setUpgradeDomain(upgradeDomain);
        String dnXferAddrWithUpgradeDomainSet = dd.getXferAddr();
        // Put the second DN to maintenance state.
        DatanodeDescriptor maintenanceNode = dm.getDatanode(cluster.getDataNodes().get(1).getDatanodeId());
        maintenanceNode.setInMaintenance();
        String dnXferAddrInMaintenance = maintenanceNode.getXferAddr();
        FSNamesystem fsn = cluster.getNameNode().namesystem;
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        // get attribute "ClusterId"
        String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
        assertEquals(fsn.getClusterId(), clusterId);
        // get attribute "BlockPoolId"
        String blockpoolId = (String) mbs.getAttribute(mxbeanName, "BlockPoolId");
        assertEquals(fsn.getBlockPoolId(), blockpoolId);
        // get attribute "Version"
        String version = (String) mbs.getAttribute(mxbeanName, "Version");
        assertEquals(fsn.getVersion(), version);
        assertTrue(version.equals(VersionInfo.getVersion() + ", r" + VersionInfo.getRevision()));
        // get attribute "Used"
        Long used = (Long) mbs.getAttribute(mxbeanName, "Used");
        assertEquals(fsn.getUsed(), used.longValue());
        // get attribute "Total"
        Long total = (Long) mbs.getAttribute(mxbeanName, "Total");
        assertEquals(fsn.getTotal(), total.longValue());
        // get attribute "safemode"
        String safemode = (String) mbs.getAttribute(mxbeanName, "Safemode");
        assertEquals(fsn.getSafemode(), safemode);
        // get attribute nondfs
        Long nondfs = (Long) (mbs.getAttribute(mxbeanName, "NonDfsUsedSpace"));
        assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
        // get attribute percentremaining
        Float percentremaining = (Float) (mbs.getAttribute(mxbeanName, "PercentRemaining"));
        assertEquals(fsn.getPercentRemaining(), percentremaining, DELTA);
        // get attribute Totalblocks
        Long totalblocks = (Long) (mbs.getAttribute(mxbeanName, "TotalBlocks"));
        assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
        // get attribute alivenodeinfo
        String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName, "LiveNodes"));
        Map<String, Map<String, Object>> liveNodes = (Map<String, Map<String, Object>>) JSON.parse(alivenodeinfo);
        assertTrue(liveNodes.size() == 2);
        for (Map<String, Object> liveNode : liveNodes.values()) {
            assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
            assertTrue(((Long) liveNode.get("nonDfsUsedSpace")) >= 0);
            assertTrue(liveNode.containsKey("capacity"));
            assertTrue(((Long) liveNode.get("capacity")) > 0);
            assertTrue(liveNode.containsKey("numBlocks"));
            assertTrue(((Long) liveNode.get("numBlocks")) == 0);
            assertTrue(liveNode.containsKey("lastBlockReport"));
            // a. By default the upgrade domain isn't defined on any DN.
            // b. If the upgrade domain is set on a DN, JMX should have the same
            // value.
            String xferAddr = (String) liveNode.get("xferaddr");
            if (!xferAddr.equals(dnXferAddrWithUpgradeDomainSet)) {
                assertTrue(!liveNode.containsKey("upgradeDomain"));
            } else {
                assertTrue(liveNode.get("upgradeDomain").equals(upgradeDomain));
            }
            // "adminState" is set to maintenance only for the specific dn.
            boolean inMaintenance = liveNode.get("adminState").equals(DatanodeInfo.AdminStates.IN_MAINTENANCE.toString());
            assertFalse(xferAddr.equals(dnXferAddrInMaintenance) ^ inMaintenance);
        }
        assertEquals(fsn.getLiveNodes(), alivenodeinfo);
        // get attributes DeadNodes
        String deadNodeInfo = (String) (mbs.getAttribute(mxbeanName, "DeadNodes"));
        assertEquals(fsn.getDeadNodes(), deadNodeInfo);
        // get attribute NodeUsage
        String nodeUsage = (String) (mbs.getAttribute(mxbeanName, "NodeUsage"));
        assertEquals("Bad value for NodeUsage", fsn.getNodeUsage(), nodeUsage);
        // get attribute NameJournalStatus
        String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName, "NameJournalStatus"));
        assertEquals("Bad value for NameJournalStatus", fsn.getNameJournalStatus(), nameJournalStatus);
        // get attribute JournalTransactionInfo
        String journalTxnInfo = (String) mbs.getAttribute(mxbeanName, "JournalTransactionInfo");
        assertEquals("Bad value for NameTxnIds", fsn.getJournalTransactionInfo(), journalTxnInfo);
        // get attribute "CompileInfo"
        String compileInfo = (String) mbs.getAttribute(mxbeanName, "CompileInfo");
        assertEquals("Bad value for CompileInfo", fsn.getCompileInfo(), compileInfo);
        // get attribute CorruptFiles
        String corruptFiles = (String) (mbs.getAttribute(mxbeanName, "CorruptFiles"));
        assertEquals("Bad value for CorruptFiles", fsn.getCorruptFiles(), corruptFiles);
        // get attribute NameDirStatuses
        String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName, "NameDirStatuses"));
        assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
        Map<String, Map<String, String>> statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
        Collection<URI> nameDirUris = cluster.getNameDirs(0);
        for (URI nameDirUri : nameDirUris) {
            File nameDir = new File(nameDirUri);
            System.out.println("Checking for the presence of " + nameDir + " in active name dirs.");
            assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath()));
        }
        assertEquals(2, statusMap.get("active").size());
        assertEquals(0, statusMap.get("failed").size());
        // This will cause the first dir to fail.
        File failedNameDir = new File(nameDirUris.iterator().next());
        assertEquals(0, FileUtil.chmod(new File(failedNameDir, "current").getAbsolutePath(), "000"));
        cluster.getNameNodeRpc().rollEditLog();
        nameDirStatuses = (String) (mbs.getAttribute(mxbeanName, "NameDirStatuses"));
        statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
        for (URI nameDirUri : nameDirUris) {
            File nameDir = new File(nameDirUri);
            String expectedStatus = nameDir.equals(failedNameDir) ? "failed" : "active";
            System.out.println("Checking for the presence of " + nameDir + " in " + expectedStatus + " name dirs.");
            assertTrue(statusMap.get(expectedStatus).containsKey(nameDir.getAbsolutePath()));
        }
        assertEquals(1, statusMap.get("active").size());
        assertEquals(1, statusMap.get("failed").size());
        assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
        assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() * cluster.getDataNodes().size(), mbs.getAttribute(mxbeanName, "CacheCapacity"));
        assertNull("RollingUpgradeInfo should be null when there is no rolling" + " upgrade", mbs.getAttribute(mxbeanName, "RollingUpgradeStatus"));
    } finally {
        if (cluster != null) {
            for (URI dir : cluster.getNameDirs(0)) {
                FileUtil.chmod(new File(new File(dir), "current").getAbsolutePath(), "755");
            }
            cluster.shutdown();
        }
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) URI(java.net.URI) ObjectName(javax.management.ObjectName) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) Map(java.util.Map) HashMap(java.util.HashMap) File(java.io.File) MBeanServer(javax.management.MBeanServer) Test(org.junit.Test)
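
Every attribute read in this test follows the same getAttribute-and-cast pattern against the same ObjectName. A small generic helper (an illustration, not part of the test) condenses it:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

final class MXBeanAttr {
    private static final MBeanServer MBS =
        ManagementFactory.getPlatformMBeanServer();

    // Reads a JMX attribute and casts it to the caller's expected type.
    @SuppressWarnings("unchecked")
    static <T> T get(ObjectName bean, String attribute) throws Exception {
        return (T) MBS.getAttribute(bean, attribute);
    }
}

// Usage, mirroring the test above:
//   ObjectName mxbeanName =
//       new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
//   String clusterId = MXBeanAttr.get(mxbeanName, "ClusterId");
//   Long total = MXBeanAttr.get(mxbeanName, "Total");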

Example 35 with DatanodeDescriptor

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

the class TestFsck method testBlockIdCKMaintenance.

/**
   * Test for blockIdCK with datanode maintenance.
   */
@Test(timeout = 90000)
public void testBlockIdCKMaintenance() throws Exception {
    final short replFactor = 2;
    short numDn = 2;
    final long blockSize = 512;
    String[] hosts = { "host1", "host2" };
    String[] racks = { "/rack1", "/rack2" };
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, replFactor);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY, replFactor);
    DistributedFileSystem dfs;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts).racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    DFSTestUtil util = new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build();
    // create files
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    util.createFile(dfs, path, 1024, replFactor, 1000L);
    util.waitReplication(dfs, path, replFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb : util.getAllBlocks(dfs, path)) {
        sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
    }
    String[] bIds = sb.toString().split(" ");
    // make sure the datanode that has the replica is healthy before maintenance
    String outStr = runFsck(conf, 0, true, "/", "-maintenance", "-blockId", bIds[0]);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    FSNamesystem fsn = cluster.getNameNode().getNamesystem();
    BlockManager bm = fsn.getBlockManager();
    DatanodeManager dnm = bm.getDatanodeManager();
    DatanodeDescriptor dn = dnm.getDatanode(cluster.getDataNodes().get(0).getDatanodeId());
    bm.getDatanodeManager().getDecomManager().startMaintenance(dn, Long.MAX_VALUE);
    final String dnName = dn.getXferAddr();
    // wait for the node to start entering maintenance
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            DatanodeInfo datanodeInfo = null;
            try {
                for (DatanodeInfo info : dfs.getDataNodeStats()) {
                    if (dnName.equals(info.getXferAddr())) {
                        datanodeInfo = info;
                    }
                }
                if (datanodeInfo != null && datanodeInfo.isEnteringMaintenance()) {
                    String fsckOut = runFsck(conf, 5, false, "/", "-maintenance", "-blockId", bIds[0]);
                    assertTrue(fsckOut.contains(NamenodeFsck.ENTERING_MAINTENANCE_STATUS));
                    return true;
                }
            } catch (Exception e) {
                LOG.warn("Unexpected exception: " + e);
                return false;
            }
            return false;
        }
    }, 500, 30000);
    // Start 3rd DataNode
    cluster.startDataNodes(conf, 1, true, null, new String[] { "/rack3" }, new String[] { "host3" }, null, false);
    // Wait for the 1st node to reach the IN_MAINTENANCE state
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            try {
                DatanodeInfo datanodeInfo = null;
                for (DatanodeInfo info : dfs.getDataNodeStats()) {
                    if (dnName.equals(info.getXferAddr())) {
                        datanodeInfo = info;
                    }
                }
                if (datanodeInfo != null && datanodeInfo.isInMaintenance()) {
                    return true;
                }
            } catch (Exception e) {
                LOG.warn("Unexpected exception: " + e);
                return false;
            }
            return false;
        }
    }, 500, 30000);
    // check that the in-maintenance node is reported
    String fsckOut = runFsck(conf, 4, false, "/", "-maintenance", "-blockId", bIds[0]);
    assertTrue(fsckOut.contains(NamenodeFsck.IN_MAINTENANCE_STATUS));
    // check that in-maintenance nodes are not printed when not requested
    fsckOut = runFsck(conf, 4, false, "/", "-blockId", bIds[0]);
    assertFalse(fsckOut.contains(NamenodeFsck.IN_MAINTENANCE_STATUS));
}
Also used : Path(org.apache.hadoop.fs.Path) DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Matchers.anyString(org.mockito.Matchers.anyString) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) IOException(java.io.IOException) ChecksumException(org.apache.hadoop.fs.ChecksumException) TimeoutException(java.util.concurrent.TimeoutException) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Test(org.junit.Test)
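
Both wait loops above poll dfs.getDataNodeStats() for the node with a given transfer address and differ only in the admin state they check. A hedged refactoring sketch (not in the test; assumes Java 8 and placement in the test class) factors the lookup into one method taking a predicate:

import com.google.common.base.Supplier;
import java.util.function.Predicate;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.test.GenericTestUtils;

static void waitForDatanodeState(final DistributedFileSystem dfs,
        final String xferAddr, final Predicate<DatanodeInfo> state)
        throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            try {
                for (DatanodeInfo info : dfs.getDataNodeStats()) {
                    if (xferAddr.equals(info.getXferAddr())) {
                        return state.test(info);
                    }
                }
            } catch (Exception e) {
                // Swallow and retry on the next poll.
            }
            return false;
        }
    }, 500, 30000);
}

// e.g. the second wait loop above becomes:
//   waitForDatanodeState(dfs, dnName, DatanodeInfo::isInMaintenance);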

Aggregations

DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 75
Test (org.junit.Test): 37
ArrayList (java.util.ArrayList): 23
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 21
Path (org.apache.hadoop.fs.Path): 19
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 13
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 12
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 12
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 11
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10
HashMap (java.util.HashMap): 9
Configuration (org.apache.hadoop.conf.Configuration): 9
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 9
Node (org.apache.hadoop.net.Node): 9
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 8
IOException (java.io.IOException): 7
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7
Map (java.util.Map): 6