Example 21 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestDatanodeManager, method testRemoveIncludedNode:

/**
   * Test whether removing a host from the includes list without adding it to
   * the excludes list will exclude it from data node reports.
   */
@Test
public void testRemoveIncludedNode() throws IOException {
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    // Set the write lock so that the DatanodeManager can start
    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
    DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
    HostFileManager hm = new HostFileManager();
    HostSet noNodes = new HostSet();
    HostSet oneNode = new HostSet();
    HostSet twoNodes = new HostSet();
    DatanodeRegistration dr1 = new DatanodeRegistration(
        new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-123", 12345, 12345, 12345, 12345),
        new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
        new ExportedBlockKeys(), "test");
    DatanodeRegistration dr2 = new DatanodeRegistration(
        new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-234", 23456, 23456, 23456, 23456),
        new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
        new ExportedBlockKeys(), "test");
    twoNodes.add(entry("127.0.0.1:12345"));
    twoNodes.add(entry("127.0.0.1:23456"));
    oneNode.add(entry("127.0.0.1:23456"));
    hm.refresh(twoNodes, noNodes);
    Whitebox.setInternalState(dm, "hostConfigManager", hm);
    // Register two data nodes to simulate them coming up.
    // We need to add two nodes, because if we have only one node, removing it
    // will cause the includes list to be empty, which means all hosts will be
    // allowed.
    dm.registerDatanode(dr1);
    dm.registerDatanode(dr2);
    // Make sure that both nodes are reported
    List<DatanodeDescriptor> both = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
    // Sort the list so that we know which one is which
    Collections.sort(both);
    Assert.assertEquals("Incorrect number of hosts reported", 2, both.size());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:12345", both.get(0).getInfoAddr());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:23456", both.get(1).getInfoAddr());
    // Remove one node from includes, but do not add it to excludes.
    hm.refresh(oneNode, noNodes);
    // Make sure that only one node is still reported
    List<DatanodeDescriptor> onlyOne = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
    Assert.assertEquals("Incorrect number of hosts reported", 1, onlyOne.size());
    Assert.assertEquals("Unexpected host reported", "127.0.0.1:23456", onlyOne.get(0).getInfoAddr());
    // Remove all nodes from includes
    hm.refresh(noNodes, noNodes);
    // Check that both nodes are reported again
    List<DatanodeDescriptor> bothAgain = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
    // Sort the list so that we know which one is which
    Collections.sort(bothAgain);
    Assert.assertEquals("Incorrect number of hosts reported", 2, bothAgain.size());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:12345", bothAgain.get(0).getInfoAddr());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:23456", bothAgain.get(1).getInfoAddr());
}
Also used: DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), Configuration (org.apache.hadoop.conf.Configuration), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test)
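
The include/exclude rule this test depends on (an empty includes list admits every host) can also be exercised against HostFileManager directly. A minimal sketch under stated assumptions: isIncluded(DatanodeID) is assumed available on HostFileManager, the class is placed in the test's own package because refresh(HostSet, HostSet) may be package-visible, and plain InetSocketAddress construction stands in for the test-local entry(...) helper used above:

// Assumed package: same as TestDatanodeManager, so the test-visible
// refresh(HostSet, HostSet) overload can be called.
package org.apache.hadoop.hdfs.server.blockmanagement;

import java.net.InetSocketAddress;
import org.apache.hadoop.hdfs.protocol.DatanodeID;

public class HostFileManagerSketch {
    public static void main(String[] args) {
        HostFileManager hm = new HostFileManager();
        HostSet includes = new HostSet();
        HostSet excludes = new HostSet();

        // Same constructor shape as dr1/dr2 above: ip, hostname, uuid, then four ports.
        DatanodeID dn1 = new DatanodeID("127.0.0.1", "127.0.0.1", "uuid-1", 12345, 12345, 12345, 12345);
        DatanodeID dn2 = new DatanodeID("127.0.0.1", "127.0.0.1", "uuid-2", 23456, 23456, 23456, 23456);

        // Empty includes list: every host is admitted.
        hm.refresh(includes, excludes);
        System.out.println(hm.isIncluded(dn1) + " " + hm.isIncluded(dn2)); // true true

        // One entry in includes: only that host:port is admitted.
        includes.add(new InetSocketAddress("127.0.0.1", 12345));
        hm.refresh(includes, excludes);
        System.out.println(hm.isIncluded(dn1) + " " + hm.isIncluded(dn2)); // true false
    }
}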

Example 22 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestDecommissioningStatus, method checkDFSAdminDecommissionStatus:

private void checkDFSAdminDecommissionStatus(List<DatanodeDescriptor> expectedDecomm, DistributedFileSystem dfs, DFSAdmin admin) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintStream ps = new PrintStream(baos);
    PrintStream oldOut = System.out;
    System.setOut(ps);
    try {
        // Parse DFSAdmin just to check the count
        admin.report(new String[] { "-decommissioning" }, 0);
        String[] lines = baos.toString().split("\n");
        Integer num = null;
        int count = 0;
        for (String line : lines) {
            if (line.startsWith("Decommissioning datanodes")) {
                // Pull out the "(num)" and parse it into an int
                String temp = line.split(" ")[2];
                num = Integer.parseInt(temp.substring(1, temp.length() - 2));
            }
            if (line.contains("Decommission in progress")) {
                count++;
            }
        }
        assertTrue("No decommissioning output", num != null);
        assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(), num.intValue());
        assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(), count);
        // Check Java API for correct contents
        List<DatanodeInfo> decomming = new ArrayList<DatanodeInfo>(Arrays.asList(dfs.getDataNodeStats(DatanodeReportType.DECOMMISSIONING)));
        assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(), decomming.size());
        for (DatanodeID id : expectedDecomm) {
            assertTrue("Did not find expected decomming DN " + id, decomming.contains(id));
        }
    } finally {
        System.setOut(oldOut);
    }
}
Also used: PrintStream (java.io.PrintStream), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), ArrayList (java.util.ArrayList), ByteArrayOutputStream (org.apache.commons.io.output.ByteArrayOutputStream)
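
The stdout-capture idiom above is reusable on its own. A minimal, self-contained sketch of the same pattern; it uses java.io.ByteArrayOutputStream, whereas the test imports the commons-io variant, which behaves the same for this purpose:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

public class StdoutCapture {
    // Runs the action while System.out is redirected, returning what it printed.
    static String captureStdout(Runnable action) {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        PrintStream oldOut = System.out;
        System.setOut(new PrintStream(baos));
        try {
            action.run();
        } finally {
            // Always restore the original stream, exactly as the test above does.
            System.setOut(oldOut);
        }
        return baos.toString();
    }

    public static void main(String[] args) {
        String out = captureStdout(() -> System.out.println("Decommissioning datanodes (1):"));
        System.out.println("captured: " + out);
    }
}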

Example 23 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestDecommissioningStatus, method testDecommissionDeadDN:

/**
   * Verify support for decommissioning a datanode that is already dead.
   * In this scenario the datanode should immediately be marked as
   * DECOMMISSIONED.
   */
@Test(timeout = 120000)
public void testDecommissionDeadDN() throws Exception {
    Logger log = Logger.getLogger(DecommissionManager.class);
    log.setLevel(Level.DEBUG);
    DatanodeID dnID = cluster.getDataNodes().get(0).getDatanodeId();
    String dnName = dnID.getXferAddr();
    DataNodeProperties stoppedDN = cluster.stopDataNode(0);
    DFSTestUtil.waitForDatanodeState(cluster, dnID.getDatanodeUuid(), false, 30000);
    FSNamesystem fsn = cluster.getNamesystem();
    final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
    DatanodeDescriptor dnDescriptor = dm.getDatanode(dnID);
    decommissionNode(dnName);
    dm.refreshNodes(conf);
    BlockManagerTestUtil.recheckDecommissionState(dm);
    assertTrue(dnDescriptor.isDecommissioned());
    // Add the node back
    cluster.restartDataNode(stoppedDN, true);
    cluster.waitActive();
    // Call refreshNodes on FSNamesystem with empty exclude file to remove the
    // datanode from decommissioning list and make it available again.
    hostsFileWriter.initExcludeHost("");
    dm.refreshNodes(conf);
}
Also used: DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DataNodeProperties (org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties), Logger (org.apache.log4j.Logger), Test (org.junit.Test), AdminStatesBaseTest (org.apache.hadoop.hdfs.AdminStatesBaseTest)
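
BlockManagerTestUtil.recheckDecommissionState forces a single, immediate recheck, which works here because a dead node is decommissioned at once. Where completion is not immediate, polling is the usual alternative; a sketch assuming GenericTestUtils.waitFor from the hadoop-common test utilities (a lambda satisfies either the Guava or java.util.function Supplier that different Hadoop versions expect):

import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.test.GenericTestUtils;

class DecommissionWait {
    // Polls the descriptor until it reports DECOMMISSIONED, checking every
    // 500 ms and failing with TimeoutException after 30 s. waitFor also
    // throws InterruptedException, hence the broad throws clause.
    static void waitForDecommissioned(final DatanodeDescriptor dn) throws Exception {
        GenericTestUtils.waitFor(() -> dn.isDecommissioned(), 500, 30000);
    }
}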

Example 24 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestCommitBlockSynchronization, method testCommitBlockSynchronizationWithCloseAndNonExistantTarget:

@Test
public void testCommitBlockSynchronizationWithCloseAndNonExistantTarget() throws IOException {
    INodeFile file = mockFileUnderConstruction();
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[] { new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0, 0) };
    String[] storageIDs = new String[] { "fake-storage-ID" };
    ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, storageIDs);
    // Repeat the call to make sure it returns true
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, storageIDs);
}
Also used: DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)

Example 25 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestCommitBlockSynchronization, method testCommitBlockSynchronizationWithDelete:

@Test
public void testCommitBlockSynchronizationWithDelete() throws IOException {
    INodeFile file = mockFileUnderConstruction();
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[0];
    ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, false, true, newTargets, null);
    // Simulate removing the last block from the file.
    doReturn(null).when(file).removeLastBlock(any(Block.class));
    // Repeat the call to make sure it does not throw
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, false, true, newTargets, null);
}
Also used: DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)
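
In both tests the two positional booleans after length are easy to misread. Naming them locally makes the intent explicit; the parameter names closeFile and deleteBlock are assumed from the commitBlockSynchronization signature these tests exercise, not spelled out in the snippets above:

    // Example 24's call: close the file, do not delete the block.
    boolean closeFile = true;
    boolean deleteBlock = false;
    namesystemSpy.commitBlockSynchronization(
        lastBlock, genStamp, length, closeFile, deleteBlock, newTargets, storageIDs);

    // Example 25's call: do not close, delete the last block; newTargets is
    // empty and no target storage IDs are supplied (null).
    namesystemSpy.commitBlockSynchronization(
        lastBlock, genStamp, length, /* closeFile */ false, /* deleteBlock */ true,
        new DatanodeID[0], null);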

Aggregations

DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 51 usages
Test (org.junit.Test): 36 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 18 usages
Configuration (org.apache.hadoop.conf.Configuration): 13 usages
Path (org.apache.hadoop.fs.Path): 12 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 10 usages
IOException (java.io.IOException): 8 usages
InetSocketAddress (java.net.InetSocketAddress): 8 usages
Peer (org.apache.hadoop.hdfs.net.Peer): 8 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 usages
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 8 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 7 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 6 usages
Socket (java.net.Socket): 5 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 5 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 5 usages
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 5 usages
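
To close, the seven-argument DatanodeID constructor that recurs through these examples, with each position labeled. A minimal sketch; the argument order (IP address, hostname, datanode UUID, then the xfer, info, info-secure, and ipc ports) is inferred from the usages above and their assertions:

import org.apache.hadoop.hdfs.protocol.DatanodeID;

public class DatanodeIDSketch {
    public static void main(String[] args) {
        DatanodeID id = new DatanodeID(
            "127.0.0.1",         // ipAddr
            "127.0.0.1",         // hostName
            "someStorageID-123", // datanodeUuid
            12345,               // xferPort
            12345,               // infoPort
            12345,               // infoSecurePort
            12345);              // ipcPort
        System.out.println(id.getXferAddr()); // "127.0.0.1:12345", the form used as dnName in Example 23
        System.out.println(id.getInfoAddr()); // "127.0.0.1:12345", the form asserted in Example 21
        System.out.println(id.getDatanodeUuid());
    }
}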