
Example 11 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestBlockManager method testSafeModeIBRBeforeFirstFullBR.

/**
   * Test that when the NN starts in safe mode and first receives an
   * incremental block report, then receives the first full block report,
   * both are processed correctly.
   */
@Test
public void testSafeModeIBRBeforeFirstFullBR() throws Exception {
    // pretend to be in safemode
    doReturn(true).when(fsn).isInStartupSafeMode();
    DatanodeDescriptor node = nodes.get(0);
    DatanodeStorageInfo ds = node.getStorageInfos()[0];
    node.setAlive(true);
    DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, "");
    // register new node
    bm.getDatanodeManager().registerDatanode(nodeReg);
    bm.getDatanodeManager().addDatanode(node);
    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
    assertEquals(0, ds.getBlockReportCount());
    // Build an incremental report
    List<ReceivedDeletedBlockInfo> rdbiList = new ArrayList<>();
    // Build a full report
    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
    // blk_42 is finalized.
    long receivedBlockId = 42; // arbitrary block ID
    BlockInfo receivedBlock = addBlockToBM(receivedBlockId);
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedBlock), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
    builder.add(new FinalizedReplica(receivedBlock, null, null));
    // blk_43 is under construction.
    long receivingBlockId = 43;
    BlockInfo receivingBlock = addUcBlockToBM(receivingBlockId);
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingBlock), ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
    builder.add(new ReplicaBeingWritten(receivingBlock, null, null, null));
    // blk_44 has 2 records in IBR. It's finalized. So full BR has 1 record.
    long receivingReceivedBlockId = 44;
    BlockInfo receivingReceivedBlock = addBlockToBM(receivingReceivedBlockId);
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock), ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null));
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivingReceivedBlock), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
    builder.add(new FinalizedReplica(receivingReceivedBlock, null, null));
    // blk_45 is not in full BR, because it's deleted.
    long receivedDeletedBlockId = 45;
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedDeletedBlockId), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null));
    rdbiList.add(new ReceivedDeletedBlockInfo(new Block(receivedDeletedBlockId), ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null));
    // blk_46 exists in DN for a long time, so it's in full BR, but not in IBR.
    long existedBlockId = 46;
    BlockInfo existedBlock = addBlockToBM(existedBlockId);
    builder.add(new FinalizedReplica(existedBlock, null, null));
    // process IBR and full BR
    StorageReceivedDeletedBlocks srdb = new StorageReceivedDeletedBlocks(new DatanodeStorage(ds.getStorageID()), rdbiList.toArray(new ReceivedDeletedBlockInfo[rdbiList.size()]));
    bm.processIncrementalBlockReport(node, srdb);
    // Make sure it's the first full report
    assertEquals(0, ds.getBlockReportCount());
    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), builder.build(), new BlockReportContext(1, 0, System.nanoTime(), 0, true));
    assertEquals(1, ds.getBlockReportCount());
    // verify the storage info is correct
    assertTrue(bm.getStoredBlock(new Block(receivedBlockId)).findStorageInfo(ds) >= 0);
    assertTrue(bm.getStoredBlock(new Block(receivingBlockId)).getUnderConstructionFeature().getNumExpectedLocations() > 0);
    assertTrue(bm.getStoredBlock(new Block(receivingReceivedBlockId)).findStorageInfo(ds) >= 0);
    assertNull(bm.getStoredBlock(new Block(receivedDeletedBlockId)));
    assertTrue(bm.getStoredBlock(new Block(existedBlockId)).findStorageInfo(ds) >= 0);
}
Also used : ReplicaBeingWritten(org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten) ArrayList(java.util.ArrayList) FinalizedReplica(org.apache.hadoop.hdfs.server.datanode.FinalizedReplica) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) BlockReportContext(org.apache.hadoop.hdfs.server.protocol.BlockReportContext) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) Block(org.apache.hadoop.hdfs.protocol.Block) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks) Test(org.junit.Test)
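The helpers addBlockToBM and addUcBlockToBM are not shown in this excerpt. A minimal sketch of the finalized-block case, assuming BlockManager.addBlockCollection and a mocked BlockCollection (the real helper also assigns an INode id, and the under-construction variant would additionally convert the block to the UNDER_CONSTRUCTION state):

// Hypothetical sketch: register a finalized block with the BlockManager so
// that bm.getStoredBlock(...) can find it later. A Mockito mock stands in
// for the owning file's BlockCollection.
private BlockInfo addBlockToBM(long blkId) {
    BlockInfo blockInfo = new BlockInfoContiguous(new Block(blkId), (short) 3);
    BlockCollection bc = Mockito.mock(BlockCollection.class);
    bm.addBlockCollection(blockInfo, bc);
    return blockInfo;
}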

Example 12 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestBlockManager method testSafeModeIBR.

@Test
public void testSafeModeIBR() throws Exception {
    DatanodeDescriptor node = spy(nodes.get(0));
    DatanodeStorageInfo ds = node.getStorageInfos()[0];
    node.setAlive(true);
    DatanodeRegistration nodeReg = new DatanodeRegistration(node, null, null, "");
    // pretend to be in safemode
    doReturn(true).when(fsn).isInStartupSafeMode();
    // register new node
    bm.getDatanodeManager().registerDatanode(nodeReg);
    // swap in the spy
    bm.getDatanodeManager().addDatanode(node);
    assertEquals(node, bm.getDatanodeManager().getDatanode(node));
    assertEquals(0, ds.getBlockReportCount());
    // send block report, should be processed
    reset(node);
    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), BlockListAsLongs.EMPTY, null);
    assertEquals(1, ds.getBlockReportCount());
    // send block report again, should NOT be processed
    reset(node);
    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), BlockListAsLongs.EMPTY, null);
    assertEquals(1, ds.getBlockReportCount());
    // re-register as if node restarted, should update existing node
    bm.getDatanodeManager().removeDatanode(node);
    reset(node);
    bm.getDatanodeManager().registerDatanode(nodeReg);
    verify(node).updateRegInfo(nodeReg);
    // send block report, should be processed after restart
    reset(node);
    bm.processReport(node, new DatanodeStorage(ds.getStorageID()), BlockListAsLongs.EMPTY, null);
    // Re-fetch the storage info: the re-registration with an empty storage
    // list pruned node.storageMap.
    ds = node.getStorageInfos()[0];
    assertEquals(1, ds.getBlockReportCount());
}
Also used : DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Test(org.junit.Test)

Example 13 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestDatanodeManager method HelperFunction.

/**
   * Helper function that tests the DatanodeManager's sortLocatedBlocks
   * function. We invoke this function both with and without a topology
   * script.
   *
   * @param scriptFileName - script name or null
   *
   * @throws URISyntaxException
   * @throws IOException
   */
public void HelperFunction(String scriptFileName) throws URISyntaxException, IOException {
    // create the DatanodeManager which will be tested
    Configuration conf = new Configuration();
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
    if (scriptFileName != null && !scriptFileName.isEmpty()) {
        URL shellScript = getClass().getResource(scriptFileName);
        Path resourcePath = Paths.get(shellScript.toURI());
        FileUtil.setExecutable(resourcePath.toFile(), true);
        conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, resourcePath.toString());
    }
    DatanodeManager dm = mockDatanodeManager(fsn, conf);
    // register 5 datanodes, each with different storage ID and type
    DatanodeInfo[] locs = new DatanodeInfo[5];
    String[] storageIDs = new String[5];
    StorageType[] storageTypes = new StorageType[] { StorageType.ARCHIVE, StorageType.DEFAULT, StorageType.DISK, StorageType.RAM_DISK, StorageType.SSD };
    for (int i = 0; i < 5; i++) {
        // register new datanode
        String uuid = "UUID-" + i;
        String ip = "IP-" + i;
        DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
        Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
        Mockito.when(dr.getIpAddr()).thenReturn(ip);
        Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
        Mockito.when(dr.getXferPort()).thenReturn(9000);
        Mockito.when(dr.getSoftwareVersion()).thenReturn("version1");
        dm.registerDatanode(dr);
        // get location and storage information
        locs[i] = dm.getDatanode(uuid);
        storageIDs[i] = "storageID-" + i;
    }
    // set the first 2 locations as decommissioned
    locs[0].setDecommissioned();
    locs[1].setDecommissioned();
    // create LocatedBlock with above locations
    ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
    LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
    List<LocatedBlock> blocks = new ArrayList<>();
    blocks.add(block);
    final String targetIp = locs[4].getIpAddr();
    // sort block locations
    dm.sortLocatedBlocks(targetIp, blocks);
    // check that storage IDs/types are aligned with datanode locs
    DatanodeInfo[] sortedLocs = block.getLocations();
    storageIDs = block.getStorageIDs();
    storageTypes = block.getStorageTypes();
    assertThat(sortedLocs.length, is(5));
    assertThat(storageIDs.length, is(5));
    assertThat(storageTypes.length, is(5));
    for (int i = 0; i < sortedLocs.length; i++) {
        assertThat(((DatanodeInfoWithStorage) sortedLocs[i]).getStorageID(), is(storageIDs[i]));
        assertThat(((DatanodeInfoWithStorage) sortedLocs[i]).getStorageType(), is(storageTypes[i]));
    }
    // Ensure the local node is first.
    assertThat(sortedLocs[0].getIpAddr(), is(targetIp));
    // Ensure the two decommissioned DNs were moved to the end.
    assertThat(sortedLocs[sortedLocs.length - 1].getAdminState(), is(DatanodeInfo.AdminStates.DECOMMISSIONED));
    assertThat(sortedLocs[sortedLocs.length - 2].getAdminState(), is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}
Also used : Path(java.nio.file.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) URL(java.net.URL) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem)
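The javadoc above notes that the helper is driven both with and without a topology script. A minimal sketch of such callers, assuming they live in the same test class; the test names and the script resource name are hypothetical, not taken from this page:

// Hypothetical callers of HelperFunction.
@Test
public void testSortLocatedBlocksWithoutTopology() throws URISyntaxException, IOException {
    // null means no topology script is configured
    HelperFunction(null);
}

@Test
public void testSortLocatedBlocksWithTopology() throws URISyntaxException, IOException {
    // classpath resource of an executable topology script (assumed name)
    HelperFunction("/topology-script.sh");
}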

Example 14 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestDatanodeManager method testNumVersionsCorrectAfterReregister.

/**
   * This test checks that if a node is re-registered with a new software
   * version after the heartbeat expiry interval but before the HeartbeatManager
   * has a chance to detect this and remove it, the count for the node's old
   * version is still correctly decremented.
   */
@Test
public void testNumVersionsCorrectAfterReregister() throws IOException, InterruptedException {
    //Create the DatanodeManager which will be tested
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
    Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 0);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 10);
    DatanodeManager dm = mockDatanodeManager(fsn, conf);
    String storageID = "someStorageID1";
    String ip = "someIP" + storageID;
    // Register then reregister the same node but with a different version
    for (int i = 0; i <= 1; i++) {
        dm.registerDatanode(new DatanodeRegistration(new DatanodeID(ip, "", storageID, 9000, 0, 0, 0), null, null, "version" + i));
        if (i == 0) {
            Thread.sleep(25);
        }
    }
    //Verify DatanodeManager has the correct count
    Map<String, Integer> mapToCheck = dm.getDatanodesSoftwareVersions();
    assertNull("should be no more version0 nodes", mapToCheck.get("version0"));
    assertEquals("should be one version1 node", mapToCheck.get("version1").intValue(), 1);
}
Also used : DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) Configuration(org.apache.hadoop.conf.Configuration) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
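The mockDatanodeManager helper shared by these DatanodeManager tests is not part of this excerpt. A minimal sketch, assuming it builds a real DatanodeManager over a mocked BlockManager via the package-private DatanodeManager(BlockManager, Namesystem, Configuration) constructor:

// Hypothetical sketch: construct a real DatanodeManager around mocked
// collaborators so registration logic can be exercised in isolation.
private static DatanodeManager mockDatanodeManager(FSNamesystem fsn, Configuration conf) throws IOException {
    BlockManager bm = Mockito.mock(BlockManager.class);
    return new DatanodeManager(bm, fsn, conf);
}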

Example 15 with DatanodeRegistration

use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

the class TestDatanodeManager method testRemoveIncludedNode.

/**
   * Test whether removing a host from the includes list without adding it to
   * the excludes list will exclude it from data node reports.
   */
@Test
public void testRemoveIncludedNode() throws IOException {
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    // Set the write lock so that the DatanodeManager can start
    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
    DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
    HostFileManager hm = new HostFileManager();
    HostSet noNodes = new HostSet();
    HostSet oneNode = new HostSet();
    HostSet twoNodes = new HostSet();
    DatanodeRegistration dr1 = new DatanodeRegistration(new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-123", 12345, 12345, 12345, 12345), new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE), new ExportedBlockKeys(), "test");
    DatanodeRegistration dr2 = new DatanodeRegistration(new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-234", 23456, 23456, 23456, 23456), new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE), new ExportedBlockKeys(), "test");
    twoNodes.add(entry("127.0.0.1:12345"));
    twoNodes.add(entry("127.0.0.1:23456"));
    oneNode.add(entry("127.0.0.1:23456"));
    hm.refresh(twoNodes, noNodes);
    Whitebox.setInternalState(dm, "hostConfigManager", hm);
    // Register two data nodes to simulate them coming up.
    // We need to add two nodes, because if we have only one node, removing it
    // will cause the includes list to be empty, which means all hosts will be
    // allowed.
    dm.registerDatanode(dr1);
    dm.registerDatanode(dr2);
    // Make sure that both nodes are reported
    List<DatanodeDescriptor> both = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
    // Sort the list so that we know which one is which
    Collections.sort(both);
    Assert.assertEquals("Incorrect number of hosts reported", 2, both.size());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:12345", both.get(0).getInfoAddr());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:23456", both.get(1).getInfoAddr());
    // Remove one node from includes, but do not add it to excludes.
    hm.refresh(oneNode, noNodes);
    // Make sure that only one node is still reported
    List<DatanodeDescriptor> onlyOne = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
    Assert.assertEquals("Incorrect number of hosts reported", 1, onlyOne.size());
    Assert.assertEquals("Unexpected host reported", "127.0.0.1:23456", onlyOne.get(0).getInfoAddr());
    // Remove all nodes from includes
    hm.refresh(noNodes, noNodes);
    // Check that both nodes are reported again
    List<DatanodeDescriptor> bothAgain = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
    // Sort the list so that we know which one is which
    Collections.sort(bothAgain);
    Assert.assertEquals("Incorrect number of hosts reported", 2, bothAgain.size());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:12345", bothAgain.get(0).getInfoAddr());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:23456", bothAgain.get(1).getInfoAddr());
}
Also used : DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) Configuration(org.apache.hadoop.conf.Configuration) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) ExportedBlockKeys(org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
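The entry(...) helper used to populate the HostSet instances is not shown here. A minimal sketch, assuming HostSet stores InetSocketAddress entries parsed from host:port strings:

// Hypothetical stand-in for the entry(...) helper: parse a "host:port"
// string into the InetSocketAddress form that HostSet.add expects.
private static InetSocketAddress entry(String hostPort) {
    int sep = hostPort.lastIndexOf(':');
    String host = hostPort.substring(0, sep);
    int port = Integer.parseInt(hostPort.substring(sep + 1));
    return new InetSocketAddress(host, port);
}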

Aggregations

DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 48 uses
Test (org.junit.Test): 36 uses
Configuration (org.apache.hadoop.conf.Configuration): 19 uses
Path (org.apache.hadoop.fs.Path): 16 uses
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 12 uses
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 12 uses
ArrayList (java.util.ArrayList): 10 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10 uses
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 10 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 9 uses
IOException (java.io.IOException): 8 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 8 uses
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 8 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 uses
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7 uses
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 7 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 6 uses
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 5 uses