Example 16 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

the class TestDFSNetworkTopology method testAddAndRemoveTopology.

/**
   * Test the correctness of storage type info when nodes are added and removed.
   * @throws Exception
   */
@Test
public void testAddAndRemoveTopology() throws Exception {
    String[] newRack = { "/l1/d1/r1", "/l1/d1/r3", "/l1/d3/r3", "/l1/d3/r3" };
    String[] newHost = { "nhost1", "nhost2", "nhost3", "nhost4" };
    String[] newips = { "30.30.30.30", "31.31.31.31", "32.32.32.32", "33.33.33.33" };
    StorageType[] newTypes = { StorageType.DISK, StorageType.SSD, StorageType.SSD, StorageType.SSD };
    DatanodeDescriptor[] newDD = new DatanodeDescriptor[4];
    for (int i = 0; i < 4; i++) {
        DatanodeStorageInfo dsi = DFSTestUtil.createDatanodeStorageInfo("s" + newHost[i], newips[i], newRack[i], newHost[i], newTypes[i], null);
        newDD[i] = dsi.getDatanodeDescriptor();
        CLUSTER.add(newDD[i]);
    }
    DFSTopologyNodeImpl d1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d1");
    HashMap<String, EnumMap<StorageType, Integer>> d1info = d1.getChildrenStorageInfo();
    assertEquals(3, d1info.keySet().size());
    assertTrue(d1info.get("r1").size() == 2 && d1info.get("r2").size() == 2 && d1info.get("r3").size() == 1);
    assertEquals(2, (int) d1info.get("r1").get(StorageType.DISK));
    assertEquals(1, (int) d1info.get("r1").get(StorageType.ARCHIVE));
    assertEquals(2, (int) d1info.get("r2").get(StorageType.DISK));
    assertEquals(1, (int) d1info.get("r2").get(StorageType.ARCHIVE));
    assertEquals(1, (int) d1info.get("r3").get(StorageType.SSD));
    DFSTopologyNodeImpl d3 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d3");
    HashMap<String, EnumMap<StorageType, Integer>> d3info = d3.getChildrenStorageInfo();
    assertEquals(1, d3info.keySet().size());
    assertTrue(d3info.get("r3").size() == 1);
    assertEquals(2, (int) d3info.get("r3").get(StorageType.SSD));
    DFSTopologyNodeImpl l1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1");
    HashMap<String, EnumMap<StorageType, Integer>> l1info = l1.getChildrenStorageInfo();
    assertEquals(3, l1info.keySet().size());
    assertTrue(l1info.get("d1").size() == 3 && l1info.get("d2").size() == 3 && l1info.get("d3").size() == 1);
    assertEquals(4, (int) l1info.get("d1").get(StorageType.DISK));
    assertEquals(2, (int) l1info.get("d1").get(StorageType.ARCHIVE));
    assertEquals(1, (int) l1info.get("d1").get(StorageType.SSD));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.SSD));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.RAM_DISK));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.DISK));
    assertEquals(2, (int) l1info.get("d3").get(StorageType.SSD));
    for (int i = 0; i < 4; i++) {
        CLUSTER.remove(newDD[i]);
    }
    // /l1/d1/r3 should be gone, and /l1/d1/r1 restored to its pre-add counts
    DFSTopologyNodeImpl nd1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d1");
    HashMap<String, EnumMap<StorageType, Integer>> nd1info = nd1.getChildrenStorageInfo();
    assertEquals(2, nd1info.keySet().size());
    assertTrue(nd1info.get("r1").size() == 2 && nd1info.get("r2").size() == 2);
    assertEquals(1, (int) nd1info.get("r1").get(StorageType.DISK));
    assertEquals(1, (int) nd1info.get("r1").get(StorageType.ARCHIVE));
    assertEquals(2, (int) nd1info.get("r2").get(StorageType.DISK));
    assertEquals(1, (int) nd1info.get("r2").get(StorageType.ARCHIVE));
    // /l1/d3 should be gone, and /l1/d1 restored to its pre-add counts
    DFSTopologyNodeImpl nl1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1");
    HashMap<String, EnumMap<StorageType, Integer>> nl1info = nl1.getChildrenStorageInfo();
    assertEquals(2, nl1info.keySet().size());
    assertTrue(l1info.get("d1").size() == 2 && l1info.get("d2").size() == 3);
    assertEquals(2, (int) nl1info.get("d1").get(StorageType.ARCHIVE));
    assertEquals(3, (int) nl1info.get("d1").get(StorageType.DISK));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.DISK));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.RAM_DISK));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.SSD));
    assertNull(CLUSTER.getNode("/l1/d3"));
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) StorageType(org.apache.hadoop.fs.StorageType) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) EnumMap(java.util.EnumMap) Test(org.junit.Test)
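A minimal sketch (not part of the Hadoop test above) of how the nested map returned by getChildrenStorageInfo() can be walked. It assumes the CLUSTER field and the /l1/d1 path from Example 16, plus an extra java.util.Map import; the variable names are hypothetical.

// Sketch: print each rack's per-StorageType datanode counts under /l1/d1,
// e.g. "r1 DISK = 2". Assumes the CLUSTER topology built in the test above.
DFSTopologyNodeImpl dc = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d1");
HashMap<String, EnumMap<StorageType, Integer>> byRack = dc.getChildrenStorageInfo();
for (Map.Entry<String, EnumMap<StorageType, Integer>> rack : byRack.entrySet()) {
    for (Map.Entry<StorageType, Integer> count : rack.getValue().entrySet()) {
        System.out.println(rack.getKey() + " " + count.getKey() + " = " + count.getValue());
    }
}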

Example 17 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

the class TestIncrementalBrVariations method testNnLearnsNewStorages.

/**
   * Verify that the NameNode can learn about new storages from incremental
   * block reports.
   * This tests the fix for the error condition seen in HDFS-6904.
   *
   * @throws IOException
   * @throws InterruptedException
   */
@Test(timeout = 60000)
public void testNnLearnsNewStorages() throws IOException, InterruptedException {
    // Generate a report for a fake block on a fake storage.
    final String newStorageUuid = UUID.randomUUID().toString();
    final DatanodeStorage newStorage = new DatanodeStorage(newStorageUuid);
    StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(getDummyBlock(), BlockStatus.RECEIVED_BLOCK, newStorage);
    // Send the report to the NN.
    cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);
    // IBRs are async, make sure the NN processes all of them.
    cluster.getNamesystem().getBlockManager().flushBlockOps();
    // Make sure that the NN has learned of the new storage.
    DatanodeStorageInfo storageInfo = cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn0.getDatanodeId()).getStorageInfo(newStorageUuid);
    assertNotNull(storageInfo);
}
Also used : DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Test(org.junit.Test)
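Note the distinction the snippet relies on: DatanodeStorage is the protocol-side description a datanode reports, while DatanodeStorageInfo is the NameNode-side record built from it. As a follow-up sketch (these assertions are not in the original test, though both accessors exist on DatanodeStorageInfo), one could additionally check that the learned record carries the reported UUID:

// Follow-up sketch: the NameNode-side record should carry the UUID the
// datanode reported, and should be attached to a DatanodeDescriptor.
assertEquals(newStorageUuid, storageInfo.getStorageID());
assertNotNull(storageInfo.getDatanodeDescriptor());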

Example 18 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

the class TestReconstructStripedBlocks method doTestMissingStripedBlock.

/**
   * Start GROUP_SIZE + 1 datanodes.
   * Inject striped blocks into the first GROUP_SIZE datanodes.
   * Then make numOfBusy datanodes busy and remove numOfMissed datanodes,
   * and trigger the BlockManager to compute reconstruction work (so all
   * reconstruction work will be scheduled on the last datanode).
   * Finally, verify the reconstruction work of the last datanode.
   */
private void doTestMissingStripedBlock(int numOfMissed, int numOfBusy) throws Exception {
    Configuration conf = new HdfsConfiguration();
    initConf(conf);
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 1).build();
    try {
        cluster.waitActive();
        final int numBlocks = 4;
        DFSTestUtil.createStripedFile(cluster, filePath, dirPath, numBlocks, 1, true);
        // all blocks will be located on the first GROUP_SIZE DNs; the last DN
        // is left empty by the util function createStripedFile
        // make sure the file is complete in NN
        final INodeFile fileNode = cluster.getNamesystem().getFSDirectory().getINode4Write(filePath.toString()).asFile();
        assertFalse(fileNode.isUnderConstruction());
        assertTrue(fileNode.isStriped());
        BlockInfo[] blocks = fileNode.getBlocks();
        assertEquals(numBlocks, blocks.length);
        for (BlockInfo blk : blocks) {
            assertTrue(blk.isStriped());
            assertTrue(blk.isComplete());
            assertEquals(cellSize * dataBlocks, blk.getNumBytes());
            final BlockInfoStriped sb = (BlockInfoStriped) blk;
            assertEquals(groupSize, sb.numNodes());
        }
        final BlockManager bm = cluster.getNamesystem().getBlockManager();
        BlockInfo firstBlock = fileNode.getBlocks()[0];
        DatanodeStorageInfo[] storageInfos = bm.getStorages(firstBlock);
        // make numOfBusy nodes busy
        int i = 0;
        for (; i < numOfBusy; i++) {
            DatanodeDescriptor busyNode = storageInfos[i].getDatanodeDescriptor();
            for (int j = 0; j < maxReplicationStreams + 1; j++) {
                BlockManagerTestUtil.addBlockToBeReplicated(busyNode, new Block(j), new DatanodeStorageInfo[] { storageInfos[0] });
            }
        }
        // make numOfMissed internal blocks missed
        for (; i < numOfBusy + numOfMissed; i++) {
            DatanodeDescriptor missedNode = storageInfos[i].getDatanodeDescriptor();
            assertEquals(numBlocks, missedNode.numBlocks());
            bm.getDatanodeManager().removeDatanode(missedNode);
        }
        BlockManagerTestUtil.getComputedDatanodeWork(bm);
        // all the reconstruction work will be scheduled on the last DN
        DataNode lastDn = cluster.getDataNodes().get(groupSize);
        DatanodeDescriptor last = bm.getDatanodeManager().getDatanode(lastDn.getDatanodeId());
        assertEquals("Counting the number of outstanding EC tasks", numBlocks, last.getNumberOfBlocksToBeErasureCoded());
        List<BlockECReconstructionInfo> reconstruction = last.getErasureCodeCommand(numBlocks);
        for (BlockECReconstructionInfo info : reconstruction) {
            assertEquals(1, info.getTargetDnInfos().length);
            assertEquals(last, info.getTargetDnInfos()[0]);
            assertEquals(info.getSourceDnInfos().length, info.getLiveBlockIndices().length);
            if (groupSize - numOfMissed == dataBlocks) {
                // It's a QUEUE_HIGHEST_PRIORITY block, so the busy DNs will be chosen
                // to make sure we have NUM_DATA_BLOCKS DNs to do reconstruction
                // work.
                assertEquals(dataBlocks, info.getSourceDnInfos().length);
            } else {
                // The block is not at the highest priority, so the busy DNs are
                // not used as sources
                assertEquals(groupSize - numOfMissed - numOfBusy, info.getSourceDnInfos().length);
            }
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) BlockECReconstructionInfo(org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock)

Example 19 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

the class TestAddStripedBlocks method checkStripedBlockUC.

private void checkStripedBlockUC(BlockInfoStriped block, boolean checkReplica) {
    assertEquals(0, block.numNodes());
    Assert.assertFalse(block.isComplete());
    Assert.assertEquals(dataBlocks, block.getDataBlockNum());
    Assert.assertEquals(parityBlocks, block.getParityBlockNum());
    Assert.assertEquals(0, block.getBlockId() & HdfsServerConstants.BLOCK_GROUP_INDEX_MASK);
    Assert.assertEquals(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, block.getBlockUCState());
    if (checkReplica) {
        Assert.assertEquals(groupSize, block.getUnderConstructionFeature().getNumExpectedLocations());
        DatanodeStorageInfo[] storages = block.getUnderConstructionFeature().getExpectedStorageLocations();
        for (DataNode dn : cluster.getDataNodes()) {
            Assert.assertTrue(includeDataNode(dn.getDatanodeId(), storages));
        }
    }
}
Also used : DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode)
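The snippet calls a private helper, includeDataNode, that the listing does not show. A plausible reconstruction (the body below is an assumption, not the Hadoop source; it also assumes org.apache.hadoop.hdfs.protocol.DatanodeID is imported) checks whether any expected storage location belongs to the given datanode:

// Hypothetical reconstruction of the unshown helper: true iff some expected
// storage location belongs to the datanode with the given ID.
private boolean includeDataNode(DatanodeID dn, DatanodeStorageInfo[] storages) {
    for (DatanodeStorageInfo storage : storages) {
        if (storage.getDatanodeDescriptor().getDatanodeUuid()
                .equals(dn.getDatanodeUuid())) {
            return true;
        }
    }
    return false;
}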

Example 20 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

the class TestAddBlockRetry method testRetryAddBlockWhileInChooseTarget.

/**
   * Retry addBlock() while another thread is in chooseTarget().
   * See HDFS-4452.
   */
@Test
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
    final String src = "/testRetryAddBlockWhileInChooseTarget";
    final FSNamesystem ns = cluster.getNamesystem();
    final NamenodeProtocols nn = cluster.getNameNodeRpc();
    // create file
    nn.create(src, FsPermission.getFileDefault(), "clientName", new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true, (short) 3, 1024, null);
    // start first addBlock()
    LOG.info("Starting first addBlock for " + src);
    LocatedBlock[] onRetryBlock = new LocatedBlock[1];
    ns.readLock();
    FSDirWriteFileOp.ValidateAddBlockResult r;
    FSPermissionChecker pc = Mockito.mock(FSPermissionChecker.class);
    try {
        r = FSDirWriteFileOp.validateAddBlock(ns, pc, src, HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, onRetryBlock);
    } finally {
        ns.readUnlock();
    }
    DatanodeStorageInfo[] targets = FSDirWriteFileOp.chooseTargetForNewBlock(ns.getBlockManager(), src, null, null, null, r);
    assertNotNull("Targets must be generated", targets);
    // run second addBlock()
    LOG.info("Starting second addBlock for " + src);
    nn.addBlock(src, "clientName", null, null, HdfsConstants.GRANDFATHER_INODE_ID, null, null);
    assertTrue("Penultimate block must be complete", checkFileProgress(src, false));
    LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
    assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
    LocatedBlock lb2 = lbs.get(0);
    assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
    // continue first addBlock()
    ns.writeLock();
    LocatedBlock newBlock;
    try {
        newBlock = FSDirWriteFileOp.storeAllocatedBlock(ns, src, HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
    } finally {
        ns.writeUnlock();
    }
    assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
    // check locations
    lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
    assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
    LocatedBlock lb1 = lbs.get(0);
    assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
    assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) Test(org.junit.Test)
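Example 20 likewise relies on an unshown helper, checkFileProgress. A rough sketch of its apparent intent follows; the signature and body are assumptions (only getBlockLocations, getLocatedBlocks, getLocations, and REPLICATION come from the snippet above, and java.util.List is assumed imported), not the Hadoop source, which checks block completeness inside the namesystem.

// Hypothetical sketch: treat the file as "progressed" when its blocks
// (all of them if checkAll, otherwise all but the last) are fully located.
private boolean checkFileProgress(String src, boolean checkAll) throws IOException {
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
    List<LocatedBlock> blocks = lbs.getLocatedBlocks();
    int upTo = checkAll ? blocks.size() : blocks.size() - 1;
    for (int i = 0; i < upTo; i++) {
        if (blocks.get(i).getLocations().length < REPLICATION) {
            return false; // not yet fully replicated
        }
    }
    return true;
}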

Aggregations

DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 25 uses
Test (org.junit.Test): 10 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8 uses
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 8 uses
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 7 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 6 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6 uses
ArrayList (java.util.ArrayList): 5 uses
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 5 uses
Configuration (org.apache.hadoop.conf.Configuration): 4 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 4 uses
Node (org.apache.hadoop.net.Node): 4 uses
IOException (java.io.IOException): 3 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 3 uses
BlockECReconstructionInfo (org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo): 3 uses
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 3 uses
FileNotFoundException (java.io.FileNotFoundException): 2 uses
HashSet (java.util.HashSet): 2 uses
StorageType (org.apache.hadoop.fs.StorageType): 2 uses