Example 1 with DatanodeDescriptor

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

From class TestDecommission, method testCountOnDecommissionedNodeList.

/**
   * Fetches live DataNodes, passing removeDecommissionedNode as:
   * false - returns the live node list including nodes in Decommissioned state
   * true - returns the live node list excluding nodes in Decommissioned state
   * @throws IOException
   */
@Test
public void testCountOnDecommissionedNodeList() throws IOException {
    getConf().setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    getConf().setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
    try {
        startCluster(1, 1);
        ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList = new ArrayList<ArrayList<DatanodeInfo>>(1);
        namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(1));
        // Move datanode1 to Decommissioned state
        ArrayList<DatanodeInfo> decommissionedNode = namenodeDecomList.get(0);
        takeNodeOutofService(0, null, 0, decommissionedNode, AdminStates.DECOMMISSIONED);
        FSNamesystem ns = getCluster().getNamesystem(0);
        DatanodeManager datanodeManager = ns.getBlockManager().getDatanodeManager();
        List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
        // fetchDatanodes with removeDecommissionedNode=false should include
        // the live decommissioned node
        datanodeManager.fetchDatanodes(live, null, false);
        assertEquals(1, live.size());
        // fetchDatanodes with removeDecommissionedNode=true should exclude
        // the live decommissioned node
        datanodeManager.fetchDatanodes(live, null, true);
        assertEquals(0, live.size());
    } finally {
        shutdownCluster();
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) ArrayList(java.util.ArrayList) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
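
For reference, here is a minimal sketch of the fetchDatanodes call in isolation. The helper name summarizeDatanodes is hypothetical; it assumes you already hold a DatanodeManager reference, obtained as in the test via ns.getBlockManager().getDatanodeManager():

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;

class DatanodeSummary {

    // Hypothetical helper: counts live and dead datanodes as seen by the
    // namenode. fetchDatanodes fills the supplied lists in place; either
    // list may be null when the caller only needs the other. Passing true
    // as the last argument drops decommissioned nodes from the live list,
    // which is exactly what the test above asserts.
    static String summarizeDatanodes(DatanodeManager dm, boolean excludeDecommissioned) {
        List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
        List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
        dm.fetchDatanodes(live, dead, excludeDecommissioned);
        return "live=" + live.size() + ", dead=" + dead.size();
    }
}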

Example 2 with DatanodeDescriptor

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

From class TestBlockReplacement, method testDeletedBlockWhenAddBlockIsInEdit.

/**
   * The standby namenode must not queue a delete-block request while the
   * corresponding add-block request sits in edit log entries that are yet
   * to be read.
   * @throws Exception
   */
@Test
public void testDeletedBlockWhenAddBlockIsInEdit() throws Exception {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
    DFSClient client = null;
    try {
        cluster.waitActive();
        assertEquals("Number of namenodes is not 2", 2, cluster.getNumNameNodes());
        // Transition namenode 0 to active.
        cluster.transitionToActive(0);
        assertTrue("Namenode 0 should be in active state", cluster.getNameNode(0).isActiveState());
        assertTrue("Namenode 1 should be in standby state", cluster.getNameNode(1).isStandbyState());
        // Trigger heartbeat to mark DatanodeStorageInfo#heartbeatedSinceFailover
        // to true.
        DataNodeTestUtils.triggerHeartbeat(cluster.getDataNodes().get(0));
        FileSystem fs = cluster.getFileSystem(0);
        // Trigger blockReport to mark DatanodeStorageInfo#blockContentsStale
        // to false.
        cluster.getDataNodes().get(0).triggerBlockReport(new BlockReportOptions.Factory().setIncremental(false).build());
        Path fileName = new Path("/tmp.txt");
        // create a file with one block
        DFSTestUtil.createFile(fs, fileName, 10L, (short) 1, 1234L);
        DFSTestUtil.waitReplication(fs, fileName, (short) 1);
        client = new DFSClient(cluster.getFileSystem(0).getUri(), conf);
        List<LocatedBlock> locatedBlocks = client.getNamenode().getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
        assertEquals(1, locatedBlocks.size());
        assertEquals(1, locatedBlocks.get(0).getLocations().length);
        // add a second datanode to the cluster
        cluster.startDataNodes(conf, 1, true, null, null, null, null);
        assertEquals("Number of datanodes should be 2", 2, cluster.getDataNodes().size());
        DataNode dn0 = cluster.getDataNodes().get(0);
        DataNode dn1 = cluster.getDataNodes().get(1);
        String activeNNBPId = cluster.getNamesystem(0).getBlockPoolId();
        DatanodeDescriptor sourceDnDesc = NameNodeAdapter.getDatanode(cluster.getNamesystem(0), dn0.getDNRegistrationForBP(activeNNBPId));
        DatanodeDescriptor destDnDesc = NameNodeAdapter.getDatanode(cluster.getNamesystem(0), dn1.getDNRegistrationForBP(activeNNBPId));
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
        LOG.info("replaceBlock:  " + replaceBlock(block, (DatanodeInfo) sourceDnDesc, (DatanodeInfo) sourceDnDesc, (DatanodeInfo) destDnDesc));
        // Wait for the FsDatasetAsyncDiskService to delete the block
        for (int tries = 0; tries < 20; tries++) {
            Thread.sleep(1000);
            // Trigger the deletion report so the deleted block is reported
            // to the namenode
            DataNodeTestUtils.triggerDeletionReport(cluster.getDataNodes().get(0));
            locatedBlocks = client.getNamenode().getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
            // If the block was deleted and is now on only 1 datanode, break out
            if (locatedBlocks.get(0).getLocations().length == 1) {
                break;
            }
        }
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        assertTrue("Namenode 1 should be in active state", cluster.getNameNode(1).isActiveState());
        assertTrue("Namenode 0 should be in standby state", cluster.getNameNode(0).isStandbyState());
        client.close();
        // Open a new client against the new active namenode
        client = new DFSClient(cluster.getFileSystem(1).getUri(), conf);
        List<LocatedBlock> locatedBlocks1 = client.getNamenode().getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
        assertEquals(1, locatedBlocks1.size());
        assertEquals("The block should be only on 1 datanode ", 1, locatedBlocks1.get(0).getLocations().length);
    } finally {
        IOUtils.cleanup(null, client);
        cluster.shutdown();
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) BlockReportOptions(org.apache.hadoop.hdfs.client.BlockReportOptions) Test(org.junit.Test)
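
The polling loop in the middle of this test is a reusable pattern for waiting until an asynchronous deletion is reflected in block locations. A stripped-down sketch under the same assumptions (an already-constructed DFSClient; the helper name waitForReplicaCount is hypothetical):

import java.util.List;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

class BlockLocationPoller {

    // Hypothetical helper: polls the namenode until the first block of the
    // file is reported on exactly the expected number of datanodes, or the
    // retry budget runs out. Returns true on success.
    static boolean waitForReplicaCount(DFSClient client, String path,
            int expected, int maxTries) throws Exception {
        for (int tries = 0; tries < maxTries; tries++) {
            List<LocatedBlock> blocks = client.getNamenode()
                .getBlockLocations(path, 0, 1).getLocatedBlocks();
            if (!blocks.isEmpty()
                && blocks.get(0).getLocations().length == expected) {
                return true;
            }
            // Deletion reports are asynchronous, so back off and retry.
            Thread.sleep(1000);
        }
        return false;
    }
}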

Example 3 with DatanodeDescriptor

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

From class BlockReportTestBase, method testInterleavedBlockReports.

// See HDFS-10301
@Test(timeout = 300000)
public void testInterleavedBlockReports() throws IOException, ExecutionException, InterruptedException {
    int numConcurrentBlockReports = 3;
    DataNode dn = cluster.getDataNodes().get(DN_N0);
    final String poolId = cluster.getNamesystem().getBlockPoolId();
    LOG.info("Block pool id: " + poolId);
    final DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    final StorageBlockReport[] reports = getBlockReports(dn, poolId, true, true);
    // Get the list of storage ids associated with the datanode
    // before the test
    BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
    final DatanodeDescriptor dnDescriptor = bm.getDatanodeManager().getDatanode(dn.getDatanodeId());
    DatanodeStorageInfo[] storageInfos = dnDescriptor.getStorageInfos();
    // Send the same block report concurrently from
    // numConcurrentBlockReports threads
    ExecutorService executorService = Executors.newFixedThreadPool(numConcurrentBlockReports);
    List<Future<Void>> futureList = new ArrayList<>(numConcurrentBlockReports);
    for (int i = 0; i < numConcurrentBlockReports; i++) {
        futureList.add(executorService.submit(new Callable<Void>() {

            @Override
            public Void call() throws IOException {
                sendBlockReports(dnR, poolId, reports);
                return null;
            }
        }));
    }
    for (Future<Void> future : futureList) {
        future.get();
    }
    executorService.shutdown();
    // Verify that the storages match before and after the test
    Assert.assertArrayEquals(storageInfos, dnDescriptor.getStorageInfos());
}
Also used : StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) ArrayList(java.util.ArrayList) Callable(java.util.concurrent.Callable) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) Test(org.junit.Test)
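
The fan-out pattern used here (submit N identical callables, then call get() on every future so any task exception propagates to the caller) is plain java.util.concurrent and is independent of HDFS. A self-contained sketch of the same pattern, with a placeholder task standing in for sendBlockReports:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class ConcurrentFanOut {

    public static void main(String[] args) throws Exception {
        int numTasks = 3;
        ExecutorService pool = Executors.newFixedThreadPool(numTasks);
        try {
            List<Future<Void>> futures = new ArrayList<Future<Void>>(numTasks);
            for (int i = 0; i < numTasks; i++) {
                final int id = i;
                // Placeholder body; the test submits sendBlockReports here.
                futures.add(pool.submit(() -> {
                    System.out.println("task " + id + " running");
                    return null;
                }));
            }
            // get() rethrows any exception thrown inside a task, so a
            // failure surfaces instead of being silently swallowed.
            for (Future<Void> future : futures) {
                future.get();
            }
        } finally {
            pool.shutdown();
        }
    }
}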

Example 4 with DatanodeDescriptor

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

From class TestNameNodeMetrics, method testStaleNodes.

/** Test metrics indicating the number of stale DataNodes */
@Test
public void testStaleNodes() throws Exception {
    // Set two datanodes as stale
    for (int i = 0; i < 2; i++) {
        DataNode dn = cluster.getDataNodes().get(i);
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
        long staleInterval = CONF.getLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
        DatanodeDescriptor dnDes = cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId());
        DFSTestUtil.resetLastUpdatesWithOffset(dnDes, -(staleInterval + 1));
    }
    // Let the HeartbeatManager re-check heartbeats
    BlockManagerTestUtil.checkHeartbeat(cluster.getNameNode().getNamesystem().getBlockManager());
    assertGauge("StaleDataNodes", 2, getMetrics(NS_METRICS));
    // Reset stale datanodes
    for (int i = 0; i < 2; i++) {
        DataNode dn = cluster.getDataNodes().get(i);
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
        DatanodeDescriptor dnDes = cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId());
        DFSTestUtil.resetLastUpdatesWithOffset(dnDes, 0);
    }
    // Let the HeartbeatManager refresh the stale count
    BlockManagerTestUtil.checkHeartbeat(cluster.getNameNode().getNamesystem().getBlockManager());
    assertGauge("StaleDataNodes", 0, getMetrics(NS_METRICS));
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Test(org.junit.Test)
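
Staleness in this test means "no heartbeat within the configured interval", which the test forces by rewinding each node's last-update time past the interval. A minimal sketch of the underlying check, assuming a DatanodeDescriptor obtained as in the test and using Hadoop's monotonic clock:

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.util.Time;

class StalenessCheck {

    // Returns true if the node has not heartbeated within staleIntervalMs.
    // This mirrors the condition the HeartbeatManager evaluates; the test
    // triggers it by rewinding lastUpdate with
    // DFSTestUtil.resetLastUpdatesWithOffset.
    static boolean isStale(DatanodeInfo node, long staleIntervalMs) {
        return Time.monotonicNow() - node.getLastUpdateMonotonic() >= staleIntervalMs;
    }
}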

Example 5 with DatanodeDescriptor

Use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.

From class TestNetworkTopology, method testCreateInvalidTopology.

@Test
public void testCreateInvalidTopology() throws Exception {
    NetworkTopology invalCluster = NetworkTopology.getInstance(new Configuration());
    DatanodeDescriptor[] invalDataNodes = new DatanodeDescriptor[] {
        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1")
    };
    invalCluster.add(invalDataNodes[0]);
    invalCluster.add(invalDataNodes[1]);
    try {
        invalCluster.add(invalDataNodes[2]);
        fail("expected InvalidTopologyException");
    } catch (NetworkTopology.InvalidTopologyException e) {
        assertTrue(e.getMessage().startsWith("Failed to add "));
        assertTrue(e.getMessage().contains("You cannot have a rack and a non-rack node at the same " + "level of the network topology."));
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) Test(org.junit.Test)
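
By contrast, a topology is valid when every leaf sits at the same depth, i.e. every datanode is placed under a rack. A short sketch that builds a well-formed two-rack topology with the same DFSTestUtil helper and queries it:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.net.NetworkTopology;

class ValidTopologySketch {

    public static void main(String[] args) {
        NetworkTopology cluster = NetworkTopology.getInstance(new Configuration());
        // Every node sits under a rack, so all leaves are at the same depth
        // and no InvalidTopologyException is thrown.
        DatanodeDescriptor[] nodes = new DatanodeDescriptor[] {
            DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
            DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
            DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2")
        };
        for (DatanodeDescriptor node : nodes) {
            cluster.add(node);
        }
        System.out.println("racks=" + cluster.getNumOfRacks()
            + " leaves=" + cluster.getNumOfLeaves());
    }
}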

Aggregations

DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 75
Test (org.junit.Test): 37
ArrayList (java.util.ArrayList): 23
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 21
Path (org.apache.hadoop.fs.Path): 19
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 13
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 12
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 12
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 11
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 11
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10
HashMap (java.util.HashMap): 9
Configuration (org.apache.hadoop.conf.Configuration): 9
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 9
Node (org.apache.hadoop.net.Node): 9
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 8
IOException (java.io.IOException): 7
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7
Map (java.util.Map): 6