Example 66 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestDecommissionWithStriped method prepareBlockIndexAndTokenList.

private void prepareBlockIndexAndTokenList(List<LocatedBlock> lbs, List<HashMap<DatanodeInfo, Byte>> locToIndexList, List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
    for (LocatedBlock lb : lbs) {
        // For each located block, map every replica location to its block index
        // and to its block access token.
        HashMap<DatanodeInfo, Byte> locToIndex = new HashMap<DatanodeInfo, Byte>();
        locToIndexList.add(locToIndex);
        HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken = new HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>();
        locToTokenList.add(locToToken);
        DatanodeInfo[] di = lb.getLocations();
        LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lb;
        // The block-index and block-token arrays are parallel to the locations array.
        for (int i = 0; i < di.length; i++) {
            locToIndex.put(di[i], stripedBlk.getBlockIndices()[i]);
            locToToken.put(di[i], stripedBlk.getBlockTokens()[i]);
        }
    }
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) BlockTokenIdentifier(org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier) HashMap(java.util.HashMap) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Token(org.apache.hadoop.security.token.Token)
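
A minimal usage sketch for this helper, assuming a DistributedFileSystem handle named dfs and a hypothetical striped file path; the maps captured this way before a decommission can be compared against a fresh fetch afterwards.

List<LocatedBlock> lbs = dfs.getClient().getLocatedBlocks("/ec/testFile", 0, Long.MAX_VALUE).getLocatedBlocks();
List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList = new ArrayList<>();
prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
// locToIndexList.get(b).get(dn) now yields the block index stored on datanode dn for block b.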

Example 67 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestDatanodeReport method assertReports.

static void assertReports(int numDatanodes, DatanodeReportType type, DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
    final DatanodeInfo[] infos = client.datanodeReport(type);
    assertEquals(numDatanodes, infos.length);
    final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
    assertEquals(numDatanodes, reports.length);
    for (int i = 0; i < infos.length; i++) {
        assertEquals(infos[i], reports[i].getDatanodeInfo());
        final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
        if (bpid != null) {
            // check that the reported storages match the datanode's own storage reports
            final StorageReport[] computed = reports[i].getStorageReports();
            Arrays.sort(computed, CMP);
            final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
            Arrays.sort(expected, CMP);
            assertEquals(expected.length, computed.length);
            for (int j = 0; j < expected.length; j++) {
                assertEquals(expected[j].getStorage().getStorageID(), computed[j].getStorage().getStorageID());
            }
        }
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeStorageReport(org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) StorageReport(org.apache.hadoop.hdfs.server.protocol.StorageReport)
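
The CMP comparator used for sorting above is defined elsewhere in TestDatanodeReport; a sketch consistent with the storage-ID assertions in the loop would order reports by storage ID (java.util.Comparator assumed imported).

static final Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
    @Override
    public int compare(StorageReport left, StorageReport right) {
        // order by storage ID so the computed and expected arrays line up
        return left.getStorage().getStorageID().compareTo(right.getStorage().getStorageID());
    }
};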

Example 68 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestDatanodeReport method testDatanodeReport.

/**
   * This test exercises the different types of datanode report.
   */
@Test
public void testDatanodeReport() throws Exception {
    // 0.5s heartbeat recheck interval
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
    try {
        //wait until the cluster is up
        cluster.waitActive();
        final String bpid = cluster.getNamesystem().getBlockPoolId();
        final List<DataNode> datanodes = cluster.getDataNodes();
        final DFSClient client = cluster.getFileSystem().dfs;
        assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, bpid);
        assertReports(NUM_OF_DATANODES, DatanodeReportType.LIVE, client, datanodes, bpid);
        assertReports(0, DatanodeReportType.DEAD, client, datanodes, bpid);
        // bring down one datanode
        final DataNode last = datanodes.get(datanodes.size() - 1);
        LOG.info("XXX shutdown datanode " + last.getDatanodeUuid());
        last.shutdown();
        // poll until the namenode reports the stopped datanode as dead
        DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
        while (nodeInfo.length != 1) {
            try {
                Thread.sleep(500);
            } catch (InterruptedException ignored) {
                // ignore and retry
            }
            nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
        }
        assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, null);
        assertReports(NUM_OF_DATANODES - 1, DatanodeReportType.LIVE, client, datanodes, null);
        assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
        // give the heartbeat monitor time to account for the expired heartbeat
        Thread.sleep(5000);
        assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
    } finally {
        cluster.shutdown();
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) IOException(java.io.IOException) Test(org.junit.Test)
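
The sleep-and-retry loop above is a common test idiom; Hadoop's test tree also offers GenericTestUtils.waitFor for this purpose. A minimal self-contained sketch of such a helper (hypothetical, not part of this test class):

// Hypothetical helper: poll a condition until it holds or a deadline passes.
static void waitUntil(java.util.function.BooleanSupplier check, long intervalMillis, long timeoutMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!check.getAsBoolean()) {
        if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("condition not met within " + timeoutMillis + " ms");
        }
        Thread.sleep(intervalMillis);
    }
}

A checked exception thrown inside the condition, such as the IOException from datanodeReport, would need to be wrapped before use in a lambda.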

Example 69 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestDecommission method testCountOnDecommissionedNodeList.

/**
   * Fetches live DataNodes, passing removeDecommissionedNode as:
   *   false - returns the live node list including nodes in Decommissioned state
   *   true  - returns the live node list excluding nodes in Decommissioned state
   * @throws IOException
   */
@Test
public void testCountOnDecommissionedNodeList() throws IOException {
    getConf().setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    getConf().setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
    try {
        startCluster(1, 1);
        ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList = new ArrayList<ArrayList<DatanodeInfo>>(1);
        namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(1));
        // Move datanode1 to Decommissioned state
        ArrayList<DatanodeInfo> decommissionedNode = namenodeDecomList.get(0);
        takeNodeOutofService(0, null, 0, decommissionedNode, AdminStates.DECOMMISSIONED);
        FSNamesystem ns = getCluster().getNamesystem(0);
        DatanodeManager datanodeManager = ns.getBlockManager().getDatanodeManager();
        List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
        // fetchDatanodes with false should include the live decommissioned node
        datanodeManager.fetchDatanodes(live, null, false);
        assertEquals(1, live.size());
        // fetchDatanodes with true should exclude the live decommissioned node
        datanodeManager.fetchDatanodes(live, null, true);
        assertEquals(0, live.size());
    } finally {
        shutdownCluster();
    }
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) ArrayList(java.util.ArrayList) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
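
The same DatanodeManager call can fill live and dead lists at once; a small sketch reusing the names from the test above (the dead list here is hypothetical):

List<DatanodeDescriptor> liveNodes = new ArrayList<DatanodeDescriptor>();
List<DatanodeDescriptor> deadNodes = new ArrayList<DatanodeDescriptor>();
// false: keep decommissioned nodes in the live list
datanodeManager.fetchDatanodes(liveNodes, deadNodes, false);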

Example 70 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestDecommission method testDecommissionWithOpenfile.

@Test(timeout = 120000)
public void testDecommissionWithOpenfile() throws IOException, InterruptedException {
    LOG.info("Starting test testDecommissionWithOpenfile");
    // at most 4 nodes will be decommissioned
    startCluster(1, 7);
    FileSystem fileSys = getCluster().getFileSystem(0);
    FSNamesystem ns = getCluster().getNamesystem(0);
    String openFile = "/testDecommissionWithOpenfile.dat";
    writeFile(fileSys, new Path(openFile), (short) 3);
    // make sure the file was open for write
    FSDataOutputStream fdos = fileSys.append(new Path(openFile));
    LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(getCluster().getNameNode(0), openFile, 0, fileSize);
    DatanodeInfo[] dnInfos4LastBlock = lbs.getLastLocatedBlock().getLocations();
    DatanodeInfo[] dnInfos4FirstBlock = lbs.get(0).getLocations();
    ArrayList<String> nodes = new ArrayList<String>();
    ArrayList<DatanodeInfo> dnInfos = new ArrayList<DatanodeInfo>();
    DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
    // pick replicas of the first block that are not also replicas of the last
    // block (a manual set difference over the two location arrays)
    for (DatanodeInfo datanodeInfo : dnInfos4FirstBlock) {
        DatanodeInfo found = datanodeInfo;
        for (DatanodeInfo dif : dnInfos4LastBlock) {
            if (datanodeInfo.equals(dif)) {
                found = null;
            }
        }
        if (found != null) {
            nodes.add(found.getXferAddr());
            dnInfos.add(dm.getDatanode(found));
        }
    }
    // decommission one of the 3 nodes which hold the last block
    nodes.add(dnInfos4LastBlock[0].getXferAddr());
    dnInfos.add(dm.getDatanode(dnInfos4LastBlock[0]));
    initExcludeHosts(nodes);
    refreshNodes(0);
    for (DatanodeInfo dn : dnInfos) {
        waitNodeState(dn, AdminStates.DECOMMISSIONED);
    }
    fdos.close();
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) FileSystem(org.apache.hadoop.fs.FileSystem) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ArrayList(java.util.ArrayList) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
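
The nested loops in the example compute the set difference by hand; an equivalent sketch using a HashSet (java.util imports assumed, relying on the equals/hashCode that DatanodeInfo inherits from DatanodeID) could replace them:

Set<DatanodeInfo> lastBlockNodes = new HashSet<DatanodeInfo>(Arrays.asList(dnInfos4LastBlock));
for (DatanodeInfo datanodeInfo : dnInfos4FirstBlock) {
    // keep only first-block replicas that do not also hold the last block
    if (!lastBlockNodes.contains(datanodeInfo)) {
        nodes.add(datanodeInfo.getXferAddr());
        dnInfos.add(dm.getDatanode(datanodeInfo));
    }
}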

Aggregations

DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) 214
Test (org.junit.Test) 103
Path (org.apache.hadoop.fs.Path) 91
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 73
IOException (java.io.IOException) 47
FileSystem (org.apache.hadoop.fs.FileSystem) 44
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 43
ArrayList (java.util.ArrayList) 39
Configuration (org.apache.hadoop.conf.Configuration) 38
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 37
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 32
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks) 32
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 29
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 27
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem) 25
InetSocketAddress (java.net.InetSocketAddress) 20
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) 20
StorageType (org.apache.hadoop.fs.StorageType) 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 14
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) 14