Example 61 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

The class TestEncryptedTransfer, method testEncryptedAppendRequiringBlockTransfer.

@Test
public void testEncryptedAppendRequiringBlockTransfer() throws IOException {
    setEncryptionConfigKeys();
    // start up 4 DNs
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = getFileSystem(conf);
    // Create a file with replication 3, so its single block is on 3 of the 4 DNs.
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    // Shut down one of the DNs holding a block replica.
    FSDataInputStream in = fs.open(TEST_PATH);
    List<LocatedBlock> locatedBlocks = DFSTestUtil.getAllBlocks(in);
    in.close();
    assertEquals(1, locatedBlocks.size());
    assertEquals(3, locatedBlocks.get(0).getLocations().length);
    DataNode dn = cluster.getDataNode(locatedBlocks.get(0).getLocations()[0].getIpcPort());
    dn.shutdown();
    // Reopen the file for append, which will need to add another DN to the
    // pipeline and in doing so trigger a block transfer.
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
}
Also used : DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Test(org.junit.Test)
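
setEncryptionConfigKeys() is a private helper of TestEncryptedTransfer that is not shown in this snippet. A minimal sketch of what it needs to do, assuming the standard DFSConfigKeys constants (encrypted wire transfer also relies on block access tokens being enabled):

private void setEncryptionConfigKeys() {
    // Encrypt block data on the wire between clients and datanodes.
    conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
    // Encrypted transfer piggybacks on the block access token machinery.
    conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
}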

Example 62 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

The class TestDatanodeReport, method assertReports.

static void assertReports(int numDatanodes, DatanodeReportType type, DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
    final DatanodeInfo[] infos = client.datanodeReport(type);
    assertEquals(numDatanodes, infos.length);
    final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
    assertEquals(numDatanodes, reports.length);
    for (int i = 0; i < infos.length; i++) {
        assertEquals(infos[i], reports[i].getDatanodeInfo());
        final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
        if (bpid != null) {
            //check storage
            final StorageReport[] computed = reports[i].getStorageReports();
            Arrays.sort(computed, CMP);
            final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
            Arrays.sort(expected, CMP);
            assertEquals(expected.length, computed.length);
            for (int j = 0; j < expected.length; j++) {
                assertEquals(expected[j].getStorage().getStorageID(), computed[j].getStorage().getStorageID());
            }
        }
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeStorageReport(org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) StorageReport(org.apache.hadoop.hdfs.server.protocol.StorageReport)
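
The CMP comparator that orders the storage reports is declared elsewhere in TestDatanodeReport. A plausible sketch, assuming the reports are sorted by storage ID so the expected and computed arrays can be compared pairwise (import: java.util.Comparator):

static final Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
    @Override
    public int compare(StorageReport a, StorageReport b) {
        // Order both arrays by a stable key the two sides share: the storage ID.
        return a.getStorage().getStorageID().compareTo(b.getStorage().getStorageID());
    }
};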

Example 63 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

The class TestDatanodeReport, method testDatanodeReport.

/**
   * This test exercises the different types of datanode report.
   */
@Test
public void testDatanodeReport() throws Exception {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
    try {
        //wait until the cluster is up
        cluster.waitActive();
        final String bpid = cluster.getNamesystem().getBlockPoolId();
        final List<DataNode> datanodes = cluster.getDataNodes();
        final DFSClient client = cluster.getFileSystem().dfs;
        assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, bpid);
        assertReports(NUM_OF_DATANODES, DatanodeReportType.LIVE, client, datanodes, bpid);
        assertReports(0, DatanodeReportType.DEAD, client, datanodes, bpid);
        // bring down one datanode
        final DataNode last = datanodes.get(datanodes.size() - 1);
        LOG.info("XXX shutdown datanode " + last.getDatanodeUuid());
        last.shutdown();
        // Wait until the NameNode actually reports the stopped datanode as dead.
        DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
        while (nodeInfo.length != 1) {
            try {
                Thread.sleep(500);
            } catch (Exception e) {
                // Interrupted while sleeping; poll again.
            }
            nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
        }
        assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, null);
        assertReports(NUM_OF_DATANODES - 1, DatanodeReportType.LIVE, client, datanodes, null);
        assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
        // Allow time for the expired heartbeat to show up in the FSNamesystem metrics.
        Thread.sleep(5000);
        assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
    } finally {
        cluster.shutdown();
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) IOException(java.io.IOException) Test(org.junit.Test)
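
The hand-rolled polling loop above never times out; if the datanode is never reported dead, the test hangs. Hadoop's own test utilities offer a bounded alternative. A sketch using GenericTestUtils.waitFor, assuming the Guava Supplier variant of its signature (imports: org.apache.hadoop.test.GenericTestUtils, com.google.common.base.Supplier):

GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
        try {
            // Done once exactly one datanode is reported dead.
            return client.datanodeReport(DatanodeReportType.DEAD).length == 1;
        } catch (IOException e) {
            return false;
        }
    }
}, 500, 30000); // poll every 500 ms, fail the test after 30 s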

Example 64 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

The class TestDecommission, method doDecomCheck.

private void doDecomCheck(DatanodeManager datanodeManager, DecommissionManager decomManager, int expectedNumCheckedNodes) throws IOException, ExecutionException, InterruptedException {
    // Decom all nodes
    ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
    for (DataNode d : getCluster().getDataNodes()) {
        DatanodeInfo dn = takeNodeOutofService(0, d.getDatanodeUuid(), 0, decommissionedNodes, AdminStates.DECOMMISSION_INPROGRESS);
        decommissionedNodes.add(dn);
    }
    // Run decom scan and check
    BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
    assertEquals("Unexpected # of nodes checked", expectedNumCheckedNodes, decomManager.getNumNodesChecked());
    // Recommission all nodes
    for (DatanodeInfo dn : decommissionedNodes) {
        putNodeInService(0, dn);
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode)
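
doDecomCheck is driven by tests that cap the decommission monitor's per-interval work and then count how many nodes one scan gets through. A hypothetical caller sketched from that pattern; the config key is real, but the limit values and the helper wiring (getConf(), startCluster(), getDecomManager()) are assumptions:

@Test
public void testBlocksPerInterval() throws Exception {
    // Cap how many blocks the decommission monitor scans per interval.
    getConf().setInt(
        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY, 3);
    startCluster(1, 3);
    final DatanodeManager datanodeManager =
        getCluster().getNamesystem().getBlockManager().getDatanodeManager();
    final DecommissionManager decomManager = datanodeManager.getDecomManager();
    // With a small block budget, one scan should check a bounded number of nodes.
    doDecomCheck(datanodeManager, decomManager, 3);
}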

Example 65 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

The class TestDecommission, method testClusterStats.

public void testClusterStats(int numNameNodes) throws IOException, InterruptedException {
    LOG.info("Starting test testClusterStats");
    int numDatanodes = 1;
    startCluster(numNameNodes, numDatanodes);
    for (int i = 0; i < numNameNodes; i++) {
        FileSystem fileSys = getCluster().getFileSystem(i);
        Path file = new Path("testClusterStats.dat");
        writeFile(fileSys, file, 1);
        FSNamesystem fsn = getCluster().getNamesystem(i);
        NameNode namenode = getCluster().getNameNode(i);
        DatanodeInfo decomInfo = takeNodeOutofService(i, null, 0, null, AdminStates.DECOMMISSION_INPROGRESS);
        DataNode decomNode = getDataNode(decomInfo);
        // Check namenode stats for multiple datanode heartbeats
        verifyStats(namenode, fsn, decomInfo, decomNode, true);
        // Stop decommissioning and verify stats
        DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
        putNodeInService(i, retInfo);
        DataNode retNode = getDataNode(decomInfo);
        verifyStats(namenode, fsn, retInfo, retNode, false);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem)
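
getDataNode(DatanodeInfo) is another helper of TestDecommission that resolves a report entry back to the live DataNode instance in the mini cluster. A minimal sketch, assuming the match is made on the datanode UUID (import: static org.junit.Assert.assertNotNull):

private DataNode getDataNode(DatanodeInfo decomInfo) {
    DataNode decomNode = null;
    for (DataNode dn : getCluster().getDataNodes()) {
        // The UUID is the stable identity shared by the report and the process.
        if (dn.getDatanodeUuid().equals(decomInfo.getDatanodeUuid())) {
            decomNode = dn;
            break;
        }
    }
    assertNotNull("Could not find datanode matching " + decomInfo, decomNode);
    return decomNode;
}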

Aggregations

DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 165
Test (org.junit.Test): 110
Path (org.apache.hadoop.fs.Path): 78
Configuration (org.apache.hadoop.conf.Configuration): 60
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 47
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 37
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 37
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 35
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 29
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 28
FileSystem (org.apache.hadoop.fs.FileSystem): 27
IOException (java.io.IOException): 24
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20
ArrayList (java.util.ArrayList): 17
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 17
File (java.io.File): 15
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 14
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 13
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 12