
Example 71 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestNameNodeMetrics, method testStaleNodes.

/** Test metrics indicating the number of stale DataNodes */
@Test
public void testStaleNodes() throws Exception {
    // Set two datanodes as stale
    for (int i = 0; i < 2; i++) {
        DataNode dn = cluster.getDataNodes().get(i);
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
        long staleInterval = CONF.getLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
        DatanodeDescriptor dnDes = cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId());
        DFSTestUtil.resetLastUpdatesWithOffset(dnDes, -(staleInterval + 1));
    }
    // Let the HeartbeatManager check heartbeats
    BlockManagerTestUtil.checkHeartbeat(cluster.getNameNode().getNamesystem().getBlockManager());
    assertGauge("StaleDataNodes", 2, getMetrics(NS_METRICS));
    // Reset stale datanodes
    for (int i = 0; i < 2; i++) {
        DataNode dn = cluster.getDataNodes().get(i);
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
        DatanodeDescriptor dnDes = cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId());
        DFSTestUtil.resetLastUpdatesWithOffset(dnDes, 0);
    }
    // Let the HeartbeatManager refresh its view of the DataNodes
    BlockManagerTestUtil.checkHeartbeat(cluster.getNameNode().getNamesystem().getBlockManager());
    assertGauge("StaleDataNodes", 0, getMetrics(NS_METRICS));
}
Also used : DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Test(org.junit.Test)
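
The stale-marking idiom above (disable heartbeats, then rewind the descriptor's last-update timestamp past the stale interval) can be factored into a helper. A minimal sketch built from the loop body of the test; the helper name markStale is an assumption, not part of the Hadoop test suite:

// Hypothetical helper, factored from the loop body above: disable heartbeats,
// then rewind the descriptor's last update past the stale interval so the
// HeartbeatManager classifies the node as stale on its next check.
private static void markStale(MiniDFSCluster cluster, DataNode dn, Configuration conf) throws IOException {
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    long staleInterval = conf.getLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
    DatanodeDescriptor dnDes = cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId());
    DFSTestUtil.resetLastUpdatesWithOffset(dnDes, -(staleInterval + 1));
}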

Example 72 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestDFSStripedInputStream, method setup.

@Before
public void setup() throws IOException {
    /*
     * Initialize erasure coding policy.
     */
    ecPolicy = getEcPolicy();
    dataBlocks = (short) ecPolicy.getNumDataUnits();
    parityBlocks = (short) ecPolicy.getNumParityUnits();
    cellSize = ecPolicy.getCellSize();
    blockSize = stripesPerBlock * cellSize;
    blockGroupSize = dataBlocks * blockSize;
    System.out.println("EC policy = " + ecPolicy);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, getEcPolicy().getName());
    if (ErasureCodeNative.isNativeCodeLoaded()) {
        conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY, NativeRSRawErasureCoderFactory.class.getCanonicalName());
    }
    SimulatedFSDataset.setFactory(conf);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dataBlocks + parityBlocks).build();
    cluster.waitActive();
    for (DataNode dn : cluster.getDataNodes()) {
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    fs = cluster.getFileSystem();
    fs.mkdirs(dirPath);
    fs.getClient().setErasureCodingPolicy(dirPath.toString(), ecPolicy.getName());
}
Also used : DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) NativeRSRawErasureCoderFactory(org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory) Before(org.junit.Before)
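
The size arithmetic in setup ties the striping parameters together: each internal block holds stripesPerBlock cells, and a block group spans one internal block per data unit. Illustrative arithmetic only; the RS-6-3 policy, 64 KiB cell size, and 4 stripes per block are assumptions here, since the real values come from getEcPolicy() and the test subclass:

// Worked example of the block-size relationships used in setup above.
int dataUnits = 6;                                   // assumed RS-6-3 policy
int parityUnits = 3;
int cell = 64 * 1024;                                // assumed 64 KiB cell size
int stripes = 4;                                     // assumed stripesPerBlock
int internalBlockSize = stripes * cell;              // 256 KiB per internal block
int groupDataSize = dataUnits * internalBlockSize;   // 1.5 MiB of data per block group
int groupTotalSize = (dataUnits + parityUnits) * internalBlockSize; // 2.25 MiB on disk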

Example 73 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestMover, method createFileWithFavoredDatanodes.

private void createFileWithFavoredDatanodes(final Configuration conf, final MiniDFSCluster cluster, final DistributedFileSystem dfs) throws IOException {
    // Add two DISK-based data nodes to the cluster and ensure that
    // blocks are pinned on these new data nodes.
    StorageType[][] newtypes = new StorageType[][] { { StorageType.DISK }, { StorageType.DISK } };
    startAdditionalDNs(conf, 2, newtypes, cluster);
    ArrayList<DataNode> dataNodes = cluster.getDataNodes();
    InetSocketAddress[] favoredNodes = new InetSocketAddress[2];
    int j = 0;
    for (int i = dataNodes.size() - 1; i >= 2; i--) {
        favoredNodes[j++] = dataNodes.get(i).getXferAddress();
    }
    final String file = "/parent/testMoverFailedRetryWithPinnedBlocks2";
    final FSDataOutputStream out = dfs.create(new Path(file), FsPermission.getDefault(), true, DEFAULT_BLOCK_SIZE, (short) 2, DEFAULT_BLOCK_SIZE, null, favoredNodes);
    byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
    out.write(fileData);
    out.close();
    // Mock FsDatasetSpi#getPinning to show that the block is pinned.
    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file, 0);
    Assert.assertEquals("Wrong block count", 2, locatedBlocks.locatedBlockCount());
    LocatedBlock lb = locatedBlocks.get(0);
    DatanodeInfo datanodeInfo = lb.getLocations()[0];
    for (DataNode dn : cluster.getDataNodes()) {
        if (dn.getDatanodeId().getDatanodeUuid().equals(datanodeInfo.getDatanodeUuid())) {
            LOG.info("Simulate block pinning in datanode {}", datanodeInfo);
            DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
            break;
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) InetSocketAddress(java.net.InetSocketAddress) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
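
DataNodeTestUtils.mockDatanodeBlkPinning works by making the DataNode's dataset report every block as pinned. A rough sketch of the idea; the spy wiring below is an assumption for illustration, since the real helper lives in the datanode package and swaps the dataset field directly:

// Conceptual sketch: wrap the DataNode's dataset in a Mockito spy whose
// FsDatasetSpi#getPinning always reports the requested pinning state.
FsDatasetSpi<?> spyDataset = Mockito.spy(dn.getFSDataset());
Mockito.doReturn(true).when(spyDataset).getPinning(Mockito.any(ExtendedBlock.class));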

Example 74 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestMover, method testMoverWithPinnedBlocks.

/**
   * Test to verify that the Mover cannot move pinned blocks.
   */
@Test(timeout = 90000)
public void testMoverWithPinnedBlocks() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    // Set a larger retry max-attempts value so that the test will time out if
    // block pinning errors are not handled properly during block movement.
    conf.setInt(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, 10000);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String file = "/testMoverWithPinnedBlocks/file";
        Path dir = new Path("/testMoverWithPinnedBlocks");
        dfs.mkdirs(dir);
        // write to DISK
        dfs.setStoragePolicy(dir, "HOT");
        final FSDataOutputStream out = dfs.create(new Path(file));
        byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 3);
        out.write(fileData);
        out.close();
        // verify before movement
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        StorageType[] storageTypes = lb.getStorageTypes();
        for (StorageType storageType : storageTypes) {
            Assert.assertEquals(StorageType.DISK, storageType);
        }
        // Adding one SSD based data node to the cluster.
        StorageType[][] newtypes = new StorageType[][] { { StorageType.SSD } };
        startAdditionalDNs(conf, 1, newtypes, cluster);
        // Mock FsDatasetSpi#getPinning to show that the block is pinned.
        for (int i = 0; i < cluster.getDataNodes().size(); i++) {
            DataNode dn = cluster.getDataNodes().get(i);
            LOG.info("Simulate block pinning in datanode {}", dn);
            DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
        }
        // move file blocks to ONE_SSD policy
        dfs.setStoragePolicy(dir, "ONE_SSD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", dir.toString() });
        int exitcode = ExitStatus.NO_MOVE_BLOCK.getExitCode();
        Assert.assertEquals("Movement should fail", exitcode, rc);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
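
The assertion above compares raw exit codes, which makes failures terse. A small optional helper, assumed rather than taken from the test, that maps the numeric code returned by ToolRunner back to the ExitStatus enum (org.apache.hadoop.hdfs.server.balancer.ExitStatus) for a readable message:

// Map a Mover/Balancer numeric return code back to its ExitStatus constant.
static String describeMoverExit(int rc) {
    for (ExitStatus status : ExitStatus.values()) {
        if (status.getExitCode() == rc) {
            return status.toString();
        }
    }
    return "unknown exit code " + rc;
}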

Example 75 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestStorageMover, method testNoSpaceArchive.

/**
   * Test ARCHIVE is running out of spaces.
   */
@Test
public void testNoSpaceArchive() throws Exception {
    LOG.info("testNoSpaceArchive");
    final PathPolicyMap pathPolicyMap = new PathPolicyMap(0);
    final NamespaceScheme nsScheme = pathPolicyMap.newNamespaceScheme();
    final ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF, NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
    final MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
    try {
        test.runBasicTest(false);
        // create 2 cold files with replication 3
        final short replication = 3;
        for (int i = 0; i < 2; i++) {
            final Path p = new Path(pathPolicyMap.cold, "file" + i);
            DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, replication, 0L);
            waitForAllReplicas(replication, p, test.dfs, 10);
        }
        // set all the ARCHIVE volume to full
        for (DataNode dn : test.cluster.getDataNodes()) {
            setVolumeFull(dn, StorageType.ARCHIVE);
            DataNodeTestUtils.triggerHeartbeat(dn);
        }
        {
            // test increasing replication; new replicas cannot be created
            // since there is no more ARCHIVE space.
            final Path file0 = new Path(pathPolicyMap.cold, "file0");
            final Replication r = test.getReplication(file0);
            Assert.assertEquals(0, r.disk);
            final short newReplication = (short) 5;
            test.dfs.setReplication(file0, newReplication);
            waitForAllReplicas(r.archive, file0, test.dfs, 10);
            test.verifyReplication(file0, 0, r.archive);
        }
        {
            // test creating a hot file
            final Path p = new Path(pathPolicyMap.hot, "foo");
            DFSTestUtil.createFile(test.dfs, p, BLOCK_SIZE, (short) 3, 0L);
        }
        {
            // test moving a cold file to warm
            final Path file1 = new Path(pathPolicyMap.cold, "file1");
            test.dfs.rename(file1, pathPolicyMap.warm);
            test.migrate(ExitStatus.SUCCESS);
            test.verify(true);
        }
    } finally {
        test.shutdownCluster();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Test(org.junit.Test)
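
waitForAllReplicas is a helper local to TestStorageMover whose body is not shown here. A plausible reconstruction of the polling idiom it implements, an assumption rather than the actual source:

// A plausible reconstruction of the waitForAllReplicas polling idiom:
// repeatedly fetch the first block's locations until the expected replica
// count is reached or the timeout (in seconds) expires.
private static void waitForAllReplicas(int expectedReplicas, Path file, DistributedFileSystem dfs, int timeoutSec) throws Exception {
    for (int i = 0; i < timeoutSec; i++) {
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file.toString(), 0).get(0);
        if (lb.getLocations().length >= expectedReplicas) {
            return;
        }
        Thread.sleep(1000);
    }
    Assert.fail("Timed out waiting for " + expectedReplicas + " replicas of " + file);
}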

Aggregations

DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 165
Test (org.junit.Test) 110
Path (org.apache.hadoop.fs.Path) 78
Configuration (org.apache.hadoop.conf.Configuration) 60
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 47
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 37
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) 37
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 35
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 29
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 28
FileSystem (org.apache.hadoop.fs.FileSystem) 27
IOException (java.io.IOException) 24
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks) 20
ArrayList (java.util.ArrayList) 17
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) 17
File (java.io.File) 15
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem) 14
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) 13
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) 12