
Example 96 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestDataNodeVolumeFailure method triggerFailure.

/**
 * Access each block of the file on its second replica DataNode until one
 * access fails, indicating that the failure was triggered and the
 * corresponding volume has been removed.
 * @param path the file to probe
 * @param size the file length in bytes
 * @throws IOException if the block locations cannot be fetched
 */
private void triggerFailure(String path, long size) throws IOException {
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    List<LocatedBlock> locatedBlocks = nn.getBlockLocations(path, 0, size).getLocatedBlocks();
    for (LocatedBlock lb : locatedBlocks) {
        DatanodeInfo dinfo = lb.getLocations()[1];
        ExtendedBlock b = lb.getBlock();
        try {
            accessBlock(dinfo, lb);
        } catch (IOException e) {
            System.out.println("Failure triggered, on block: " + b.getBlockId() + "; corresponding volume should be removed by now");
            break;
        }
    }
}
Also used : NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), IOException (java.io.IOException)
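
As a companion to the snippet above, here is a minimal, hypothetical sketch of the same location-lookup pattern in isolation: resolve a file's blocks through the NameNode RPC and walk each replica's DatanodeInfo. The helper name and the assumption of a running MiniDFSCluster named cluster are illustrative, not part of the test.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

// Hypothetical helper: print every replica location of a file, block by block.
private static void printReplicaLocations(MiniDFSCluster cluster, String path,
        long size) throws IOException {
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    List<LocatedBlock> blocks =
            nn.getBlockLocations(path, 0, size).getLocatedBlocks();
    for (LocatedBlock lb : blocks) {
        for (DatanodeInfo dn : lb.getLocations()) {
            // getXferAddr() is the ip:port a client would read the block from.
            System.out.println(lb.getBlock() + " -> " + dn.getXferAddr());
        }
    }
}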

Example 97 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestMover method createFileWithFavoredDatanodes.

private void createFileWithFavoredDatanodes(final Configuration conf, final MiniDFSCluster cluster, final DistributedFileSystem dfs) throws IOException {
    // Add two DISK-based DataNodes to the cluster.
    // Also, ensure that blocks are pinned on these new DataNodes.
    StorageType[][] newtypes = new StorageType[][] { { StorageType.DISK }, { StorageType.DISK } };
    startAdditionalDNs(conf, 2, newtypes, cluster);
    ArrayList<DataNode> dataNodes = cluster.getDataNodes();
    InetSocketAddress[] favoredNodes = new InetSocketAddress[2];
    int j = 0;
    for (int i = dataNodes.size() - 1; i >= 2; i--) {
        favoredNodes[j++] = dataNodes.get(i).getXferAddress();
    }
    final String file = "/parent/testMoverFailedRetryWithPinnedBlocks2";
    final FSDataOutputStream out = dfs.create(new Path(file), FsPermission.getDefault(), true, DEFAULT_BLOCK_SIZE, (short) 2, DEFAULT_BLOCK_SIZE, null, favoredNodes);
    byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
    out.write(fileData);
    out.close();
    // Mock FsDatasetSpi#getPinning to show that the block is pinned.
    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file, 0);
    Assert.assertEquals("Wrong block count", 2, locatedBlocks.locatedBlockCount());
    LocatedBlock lb = locatedBlocks.get(0);
    DatanodeInfo datanodeInfo = lb.getLocations()[0];
    for (DataNode dn : cluster.getDataNodes()) {
        if (dn.getDatanodeId().getDatanodeUuid().equals(datanodeInfo.getDatanodeUuid())) {
            LOG.info("Simulate block pinning in datanode {}", datanodeInfo);
            DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
            break;
        }
    }
}
Also used : Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), InetSocketAddress (java.net.InetSocketAddress), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)
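
A possible follow-up check, sketched under the assumption that favored-node placement succeeded (HDFS treats favored nodes as a hint, so a strict assertion can be too rigid in practice): compare each replica's transfer address against the favored set. The helper name is hypothetical.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

// Hypothetical check: report replicas that did not land on a favored node.
static void checkFavoredPlacement(DistributedFileSystem dfs, String file,
        InetSocketAddress[] favoredNodes) throws IOException {
    Set<String> favored = new HashSet<>();
    for (InetSocketAddress addr : favoredNodes) {
        favored.add(addr.getAddress().getHostAddress() + ":" + addr.getPort());
    }
    for (LocatedBlock lb :
            dfs.getClient().getLocatedBlocks(file, 0).getLocatedBlocks()) {
        for (DatanodeInfo dn : lb.getLocations()) {
            // getXferAddr() reports the DataNode's ip:port transfer endpoint.
            if (!favored.contains(dn.getXferAddr())) {
                System.out.println("Replica not on a favored node: " + dn);
            }
        }
    }
}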

Example 98 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestAddOverReplicatedStripedBlocks method testProcessOverReplicatedSBSmallerThanFullBlocks.

@Test
public void testProcessOverReplicatedSBSmallerThanFullBlocks() throws Exception {
    // Create an EC file which doesn't fill its internal blocks completely.
    int fileLen = cellSize * (dataBlocks - 1);
    byte[] content = new byte[fileLen];
    DFSTestUtil.writeFile(fs, filePath, new String(content));
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    long gs = bg.getBlock().getGenerationStamp();
    String bpid = bg.getBlock().getBlockPoolId();
    long groupId = bg.getBlock().getBlockId();
    Block blk = new Block(groupId, blockSize, gs);
    cluster.triggerBlockReports();
    List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
    // Let an internal block be over-replicated with (numDNs - GROUP_SIZE + 1)
    // redundant copies, so the number of internal blocks exceeds GROUP_SIZE.
    blk.setBlockId(groupId);
    List<DataNode> dataNodeList = cluster.getDataNodes();
    for (int i = 0; i < numDNs; i++) {
        if (!infos.contains(dataNodeList.get(i).getDatanodeId())) {
            cluster.injectBlocks(i, Arrays.asList(blk), bpid);
            System.out.println("XXX: inject block into datanode " + i);
        }
    }
    // update blocksMap
    cluster.triggerBlockReports();
    // add to invalidates
    cluster.triggerHeartbeats();
    // datanode delete block
    cluster.triggerHeartbeats();
    // update blocksMap
    cluster.triggerBlockReports();
    // verify that all internal blocks exist
    lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, groupSize - 1);
}
Also used : LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)
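
One detail worth calling out: in HDFS erasure coding, the ID of internal block i within a block group is groupId + i, which is why reusing the group ID above (blk.setBlockId(groupId)) targets internal block 0. A small, hypothetical sketch of that arithmetic:

import org.apache.hadoop.hdfs.protocol.Block;

// Hypothetical helper: build the Block object for internal block `index`
// of a striped group, given the group's base ID, length, and generation stamp.
static Block internalBlock(long groupId, int index, long numBytes, long genStamp) {
    // Internal block i of a striped group has ID groupId + i.
    return new Block(groupId + index, numBytes, genStamp);
}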

Example 99 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestReconstructStripedBlocks method testCountLiveReplicas.

/**
 * Make sure the NN can detect the scenario where there are enough internal
 * blocks (>= 9 by default) but a data/parity block is still missing.
 */
@Test
public void testCountLiveReplicas() throws Exception {
    final HdfsConfiguration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    try {
        fs.mkdirs(dirPath);
        fs.setErasureCodingPolicy(dirPath, StripedFileTestUtil.getDefaultECPolicy().getName());
        DFSTestUtil.createFile(fs, filePath, cellSize * dataBlocks * 2, (short) 1, 0L);
        // stop a dn
        LocatedBlocks blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
        LocatedStripedBlock block = (LocatedStripedBlock) blks.getLastLocatedBlock();
        DatanodeInfo dnToStop = block.getLocations()[0];
        MiniDFSCluster.DataNodeProperties dnProp = cluster.stopDataNode(dnToStop.getXferAddr());
        cluster.setDataNodeDead(dnToStop);
        // wait for reconstruction to happen
        DFSTestUtil.waitForReplication(fs, filePath, groupSize, 15 * 1000);
        // bring the dn back: 10 internal blocks now
        cluster.restartDataNode(dnProp);
        cluster.waitActive();
        // stop another dn: 9 internal blocks remain, but they cover only 8 distinct ones
        dnToStop = block.getLocations()[1];
        cluster.stopDataNode(dnToStop.getXferAddr());
        cluster.setDataNodeDead(dnToStop);
        // currently the NameNode can track the missing block, but restart it
        // to verify that detection also works from fresh block reports
        cluster.restartNameNode(true);
        for (DataNode dn : cluster.getDataNodes()) {
            DataNodeTestUtils.triggerBlockReport(dn);
        }
        FSNamesystem fsn = cluster.getNamesystem();
        BlockManager bm = fsn.getBlockManager();
        // wait for 3 cycles of the redundancy monitor
        Thread.sleep(3000);
        for (DataNode dn : cluster.getDataNodes()) {
            DataNodeTestUtils.triggerHeartbeat(dn);
        }
        // check if NN can detect the missing internal block and finish the
        // reconstruction
        StripedFileTestUtil.waitForReconstructionFinished(filePath, fs, groupSize);
        boolean reconstructed = false;
        for (int i = 0; i < 5; i++) {
            NumberReplicas num = null;
            fsn.readLock();
            try {
                BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory().getINode4Write(filePath.toString()).asFile().getLastBlock();
                num = bm.countNodes(blockInfo);
            } finally {
                fsn.readUnlock();
            }
            if (num.liveReplicas() >= groupSize) {
                reconstructed = true;
                break;
            } else {
                Thread.sleep(1000);
            }
        }
        Assert.assertTrue(reconstructed);
        blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
        block = (LocatedStripedBlock) blks.getLastLocatedBlock();
        BitSet bitSet = new BitSet(groupSize);
        for (byte index : block.getBlockIndices()) {
            bitSet.set(index);
        }
        for (int i = 0; i < groupSize; i++) {
            Assert.assertTrue(bitSet.get(i));
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), BitSet (java.util.BitSet), NumberReplicas (org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Test (org.junit.Test)
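
The sleep-and-retry loop above is often written with GenericTestUtils.waitFor in Hadoop tests. A rough equivalent, sketched with the same fsn, bm, cluster, filePath, and groupSize as the test (the exact waitFor signature has varied slightly across Hadoop versions, and this belongs inside a test method that declares throws Exception):

import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.test.GenericTestUtils;

// Poll every second, up to 5 seconds, until the file's last block group
// reports at least groupSize live replicas.
GenericTestUtils.waitFor(() -> {
    fsn.readLock();
    try {
        BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory()
            .getINode4Write(filePath.toString()).asFile().getLastBlock();
        return bm.countNodes(blockInfo).liveReplicas() >= groupSize;
    } finally {
        fsn.readUnlock();
    }
}, 1000, 5000);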

Example 100 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestReconstructStripedBlocks method test2RecoveryTasksForSameBlockGroup.

@Test
public void test2RecoveryTasksForSameBlockGroup() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1000);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2).build();
    try {
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        BlockManager bm = cluster.getNamesystem().getBlockManager();
        fs.getClient().setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName());
        int fileLen = dataBlocks * blockSize;
        Path p = new Path("/test2RecoveryTasksForSameBlockGroup");
        final byte[] data = new byte[fileLen];
        DFSTestUtil.writeFile(fs, p, data);
        LocatedStripedBlock lb = (LocatedStripedBlock) fs.getClient().getLocatedBlocks(p.toString(), 0).get(0);
        LocatedBlock[] lbs = StripedBlockUtil.parseStripedBlockGroup(lb, cellSize, dataBlocks, parityBlocks);
        assertEquals(0, getNumberOfBlocksToBeErasureCoded(cluster));
        assertEquals(0, bm.getPendingReconstructionBlocksCount());
        // missing 1 block, so 1 task should be scheduled
        DatanodeInfo dn0 = lbs[0].getLocations()[0];
        cluster.stopDataNode(dn0.getName());
        cluster.setDataNodeDead(dn0);
        BlockManagerTestUtil.getComputedDatanodeWork(bm);
        assertEquals(1, getNumberOfBlocksToBeErasureCoded(cluster));
        assertEquals(1, bm.getPendingReconstructionBlocksCount());
        // missing another block, but no new task should be scheduled because
        // the previous task isn't finished.
        DatanodeInfo dn1 = lbs[1].getLocations()[0];
        cluster.stopDataNode(dn1.getName());
        cluster.setDataNodeDead(dn1);
        BlockManagerTestUtil.getComputedDatanodeWork(bm);
        assertEquals(1, getNumberOfBlocksToBeErasureCoded(cluster));
        assertEquals(1, bm.getPendingReconstructionBlocksCount());
    } finally {
        cluster.shutdown();
    }
}
Also used : Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), Test (org.junit.Test)
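
For reference, a small hypothetical sketch of what parseStripedBlockGroup yields: one LocatedBlock per internal block, where an entry is null if that internal block has no known location. The helper name is made up; the parameters mirror the test above.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;

// Hypothetical helper: print where each internal block of a group lives.
static void printInternalBlocks(LocatedStripedBlock group, int cellSize,
        int dataBlocks, int parityBlocks) {
    LocatedBlock[] internals = StripedBlockUtil.parseStripedBlockGroup(
            group, cellSize, dataBlocks, parityBlocks);
    for (int i = 0; i < internals.length; i++) {
        if (internals[i] == null) {
            System.out.println("internal block " + i + ": missing");
            continue;
        }
        DatanodeInfo dn = internals[i].getLocations()[0];
        System.out.println("internal block " + i + " on " + dn.getXferAddr());
    }
}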

Aggregations

DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 214
Test (org.junit.Test): 103
Path (org.apache.hadoop.fs.Path): 91
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 73
IOException (java.io.IOException): 47
FileSystem (org.apache.hadoop.fs.FileSystem): 44
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 43
ArrayList (java.util.ArrayList): 39
Configuration (org.apache.hadoop.conf.Configuration): 38
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 37
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 32
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 32
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 29
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 27
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 25
InetSocketAddress (java.net.InetSocketAddress): 20
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 20
StorageType (org.apache.hadoop.fs.StorageType): 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 14