Example 36 with BlockManager

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

the class TestBlockUnderConstruction method testGetBlockLocations.

/**
   * Test NameNode.getBlockLocations(..) on reading un-closed files.
   */
@Test
public void testGetBlockLocations() throws IOException {
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    final BlockManager blockManager = cluster.getNamesystem().getBlockManager();
    final Path p = new Path(BASE_DIR, "file2.dat");
    final String src = p.toString();
    final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
    // write a half block
    int len = BLOCK_SIZE >>> 1;
    writeFile(p, out, len);
    for (int i = 1; i < NUM_BLOCKS; ) {
        // verify consistency
        final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
        final List<LocatedBlock> blocks = lb.getLocatedBlocks();
        assertEquals(i, blocks.size());
        final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
        assertFalse(blockManager.getStoredBlock(b).isComplete());
        if (++i < NUM_BLOCKS) {
            // write one more block
            writeFile(p, out, BLOCK_SIZE);
            len += BLOCK_SIZE;
        }
    }
    // close file
    out.close();
}
Also used: Path(org.apache.hadoop.fs.Path), NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock), Block(org.apache.hadoop.hdfs.protocol.Block), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), Test(org.junit.Test)
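
The test above relies on fixture fields (cluster, hdfs, BASE_DIR, BLOCK_SIZE, NUM_BLOCKS) that TestBlockUnderConstruction sets up outside this excerpt. Below is a minimal fixture sketch for running such a test against a MiniDFSCluster; the constant values are illustrative placeholders, not the real test's values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical fixture sketch; the field names mirror those the example
// uses, but the constant values here are illustrative only.
class BlockUnderConstructionFixture {
    static final int BLOCK_SIZE = 8192;
    static final int NUM_BLOCKS = 5;
    static final String BASE_DIR = "/test/blockUC";
    static MiniDFSCluster cluster;
    static FileSystem hdfs;

    static void setUp() throws Exception {
        Configuration conf = new HdfsConfiguration();
        // make blocks small so the test fills them quickly
        conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        hdfs = cluster.getFileSystem();
    }

    static void tearDown() throws Exception {
        if (hdfs != null) hdfs.close();
        if (cluster != null) cluster.shutdown();
    }
}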

Example 37 with BlockManager

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

the class TestAddOverReplicatedStripedBlocks method testProcessOverReplicatedAndCorruptStripedBlock.

@Test
public void testProcessOverReplicatedAndCorruptStripedBlock() throws Exception {
    long fileLen = dataBlocks * blockSize;
    DFSTestUtil.createStripedFile(cluster, filePath, null, 1, stripesPerBlock, false);
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    long gs = bg.getBlock().getGenerationStamp();
    String bpid = bg.getBlock().getBlockPoolId();
    long groupId = bg.getBlock().getBlockId();
    Block blk = new Block(groupId, blockSize, gs);
    BlockInfoStriped blockInfo = new BlockInfoStriped(blk, StripedFileTestUtil.getDefaultECPolicy());
    for (int i = 0; i < groupSize; i++) {
        blk.setBlockId(groupId + i);
        cluster.injectBlocks(i, Arrays.asList(blk), bpid);
    }
    cluster.triggerBlockReports();
    // let an internal block be corrupt
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
    List<String> storages = Arrays.asList(bg.getStorageIDs());
    cluster.getNamesystem().writeLock();
    try {
        bm.findAndMarkBlockAsCorrupt(lbs.getLastLocatedBlock().getBlock(), infos.get(0), storages.get(0), "TEST");
    } finally {
        cluster.getNamesystem().writeUnlock();
    }
    assertEquals(1, bm.countNodes(bm.getStoredBlock(blockInfo)).corruptReplicas());
    // let an internal block be over-replicated with 2 redundant blocks.
    blk.setBlockId(groupId + 2);
    cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
    cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
    // update blocksMap
    cluster.triggerBlockReports();
    // verify that all internal blocks exist except b0.
    // The redundant internal blocks will not be deleted before the corrupted
    // block gets reconstructed. But since we set
    // DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY to 0, the reconstruction will
    // not happen.
    lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    bg = (LocatedStripedBlock) (lbs.get(0));
    assertEquals(groupSize + 1, bg.getBlockIndices().length);
    assertEquals(groupSize + 1, bg.getLocations().length);
    BitSet set = new BitSet(groupSize);
    for (byte index : bg.getBlockIndices()) {
        set.set(index);
    }
    Assert.assertFalse(set.get(0));
    for (int i = 1; i < groupSize; i++) {
        assertTrue(set.get(i));
    }
}
Also used: BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo), LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks), BitSet(java.util.BitSet), LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), Block(org.apache.hadoop.hdfs.protocol.Block), Test(org.junit.Test)
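
Note the lock discipline in this example: findAndMarkBlockAsCorrupt mutates BlockManager state, so the call is wrapped in the FSNamesystem write lock. A sketch of that pattern as a standalone helper; the method name markCorruptForTest is hypothetical, while the calls inside it are the ones used above:

import java.io.IOException;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;

class CorruptMarker {
    // Marks one replica corrupt under the namesystem write lock, matching
    // the try/finally pattern in the example above.
    static void markCorruptForTest(MiniDFSCluster cluster, ExtendedBlock blk,
                                   DatanodeInfo dn, String storageId) throws IOException {
        BlockManager bm = cluster.getNamesystem().getBlockManager();
        cluster.getNamesystem().writeLock();
        try {
            bm.findAndMarkBlockAsCorrupt(blk, dn, storageId, "TEST");
        } finally {
            // always release the lock, even if marking fails
            cluster.getNamesystem().writeUnlock();
        }
    }
}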

Example 38 with BlockManager

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

the class TestAddStripedBlockInFBR method testAddBlockInFullBlockReport.

@Test
public void testAddBlockInFullBlockReport() throws Exception {
    BlockManager spy = Mockito.spy(cluster.getNamesystem().getBlockManager());
    // let NN ignore one DataNode's incremental block reports (IBR)
    final DataNode dn = cluster.getDataNodes().get(0);
    final DatanodeID datanodeID = dn.getDatanodeId();
    Mockito.doNothing().when(spy).processIncrementalBlockReport(Mockito.eq(datanodeID), Mockito.any());
    Whitebox.setInternalState(cluster.getNamesystem(), "blockManager", spy);
    final Path ecDir = new Path("/ec");
    final Path repDir = new Path("/rep");
    dfs.mkdirs(ecDir);
    dfs.mkdirs(repDir);
    dfs.getClient().setErasureCodingPolicy(ecDir.toString(), StripedFileTestUtil.getDefaultECPolicy().getName());
    // create several non-EC files and one EC file
    final Path[] repFiles = new Path[groupSize];
    for (int i = 0; i < groupSize; i++) {
        repFiles[i] = new Path(repDir, "f" + i);
        DFSTestUtil.createFile(dfs, repFiles[i], 1L, (short) 3, 0L);
    }
    final Path ecFile = new Path(ecDir, "f");
    DFSTestUtil.createFile(dfs, ecFile, cellSize * dataBlocks, (short) 1, 0L);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            try {
                // trigger the DataNode's full block report (FBR), which adds the block-DN mapping.
                cluster.triggerBlockReports();
                // make sure NN has correct block-dn mapping
                BlockInfoStriped blockInfo = (BlockInfoStriped) cluster.getNamesystem().getFSDirectory().getINode(ecFile.toString()).asFile().getLastBlock();
                NumberReplicas nr = spy.countNodes(blockInfo);
                return nr.excessReplicas() == 0 && nr.liveReplicas() == groupSize;
            } catch (Exception ignored) {
            // Ignore the exception
            }
            return false;
        }
    }, 3000, 60000);
}
Also used: Path(org.apache.hadoop.fs.Path), BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID), BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode), NumberReplicas(org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas), IOException(java.io.IOException), Test(org.junit.Test)
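
Since Supplier has a single abstract method, the anonymous class in this example can be written as a Java 8 lambda. A sketch of the equivalent polling loop, assuming it runs inside the same test method with the same cluster, spy, ecFile, and groupSize fields as above:

GenericTestUtils.waitFor(() -> {
    try {
        // trigger the DataNodes' FBRs, which add the block-DN mappings
        cluster.triggerBlockReports();
        // make sure NN has the correct block-DN mapping for the EC file
        BlockInfoStriped blockInfo = (BlockInfoStriped) cluster.getNamesystem()
            .getFSDirectory().getINode(ecFile.toString()).asFile().getLastBlock();
        NumberReplicas nr = spy.countNodes(blockInfo);
        return nr.excessReplicas() == 0 && nr.liveReplicas() == groupSize;
    } catch (Exception ignored) {
        // keep polling until the condition holds or waitFor times out
        return false;
    }
}, 3000, 60000);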

Example 39 with BlockManager

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

the class TestAddStripedBlocks method testCheckStripedReplicaCorrupt.

@Test
public void testCheckStripedReplicaCorrupt() throws Exception {
    final int numBlocks = 4;
    final int numStripes = 4;
    final Path filePath = new Path("/corrupt");
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    final BlockManager bm = ns.getBlockManager();
    DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, numStripes, false);
    INodeFile fileNode = ns.getFSDirectory().getINode(filePath.toString()).asFile();
    Assert.assertTrue(fileNode.isStriped());
    BlockInfo stored = fileNode.getBlocks()[0];
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(0, ns.getCorruptReplicaBlocks());
    // Now send a block report with correct size
    DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
    final Block reported = new Block(stored);
    reported.setNumBytes(numStripes * cellSize);
    StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(0, ns.getCorruptReplicaBlocks());
    // Now send a block report with wrong size
    reported.setBlockId(stored.getBlockId() + 1);
    reported.setNumBytes(numStripes * cellSize - 1);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(1).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    // Now send a parity block report with correct size
    reported.setBlockId(stored.getBlockId() + dataBlocks);
    reported.setNumBytes(numStripes * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    // Now send a parity block report with wrong size
    reported.setBlockId(stored.getBlockId() + dataBlocks);
    reported.setNumBytes(numStripes * cellSize + 1);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(3).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    // the total number of blocks with corrupt replicas is still 1
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    // 2 internal blocks corrupted
    Assert.assertEquals(2, bm.getCorruptReplicas(stored).size());
    // Now change the size of the stored block, and test verification of
    // the last block's size
    stored.setNumBytes(stored.getNumBytes() + 10);
    reported.setBlockId(stored.getBlockId() + dataBlocks + 2);
    reported.setNumBytes(numStripes * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(4).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
    // Now send a parity block report with correct size based on adjusted
    // size of stored block
    // now the stored block has numStripes full stripes + a cell + 10 bytes
    stored.setNumBytes(stored.getNumBytes() + cellSize);
    reported.setBlockId(stored.getBlockId());
    reported.setNumBytes((numStripes + 1) * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
    reported.setBlockId(stored.getBlockId() + 1);
    reported.setNumBytes(numStripes * cellSize + 10);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(0).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
    reported.setBlockId(stored.getBlockId() + dataBlocks);
    reported.setNumBytes((numStripes + 1) * cellSize);
    reports = DFSTestUtil.makeReportForReceivedBlock(reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    ns.processIncrementalBlockReport(cluster.getDataNodes().get(2).getDatanodeId(), reports[0]);
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
    Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
}
Also used: Path(org.apache.hadoop.fs.Path), BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock), Block(org.apache.hadoop.hdfs.protocol.Block), LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), StorageReceivedDeletedBlocks(org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), Test(org.junit.Test)
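
This example repeats one pattern several times: build a StorageReceivedDeletedBlocks report for a synthetic replica, feed it to the namesystem as an incremental block report, then refresh the BlockManager statistics before asserting. A sketch of that pattern as a hypothetical helper (reportReceivedBlock is not part of the test; every call inside it appears above, and it assumes the same cluster field as the test class):

private void reportReceivedBlock(FSNamesystem ns, Block reported,
                                 DatanodeStorage storage, int dnIndex) throws IOException {
    // wrap the synthetic replica in a RECEIVED_BLOCK report
    StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(
        reported, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
    // deliver it as if DataNode dnIndex had sent an incremental block report
    ns.processIncrementalBlockReport(
        cluster.getDataNodes().get(dnIndex).getDatanodeId(), reports[0]);
    // refresh computed replication state before the caller asserts on it
    BlockManagerTestUtil.updateState(ns.getBlockManager());
}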

Example 40 with BlockManager

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

the class FSDirAttrOp method unprotectedSetReplication.

static BlockInfo[] unprotectedSetReplication(FSDirectory fsd, INodesInPath iip, short replication) throws QuotaExceededException, UnresolvedLinkException, SnapshotAccessControlException, UnsupportedActionException {
    assert fsd.hasWriteLock();
    final BlockManager bm = fsd.getBlockManager();
    final INode inode = iip.getLastINode();
    if (inode == null || !inode.isFile() || inode.asFile().isStriped()) {
        // TODO: we do not support replication on striped-layout files yet
        return null;
    }
    INodeFile file = inode.asFile();
    // Make sure the directory has sufficient quotas
    short oldBR = file.getPreferredBlockReplication();
    long size = file.computeFileSize(true, true);
    // Ensure the quota is not exceeded when increasing replication
    if (oldBR < replication) {
        fsd.updateCount(iip, 0L, size, oldBR, replication, true);
    }
    file.setFileReplication(replication, iip.getLatestSnapshotId());
    short targetReplication = (short) Math.max(replication, file.getPreferredBlockReplication());
    if (oldBR > replication) {
        fsd.updateCount(iip, 0L, size, oldBR, targetReplication, true);
    }
    for (BlockInfo b : file.getBlocks()) {
        bm.setReplication(oldBR, targetReplication, b);
    }
    if (oldBR != -1) {
        if (oldBR > targetReplication) {
            FSDirectory.LOG.info("Decreasing replication from {} to {} for {}", oldBR, targetReplication, iip.getPath());
        } else {
            FSDirectory.LOG.info("Increasing replication from {} to {} for {}", oldBR, targetReplication, iip.getPath());
        }
    }
    return file.getBlocks();
}
Also used: BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
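
The "unprotected" prefix follows the FSDirectory convention: the method asserts the write lock (see the assert on its first line) instead of acquiring it, leaving acquisition to the caller. A sketch of how a caller might wrap it; the real entry point, FSDirAttrOp.setReplication, also handles permission checks and edit logging, so treat this only as an illustration of the locking contract:

// Illustrative wrapper, not the real FSDirAttrOp.setReplication.
static BlockInfo[] setReplicationSketch(FSDirectory fsd, INodesInPath iip,
                                        short replication) throws IOException {
    fsd.writeLock();
    try {
        // the assert at the top of unprotectedSetReplication now holds
        return unprotectedSetReplication(fsd, iip, replication);
    } finally {
        fsd.writeUnlock();
    }
}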

Aggregations

BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 47
Test (org.junit.Test): 33
Path (org.apache.hadoop.fs.Path): 21
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 13
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 12
IOException (java.io.IOException): 11
Configuration (org.apache.hadoop.conf.Configuration): 11
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 11
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 11
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 10
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9
Block (org.apache.hadoop.hdfs.protocol.Block): 8
FileNotFoundException (java.io.FileNotFoundException): 7
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 7
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 7
FileSystem (org.apache.hadoop.fs.FileSystem): 6
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 6