Example 11 with BlockInfoStriped

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.

the class FSDirWriteFileOp method addBlock.

/**
   * Add a block to the file. Returns a reference to the added block.
   */
private static BlockInfo addBlock(FSDirectory fsd, String path, INodesInPath inodesInPath, Block block, DatanodeStorageInfo[] targets, BlockType blockType) throws IOException {
    fsd.writeLock();
    try {
        final INodeFile fileINode = inodesInPath.getLastINode().asFile();
        Preconditions.checkState(fileINode.isUnderConstruction());
        // associate new last block for the file
        final BlockInfo blockInfo;
        if (blockType == BlockType.STRIPED) {
            ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), inodesInPath);
            short numDataUnits = (short) ecPolicy.getNumDataUnits();
            short numParityUnits = (short) ecPolicy.getNumParityUnits();
            short numLocations = (short) (numDataUnits + numParityUnits);
            // check quota limits and update space consumed
            fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(), numLocations, true);
            blockInfo = new BlockInfoStriped(block, ecPolicy);
            blockInfo.convertToBlockUnderConstruction(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
        } else {
            // check quota limits and update space consumed
            fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(), fileINode.getFileReplication(), true);
            short numLocations = fileINode.getFileReplication();
            blockInfo = new BlockInfoContiguous(block, numLocations);
            blockInfo.convertToBlockUnderConstruction(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
        }
        fsd.getBlockManager().addBlockCollection(blockInfo, fileINode);
        fileINode.addBlock(blockInfo);
        if (NameNode.stateChangeLog.isDebugEnabled()) {
            NameNode.stateChangeLog.debug("DIR* FSDirectory.addBlock: " + path + " with " + block + " block is added to the in-memory " + "file system");
        }
        return blockInfo;
    } finally {
        fsd.writeUnlock();
    }
}
Also used : BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)
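
Note how the striped branch reserves quota for every internal block of the group: updateCount is passed numDataUnits + numParityUnits rather than a file replication factor. A minimal standalone sketch of that arithmetic (not part of the Hadoop source), assuming the default RS-6-3 policy and a 128 MB preferred block size:

public class StripedQuotaSketch {
    public static void main(String[] args) {
        // Assumed values: RS-6-3 policy, 128 MB preferred block size.
        long preferredBlockSize = 128L * 1024 * 1024;
        short numDataUnits = 6;
        short numParityUnits = 3;
        short numLocations = (short) (numDataUnits + numParityUnits);
        // addBlock charges preferredBlockSize bytes for each of the expected
        // storage locations, so the worst-case space consumed by one striped
        // block group is:
        long reserved = preferredBlockSize * numLocations;
        System.out.println("Reserved: " + reserved + " bytes (1152 MB)");
    }
}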

Example 12 with BlockInfoStriped

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.

the class INodeFile method computeFileSize.

/**
   * Compute file size of the current file.
   * 
   * @param includesLastUcBlock
   *          If the last block is under construction, should it be included?
   * @param usePreferredBlockSize4LastUcBlock
   *          If the last block is under construction, should we use actual
   *          block size or preferred block size?
   *          Note that usePreferredBlockSize4LastUcBlock is ignored
   *          if includesLastUcBlock == false.
   * @return file size
   */
public final long computeFileSize(boolean includesLastUcBlock, boolean usePreferredBlockSize4LastUcBlock) {
    if (blocks.length == 0) {
        return 0;
    }
    final int last = blocks.length - 1;
    // check whether the last block is complete (i.e., not under construction)
    BlockInfo lastBlk = blocks[last];
    long size = lastBlk.getNumBytes();
    if (!lastBlk.isComplete()) {
        if (!includesLastUcBlock) {
            size = 0;
        } else if (usePreferredBlockSize4LastUcBlock) {
            size = isStriped() ? getPreferredBlockSize() * ((BlockInfoStriped) lastBlk).getDataBlockNum() : getPreferredBlockSize();
        }
    }
    // sum the sizes of the other blocks
    for (int i = 0; i < last; i++) {
        size += blocks[i].getNumBytes();
    }
    return size;
}
Also used : BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
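
For a striped file, the "preferred size" of an under-construction last block counts only the data units of the block group, since parity units never contribute to file length. A worked sketch of that rule under the same assumptions as before (RS-6-3, 128 MB preferred block size; neither value comes from the example above):

public class StripedFileSizeSketch {
    public static void main(String[] args) {
        // Assumed values: RS-6-3 policy, 128 MB preferred block size.
        long preferredBlockSize = 128L * 1024 * 1024;
        int dataBlockNum = 6; // what getDataBlockNum() would return for RS-6-3
        // Replicated file: the last under-construction block counts as one
        // preferred block.
        long contiguous = preferredBlockSize; // 128 MB
        // Striped file: the last block group spans dataBlockNum data units.
        long striped = preferredBlockSize * dataBlockNum; // 768 MB
        System.out.println(contiguous + " vs " + striped);
    }
}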

Example 13 with BlockInfoStriped

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.

the class NamenodeFsck method collectBlocksSummary.

private void collectBlocksSummary(String parent, HdfsFileStatus file, Result res, LocatedBlocks blocks) throws IOException {
    String path = file.getFullName(parent);
    boolean isOpen = blocks.isUnderConstruction();
    if (isOpen && !showOpenFiles) {
        return;
    }
    int missing = 0;
    int corrupt = 0;
    long missize = 0;
    long corruptSize = 0;
    int underReplicatedPerFile = 0;
    int misReplicatedPerFile = 0;
    StringBuilder report = new StringBuilder();
    int blockNumber = 0;
    final LocatedBlock lastBlock = blocks.getLastLocatedBlock();
    for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
        ExtendedBlock block = lBlk.getBlock();
        if (!blocks.isLastBlockComplete() && lastBlock != null && lastBlock.getBlock().equals(block)) {
            // it is under construction
            continue;
        }
        final BlockInfo storedBlock = blockManager.getStoredBlock(block.getLocalBlock());
        final int minReplication = blockManager.getMinStorageNum(storedBlock);
        // count decommissionedReplicas / decommissioningReplicas
        NumberReplicas numberReplicas = blockManager.countNodes(storedBlock);
        int decommissionedReplicas = numberReplicas.decommissioned();
        int decommissioningReplicas = numberReplicas.decommissioning();
        int enteringMaintenanceReplicas = numberReplicas.liveEnteringMaintenanceReplicas();
        int inMaintenanceReplicas = numberReplicas.maintenanceNotForReadReplicas();
        res.decommissionedReplicas += decommissionedReplicas;
        res.decommissioningReplicas += decommissioningReplicas;
        res.enteringMaintenanceReplicas += enteringMaintenanceReplicas;
        res.inMaintenanceReplicas += inMaintenanceReplicas;
        // count total replicas
        int liveReplicas = numberReplicas.liveReplicas();
        int totalReplicasPerBlock = liveReplicas + decommissionedReplicas + decommissioningReplicas + enteringMaintenanceReplicas + inMaintenanceReplicas;
        res.totalReplicas += totalReplicasPerBlock;
        boolean isMissing;
        if (storedBlock.isStriped()) {
            isMissing = totalReplicasPerBlock < minReplication;
        } else {
            isMissing = totalReplicasPerBlock == 0;
        }
        // count expected replicas
        short targetFileReplication;
        if (file.getErasureCodingPolicy() != null) {
            assert storedBlock instanceof BlockInfoStriped;
            targetFileReplication = ((BlockInfoStriped) storedBlock).getRealTotalBlockNum();
        } else {
            targetFileReplication = file.getReplication();
        }
        res.numExpectedReplicas += targetFileReplication;
        // count under min repl'd blocks
        if (totalReplicasPerBlock < minReplication) {
            res.numUnderMinReplicatedBlocks++;
        }
        // count excessive Replicas / over replicated blocks
        if (liveReplicas > targetFileReplication) {
            res.excessiveReplicas += (liveReplicas - targetFileReplication);
            res.numOverReplicatedBlocks += 1;
        }
        // count corrupt blocks
        boolean isCorrupt = lBlk.isCorrupt();
        if (isCorrupt) {
            res.addCorrupt(block.getNumBytes());
            corrupt++;
            corruptSize += block.getNumBytes();
            out.print("\n" + path + ": CORRUPT blockpool " + block.getBlockPoolId() + " block " + block.getBlockName() + "\n");
        }
        // count minimally replicated blocks
        if (totalReplicasPerBlock >= minReplication)
            res.numMinReplicatedBlocks++;
        // count missing replicas / under replicated blocks
        if (totalReplicasPerBlock < targetFileReplication && !isMissing) {
            res.missingReplicas += (targetFileReplication - totalReplicasPerBlock);
            res.numUnderReplicatedBlocks += 1;
            underReplicatedPerFile++;
            if (!showFiles) {
                out.print("\n" + path + ": ");
            }
            out.println(" Under replicated " + block + ". Target Replicas is " + targetFileReplication + " but found " + liveReplicas + " live replica(s), " + decommissionedReplicas + " decommissioned replica(s), " + decommissioningReplicas + " decommissioning replica(s)" + (this.showMaintenanceState ? (enteringMaintenanceReplicas + ", entering maintenance replica(s) and " + inMaintenanceReplicas + " in maintenance replica(s).") : "."));
        }
        // count mis replicated blocks
        BlockPlacementStatus blockPlacementStatus = bpPolicies.getPolicy(lBlk.getBlockType()).verifyBlockPlacement(lBlk.getLocations(), targetFileReplication);
        if (!blockPlacementStatus.isPlacementPolicySatisfied()) {
            res.numMisReplicatedBlocks++;
            misReplicatedPerFile++;
            if (!showFiles) {
                if (underReplicatedPerFile == 0)
                    out.println();
                out.print(path + ": ");
            }
            out.println(" Replica placement policy is violated for " + block + ". " + blockPlacementStatus.getErrorDescription());
        }
        // count storage summary
        if (this.showStoragePolcies && lBlk.getStorageTypes() != null) {
            countStorageTypeSummary(file, lBlk);
        }
        // report
        String blkName = block.toString();
        report.append(blockNumber + ". " + blkName + " len=" + block.getNumBytes());
        if (isMissing && !isCorrupt) {
            // If the block is corrupted, all of its available replicas are
            // corrupted in the case of replication, and the state of the block
            // group is unrecoverable due to some corrupted internal blocks in
            // the case of EC. We don't mark it as missing, given that these
            // available replicas/internal blocks might still be accessible: the
            // block might have been incorrectly marked as corrupted by client
            // machines.
            report.append(" MISSING!");
            res.addMissing(blkName, block.getNumBytes());
            missing++;
            missize += block.getNumBytes();
            if (storedBlock.isStriped()) {
                report.append(" Live_repl=" + liveReplicas);
                String info = getReplicaInfo(storedBlock);
                if (!info.isEmpty()) {
                    report.append(" ").append(info);
                }
            }
        } else {
            report.append(" Live_repl=" + liveReplicas);
            String info = getReplicaInfo(storedBlock);
            if (!info.isEmpty()) {
                report.append(" ").append(info);
            }
        }
        report.append('\n');
        blockNumber++;
    }
    // display under-construction block info.
    if (!blocks.isLastBlockComplete() && lastBlock != null) {
        ExtendedBlock block = lastBlock.getBlock();
        String blkName = block.toString();
        BlockInfo storedBlock = blockManager.getStoredBlock(block.getLocalBlock());
        DatanodeStorageInfo[] storages = storedBlock.getUnderConstructionFeature().getExpectedStorageLocations();
        report.append('\n');
        report.append("Under Construction Block:\n");
        report.append(blockNumber).append(". ").append(blkName);
        report.append(" len=").append(block.getNumBytes());
        report.append(" Expected_repl=" + storages.length);
        String info = getReplicaInfo(storedBlock);
        if (!info.isEmpty()) {
            report.append(" ").append(info);
        }
    }
    // count corrupt file & move or delete if necessary
    if ((missing > 0) || (corrupt > 0)) {
        if (!showFiles) {
            if (missing > 0) {
                out.print("\n" + path + ": MISSING " + missing + " blocks of total size " + missize + " B.");
            }
            if (corrupt > 0) {
                out.print("\n" + path + ": CORRUPT " + corrupt + " blocks of total size " + corruptSize + " B.");
            }
        }
        res.corruptFiles++;
        if (isOpen) {
            LOG.info("Fsck: ignoring open file " + path);
        } else {
            if (doMove)
                copyBlocksToLostFound(parent, file, blocks);
            if (doDelete)
                deleteCorruptedFile(path);
        }
    }
    if (showFiles) {
        if (missing > 0 || corrupt > 0) {
            if (missing > 0) {
                out.print(" MISSING " + missing + " blocks of total size " + missize + " B\n");
            }
            if (corrupt > 0) {
                out.print(" CORRUPT " + corrupt + " blocks of total size " + corruptSize + " B\n");
            }
        } else if (underReplicatedPerFile == 0 && misReplicatedPerFile == 0) {
            out.print(" OK\n");
        }
        if (showBlocks) {
            out.print(report + "\n");
        }
    }
}
Also used : BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) NumberReplicas(org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas) BlockPlacementStatus(org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
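
The "missing" test above differs by block type: a replicated block is missing only when no replica at all remains, while a striped group is missing once fewer internal blocks survive than the minimum needed for reconstruction (getMinStorageNum, which for a striped group corresponds to its number of data units). A minimal standalone sketch of that rule, not taken from the Hadoop source:

public class MissingBlockRuleSketch {
    static boolean isMissing(boolean striped, int totalReplicas, int minReplication) {
        // Striped: unrecoverable once fewer internal blocks remain than the
        // reconstruction minimum. Replicated: missing only at zero replicas.
        return striped ? totalReplicas < minReplication : totalReplicas == 0;
    }

    public static void main(String[] args) {
        System.out.println(isMissing(false, 1, 1)); // one replica left -> false
        System.out.println(isMissing(true, 5, 6));  // 5 of 6 data units -> true
    }
}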

Example 14 with BlockInfoStriped

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.

the class TestAddOverReplicatedStripedBlocks method testProcessOverReplicatedAndCorruptStripedBlock.

@Test
public void testProcessOverReplicatedAndCorruptStripedBlock() throws Exception {
    long fileLen = dataBlocks * blockSize;
    DFSTestUtil.createStripedFile(cluster, filePath, null, 1, stripesPerBlock, false);
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    long gs = bg.getBlock().getGenerationStamp();
    String bpid = bg.getBlock().getBlockPoolId();
    long groupId = bg.getBlock().getBlockId();
    Block blk = new Block(groupId, blockSize, gs);
    BlockInfoStriped blockInfo = new BlockInfoStriped(blk, StripedFileTestUtil.getDefaultECPolicy());
    for (int i = 0; i < groupSize; i++) {
        blk.setBlockId(groupId + i);
        cluster.injectBlocks(i, Arrays.asList(blk), bpid);
    }
    cluster.triggerBlockReports();
    // let an internal block be corrupt
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
    List<String> storages = Arrays.asList(bg.getStorageIDs());
    cluster.getNamesystem().writeLock();
    try {
        bm.findAndMarkBlockAsCorrupt(lbs.getLastLocatedBlock().getBlock(), infos.get(0), storages.get(0), "TEST");
    } finally {
        cluster.getNamesystem().writeUnlock();
    }
    assertEquals(1, bm.countNodes(bm.getStoredBlock(blockInfo)).corruptReplicas());
    // let an internal block be over-replicated with 2 redundant blocks.
    blk.setBlockId(groupId + 2);
    cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
    cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
    // update blocksMap
    cluster.triggerBlockReports();
    // verify that all internal blocks exist except b0.
    // The redundant internal blocks will not be deleted before the corrupted
    // block gets reconstructed, but since we set
    // DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY to 0, the reconstruction will
    // not happen
    lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    bg = (LocatedStripedBlock) (lbs.get(0));
    assertEquals(groupSize + 1, bg.getBlockIndices().length);
    assertEquals(groupSize + 1, bg.getLocations().length);
    BitSet set = new BitSet(groupSize);
    for (byte index : bg.getBlockIndices()) {
        set.set(index);
    }
    Assert.assertFalse(set.get(0));
    for (int i = 1; i < groupSize; i++) {
        assertTrue(set.get(i));
    }
}
Also used : BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) BitSet(java.util.BitSet) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
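
The test above leans on the striped block ID layout: a block group occupies a contiguous ID range, and internal block i is addressed as groupId + i, which is why blk.setBlockId(groupId + i) works. A short sketch of that addressing, using a made-up group ID (the concrete value is an assumption for illustration only):

public class StripedBlockIdSketch {
    public static void main(String[] args) {
        // Hypothetical group ID: striped group IDs are negative, with the
        // low bits left free to index the internal blocks.
        long groupId = Long.MIN_VALUE + 0x10;
        int groupSize = 9; // 6 data + 3 parity internal blocks for RS-6-3
        for (int i = 0; i < groupSize; i++) {
            System.out.println("internal block " + i + " -> id " + (groupId + i));
        }
    }
}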

Example 15 with BlockInfoStriped

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.

the class TestAddStripedBlockInFBR method testAddBlockInFullBlockReport.

@Test
public void testAddBlockInFullBlockReport() throws Exception {
    BlockManager spy = Mockito.spy(cluster.getNamesystem().getBlockManager());
    // let NN ignore one DataNode's IBR
    final DataNode dn = cluster.getDataNodes().get(0);
    final DatanodeID datanodeID = dn.getDatanodeId();
    Mockito.doNothing().when(spy).processIncrementalBlockReport(Mockito.eq(datanodeID), Mockito.any());
    Whitebox.setInternalState(cluster.getNamesystem(), "blockManager", spy);
    final Path ecDir = new Path("/ec");
    final Path repDir = new Path("/rep");
    dfs.mkdirs(ecDir);
    dfs.mkdirs(repDir);
    dfs.getClient().setErasureCodingPolicy(ecDir.toString(), StripedFileTestUtil.getDefaultECPolicy().getName());
    // create several non-EC files and one EC file
    final Path[] repFiles = new Path[groupSize];
    for (int i = 0; i < groupSize; i++) {
        repFiles[i] = new Path(repDir, "f" + i);
        DFSTestUtil.createFile(dfs, repFiles[i], 1L, (short) 3, 0L);
    }
    final Path ecFile = new Path(ecDir, "f");
    DFSTestUtil.createFile(dfs, ecFile, cellSize * dataBlocks, (short) 1, 0L);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            try {
                // trigger dn's FBR. The FBR will add block-dn mapping.
                cluster.triggerBlockReports();
                // make sure NN has correct block-dn mapping
                BlockInfoStriped blockInfo = (BlockInfoStriped) cluster.getNamesystem().getFSDirectory().getINode(ecFile.toString()).asFile().getLastBlock();
                NumberReplicas nr = spy.countNodes(blockInfo);
                return nr.excessReplicas() == 0 && nr.liveReplicas() == groupSize;
            } catch (Exception ignored) {
            // Ignore the exception
            }
            return false;
        }
    }, 3000, 60000);
}
Also used : Path(org.apache.hadoop.fs.Path) BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) NumberReplicas(org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas) IOException(java.io.IOException) Test(org.junit.Test)
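
Since GenericTestUtils.waitFor takes a single-method Supplier, the anonymous class above can be written as a lambda on a Java 8+ build. A behaviorally identical sketch of the same wait loop:

GenericTestUtils.waitFor(() -> {
    try {
        // trigger dn's FBR. The FBR will add block-dn mapping.
        cluster.triggerBlockReports();
        // make sure NN has correct block-dn mapping
        BlockInfoStriped blockInfo = (BlockInfoStriped) cluster.getNamesystem()
            .getFSDirectory().getINode(ecFile.toString()).asFile().getLastBlock();
        NumberReplicas nr = spy.countNodes(blockInfo);
        return nr.excessReplicas() == 0 && nr.liveReplicas() == groupSize;
    } catch (Exception ignored) {
        return false; // keep polling until the block report lands
    }
}, 3000, 60000);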

Aggregations

BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 25 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 15 uses
Test (org.junit.Test): 14 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 13 uses
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 11 uses
Path (org.apache.hadoop.fs.Path): 6 uses
IOException (java.io.IOException): 4 uses
Configuration (org.apache.hadoop.conf.Configuration): 4 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 4 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 4 uses
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 4 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 3 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 3 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3 uses
ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy): 3 uses
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 3 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 3 uses
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 3 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 2 uses