Search in sources :

Example 6 with BlockECReconstructionInfo

use of org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo in project hadoop by apache.

Source: the class PBHelper, method convert.

/**
 * Serializes a {@link BlockECReconstructionCommand} into its protobuf wire form.
 * Each EC reconstruction task carried by the command is converted via
 * {@code convertBlockECRecoveryInfo} and appended to the proto message.
 *
 * @param blkECReconstructionCmd the command whose EC tasks are to be converted
 * @return the populated {@link BlockECReconstructionCommandProto}
 */
public static BlockECReconstructionCommandProto convert(BlockECReconstructionCommand blkECReconstructionCmd) {
    BlockECReconstructionCommandProto.Builder protoBuilder = BlockECReconstructionCommandProto.newBuilder();
    // Convert each reconstruction task individually and append it to the proto.
    for (BlockECReconstructionInfo taskInfo : blkECReconstructionCmd.getECTasks()) {
        protoBuilder.addBlockECReconstructioninfo(convertBlockECRecoveryInfo(taskInfo));
    }
    return protoBuilder.build();
}
Also used : BlockECReconstructionInfo(org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo) BlockECReconstructionCommandProto(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto)

Example 7 with BlockECReconstructionInfo

use of org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo in project hadoop by apache.

Source: the class ErasureCodingWorker, method processErasureCodingTasks.

/**
 * Handles the Erasure Coding reconstruction work commands sent to this datanode.
 * Every task is wrapped in a {@link StripedBlockReconstructor} and handed to the
 * striped reconstruction thread pool; tasks without any valid target are skipped,
 * and a failure on one task does not stop processing of the remaining tasks.
 *
 * @param ecTasks the EC reconstruction tasks ({@code BlockECReconstructionInfo})
 *                to schedule
 */
public void processErasureCodingTasks(Collection<BlockECReconstructionInfo> ecTasks) {
    for (BlockECReconstructionInfo taskInfo : ecTasks) {
        try {
            // Repackage the task description for the striped reconstruction machinery.
            StripedReconstructionInfo stripedInfo = new StripedReconstructionInfo(
                taskInfo.getExtendedBlock(),
                taskInfo.getErasureCodingPolicy(),
                taskInfo.getLiveBlockIndices(),
                taskInfo.getSourceDnInfos(),
                taskInfo.getTargetDnInfos(),
                taskInfo.getTargetStorageTypes());
            final StripedBlockReconstructor reconstructor =
                new StripedBlockReconstructor(this, stripedInfo);
            if (!reconstructor.hasValidTargets()) {
                // Nothing to rebuild for this task; log and move on.
                LOG.warn("No missing internal block. Skip reconstruction for task:{}", taskInfo);
                continue;
            }
            stripedReconstructionPool.submit(reconstructor);
        } catch (Throwable e) {
            // Deliberately broad: one malformed task must not abort the whole batch.
            LOG.warn("Failed to reconstruct striped block {}", taskInfo.getExtendedBlock().getLocalBlock(), e);
        }
    }
}
Also used : BlockECReconstructionInfo(org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo)

Example 8 with BlockECReconstructionInfo

use of org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo in project hadoop by apache.

Source: the class TestReconstructStripedBlocks, method doTestMissingStripedBlock.

/**
   * Starts GROUP_SIZE + 1 datanodes and injects striped blocks onto the first
   * GROUP_SIZE of them (the last DN stays empty).
   * Then marks {@code numOfBusy} datanodes as busy and removes
   * {@code numOfMissed} datanodes, so all reconstruction work must be
   * scheduled on the last datanode.
   * Finally verifies the EC reconstruction tasks handed to that last datanode:
   * target count, target identity, and the number of source DNs chosen
   * depending on whether the block sits at the highest reconstruction priority.
   *
   * @param numOfMissed number of datanodes to remove (missing internal blocks)
   * @param numOfBusy   number of datanodes to saturate with replication work
   * @throws Exception on any cluster or assertion failure
   */
private void doTestMissingStripedBlock(int numOfMissed, int numOfBusy) throws Exception {
    Configuration conf = new HdfsConfiguration();
    initConf(conf);
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    // One extra DN beyond the EC group so reconstruction has somewhere to land.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 1).build();
    try {
        cluster.waitActive();
        final int numBlocks = 4;
        DFSTestUtil.createStripedFile(cluster, filePath, dirPath, numBlocks, 1, true);
        // all blocks will be located at first GROUP_SIZE DNs, the last DN is
        // empty because of the util function createStripedFile
        // make sure the file is complete in NN
        final INodeFile fileNode = cluster.getNamesystem().getFSDirectory().getINode4Write(filePath.toString()).asFile();
        assertFalse(fileNode.isUnderConstruction());
        assertTrue(fileNode.isStriped());
        BlockInfo[] blocks = fileNode.getBlocks();
        assertEquals(numBlocks, blocks.length);
        // Sanity-check every injected block: striped, complete, full-size,
        // and stored on the full EC group.
        for (BlockInfo blk : blocks) {
            assertTrue(blk.isStriped());
            assertTrue(blk.isComplete());
            assertEquals(cellSize * dataBlocks, blk.getNumBytes());
            final BlockInfoStriped sb = (BlockInfoStriped) blk;
            assertEquals(groupSize, sb.numNodes());
        }
        final BlockManager bm = cluster.getNamesystem().getBlockManager();
        BlockInfo firstBlock = fileNode.getBlocks()[0];
        DatanodeStorageInfo[] storageInfos = bm.getStorages(firstBlock);
        // make numOfBusy nodes busy
        // Index i is deliberately shared across both loops below so the busy
        // and missed node sets are disjoint slices of storageInfos.
        int i = 0;
        for (; i < numOfBusy; i++) {
            DatanodeDescriptor busyNode = storageInfos[i].getDatanodeDescriptor();
            // Exceed maxReplicationStreams so the scheduler treats this DN as busy.
            for (int j = 0; j < maxReplicationStreams + 1; j++) {
                BlockManagerTestUtil.addBlockToBeReplicated(busyNode, new Block(j), new DatanodeStorageInfo[] { storageInfos[0] });
            }
        }
        // make numOfMissed internal blocks missed
        for (; i < numOfBusy + numOfMissed; i++) {
            DatanodeDescriptor missedNode = storageInfos[i].getDatanodeDescriptor();
            assertEquals(numBlocks, missedNode.numBlocks());
            bm.getDatanodeManager().removeDatanode(missedNode);
        }
        // Trigger the BlockManager to compute reconstruction work now.
        BlockManagerTestUtil.getComputedDatanodeWork(bm);
        // all the reconstruction work will be scheduled on the last DN
        DataNode lastDn = cluster.getDataNodes().get(groupSize);
        DatanodeDescriptor last = bm.getDatanodeManager().getDatanode(lastDn.getDatanodeId());
        assertEquals("Counting the number of outstanding EC tasks", numBlocks, last.getNumberOfBlocksToBeErasureCoded());
        List<BlockECReconstructionInfo> reconstruction = last.getErasureCodeCommand(numBlocks);
        for (BlockECReconstructionInfo info : reconstruction) {
            // Exactly one target per task, and it must be the last DN.
            assertEquals(1, info.getTargetDnInfos().length);
            assertEquals(last, info.getTargetDnInfos()[0]);
            assertEquals(info.getSourceDnInfos().length, info.getLiveBlockIndices().length);
            if (groupSize - numOfMissed == dataBlocks) {
                // It's a QUEUE_HIGHEST_PRIORITY block, so the busy DNs will be chosen
                // to make sure we have NUM_DATA_BLOCKS DNs to do reconstruction
                // work.
                assertEquals(dataBlocks, info.getSourceDnInfos().length);
            } else {
                // The block has no highest priority, so we don't use the busy DNs as
                // sources
                assertEquals(groupSize - numOfMissed - numOfBusy, info.getSourceDnInfos().length);
            }
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) BlockECReconstructionInfo(org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock)

Aggregations

BlockECReconstructionInfo (org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo)8 ArrayList (java.util.ArrayList)3 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)3 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)3 DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo)3 BlockECReconstructionCommandProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto)2 DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode)2 BlockECReconstructionCommand (org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand)2 DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage)2 Test (org.junit.Test)2 ByteString (com.google.protobuf.ByteString)1 Configuration (org.apache.hadoop.conf.Configuration)1 StorageType (org.apache.hadoop.fs.StorageType)1 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)1 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)1 Block (org.apache.hadoop.hdfs.protocol.Block)1 ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)1 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)1 LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock)1 BlockECReconstructionInfoProto (org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto)1