use of org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo in project hadoop by apache.
the class PBHelper method convert.
public static BlockECReconstructionCommandProto convert(
    BlockECReconstructionCommand blkECReconstructionCmd) {
  BlockECReconstructionCommandProto.Builder builder =
      BlockECReconstructionCommandProto.newBuilder();
  Collection<BlockECReconstructionInfo> blockECRInfos =
      blkECReconstructionCmd.getECTasks();
  for (BlockECReconstructionInfo blkECReconstructInfo : blockECRInfos) {
    builder.addBlockECReconstructioninfo(
        convertBlockECRecoveryInfo(blkECReconstructInfo));
  }
  return builder.build();
}
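For context, a minimal sketch of how this converter might be exercised when the NameNode answers a heartbeat. The DNA_ERASURE_CODING_RECONSTRUCTION action constant, the proto's outer class (DatanodeProtocolProtos), and the reverse PBHelper.convert overload are assumptions from memory and may differ between Hadoop versions:

import java.util.Collection;

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

class EcCommandConversionSketch {
  // Wrap pending EC tasks in a command, serialize it to the wire format,
  // and deserialize it again, as the datanode side would.
  static BlockECReconstructionCommand roundTrip(
      Collection<BlockECReconstructionInfo> pendingTasks) {
    BlockECReconstructionCommand cmd = new BlockECReconstructionCommand(
        DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION, pendingTasks);
    // Forward conversion: the PBHelper method shown above.
    BlockECReconstructionCommandProto proto = PBHelper.convert(cmd);
    // The reverse conversion is assumed to exist as a PBHelper overload.
    return PBHelper.convert(proto);
  }
}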
use of org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo in project hadoop by apache.
the class ErasureCodingWorker method processErasureCodingTasks.
/**
 * Handles the erasure coding reconstruction work commands.
 *
 * @param ecTasks a collection of BlockECReconstructionInfo entries, one per
 *                block group to reconstruct
 */
public void processErasureCodingTasks(
    Collection<BlockECReconstructionInfo> ecTasks) {
  for (BlockECReconstructionInfo reconInfo : ecTasks) {
    try {
      StripedReconstructionInfo stripedReconInfo =
          new StripedReconstructionInfo(reconInfo.getExtendedBlock(),
              reconInfo.getErasureCodingPolicy(),
              reconInfo.getLiveBlockIndices(),
              reconInfo.getSourceDnInfos(),
              reconInfo.getTargetDnInfos(),
              reconInfo.getTargetStorageTypes());
      final StripedBlockReconstructor task =
          new StripedBlockReconstructor(this, stripedReconInfo);
      if (task.hasValidTargets()) {
        stripedReconstructionPool.submit(task);
      } else {
        LOG.warn("No missing internal block. Skip reconstruction for task:{}",
            reconInfo);
      }
    } catch (Throwable e) {
      LOG.warn("Failed to reconstruct striped block {}",
          reconInfo.getExtendedBlock().getLocalBlock(), e);
    }
  }
}
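Note that each task is wrapped in its own try/catch on Throwable, so one malformed task cannot abort the rest of the batch. As a hedged sketch of where this method sits in the pipeline, the datanode's command-processing path hands heartbeat EC commands to this worker roughly as follows; the getErasureCodingWorker() accessor name is an assumption and may differ across Hadoop versions:

import java.util.Collection;

import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;

class EcCommandDispatchSketch {
  // Hand the EC tasks carried by a heartbeat command to the datanode's
  // ErasureCodingWorker, which queues one StripedBlockReconstructor per task.
  static void dispatch(DataNode dn, DatanodeCommand cmd) {
    if (cmd instanceof BlockECReconstructionCommand) {
      Collection<BlockECReconstructionInfo> ecTasks =
          ((BlockECReconstructionCommand) cmd).getECTasks();
      // Assumption: the worker is reachable through this accessor.
      dn.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
    }
  }
}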
use of org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo in project hadoop by apache.
the class TestReconstructStripedBlocks method doTestMissingStripedBlock.
/**
 * Starts GROUP_SIZE + 1 datanodes and injects striped blocks into the first
 * GROUP_SIZE of them. Then makes numOfBusy datanodes busy and removes
 * numOfMissed datanodes, and triggers the BlockManager to compute
 * reconstruction work (so all reconstruction work is scheduled on the last
 * datanode). Finally, verifies the reconstruction work assigned to the last
 * datanode.
 */
private void doTestMissingStripedBlock(int numOfMissed, int numOfBusy)
    throws Exception {
  Configuration conf = new HdfsConfiguration();
  initConf(conf);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      StripedFileTestUtil.getDefaultECPolicy().getName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 1)
      .build();
  try {
    cluster.waitActive();
    final int numBlocks = 4;
    DFSTestUtil.createStripedFile(cluster, filePath, dirPath, numBlocks, 1,
        true);
    // All blocks will be located at the first GROUP_SIZE DNs; the last DN is
    // empty because of the util function createStripedFile.
    // Make sure the file is complete in the NN.
    final INodeFile fileNode = cluster.getNamesystem().getFSDirectory()
        .getINode4Write(filePath.toString()).asFile();
    assertFalse(fileNode.isUnderConstruction());
    assertTrue(fileNode.isStriped());
    BlockInfo[] blocks = fileNode.getBlocks();
    assertEquals(numBlocks, blocks.length);
    for (BlockInfo blk : blocks) {
      assertTrue(blk.isStriped());
      assertTrue(blk.isComplete());
      assertEquals(cellSize * dataBlocks, blk.getNumBytes());
      final BlockInfoStriped sb = (BlockInfoStriped) blk;
      assertEquals(groupSize, sb.numNodes());
    }
    final BlockManager bm = cluster.getNamesystem().getBlockManager();
    BlockInfo firstBlock = fileNode.getBlocks()[0];
    DatanodeStorageInfo[] storageInfos = bm.getStorages(firstBlock);
    // Make numOfBusy nodes busy.
    int i = 0;
    for (; i < numOfBusy; i++) {
      DatanodeDescriptor busyNode = storageInfos[i].getDatanodeDescriptor();
      for (int j = 0; j < maxReplicationStreams + 1; j++) {
        BlockManagerTestUtil.addBlockToBeReplicated(busyNode, new Block(j),
            new DatanodeStorageInfo[] { storageInfos[0] });
      }
    }
    // Make numOfMissed internal blocks missed.
    for (; i < numOfBusy + numOfMissed; i++) {
      DatanodeDescriptor missedNode = storageInfos[i].getDatanodeDescriptor();
      assertEquals(numBlocks, missedNode.numBlocks());
      bm.getDatanodeManager().removeDatanode(missedNode);
    }
    BlockManagerTestUtil.getComputedDatanodeWork(bm);
    // All the reconstruction work will be scheduled on the last DN.
    DataNode lastDn = cluster.getDataNodes().get(groupSize);
    DatanodeDescriptor last =
        bm.getDatanodeManager().getDatanode(lastDn.getDatanodeId());
    assertEquals("Counting the number of outstanding EC tasks", numBlocks,
        last.getNumberOfBlocksToBeErasureCoded());
    List<BlockECReconstructionInfo> reconstruction =
        last.getErasureCodeCommand(numBlocks);
    for (BlockECReconstructionInfo info : reconstruction) {
      assertEquals(1, info.getTargetDnInfos().length);
      assertEquals(last, info.getTargetDnInfos()[0]);
      assertEquals(info.getSourceDnInfos().length,
          info.getLiveBlockIndices().length);
      if (groupSize - numOfMissed == dataBlocks) {
        // It's a QUEUE_HIGHEST_PRIORITY block, so the busy DNs will be chosen
        // to make sure we have NUM_DATA_BLOCKS DNs to do reconstruction work.
        assertEquals(dataBlocks, info.getSourceDnInfos().length);
      } else {
        // The block has no highest priority, so we don't use the busy DNs as
        // sources.
        assertEquals(groupSize - numOfMissed - numOfBusy,
            info.getSourceDnInfos().length);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
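A sketch of how this helper might be driven from individual test cases; the method names and (numOfMissed, numOfBusy) pairs below are illustrative assumptions, not the original test class's values. Note that with the default RS(6,3) policy, groupSize(9) - numOfMissed == dataBlocks(6) holds when numOfMissed is 3, which exercises the QUEUE_HIGHEST_PRIORITY branch above:

// Illustrative JUnit cases (parameter choices are assumptions).
@Test
public void testMissingBlockNoBusyNodes() throws Exception {
  // One missing internal block, no busy DNs: busy nodes are excluded from
  // the reconstruction sources.
  doTestMissingStripedBlock(1, 0);
}

@Test
public void testHighestPriorityReconstruction() throws Exception {
  // Assuming RS(6,3): 9 - 3 == 6 data blocks live, so the block lands in
  // QUEUE_HIGHEST_PRIORITY and even busy DNs are used as sources.
  doTestMissingStripedBlock(3, 1);
}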