Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand in project hadoop by apache.
In class PBHelper, the method convert:
public static BlockRecoveryCommand convert(BlockRecoveryCommandProto recoveryCmd) {
  List<RecoveringBlockProto> list = recoveryCmd.getBlocksList();
  List<RecoveringBlock> recoveringBlocks = new ArrayList<RecoveringBlock>(list.size());
  for (RecoveringBlockProto rbp : list) {
    recoveringBlocks.add(PBHelper.convert(rbp));
  }
  return new BlockRecoveryCommand(recoveringBlocks);
}
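The test below also exercises the opposite direction, PBHelper.convert(BlockRecoveryCommand), which is not shown in this listing. A minimal sketch of what that overload plausibly looks like, assuming the usual protobuf builder pattern and a PBHelper.convert(RecoveringBlock) overload (a sketch for orientation, not verified against the actual PBHelper source):

public static BlockRecoveryCommandProto convert(BlockRecoveryCommand cmd) {
  // Mirror of the method above: wrap each RecoveringBlock in its proto form
  // and accumulate them on the repeated "blocks" field of the builder.
  BlockRecoveryCommandProto.Builder builder = BlockRecoveryCommandProto.newBuilder();
  for (RecoveringBlock b : cmd.getRecoveringBlocks()) {
    builder.addBlocks(PBHelper.convert(b));
  }
  return builder.build();
}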
Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand in project hadoop by apache.
In class TestPBHelper, the method testConvertBlockRecoveryCommand:
@Test
public void testConvertBlockRecoveryCommand() {
  DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
  List<RecoveringBlock> blks = ImmutableList.of(
      new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
      new RecoveringBlock(getExtendedBlock(2), dnInfo, 3));
  BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
  BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
  assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
  assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());
  BlockRecoveryCommand cmd2 = PBHelper.convert(proto);
  List<RecoveringBlock> cmd2Blks = Lists.newArrayList(cmd2.getRecoveringBlocks());
  assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
  assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
  assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
  assertEquals(cmd.toString(), cmd2.toString());
}
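The getExtendedBlock(long) calls above refer to a private helper defined elsewhere in TestPBHelper and not included in this listing. A minimal sketch, assuming placeholder values for the block pool id, length, and generation stamp (only the block id matters for the assertions above):

private static ExtendedBlock getExtendedBlock(long blkid) {
  // Pool id, length, and generation stamp are illustrative placeholders;
  // the test only asserts on the block id round-tripping through the proto.
  return new ExtendedBlock("test-pool", blkid, 100, 2);
}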
Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand in project hadoop by apache.
In class TestHeartbeatHandling, the method testHeartbeatBlockRecovery:
/**
* Test if
* {@link FSNamesystem#handleHeartbeat}
* correctly selects data node targets for block recovery.
*/
@Test
public void testHeartbeatBlockRecovery() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final HeartbeatManager hm = namesystem.getBlockManager().getDatanodeManager().getHeartbeatManager();
    final String poolId = namesystem.getBlockPoolId();
    final DatanodeRegistration nodeReg1 = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    final DatanodeDescriptor dd1 = NameNodeAdapter.getDatanode(namesystem, nodeReg1);
    dd1.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
    final DatanodeRegistration nodeReg2 = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(1), poolId);
    final DatanodeDescriptor dd2 = NameNodeAdapter.getDatanode(namesystem, nodeReg2);
    dd2.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
    final DatanodeRegistration nodeReg3 = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2), poolId);
    final DatanodeDescriptor dd3 = NameNodeAdapter.getDatanode(namesystem, nodeReg3);
    dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
    try {
      namesystem.writeLock();
      synchronized (hm) {
        NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem);
        NameNodeAdapter.sendHeartBeat(nodeReg2, dd2, namesystem);
        NameNodeAdapter.sendHeartBeat(nodeReg3, dd3, namesystem);
        // Test with all alive nodes.
        DFSTestUtil.resetLastUpdatesWithOffset(dd1, 0);
        DFSTestUtil.resetLastUpdatesWithOffset(dd2, 0);
        DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
        final DatanodeStorageInfo[] storages = { dd1.getStorageInfos()[0], dd2.getStorageInfos()[0], dd3.getStorageInfos()[0] };
        BlockInfo blockInfo = new BlockInfoContiguous(new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3);
        blockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_RECOVERY, storages);
        dd1.addBlockToBeRecovered(blockInfo);
        DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
        BlockRecoveryCommand recoveryCommand = (BlockRecoveryCommand) cmds[0];
        assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
        DatanodeInfo[] recoveringNodes = recoveryCommand.getRecoveringBlocks().toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
        assertEquals(3, recoveringNodes.length);
        assertEquals(recoveringNodes[0], dd1);
        assertEquals(recoveringNodes[1], dd2);
        assertEquals(recoveringNodes[2], dd3);
        // Test with one stale node.
        DFSTestUtil.resetLastUpdatesWithOffset(dd1, 0);
        // More than the default stale interval of 30 seconds.
        DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000);
        DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
        blockInfo = new BlockInfoContiguous(new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3);
        blockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_RECOVERY, storages);
        dd1.addBlockToBeRecovered(blockInfo);
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
        recoveryCommand = (BlockRecoveryCommand) cmds[0];
        assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
        recoveringNodes = recoveryCommand.getRecoveringBlocks().toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
        assertEquals(2, recoveringNodes.length);
        // dd2 is skipped.
        assertEquals(recoveringNodes[0], dd1);
        assertEquals(recoveringNodes[1], dd3);
        // Test with all stale nodes.
        DFSTestUtil.resetLastUpdatesWithOffset(dd1, -60 * 1000);
        // More than the default stale interval of 30 seconds.
        DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000);
        DFSTestUtil.resetLastUpdatesWithOffset(dd3, -80 * 1000);
        blockInfo = new BlockInfoContiguous(new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3);
        blockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_RECOVERY, storages);
        dd1.addBlockToBeRecovered(blockInfo);
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
        recoveryCommand = (BlockRecoveryCommand) cmds[0];
        assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
        recoveringNodes = recoveryCommand.getRecoveringBlocks().toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
        // dd1 just heartbeated, so it is the only node that is not stale when
        // the recovery command is built. With at most one non-stale replica
        // left, the NameNode falls back to including all replicas, which is
        // why all three nodes appear below.
        assertEquals(3, recoveringNodes.length);
        assertEquals(recoveringNodes[0], dd1);
        assertEquals(recoveringNodes[1], dd2);
        assertEquals(recoveringNodes[2], dd3);
      }
    } finally {
      namesystem.writeUnlock();
    }
  } finally {
    cluster.shutdown();
  }
}
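All three cases above hinge on how the NameNode filters stale replicas when it builds the recovery command. A simplified sketch of that selection rule, illustrating the logic exercised by the test rather than reproducing the actual DatanodeManager code (names and structure are assumptions): non-stale replicas are preferred, but if filtering would leave at most one candidate, every replica is kept so the primary datanode can handle failures itself.

// Illustrative sketch only; the method name and shape are simplified from
// the heartbeat-handling logic the test above exercises.
static DatanodeStorageInfo[] chooseRecoveryLocations(
    DatanodeStorageInfo[] storages, long staleIntervalMs) {
  List<DatanodeStorageInfo> nonStale = new ArrayList<>(storages.length);
  for (DatanodeStorageInfo s : storages) {
    // DatanodeInfo.isStale checks the time since the node's last heartbeat.
    if (!s.getDatanodeDescriptor().isStale(staleIntervalMs)) {
      nonStale.add(s);
    }
  }
  // With one or zero fresh replicas, fall back to all replicas rather than
  // attempting recovery against a single node; this is why the final
  // "all stale" case still sees three locations.
  return nonStale.size() > 1
      ? nonStale.toArray(new DatanodeStorageInfo[0])
      : storages;
}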