Example 1 with BlockRecoveryCommand

Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand in project hadoop by apache.

From class PBHelper, the method convert:

public static BlockRecoveryCommand convert(BlockRecoveryCommandProto recoveryCmd) {
    List<RecoveringBlockProto> list = recoveryCmd.getBlocksList();
    List<RecoveringBlock> recoveringBlocks = new ArrayList<RecoveringBlock>(list.size());
    // Convert each protobuf RecoveringBlock into its server-side counterpart.
    for (RecoveringBlockProto rbp : list) {
        recoveringBlocks.add(PBHelper.convert(rbp));
    }
    return new BlockRecoveryCommand(recoveringBlocks);
}
Also used:
BlockRecoveryCommand (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand)
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock)
ArrayList (java.util.ArrayList)
RecoveringBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto)
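
For reference, the reverse direction mirrors this method. A minimal sketch, assuming only the standard protobuf builder API generated for BlockRecoveryCommandProto (a reconstruction for illustration, not a verbatim quote of PBHelper):

public static BlockRecoveryCommandProto convert(BlockRecoveryCommand cmd) {
    BlockRecoveryCommandProto.Builder builder = BlockRecoveryCommandProto.newBuilder();
    // Convert each RecoveringBlock back to its proto form and append it to
    // the repeated blocks field of the command proto.
    for (RecoveringBlock b : cmd.getRecoveringBlocks()) {
        builder.addBlocks(PBHelper.convert(b));
    }
    return builder.build();
}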

Example 2 with BlockRecoveryCommand

Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand in project hadoop by apache.

From class TestPBHelper, the method testConvertBlockRecoveryCommand:

@Test
public void testConvertBlockRecoveryCommand() {
    // Two local datanodes that will host both recovering replicas.
    DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
    // Two recovering blocks (ids 1 and 2) with new generation stamp 3.
    List<RecoveringBlock> blks = ImmutableList.of(new RecoveringBlock(getExtendedBlock(1), dnInfo, 3), new RecoveringBlock(getExtendedBlock(2), dnInfo, 3));
    BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
    // Command -> proto direction.
    BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
    assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
    assertEquals(2, proto.getBlocks(1).getBlock().getB().getBlockId());
    // Proto -> command direction: the round trip should preserve the blocks
    // and their order.
    BlockRecoveryCommand cmd2 = PBHelper.convert(proto);
    List<RecoveringBlock> cmd2Blks = Lists.newArrayList(cmd2.getRecoveringBlocks());
    assertEquals(blks.get(0).getBlock(), cmd2Blks.get(0).getBlock());
    assertEquals(blks.get(1).getBlock(), cmd2Blks.get(1).getBlock());
    assertEquals(Joiner.on(",").join(blks), Joiner.on(",").join(cmd2Blks));
    assertEquals(cmd.toString(), cmd2.toString());
}
Also used:
BlockRecoveryCommand (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand)
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock)
BlockRecoveryCommandProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto)
Test (org.junit.Test)
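
The proto built here is what actually crosses the wire in a DataNode heartbeat response, so exercising the full serialize/parse cycle can be useful too. A hedged sketch using only standard protobuf APIs (toByteArray, parseFrom); the helper name roundTripOverWire is hypothetical, not part of PBHelper:

// Hypothetical helper for illustration: push the command through its wire
// format and decode it again via the generated protobuf parser.
static BlockRecoveryCommand roundTripOverWire(BlockRecoveryCommand cmd)
        throws com.google.protobuf.InvalidProtocolBufferException {
    byte[] wire = PBHelper.convert(cmd).toByteArray();
    return PBHelper.convert(BlockRecoveryCommandProto.parseFrom(wire));
}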

Example 3 with BlockRecoveryCommand

Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand in project hadoop by apache.

From class TestHeartbeatHandling, the method testHeartbeatBlockRecovery:

/**
   * Test if
   * {@link FSNamesystem#handleHeartbeat}
   * correctly selects data node targets for block recovery.
   */
@Test
public void testHeartbeatBlockRecovery() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
        cluster.waitActive();
        final FSNamesystem namesystem = cluster.getNamesystem();
        final HeartbeatManager hm = namesystem.getBlockManager().getDatanodeManager().getHeartbeatManager();
        final String poolId = namesystem.getBlockPoolId();
        final DatanodeRegistration nodeReg1 = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
        final DatanodeDescriptor dd1 = NameNodeAdapter.getDatanode(namesystem, nodeReg1);
        dd1.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
        final DatanodeRegistration nodeReg2 = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(1), poolId);
        final DatanodeDescriptor dd2 = NameNodeAdapter.getDatanode(namesystem, nodeReg2);
        dd2.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
        final DatanodeRegistration nodeReg3 = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2), poolId);
        final DatanodeDescriptor dd3 = NameNodeAdapter.getDatanode(namesystem, nodeReg3);
        dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
        try {
            namesystem.writeLock();
            synchronized (hm) {
                NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem);
                NameNodeAdapter.sendHeartBeat(nodeReg2, dd2, namesystem);
                NameNodeAdapter.sendHeartBeat(nodeReg3, dd3, namesystem);
                // Test with all alive nodes.
                DFSTestUtil.resetLastUpdatesWithOffset(dd1, 0);
                DFSTestUtil.resetLastUpdatesWithOffset(dd2, 0);
                DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
                final DatanodeStorageInfo[] storages = { dd1.getStorageInfos()[0], dd2.getStorageInfos()[0], dd3.getStorageInfos()[0] };
                // Create an under-recovery block located on all three storages and
                // queue it on dd1, so dd1's next heartbeat reply carries a
                // BlockRecoveryCommand for it.
                BlockInfo blockInfo = new BlockInfoContiguous(new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3);
                blockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_RECOVERY, storages);
                dd1.addBlockToBeRecovered(blockInfo);
                DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
                BlockRecoveryCommand recoveryCommand = (BlockRecoveryCommand) cmds[0];
                assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
                DatanodeInfo[] recoveringNodes = recoveryCommand.getRecoveringBlocks().toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
                assertEquals(3, recoveringNodes.length);
                assertEquals(recoveringNodes[0], dd1);
                assertEquals(recoveringNodes[1], dd2);
                assertEquals(recoveringNodes[2], dd3);
                // Test with one stale node.
                DFSTestUtil.resetLastUpdatesWithOffset(dd1, 0);
                // More than the default stale interval of 30 seconds.
                DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000);
                DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
                blockInfo = new BlockInfoContiguous(new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3);
                blockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_RECOVERY, storages);
                dd1.addBlockToBeRecovered(blockInfo);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
                recoveryCommand = (BlockRecoveryCommand) cmds[0];
                assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
                recoveringNodes = recoveryCommand.getRecoveringBlocks().toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
                assertEquals(2, recoveringNodes.length);
                // dd2 is skipped.
                assertEquals(recoveringNodes[0], dd1);
                assertEquals(recoveringNodes[1], dd3);
                // Test with all stale nodes.
                DFSTestUtil.resetLastUpdatesWithOffset(dd1, -60 * 1000);
                // More than the default stale interval of 30 seconds.
                DFSTestUtil.resetLastUpdatesWithOffset(dd2, -40 * 1000);
                DFSTestUtil.resetLastUpdatesWithOffset(dd3, -80 * 1000);
                blockInfo = new BlockInfoContiguous(new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), (short) 3);
                blockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_RECOVERY, storages);
                dd1.addBlockToBeRecovered(blockInfo);
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
                recoveryCommand = (BlockRecoveryCommand) cmds[0];
                assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
                recoveringNodes = recoveryCommand.getRecoveringBlocks().toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
                // Only dd1 is non-stale at this point (its heartbeat above refreshed
                // its last-update time). With fewer than two non-stale locations,
                // the NameNode falls back to using all replicas, so all three
                // nodes appear in the command.
                assertEquals(3, recoveringNodes.length);
                assertEquals(recoveringNodes[0], dd1);
                assertEquals(recoveringNodes[1], dd2);
                assertEquals(recoveringNodes[2], dd3);
            }
        } finally {
            namesystem.writeUnlock();
        }
    } finally {
        cluster.shutdown();
    }
}
Also used:
BlockRecoveryCommand (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand)
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)
Configuration (org.apache.hadoop.conf.Configuration)
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration)
DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand)
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage)
Block (org.apache.hadoop.hdfs.protocol.Block)
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem)
Test (org.junit.Test)
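
The selection behavior the three sets of assertions above pin down can be summarized in a small standalone model. A simplified sketch of the stale-node filtering; the class and member names here (StaleFilterSketch, Node, chooseRecoveryTargets) are illustrative only, not HDFS API, and the real logic lives in the NameNode's DatanodeManager:

import java.util.ArrayList;
import java.util.List;

// Illustrative model only -- not Hadoop code.
class StaleFilterSketch {
    static final long STALE_INTERVAL_MS = 30_000; // default stale interval

    record Node(String name, long lastUpdateMillis) {
        boolean isStale(long now) { return now - lastUpdateMillis > STALE_INTERVAL_MS; }
    }

    static List<Node> chooseRecoveryTargets(List<Node> replicas, long now) {
        List<Node> fresh = new ArrayList<>();
        for (Node n : replicas) {
            if (!n.isStale(now)) {
                fresh.add(n);
            }
        }
        // Fewer than two fresh locations: fall back to every replica and let
        // the primary datanode deal with failures. This is why the "all stale"
        // case in the test still sees three locations.
        return fresh.size() > 1 ? fresh : replicas;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        List<Node> replicas = List.of(
                new Node("dd1", now),          // just heartbeated
                new Node("dd2", now - 40_000), // stale
                new Node("dd3", now - 80_000)  // stale
        );
        // Prints all three nodes, matching the final assertions above.
        System.out.println(chooseRecoveryTargets(replicas, now));
    }
}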

Aggregations

BlockRecoveryCommand (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand): 3
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 2
Test (org.junit.Test): 2
ArrayList (java.util.ArrayList): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1
Block (org.apache.hadoop.hdfs.protocol.Block): 1
BlockRecoveryCommandProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto): 1
RecoveringBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto): 1
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 1
DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand): 1
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 1
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 1