
Example 1 with BlockCommand

Use of org.apache.hadoop.hdfs.server.protocol.BlockCommand in project hadoop by apache.

From the class TestPBHelper, method testConvertBlockCommand.

@Test
public void testConvertBlockCommand() {
    Block[] blocks = new Block[] { new Block(21), new Block(22) };
    DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1], new DatanodeInfo[2] };
    dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
    dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
    dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
    String[][] storageIDs = { { "s00" }, { "s10", "s11" } };
    StorageType[][] storageTypes = { { StorageType.DEFAULT }, { StorageType.DEFAULT, StorageType.DEFAULT } };
    // Round-trip the command through its protobuf representation and back.
    BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1", blocks, dnInfos, storageTypes, storageIDs);
    BlockCommandProto bcProto = PBHelper.convert(bc);
    BlockCommand bc2 = PBHelper.convert(bcProto);
    assertEquals(bc.getAction(), bc2.getAction());
    assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
    Block[] blocks2 = bc2.getBlocks();
    for (int i = 0; i < blocks.length; i++) {
        assertEquals(blocks[i], blocks2[i]);
    }
    DatanodeInfo[][] dnInfos2 = bc2.getTargets();
    assertEquals(dnInfos.length, dnInfos2.length);
    for (int i = 0; i < dnInfos.length; i++) {
        DatanodeInfo[] d1 = dnInfos[i];
        DatanodeInfo[] d2 = dnInfos2[i];
        assertEquals(d1.length, d2.length);
        for (int j = 0; j < d1.length; j++) {
            compare(d1[j], d2[j]);
        }
    }
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Block (org.apache.hadoop.hdfs.protocol.Block), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), BlockCommandProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto), BlockCommand (org.apache.hadoop.hdfs.server.protocol.BlockCommand), Test (org.junit.Test)
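
Note: the compare(...) helper called at the end of this test is defined elsewhere in TestPBHelper and is not shown above. A minimal sketch of what such a field-by-field DatanodeInfo comparison might look like (the specific fields checked here are an assumption, not the actual helper):

// Hypothetical sketch of the compare helper; the real TestPBHelper
// version may check a different set of fields.
private static void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
    // Identity fields inherited from DatanodeID.
    assertEquals(dn1.getDatanodeUuid(), dn2.getDatanodeUuid());
    assertEquals(dn1.getIpAddr(), dn2.getIpAddr());
    assertEquals(dn1.getHostName(), dn2.getHostName());
    assertEquals(dn1.getXferPort(), dn2.getXferPort());
    // Usage and liveness fields carried by DatanodeInfo itself.
    assertEquals(dn1.getCapacity(), dn2.getCapacity());
    assertEquals(dn1.getDfsUsed(), dn2.getDfsUsed());
    assertEquals(dn1.getRemaining(), dn2.getRemaining());
    assertEquals(dn1.getLastUpdate(), dn2.getLastUpdate());
}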

Example 2 with BlockCommand

Use of org.apache.hadoop.hdfs.server.protocol.BlockCommand in project hadoop by apache.

From the class TestHeartbeatHandling, method testHeartbeat.

/**
   * Tests that
   * {@link FSNamesystem#handleHeartbeat}
   * picks up replication and invalidation requests and observes the
   * per-heartbeat maximum limits.
   */
@Test
public void testHeartbeat() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        final FSNamesystem namesystem = cluster.getNamesystem();
        final HeartbeatManager hm = namesystem.getBlockManager().getDatanodeManager().getHeartbeatManager();
        final String poolId = namesystem.getBlockPoolId();
        final DatanodeRegistration nodeReg = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
        final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
        final String storageID = DatanodeStorage.generateUuid();
        dd.updateStorage(new DatanodeStorage(storageID));
        final int REMAINING_BLOCKS = 1;
        final int MAX_REPLICATE_LIMIT = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
        final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
        final int MAX_INVALIDATE_BLOCKS = 2 * MAX_INVALIDATE_LIMIT + REMAINING_BLOCKS;
        final int MAX_REPLICATE_BLOCKS = 2 * MAX_REPLICATE_LIMIT + REMAINING_BLOCKS;
        final DatanodeStorageInfo[] ONE_TARGET = { dd.getStorageInfo(storageID) };
        try {
            namesystem.writeLock();
            synchronized (hm) {
                // Queue more replication work than a single heartbeat may carry.
                for (int i = 0; i < MAX_REPLICATE_BLOCKS; i++) {
                    dd.addBlockToBeReplicated(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP), ONE_TARGET);
                }
                // First heartbeat: a single transfer command, capped at the replicate limit.
                DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
                assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand) cmds[0]).getBlocks().length);
                // Now also queue more invalidation work than a single heartbeat may carry.
                ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
                for (int i = 0; i < MAX_INVALIDATE_BLOCKS; i++) {
                    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
                }
                dd.addBlocksToBeInvalidated(blockList);
                // Second heartbeat: both a transfer and an invalidate command, each capped.
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(2, cmds.length);
                assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
                assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand) cmds[0]).getBlocks().length);
                assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
                assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand) cmds[1]).getBlocks().length);
                // Third heartbeat: the transfer queue drains to its remainder; invalidates are still capped.
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(2, cmds.length);
                assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
                assertEquals(REMAINING_BLOCKS, ((BlockCommand) cmds[0]).getBlocks().length);
                assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
                assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand) cmds[1]).getBlocks().length);
                // Fourth heartbeat: only the invalidate remainder is left.
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(1, cmds.length);
                assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
                assertEquals(REMAINING_BLOCKS, ((BlockCommand) cmds[0]).getBlocks().length);
                // Fifth heartbeat: both queues are empty, so no commands are returned.
                cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
                assertEquals(0, cmds.length);
            }
        } finally {
            namesystem.writeUnlock();
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), ArrayList (java.util.ArrayList), BlockCommand (org.apache.hadoop.hdfs.server.protocol.BlockCommand), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), Block (org.apache.hadoop.hdfs.protocol.Block), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test)
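
The arithmetic behind the assertions: with 2 * limit + 1 blocks queued, each heartbeat carries at most limit blocks per command, so the transfer queue drains over three heartbeats (limit, limit, then the single remainder), and the invalidate queue, filled one heartbeat later, drains on the same schedule shifted by one. A minimal standalone sketch of this per-heartbeat capping (hypothetical names, not HDFS code):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

// Illustrative batcher: drains a FIFO queue at most 'limit' items per call,
// mirroring how a heartbeat response carries at most the configured
// maximum number of blocks per command.
class HeartbeatBatcher<T> {
    private final Queue<T> pending = new ArrayDeque<>();
    private final int limit;

    HeartbeatBatcher(int limit) {
        this.limit = limit;
    }

    void enqueue(T item) {
        pending.add(item);
    }

    List<T> nextBatch() {
        List<T> batch = new ArrayList<>();
        while (!pending.isEmpty() && batch.size() < limit) {
            batch.add(pending.remove());
        }
        return batch;
    }
}

With limit = 2 and five queued blocks, successive calls return batches of size 2, 2, 1, and then 0, which is exactly the progression the assertions above check for both the transfer and the invalidate queues.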

Example 3 with BlockCommand

Use of org.apache.hadoop.hdfs.server.protocol.BlockCommand in project hadoop by apache.

From the class TestBPOfferService, method testIgnoreDeletionsFromNonActive.

/**
   * Test that DNA_INVALIDATE commands from the standby are ignored.
   */
@Test
public void testIgnoreDeletionsFromNonActive() throws Exception {
    BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
    // Ask to invalidate FAKE_BLOCK when the block report hits the standby.
    Mockito.doReturn(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, FAKE_BPID,
            new Block[] { FAKE_BLOCK.getLocalBlock() }))
        .when(mockNN2).blockReport(
            Mockito.<DatanodeRegistration>anyObject(),
            Mockito.eq(FAKE_BPID),
            Mockito.<StorageBlockReport[]>anyObject(),
            Mockito.<BlockReportContext>anyObject());
    bpos.start();
    try {
        waitForInitialization(bpos);
        // Should get block reports from both NNs
        waitForBlockReport(mockNN1);
        waitForBlockReport(mockNN2);
    } finally {
        bpos.stop();
        bpos.join();
    }
    // Should ignore the delete command from the standby.
    Mockito.verify(mockFSDataset, Mockito.never())
        .invalidate(Mockito.eq(FAKE_BPID), (Block[]) Mockito.anyObject());
}
Also used: StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), Block (org.apache.hadoop.hdfs.protocol.Block), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), BlockCommand (org.apache.hadoop.hdfs.server.protocol.BlockCommand), Test (org.junit.Test)
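
For context on why the deletion is dropped: a BPOfferService talks to both NameNodes of an HA pair but acts on destructive commands only from the one it currently considers active. A simplified sketch of that filtering idea (hypothetical shape and names; the real logic lives in BPOfferService's command processing):

// Hypothetical sketch of active/standby command filtering, not the
// actual BPOfferService implementation.
boolean processCommand(DatanodeCommand cmd, BPServiceActor fromActor) throws IOException {
    if (fromActor == activeActor) {
        // Commands from the active NameNode are fully executed,
        // including DNA_INVALIDATE block deletions.
        return processCommandFromActive(cmd);
    }
    // Mutating commands from a standby are dropped; this is what the
    // Mockito.never() verification above asserts.
    return true;
}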

Aggregations

Block (org.apache.hadoop.hdfs.protocol.Block): 3 usages
BlockCommand (org.apache.hadoop.hdfs.server.protocol.BlockCommand): 3 usages
Test (org.junit.Test): 3 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 2 usages
ArrayList (java.util.ArrayList): 1 usage
Configuration (org.apache.hadoop.conf.Configuration): 1 usage
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1 usage
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1 usage
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1 usage
BlockCommandProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto): 1 usage
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 1 usage
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 1 usage
DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand): 1 usage
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 1 usage
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 1 usage
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 1 usage