Use of org.apache.hadoop.hdfs.server.protocol.BlockCommand in project hadoop by apache.
From the class TestPBHelper, the method testConvertBlockCommand.
@Test
public void testConvertBlockCommand() {
  Block[] blocks = new Block[] { new Block(21), new Block(22) };
  DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
      new DatanodeInfo[2] };
  dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
  String[][] storageIDs = { { "s00" }, { "s10", "s11" } };
  StorageType[][] storageTypes = { { StorageType.DEFAULT },
      { StorageType.DEFAULT, StorageType.DEFAULT } };
  BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
      blocks, dnInfos, storageTypes, storageIDs);
  // Round-trip the command through its protobuf representation.
  BlockCommandProto bcProto = PBHelper.convert(bc);
  BlockCommand bc2 = PBHelper.convert(bcProto);
  // The action, blocks, and targets must all survive the round trip.
  assertEquals(bc.getAction(), bc2.getAction());
  assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
  Block[] blocks2 = bc2.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    assertEquals(blocks[i], blocks2[i]);
  }
  DatanodeInfo[][] dnInfos2 = bc2.getTargets();
  assertEquals(dnInfos.length, dnInfos2.length);
  for (int i = 0; i < dnInfos.length; i++) {
    DatanodeInfo[] d1 = dnInfos[i];
    DatanodeInfo[] d2 = dnInfos2[i];
    assertEquals(d1.length, d2.length);
    for (int j = 0; j < d1.length; j++) {
      compare(d1[j], d2[j]);
    }
  }
}
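The compare(DatanodeInfo, DatanodeInfo) helper called in the inner loop is defined elsewhere in TestPBHelper and does not appear on this page. A minimal sketch of what such a helper could look like, assuming a field-by-field equality check over common DatanodeInfo accessors (the exact set of fields the real helper compares is not shown here):

// Hypothetical sketch of the compare helper used above; the real
// TestPBHelper version may check a different or larger set of fields.
private static void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
  assertEquals(dn1.getDatanodeUuid(), dn2.getDatanodeUuid());
  assertEquals(dn1.getIpAddr(), dn2.getIpAddr());
  assertEquals(dn1.getHostName(), dn2.getHostName());
  assertEquals(dn1.getXferAddr(), dn2.getXferAddr());
  assertEquals(dn1.getCapacity(), dn2.getCapacity());
  assertEquals(dn1.getDfsUsed(), dn2.getDfsUsed());
  assertEquals(dn1.getLastUpdate(), dn2.getLastUpdate());
}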
Use of org.apache.hadoop.hdfs.server.protocol.BlockCommand in project hadoop by apache.
From the class TestHeartbeatHandling, the method testHeartbeat.
/**
 * Test that {@link FSNamesystem#handleHeartbeat} picks up replication
 * and/or invalidation requests and observes the per-heartbeat maximum
 * limits.
 */
@Test
public void testHeartbeat() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final HeartbeatManager hm = namesystem.getBlockManager()
        .getDatanodeManager().getHeartbeatManager();
    final String poolId = namesystem.getBlockPoolId();
    final DatanodeRegistration nodeReg = InternalDataNodeTestUtils
        .getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem,
        nodeReg);
    final String storageID = DatanodeStorage.generateUuid();
    dd.updateStorage(new DatanodeStorage(storageID));
    // Queue 2 * limit + 1 blocks of each kind so that draining them takes
    // two full heartbeats plus one remainder heartbeat.
    final int REMAINING_BLOCKS = 1;
    final int MAX_REPLICATE_LIMIT = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
    final int MAX_INVALIDATE_LIMIT =
        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
    final int MAX_INVALIDATE_BLOCKS = 2 * MAX_INVALIDATE_LIMIT
        + REMAINING_BLOCKS;
    final int MAX_REPLICATE_BLOCKS = 2 * MAX_REPLICATE_LIMIT
        + REMAINING_BLOCKS;
    final DatanodeStorageInfo[] ONE_TARGET = { dd.getStorageInfo(storageID) };
    try {
      namesystem.writeLock();
      synchronized (hm) {
        for (int i = 0; i < MAX_REPLICATE_BLOCKS; i++) {
          dd.addBlockToBeReplicated(
              new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP),
              ONE_TARGET);
        }
        // First heartbeat: only replication work is pending, capped at the
        // replicate limit.
        DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd,
            namesystem).getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT,
            ((BlockCommand) cmds[0]).getBlocks().length);
        // Now also queue invalidation work.
        ArrayList<Block> blockList =
            new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
        for (int i = 0; i < MAX_INVALIDATE_BLOCKS; i++) {
          blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
        }
        dd.addBlocksToBeInvalidated(blockList);
        // Second heartbeat: both command types, each capped at its limit.
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT,
            ((BlockCommand) cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT,
            ((BlockCommand) cmds[1]).getBlocks().length);
        // Third heartbeat: the replication backlog is down to the remainder.
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS,
            ((BlockCommand) cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT,
            ((BlockCommand) cmds[1]).getBlocks().length);
        // Fourth heartbeat: only the invalidation remainder is left.
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS,
            ((BlockCommand) cmds[0]).getBlocks().length);
        // Fifth heartbeat: all queues drained, no commands returned.
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(0, cmds.length);
      }
    } finally {
      namesystem.writeUnlock();
    }
  } finally {
    cluster.shutdown();
  }
}
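The assertion sequence encodes a simple drain pattern: each heartbeat returns at most one limit's worth of each command type, so a backlog of 2 * limit + 1 blocks empties over three heartbeats as limit, limit, and then the single remaining block. A standalone illustration of that arithmetic (not part of the test; the class and method names here are invented for the example):

// Minimal sketch of the drain arithmetic the assertions above encode:
// a backlog of 2 * limit + 1 blocks is served in chunks of at most
// `limit` per heartbeat.
public class DrainScheduleDemo {
  static java.util.List<Integer> drainSchedule(int backlog, int limit) {
    java.util.List<Integer> chunks = new java.util.ArrayList<>();
    while (backlog > 0) {
      int served = Math.min(limit, backlog); // one heartbeat's worth
      chunks.add(served);
      backlog -= served;
    }
    return chunks;
  }

  public static void main(String[] args) {
    // With MAX_REPLICATE_LIMIT = 2, three heartbeats drain 2, 2, 1 blocks,
    // matching the three DNA_TRANSFER assertions above.
    System.out.println(drainSchedule(2 * 2 + 1, 2)); // [2, 2, 1]
  }
}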
Use of org.apache.hadoop.hdfs.server.protocol.BlockCommand in project hadoop by apache.
From the class TestBPOfferService, the method testIgnoreDeletionsFromNonActive.
/**
* Test that DNA_INVALIDATE commands from the standby are ignored.
*/
@Test
public void testIgnoreDeletionsFromNonActive() throws Exception {
  BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
  // Ask to invalidate FAKE_BLOCK when the block report hits the standby.
  Mockito.doReturn(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE,
          FAKE_BPID, new Block[] { FAKE_BLOCK.getLocalBlock() }))
      .when(mockNN2).blockReport(
          Mockito.<DatanodeRegistration>anyObject(),
          Mockito.eq(FAKE_BPID),
          Mockito.<StorageBlockReport[]>anyObject(),
          Mockito.<BlockReportContext>anyObject());
  bpos.start();
  try {
    waitForInitialization(bpos);
    // Should get block reports from both NNs
    waitForBlockReport(mockNN1);
    waitForBlockReport(mockNN2);
  } finally {
    bpos.stop();
    bpos.join();
  }
  // Should ignore the delete command from the standby
  Mockito.verify(mockFSDataset, Mockito.never())
      .invalidate(Mockito.eq(FAKE_BPID), (Block[]) Mockito.anyObject());
}
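The behavior this test pins down lives in BPOfferService's per-namenode command routing. A simplified sketch of the standby-side handling, paraphrased rather than copied from the Hadoop source (reRegister() and LOG stand in for the actor's re-registration hook and the class logger):

// Simplified paraphrase of the standby command path: only benign commands
// such as re-registration are honored; destructive commands like
// DNA_INVALIDATE are logged and dropped, so FsDatasetSpi#invalidate is
// never reached, which is exactly what the Mockito.never() verify checks.
private boolean processCommandFromStandby(DatanodeCommand cmd) {
  if (cmd == null) {
    return true;
  }
  switch (cmd.getAction()) {
    case DatanodeProtocol.DNA_REGISTER:
      reRegister(); // safe to honor from either namenode
      break;
    default:
      LOG.warn("Got a command from standby NN - ignoring command: "
          + cmd.getAction());
  }
  return true;
}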