Use of org.apache.hadoop.hdfs.protocol.Block in the Apache Hadoop project.
From the class TestComputeInvalidateWork, method testCompInvalidate.
/**
 * Test if {@link BlockManager#computeInvalidateWork(int)}
 * can schedule invalidate work correctly.
 */
@Test(timeout = 120000)
public void testCompInvalidate() throws Exception {
    final int blockInvalidateLimit =
        bm.getDatanodeManager().getBlockInvalidateLimit();
    namesystem.writeLock();
    try {
        // Queue well over three full batches (3 * limit + 1 blocks) on every
        // datanode so each computeInvalidateWork() call below has enough
        // pending work to hit the per-node limit.
        for (int i = 0; i < nodes.length; i++) {
            for (int j = 0; j < 3 * blockInvalidateLimit + 1; j++) {
                Block block = new Block(i * (blockInvalidateLimit + 1) + j, 0,
                    GenerationStamp.LAST_RESERVED_STAMP);
                bm.addToInvalidates(block, nodes[i]);
            }
        }
        // Asking for more nodes than exist is capped at the actual datanode
        // count; each scheduled node contributes at most blockInvalidateLimit.
        assertEquals(blockInvalidateLimit * NUM_OF_DATANODES,
            bm.computeInvalidateWork(NUM_OF_DATANODES + 1));
        assertEquals(blockInvalidateLimit * NUM_OF_DATANODES,
            bm.computeInvalidateWork(NUM_OF_DATANODES));
        assertEquals(blockInvalidateLimit * (NUM_OF_DATANODES - 1),
            bm.computeInvalidateWork(NUM_OF_DATANODES - 1));
        // NOTE(review): the node chosen by computeInvalidateWork(1) appears
        // nondeterministic — it may return 1 or blockInvalidateLimit blocks;
        // both branches are checked. TODO: confirm against BlockManager's
        // node-selection behavior.
        int workCount = bm.computeInvalidateWork(1);
        if (workCount == 1) {
            assertEquals(blockInvalidateLimit + 1, bm.computeInvalidateWork(2));
        } else {
            // FIX: JUnit's assertEquals takes (expected, actual) in that
            // order; the arguments were previously swapped.
            assertEquals(blockInvalidateLimit, workCount);
            assertEquals(2, bm.computeInvalidateWork(2));
        }
    } finally {
        // Always release the namesystem write lock, even on assertion failure.
        namesystem.writeUnlock();
    }
}
Use of org.apache.hadoop.hdfs.protocol.Block in the Apache Hadoop project.
From the class TestDatanodeDescriptor, method testGetInvalidateBlocks.
/**
 * Test that getInvalidateBlocks observes the max limit: a single call
 * returns at most the requested number of blocks, and a follow-up call
 * drains the remainder.
 */
@Test
public void testGetInvalidateBlocks() throws Exception {
    final int MAX_BLOCKS = 10;
    final int REMAINING_BLOCKS = 2;
    final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
    DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
    ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
    for (int i = 0; i < MAX_BLOCKS; i++) {
        blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
    }
    dd.addBlocksToBeInvalidated(blockList);
    // First fetch is capped at MAX_LIMIT even though MAX_BLOCKS are queued.
    Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT);
    // FIX: JUnit's assertEquals takes (expected, actual); the arguments
    // were previously swapped, which produces misleading failure messages.
    assertEquals(MAX_LIMIT, bc.length);
    // Second fetch returns only the leftover blocks.
    bc = dd.getInvalidateBlocks(MAX_LIMIT);
    assertEquals(REMAINING_BLOCKS, bc.length);
}
Use of org.apache.hadoop.hdfs.protocol.Block in the Apache Hadoop project.
From the class TestDatanodeDescriptor, method testBlocksCounter.
/**
 * Test that DatanodeDescriptor#numBlocks() tracks additions and removals:
 * it increments only on a genuinely new block, ignores duplicate adds and
 * removals of unknown blocks, and decrements on successful removal.
 */
@Test
public void testBlocksCounter() throws Exception {
    DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
    assertEquals(0, dd.numBlocks());
    BlockInfo blk = new BlockInfoContiguous(new Block(1L), (short) 1);
    BlockInfo blk1 = new BlockInfoContiguous(new Block(2L), (short) 2);
    DatanodeStorageInfo[] storages = dd.getStorageInfos();
    assertTrue(storages.length > 0);
    // add first block
    // FIX: assertEquals over assertTrue(x == ...) so a failure reports the
    // actual AddBlockResult instead of just "expected true".
    assertEquals(AddBlockResult.ADDED, storages[0].addBlock(blk));
    assertEquals(1, dd.numBlocks());
    // remove a non-existent block: counter must stay unchanged
    assertFalse(BlocksMap.removeBlock(dd, blk1));
    assertEquals(1, dd.numBlocks());
    // add an existent block: not reported as ADDED, counter unchanged
    assertFalse(storages[0].addBlock(blk) == AddBlockResult.ADDED);
    assertEquals(1, dd.numBlocks());
    // add second block
    assertEquals(AddBlockResult.ADDED, storages[0].addBlock(blk1));
    assertEquals(2, dd.numBlocks());
    // remove first block
    assertTrue(BlocksMap.removeBlock(dd, blk));
    assertEquals(1, dd.numBlocks());
    // remove second block
    assertTrue(BlocksMap.removeBlock(dd, blk1));
    assertEquals(0, dd.numBlocks());
}
Use of org.apache.hadoop.hdfs.protocol.Block in the Apache Hadoop project.
From the class TestLowRedundancyBlockQueues, method assertInLevel.
/**
 * Assert that {@code block} is present in the given priority level of
 * {@code queues}, using only the public per-level iterator (no API changes
 * needed). Performs a linear scan — fine for a test — and fails the test
 * if no equal block is found.
 *
 * @param queues queues to scan
 * @param block block to look for
 * @param level level to select
 */
private void assertInLevel(LowRedundancyBlocks queues, Block block, int level) {
    for (final Iterator<BlockInfo> it = queues.iterator(level); it.hasNext(); ) {
        final Block candidate = it.next();
        if (block.equals(candidate)) {
            return;
        }
    }
    fail("Block " + block + " not found in level " + level);
}
Use of org.apache.hadoop.hdfs.protocol.Block in the Apache Hadoop project.
From the class TestBlockManager, method addBlockToBM.
/**
 * Register a new contiguous block (replication 3) with the block manager
 * under a freshly allocated mock inode id, and stub the namesystem so that
 * looking up that id yields the owning INodeFile.
 *
 * @param blkId block id to create
 * @return the BlockInfo that was added to the blocks map
 */
private BlockInfo addBlockToBM(long blkId) {
    final BlockInfo info = new BlockInfoContiguous(new Block(blkId), (short) 3);
    final long inodeId = ++mockINodeId;
    final INodeFile inode = TestINodeFile.createINodeFile(inodeId);
    bm.blocksMap.addBlockCollection(info, inode);
    info.setBlockCollectionId(inodeId);
    // Make fsn resolve the inode id back to the file for later lookups.
    doReturn(inode).when(fsn).getBlockCollection(inodeId);
    return info;
}
Aggregations