Example 51 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class TestComputeInvalidateWork, method testCompInvalidate.

/**
   * Test if {@link BlockManager#computeInvalidateWork(int)}
   * can schedule invalidate work correctly 
   */
@Test(timeout = 120000)
public void testCompInvalidate() throws Exception {
    final int blockInvalidateLimit = bm.getDatanodeManager().getBlockInvalidateLimit();
    namesystem.writeLock();
    try {
        for (int i = 0; i < nodes.length; i++) {
            for (int j = 0; j < 3 * blockInvalidateLimit + 1; j++) {
                Block block = new Block(i * (blockInvalidateLimit + 1) + j, 0, GenerationStamp.LAST_RESERVED_STAMP);
                bm.addToInvalidates(block, nodes[i]);
            }
        }
        assertEquals(blockInvalidateLimit * NUM_OF_DATANODES, bm.computeInvalidateWork(NUM_OF_DATANODES + 1));
        assertEquals(blockInvalidateLimit * NUM_OF_DATANODES, bm.computeInvalidateWork(NUM_OF_DATANODES));
        assertEquals(blockInvalidateLimit * (NUM_OF_DATANODES - 1), bm.computeInvalidateWork(NUM_OF_DATANODES - 1));
        // After the passes above, one datanode still has blockInvalidateLimit + 1
        // blocks queued while the others have one each; which node the next
        // single-node pass happens to pick determines the branch below.
        int workCount = bm.computeInvalidateWork(1);
        if (workCount == 1) {
            assertEquals(blockInvalidateLimit + 1, bm.computeInvalidateWork(2));
        } else {
            assertEquals(blockInvalidateLimit, workCount);
            assertEquals(2, bm.computeInvalidateWork(2));
        }
    } finally {
        namesystem.writeUnlock();
    }
}
Also used : Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
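
The arithmetic the assertions rely on can be made concrete with a small standalone model. This is a sketch under assumptions, not BlockManager's implementation: it assumes computeInvalidateWork visits at most nodesToProcess datanodes per pass and takes at most blockInvalidateLimit queued blocks from each. The class and method below are hypothetical.

public class InvalidateWorkModel {

    // Sketch of the per-pass cap: visit at most nodesToProcess nodes and
    // take at most 'limit' queued blocks from each.
    static int computeInvalidateWork(int[] pendingPerNode, int nodesToProcess, int limit) {
        int scheduled = 0;
        int visited = 0;
        for (int i = 0; i < pendingPerNode.length && visited < nodesToProcess; i++) {
            if (pendingPerNode[i] == 0) {
                continue; // nothing queued for this datanode
            }
            int take = Math.min(pendingPerNode[i], limit);
            pendingPerNode[i] -= take;
            scheduled += take;
            visited++;
        }
        return scheduled;
    }

    public static void main(String[] args) {
        final int limit = 1000;
        final int nodes = 3;
        int[] pending = new int[nodes];
        java.util.Arrays.fill(pending, 3 * limit + 1); // same per-node load as the test
        // Asking for more nodes than exist still yields nodes * limit.
        System.out.println(computeInvalidateWork(pending.clone(), nodes + 1, limit)); // 3000
        System.out.println(computeInvalidateWork(pending.clone(), nodes, limit));     // 3000
        System.out.println(computeInvalidateWork(pending.clone(), nodes - 1, limit)); // 2000
    }
}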

Example 52 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class TestDatanodeDescriptor, method testGetInvalidateBlocks.

/**
   * Test that getInvalidateBlocks observes the max limit.
   */
@Test
public void testGetInvalidateBlocks() throws Exception {
    final int MAX_BLOCKS = 10;
    final int REMAINING_BLOCKS = 2;
    final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
    DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
    ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
    for (int i = 0; i < MAX_BLOCKS; i++) {
        blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
    }
    dd.addBlocksToBeInvalidated(blockList);
    // The first call drains up to the limit...
    Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT);
    assertEquals(MAX_LIMIT, bc.length);
    // ...and the next call returns only what is left.
    bc = dd.getInvalidateBlocks(MAX_LIMIT);
    assertEquals(REMAINING_BLOCKS, bc.length);
}
Also used : ArrayList(java.util.ArrayList) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
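
The drain-up-to-a-limit behavior the test checks can be sketched with a plain queue. This is a hypothetical stand-in, not DatanodeDescriptor's implementation; the names BoundedDrainQueue and drain are invented for illustration.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

class BoundedDrainQueue<T> {
    private final Deque<T> pending = new ArrayDeque<>();

    void addAll(List<T> items) {
        pending.addAll(items);
    }

    // Return at most maxLimit queued items, removing them from the queue.
    List<T> drain(int maxLimit) {
        List<T> out = new ArrayList<>();
        while (out.size() < maxLimit && !pending.isEmpty()) {
            out.add(pending.poll());
        }
        return out;
    }
}

With ten queued items and a limit of eight, the first drain(8) returns eight items and the second returns the remaining two, mirroring the MAX_LIMIT and REMAINING_BLOCKS assertions above.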

Example 53 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class TestDatanodeDescriptor, method testBlocksCounter.

@Test
public void testBlocksCounter() throws Exception {
    DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
    assertEquals(0, dd.numBlocks());
    BlockInfo blk = new BlockInfoContiguous(new Block(1L), (short) 1);
    BlockInfo blk1 = new BlockInfoContiguous(new Block(2L), (short) 2);
    DatanodeStorageInfo[] storages = dd.getStorageInfos();
    assertTrue(storages.length > 0);
    // add first block
    assertTrue(storages[0].addBlock(blk) == AddBlockResult.ADDED);
    assertEquals(1, dd.numBlocks());
    // remove a non-existent block
    assertFalse(BlocksMap.removeBlock(dd, blk1));
    assertEquals(1, dd.numBlocks());
    // add an existent block
    assertFalse(storages[0].addBlock(blk) == AddBlockResult.ADDED);
    assertEquals(1, dd.numBlocks());
    // add second block
    assertTrue(storages[0].addBlock(blk1) == AddBlockResult.ADDED);
    assertEquals(2, dd.numBlocks());
    // remove first block
    assertTrue(BlocksMap.removeBlock(dd, blk));
    assertEquals(1, dd.numBlocks());
    // remove second block
    assertTrue(BlocksMap.removeBlock(dd, blk1));
    assertEquals(0, dd.numBlocks());
}
Also used : Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
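
The counter semantics the test depends on (re-adding an existing block is a no-op, removing a never-added block returns false) can be modeled with a set. The StorageModel type below is a hypothetical sketch, not Hadoop's DatanodeStorageInfo.

import java.util.HashSet;
import java.util.Set;

class StorageModel {
    enum AddBlockResult { ADDED, ALREADY_EXIST }

    private final Set<Long> blockIds = new HashSet<>();

    // Set.add returns false for duplicates, which maps directly onto the
    // ADDED / ALREADY_EXIST distinction the test asserts on.
    AddBlockResult addBlock(long blockId) {
        return blockIds.add(blockId) ? AddBlockResult.ADDED : AddBlockResult.ALREADY_EXIST;
    }

    boolean removeBlock(long blockId) {
        return blockIds.remove(blockId);
    }

    int numBlocks() {
        return blockIds.size();
    }
}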

Example 54 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class TestLowRedundancyBlockQueues, method assertInLevel.

/**
   * Determine whether or not a block is in a level without changing the API.
   * Instead, get the per-level iterator and run through it looking for a match.
   * If the block is not found, the assertion fails.
   *
   * This is inefficient, but this is only a test case.
   * @param queues queues to scan
   * @param block block to look for
   * @param level level to select
   */
private void assertInLevel(LowRedundancyBlocks queues, Block block, int level) {
    final Iterator<BlockInfo> bi = queues.iterator(level);
    while (bi.hasNext()) {
        Block next = bi.next();
        if (block.equals(next)) {
            return;
        }
    }
    fail("Block " + block + " not found in level " + level);
}
Also used : Block(org.apache.hadoop.hdfs.protocol.Block)
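
The same scan-one-level pattern, detached from HDFS types, looks like the sketch below. The bucketed List<List<T>> structure is an assumption standing in for LowRedundancyBlocks' per-level queues.

import java.util.Iterator;
import java.util.List;

final class LevelScan {
    // Walk only the requested level's iterator; fail if the element is absent.
    static <T> void assertInLevel(List<List<T>> levels, T wanted, int level) {
        Iterator<T> it = levels.get(level).iterator();
        while (it.hasNext()) {
            if (wanted.equals(it.next())) {
                return; // found at the expected level
            }
        }
        throw new AssertionError(wanted + " not found in level " + level);
    }
}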

Example 55 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class TestBlockManager, method addBlockToBM.

private BlockInfo addBlockToBM(long blkId) {
    Block block = new Block(blkId);
    BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
    long inodeId = ++mockINodeId;
    final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
    // Register the block in the BlockManager's map and tie it to the inode.
    bm.blocksMap.addBlockCollection(blockInfo, bc);
    blockInfo.setBlockCollectionId(inodeId);
    // Make the mocked namesystem hand back the same file for this inode id.
    doReturn(bc).when(fsn).getBlockCollection(inodeId);
    return blockInfo;
}
Also used : ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) Block(org.apache.hadoop.hdfs.protocol.Block) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) TestINodeFile(org.apache.hadoop.hdfs.server.namenode.TestINodeFile) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile)
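
The doReturn(...).when(...) stubbing at the end of the helper is standard Mockito. A self-contained sketch of the same pattern is below; the Namesystem interface is a hypothetical stand-in for the mocked FSNamesystem.

import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;

public class StubbingSketch {
    // Hypothetical stand-in for the mocked namesystem.
    interface Namesystem {
        Object getBlockCollection(long inodeId);
    }

    public static void main(String[] args) {
        Namesystem fsn = mock(Namesystem.class);
        Object bc = new Object(); // stands in for the INodeFile
        long inodeId = 42L;
        // Same pattern as addBlockToBM: lookups for this inode id are routed
        // back to the collection the helper just registered.
        doReturn(bc).when(fsn).getBlockCollection(inodeId);
        System.out.println(fsn.getBlockCollection(inodeId) == bc); // true
    }
}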

Aggregations

Block (org.apache.hadoop.hdfs.protocol.Block): 155
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 79
Test (org.junit.Test): 77
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 74
Path (org.apache.hadoop.fs.Path): 28
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 26
IOException (java.io.IOException): 24
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 22
Configuration (org.apache.hadoop.conf.Configuration): 20
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 17
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 17
CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock): 17
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 15
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 14
ArrayList (java.util.ArrayList): 12
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 11
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 10