Example 86 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class TestBlockManager, method blockOnNodes:

private BlockInfo blockOnNodes(long blkId, List<DatanodeDescriptor> nodes) {
    Block block = new Block(blkId);
    // A contiguous (replicated) block with a replication factor of 3.
    BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
    // Register the block on every storage of every given datanode.
    for (DatanodeDescriptor dn : nodes) {
        for (DatanodeStorageInfo storage : dn.getStorageInfos()) {
            blockInfo.addStorage(storage, blockInfo);
        }
    }
    return blockInfo;
}
Also used: ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), Block(org.apache.hadoop.hdfs.protocol.Block), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
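
For orientation, a hedged sketch of how this helper might be invoked elsewhere in TestBlockManager (getNodes(...) is the test class's own node-building helper, visible in Example 88; the exact call sites may differ):

// Hypothetical usage inside a TestBlockManager test method:
List<DatanodeDescriptor> nodes = getNodes(0, 1, 2);
// Registers block 42 on every storage of all three datanodes, so the
// block manager believes three replica locations exist.
BlockInfo storedBlock = blockOnNodes(42L, nodes);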

Example 87 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class TestBlockInfoStriped, method testAddStorageWithReplicatedBlock:

@Test(expected = IllegalArgumentException.class)
public void testAddStorageWithReplicatedBlock() {
    DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
    // 'info' is the test's BlockInfoStriped field; handing it a
    // contiguous (replicated) reported block must be rejected.
    BlockInfo replica = new BlockInfoContiguous(new Block(1000L), (short) 3);
    info.addStorage(storage, replica);
}
Also used: Block(org.apache.hadoop.hdfs.protocol.Block), Test(org.junit.Test)
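
By contrast, the pairing from Example 86 is the valid one: a contiguous BlockInfo accepts a contiguous reported block. A minimal contrast sketch, reusing the storage variable above (illustrative only):

// No exception here: a replicated block added to a replicated BlockInfo.
BlockInfo contiguous = new BlockInfoContiguous(new Block(1000L), (short) 3);
contiguous.addStorage(storage, contiguous);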

Example 88 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class TestBlockManager, method testHighestPriReplSrcChosenDespiteMaxReplLimit:

/**
   * Test that a source node for a highest-priority reconstruction is chosen
   * even if all available source nodes have reached their replication limits.
   */
@Test
public void testHighestPriReplSrcChosenDespiteMaxReplLimit() throws Exception {
    // Soft limit 0: no normal-priority reconstruction streams allowed;
    // hard limit 1: one highest-priority stream may still be scheduled.
    bm.maxReplicationStreams = 0;
    bm.replicationStreamsHardLimit = 1;
    // Arbitrary block id.
    long blockId = 42;
    Block aBlock = new Block(blockId, 0, 0);
    List<DatanodeDescriptor> origNodes = getNodes(0, 1);
    // Add the block to the first node.
    addBlockOnNodes(blockId, origNodes.subList(0, 1));
    List<DatanodeDescriptor> cntNodes = new LinkedList<DatanodeDescriptor>();
    List<DatanodeStorageInfo> liveNodes = new LinkedList<DatanodeStorageInfo>();
    assertNotNull("Chooses source node for a highest-priority replication"
        + " even if all available source nodes have reached their replication"
        + " limits below the hard limit.",
        bm.chooseSourceDatanodes(bm.getStoredBlock(aBlock), cntNodes, liveNodes,
            new NumberReplicas(), new ArrayList<Byte>(),
            LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY)[0]);
    assertEquals("Does not choose a source node for a less-than-highest-priority"
        + " replication since all available source nodes have reached"
        + " their replication limits.", 0,
        bm.chooseSourceDatanodes(bm.getStoredBlock(aBlock), cntNodes, liveNodes,
            new NumberReplicas(), new ArrayList<Byte>(),
            LowRedundancyBlocks.QUEUE_VERY_LOW_REDUNDANCY).length);
    // Increase the replication count to test replication count > hard limit
    DatanodeStorageInfo[] targets = { origNodes.get(1).getStorageInfos()[0] };
    origNodes.get(0).addBlockToBeReplicated(aBlock, targets);
    assertEquals("Does not choose a source node for a highest-priority"
        + " replication when all available nodes exceed the hard limit.", 0,
        bm.chooseSourceDatanodes(bm.getStoredBlock(aBlock), cntNodes, liveNodes,
            new NumberReplicas(), new ArrayList<Byte>(),
            LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY).length);
}
Also used: Block(org.apache.hadoop.hdfs.protocol.Block), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock), LinkedList(java.util.LinkedList), Test(org.junit.Test)
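
The two fields the test pokes directly correspond to NameNode settings. A hedged sketch of the equivalent configuration, assuming the DFSConfigKeys constants DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY and DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY (i.e. dfs.namenode.replication.max-streams and dfs.namenode.replication.max-streams-hard-limit):

Configuration conf = new HdfsConfiguration();
// Soft limit: caps concurrent reconstruction streams per node for
// normal-priority work (the test forces the in-memory field to 0).
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
// Hard limit: even highest-priority reconstruction refuses a source
// node beyond this bound (the test sets it to 1).
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY, 1);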

Example 89 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class TestBlockManagerSafeMode, method setupMockCluster:

/**
   * Set up the mock context.
   *
   * - extension is always needed (default period is {@link #EXTENSION} ms)
   * - datanode threshold is always reached via mock
   * - safe block count starts at 0 and needs {@link #BLOCK_THRESHOLD} blocks
   *   to reach the threshold
   * - write/read lock is always held by the current thread
   *
   * @throws IOException
   */
@Before
public void setupMockCluster() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setDouble(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, THRESHOLD);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, EXTENSION);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, DATANODE_NUM);
    fsn = mock(FSNamesystem.class);
    doReturn(true).when(fsn).hasWriteLock();
    doReturn(true).when(fsn).hasReadLock();
    doReturn(true).when(fsn).isRunning();
    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
    bm = spy(new BlockManager(fsn, false, conf));
    doReturn(true).when(bm).isGenStampInFuture(any(Block.class));
    dn = spy(bm.getDatanodeManager());
    Whitebox.setInternalState(bm, "datanodeManager", dn);
    // the datanode threshold is always met
    when(dn.getNumLiveDataNodes()).thenReturn(DATANODE_NUM);
    bmSafeMode = new BlockManagerSafeMode(bm, fsn, false, conf);
}
Also used: Configuration(org.apache.hadoop.conf.Configuration), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), Block(org.apache.hadoop.hdfs.protocol.Block), FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Before(org.junit.Before)
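
Once set up this way, a test typically activates safe mode and asserts on its state. A minimal hedged sketch (BLOCK_TOTAL is an assumed test constant for the expected block count; activate and isInSafeMode are taken to be the relevant BlockManagerSafeMode entry points):

// Hypothetical follow-up inside a @Test method:
bmSafeMode.activate(BLOCK_TOTAL);       // enter safe mode expecting BLOCK_TOTAL blocks
assertTrue(bmSafeMode.isInSafeMode());  // zero safe blocks is below BLOCK_THRESHOLD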

Example 90 with Block

Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

The class TestPendingReconstruction, method testProcessPendingReconstructions:

/* Test that processPendingReconstructions() uses the most recent
 * BlockInfo from the blocksMap, by placing a larger generation stamp
 * into the blocksMap.
 */
@Test
public void testProcessPendingReconstructions() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
    MiniDFSCluster cluster = null;
    Block block;
    BlockInfo blockInfo;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
        cluster.waitActive();
        FSNamesystem fsn = cluster.getNamesystem();
        BlockManager blkManager = fsn.getBlockManager();
        PendingReconstructionBlocks pendingReconstruction = blkManager.pendingReconstruction;
        LowRedundancyBlocks neededReconstruction = blkManager.neededReconstruction;
        BlocksMap blocksMap = blkManager.blocksMap;
        //
        // Add 1 block to pendingReconstructions with GenerationStamp = 0.
        //
        block = new Block(1, 1, 0);
        blockInfo = new BlockInfoContiguous(block, (short) 3);
        pendingReconstruction.increment(blockInfo, DatanodeStorageInfo.toDatanodeDescriptors(DFSTestUtil.createDatanodeStorageInfos(1)));
        BlockCollection bc = Mockito.mock(BlockCollection.class);
        // Place into blocksmap with GenerationStamp = 1
        blockInfo.setGenerationStamp(1);
        blocksMap.addBlockCollection(blockInfo, bc);
        assertEquals("Size of pendingReconstructions ", 1, pendingReconstruction.size());
        // Add a second block to pendingReconstructions that has no
        // corresponding entry in blocksmap
        block = new Block(2, 2, 0);
        blockInfo = new BlockInfoContiguous(block, (short) 3);
        pendingReconstruction.increment(blockInfo, DatanodeStorageInfo.toDatanodeDescriptors(DFSTestUtil.createDatanodeStorageInfos(1)));
        // verify 2 blocks in pendingReconstructions
        assertEquals("Size of pendingReconstructions ", 2, pendingReconstruction.size());
        // Wait for the pending-reconstruction entries to time out.
        while (pendingReconstruction.size() > 0) {
            try {
                Thread.sleep(100);
            } catch (Exception e) {
                // Ignore interrupts and keep polling.
            }
        }
        // Wait for the timed-out blocks to be re-queued into neededReconstruction.
        while (neededReconstruction.size() == 0) {
            try {
                Thread.sleep(100);
            } catch (Exception e) {
                // Ignore interrupts and keep polling.
            }
        }
        // Only the block that had an entry in the blocksMap is re-queued,
        // and it carries the updated generation stamp (1) rather than the
        // stale stamp (0) recorded in the pending queue.
        for (Block b : neededReconstruction) {
            assertEquals("Generation stamp is 1 ", 1, b.getGenerationStamp());
        }
        // Verify size of neededReconstruction is exactly 1.
        assertEquals("size of neededReconstruction is 1 ", 1, neededReconstruction.size());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), Configuration(org.apache.hadoop.conf.Configuration), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock), Block(org.apache.hadoop.hdfs.protocol.Block), FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test(org.junit.Test)
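
A side note on the two sleep loops above: Hadoop's test utilities offer a bounded polling idiom that fails instead of hanging forever. A hedged sketch using org.apache.hadoop.test.GenericTestUtils.waitFor (the 100 ms check interval and 60 s timeout are illustrative values):

// Poll with an explicit timeout instead of open-ended while loops.
GenericTestUtils.waitFor(() -> pendingReconstruction.size() == 0, 100, 60000);
GenericTestUtils.waitFor(() -> neededReconstruction.size() > 0, 100, 60000);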

Aggregations

Block (org.apache.hadoop.hdfs.protocol.Block): 155
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 79
Test (org.junit.Test): 77
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 74
Path (org.apache.hadoop.fs.Path): 28
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 26
IOException (java.io.IOException): 24
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 22
Configuration (org.apache.hadoop.conf.Configuration): 20
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 17
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 17
CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock): 17
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 15
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 14
ArrayList (java.util.ArrayList): 12
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 11
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 10