Use of org.apache.hadoop.hdfs.server.namenode.Namesystem in project hadoop by apache.
From the class TestReplicationPolicy, method testConvertLastBlockToUnderConstructionDoesNotCauseSkippedReplication.
@Test(timeout = 60000)
public void testConvertLastBlockToUnderConstructionDoesNotCauseSkippedReplication() throws IOException {
  // Mock the namesystem so that write-lock checks inside the block manager pass.
  Namesystem mockNS = mock(Namesystem.class);
  when(mockNS.hasWriteLock()).thenReturn(true);
  BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
  LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;
  // Generate two non-negative block IDs.
  long blkID1 = ThreadLocalRandom.current().nextLong();
  if (blkID1 < 0) {
    blkID1 *= -1;
  }
  long blkID2 = ThreadLocalRandom.current().nextLong();
  if (blkID2 < 0) {
    blkID2 *= -1;
  }
  BlockInfo block1 = genBlockInfo(blkID1);
  BlockInfo block2 = genBlockInfo(blkID2);
  // Adding QUEUE_LOW_REDUNDANCY block
  lowRedundancyBlocks.add(block1, 0, 0, 1, 1);
  // Adding QUEUE_LOW_REDUNDANCY block
  lowRedundancyBlocks.add(block2, 0, 0, 1, 1);
  List<List<BlockInfo>> chosenBlocks;
  // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
  // from QUEUE_VERY_LOW_REDUNDANCY.
  chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
  assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
  // Build a block collection whose last block wraps block1 and register it
  // with the block manager.
  final BlockInfoContiguous info = new BlockInfoContiguous(block1, (short) 1);
  final BlockCollection mbc = mock(BlockCollection.class);
  when(mbc.getId()).thenReturn(1000L);
  when(mbc.getLastBlock()).thenReturn(info);
  when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1);
  when(mbc.isUnderConstruction()).thenReturn(true);
  ContentSummary cs = mock(ContentSummary.class);
  when(cs.getLength()).thenReturn((long) 1);
  when(mbc.computeContentSummary(bm.getStoragePolicySuite())).thenReturn(cs);
  info.setBlockCollectionId(1000);
  bm.addBlockCollection(info, mbc);
  DatanodeStorageInfo[] storageAry = {
      new DatanodeStorageInfo(dataNodes[0], new DatanodeStorage("s1")) };
  info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, storageAry);
  // Mock a storage on a decommissioned datanode and attach it to the block
  // under construction.
  DatanodeStorageInfo storage = mock(DatanodeStorageInfo.class);
  DatanodeDescriptor dn = mock(DatanodeDescriptor.class);
  when(dn.isDecommissioned()).thenReturn(true);
  when(storage.getState()).thenReturn(DatanodeStorage.State.NORMAL);
  when(storage.getDatanodeDescriptor()).thenReturn(dn);
  when(storage.removeBlock(any(BlockInfo.class))).thenReturn(true);
  when(storage.addBlock(any(BlockInfo.class))).thenReturn(DatanodeStorageInfo.AddBlockResult.ADDED);
  info.addStorage(storage, info);
  BlockInfo lastBlk = mbc.getLastBlock();
  when(mbc.getLastBlock()).thenReturn(lastBlk, info);
  bm.convertLastBlockToUnderConstruction(mbc, 0L);
  // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
  // from QUEUE_VERY_LOW_REDUNDANCY.
  // This block remains and should not be skipped over.
  chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
  assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
}
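Distilled from the snippet above, a minimal sketch of the shared mocking pattern: stub Namesystem so that BlockManager's internal lock checks pass without a running NameNode. The class and helper names are illustrative and not part of the Hadoop tests.

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class NamesystemMockSketch {
  // Illustrative helper (name is ours): build a BlockManager backed by a
  // mocked Namesystem that always reports the namesystem lock as held, so
  // lock assertions inside block-management code do not trip in a unit test.
  static BlockManager newBlockManagerWithMockedLocks() {
    Namesystem mockNS = mock(Namesystem.class);
    when(mockNS.hasWriteLock()).thenReturn(true);
    when(mockNS.hasReadLock()).thenReturn(true);
    return new BlockManager(mockNS, false, new HdfsConfiguration());
  }
}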
Use of org.apache.hadoop.hdfs.server.namenode.Namesystem in project hadoop by apache.
From the class TestHeartbeatHandling, method testHeartbeatStopWatch.
@Test
public void testHeartbeatStopWatch() throws Exception {
  Namesystem ns = Mockito.mock(Namesystem.class);
  BlockManager bm = Mockito.mock(BlockManager.class);
  Configuration conf = new Configuration();
  long recheck = 2000;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, recheck);
  HeartbeatManager monitor = new HeartbeatManager(ns, bm, conf);
  monitor.restartHeartbeatStopWatch();
  assertFalse(monitor.shouldAbortHeartbeatCheck(0));
  // sleep shorter than recheck and verify shouldn't abort
  Thread.sleep(100);
  assertFalse(monitor.shouldAbortHeartbeatCheck(0));
  // sleep longer than recheck and verify should abort unless ignore delay
  Thread.sleep(recheck);
  assertTrue(monitor.shouldAbortHeartbeatCheck(0));
  assertFalse(monitor.shouldAbortHeartbeatCheck(-recheck * 3));
  // ensure it resets properly
  monitor.restartHeartbeatStopWatch();
  assertFalse(monitor.shouldAbortHeartbeatCheck(0));
}
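For context, a hedged sketch of how the stopwatch guard exercised above could be consumed by a periodic heartbeat pass. The class, its package placement, and the checkOneNode helper are assumptions for illustration, not Hadoop's actual HeartbeatManager implementation.

package org.apache.hadoop.hdfs.server.blockmanagement;

import java.util.List;

public class HeartbeatCheckSketch {
  // Hypothetical periodic pass (not Hadoop's real heartbeat loop): restart
  // the stopwatch, then bail out early once the pass has run longer than the
  // configured recheck interval, which is the condition the test verifies.
  // Placed in the blockmanagement package so the two stopwatch methods are
  // visible even if they are package-private.
  static void runHeartbeatPass(HeartbeatManager monitor, List<DatanodeDescriptor> nodes) {
    monitor.restartHeartbeatStopWatch();
    for (DatanodeDescriptor node : nodes) {
      if (monitor.shouldAbortHeartbeatCheck(0)) {
        return; // give up this pass; resume on the next scheduled run
      }
      checkOneNode(node); // hypothetical per-datanode liveness check
    }
  }

  private static void checkOneNode(DatanodeDescriptor node) {
    // placeholder for the actual per-datanode heartbeat handling
  }
}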
Use of org.apache.hadoop.hdfs.server.namenode.Namesystem in project hadoop by apache.
From the class TestReplicationPolicy, method testupdateNeededReplicationsDoesNotCauseSkippedReplication.
@Test(timeout = 60000)
public void testupdateNeededReplicationsDoesNotCauseSkippedReplication() throws IOException {
  // Mock the namesystem so that read-lock checks inside the block manager pass.
  Namesystem mockNS = mock(Namesystem.class);
  when(mockNS.hasReadLock()).thenReturn(true);
  BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
  LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;
  BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
  BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
  // Adding QUEUE_LOW_REDUNDANCY block
  lowRedundancyBlocks.add(block1, 0, 0, 1, 1);
  // Adding QUEUE_LOW_REDUNDANCY block
  lowRedundancyBlocks.add(block2, 0, 0, 1, 1);
  List<List<BlockInfo>> chosenBlocks;
  // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
  // from QUEUE_VERY_LOW_REDUNDANCY.
  chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
  assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
  bm.setReplication((short) 0, (short) 1, block1);
  // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
  // from QUEUE_VERY_LOW_REDUNDANCY.
  // This block remains and should not be skipped over.
  chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
  assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
}