Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class TestDecommission, method testUsedCapacity.
@Test
public void testUsedCapacity() throws Exception {
  int numNamenodes = 1;
  int numDatanodes = 2;
  startCluster(numNamenodes, numDatanodes);
  FSNamesystem ns = getCluster().getNamesystem(0);
  BlockManager blockManager = ns.getBlockManager();
  DatanodeStatistics datanodeStatistics =
      blockManager.getDatanodeManager().getDatanodeStatistics();
  long initialUsedCapacity = datanodeStatistics.getCapacityUsed();
  long initialTotalCapacity = datanodeStatistics.getCapacityTotal();
  long initialBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
  ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
      new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
  namenodeDecomList.add(0, new ArrayList<>(numDatanodes));
  ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(0);
  // decommission one node
  DatanodeInfo decomNode = takeNodeOutofService(0, null, 0,
      decommissionedNodes, AdminStates.DECOMMISSIONED);
  decommissionedNodes.add(decomNode);
  long newUsedCapacity = datanodeStatistics.getCapacityUsed();
  long newTotalCapacity = datanodeStatistics.getCapacityTotal();
  long newBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
  assertTrue("DfsUsedCapacity should not be the same after a node has "
      + "been decommissioned!", initialUsedCapacity != newUsedCapacity);
  assertTrue("TotalCapacity should not be the same after a node has "
      + "been decommissioned!", initialTotalCapacity != newTotalCapacity);
  assertTrue("BlockPoolUsed should not be the same after a node has "
      + "been decommissioned!", initialBlockPoolUsed != newBlockPoolUsed);
}
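Each example on this page reaches BlockManager the same way: from the FSNamesystem, then through DatanodeManager for datanode-level state such as the DatanodeStatistics read above. A minimal sketch of that accessor chain, assuming a running MiniDFSCluster referenced by a cluster variable (the variable name is illustrative, not taken from the test):

  FSNamesystem ns = cluster.getNamesystem(0);
  BlockManager blockManager = ns.getBlockManager();
  DatanodeManager datanodeManager = blockManager.getDatanodeManager();
  DatanodeStatistics stats = datanodeManager.getDatanodeStatistics();
  // Cluster-wide byte counters aggregated from datanode heartbeats:
  long used = stats.getCapacityUsed();
  long total = stats.getCapacityTotal();
  long blockPoolUsed = stats.getBlockPoolUsed();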
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class BlockReportTestBase, method testInterleavedBlockReports.
// See HDFS-10301
@Test(timeout = 300000)
public void testInterleavedBlockReports()
    throws IOException, ExecutionException, InterruptedException {
  int numConcurrentBlockReports = 3;
  DataNode dn = cluster.getDataNodes().get(DN_N0);
  final String poolId = cluster.getNamesystem().getBlockPoolId();
  LOG.info("Block pool id: " + poolId);
  final DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  final StorageBlockReport[] reports = getBlockReports(dn, poolId, true, true);
  // Get the list of storage ids associated with the datanode
  // before the test
  BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
  final DatanodeDescriptor dnDescriptor =
      bm.getDatanodeManager().getDatanode(dn.getDatanodeId());
  DatanodeStorageInfo[] storageInfos = dnDescriptor.getStorageInfos();
  // Send the block report concurrently using
  // numThreads=numConcurrentBlockReports
  ExecutorService executorService =
      Executors.newFixedThreadPool(numConcurrentBlockReports);
  List<Future<Void>> futureList = new ArrayList<>(numConcurrentBlockReports);
  for (int i = 0; i < numConcurrentBlockReports; i++) {
    futureList.add(executorService.submit(new Callable<Void>() {

      @Override
      public Void call() throws IOException {
        sendBlockReports(dnR, poolId, reports);
        return null;
      }
    }));
  }
  for (Future<Void> future : futureList) {
    future.get();
  }
  executorService.shutdown();
  // Verify that the storages match before and after the test
  Assert.assertArrayEquals(storageInfos, dnDescriptor.getStorageInfos());
}
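The anonymous Callable above predates lambdas; on Java 8 and later the same concurrent submission can be written more compactly. A sketch only, reusing the numConcurrentBlockReports, futureList, executorService, dnR, poolId and reports variables from the test above:

  for (int i = 0; i < numConcurrentBlockReports; i++) {
    futureList.add(executorService.submit((Callable<Void>) () -> {
      // Each task replays the same pre-built block reports to the NameNode.
      sendBlockReports(dnR, poolId, reports);
      return null;
    }));
  }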
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class TestFSNamesystem, method testReplQueuesActiveAfterStartupSafemode.
@Test
public void testReplQueuesActiveAfterStartupSafemode()
    throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
  FSImage fsImage = Mockito.mock(FSImage.class);
  Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
  FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
  FSNamesystem fsn = Mockito.spy(fsNamesystem);
  BlockManager bm = fsn.getBlockManager();
  Whitebox.setInternalState(bm, "namesystem", fsn);
  // Make shouldPopulateReplQueues return true
  HAContext haContext = Mockito.mock(HAContext.class);
  HAState haState = Mockito.mock(HAState.class);
  Mockito.when(haContext.getState()).thenReturn(haState);
  Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
  Mockito.when(fsn.getHAContext()).thenReturn(haContext);
  // Make NameNode.getNameNodeMetrics() not return null
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  fsn.enterSafeMode(false);
  assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
  assertTrue("Replication queues were being populated during very first "
      + "safemode", !bm.isPopulatingReplQueues());
  fsn.leaveSafeMode(false);
  assertTrue("FSNamesystem didn't leave safemode", !fsn.isInSafeMode());
  assertTrue("Replication queues weren't being populated even after leaving "
      + "safemode", bm.isPopulatingReplQueues());
  fsn.enterSafeMode(false);
  assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
  assertTrue("Replication queues weren't being populated after entering "
      + "safemode 2nd time", bm.isPopulatingReplQueues());
}
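The mock-heavy setup above isolates the safemode logic without starting a cluster. The same observable behaviour, that re-entering safemode after startup keeps the replication queues populated, can be checked against a real NameNode; a rough sketch, assuming a started MiniDFSCluster referenced by an illustrative cluster variable:

  DistributedFileSystem fs = cluster.getFileSystem();
  BlockManager bm = cluster.getNamesystem().getBlockManager();
  // Manually entered safe mode, after startup safemode has already ended:
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  assertTrue(bm.isPopulatingReplQueues());
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);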
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class TestDFSOutputStream, method testNoLocalWriteFlag.
@Test
public void testNoLocalWriteFlag() throws IOException {
  DistributedFileSystem fs = cluster.getFileSystem();
  EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.NO_LOCAL_WRITE,
      CreateFlag.CREATE);
  BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
  DatanodeManager dm = bm.getDatanodeManager();
  try (FSDataOutputStream os = fs.create(new Path("/test-no-local"),
      FsPermission.getDefault(), flags, 512, (short) 2, 512, null)) {
    // Inject a DatanodeManager that returns one DataNode as local node for
    // the client.
    DatanodeManager spyDm = spy(dm);
    DatanodeDescriptor dn1 = dm.getDatanodeListForReport(
        HdfsConstants.DatanodeReportType.LIVE).get(0);
    doReturn(dn1).when(spyDm).getDatanodeByHost("127.0.0.1");
    Whitebox.setInternalState(bm, "datanodeManager", spyDm);
    byte[] buf = new byte[512 * 16];
    new Random().nextBytes(buf);
    os.write(buf);
  } finally {
    Whitebox.setInternalState(bm, "datanodeManager", dm);
  }
  cluster.triggerBlockReports();
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  // Total number of DataNodes is 3.
  assertEquals(3, cluster.getAllBlockReports(bpid).size());
  int numDataNodesWithData = 0;
  for (Map<DatanodeStorage, BlockListAsLongs> dnBlocks :
      cluster.getAllBlockReports(bpid)) {
    for (BlockListAsLongs blocks : dnBlocks.values()) {
      if (blocks.getNumberOfBlocks() > 0) {
        numDataNodesWithData++;
        break;
      }
    }
  }
  // Verify that only one DN has no data.
  assertEquals(1, 3 - numDataNodesWithData);
}
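For reference, CreateFlag.NO_LOCAL_WRITE is advisory: it asks the default block placement policy to avoid the writer's local DataNode, and the request from an application looks the same as in the test. A minimal client-side sketch (the path, buffer size, block size and payload array are illustrative):

  EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.NO_LOCAL_WRITE);
  try (FSDataOutputStream out = fs.create(new Path("/data/remote-only"),
      FsPermission.getFileDefault(), flags, 4096, (short) 3, 128 * 1024 * 1024, null)) {
    out.write(payload);  // payload is an application-supplied byte[]
  }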
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class TestDFSRename, method testRenameWithOverwrite.
/**
 * Check that the blocks of the dst file are cleaned up after a rename with
 * overwrite, and restart the NN to check that the rename persisted.
 */
@Test(timeout = 120000)
public void testRenameWithOverwrite() throws Exception {
  final short replFactor = 2;
  final long blockSize = 512;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(replFactor).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    long fileLen = blockSize * 3;
    String src = "/foo/src";
    String dst = "/foo/dst";
    Path srcPath = new Path(src);
    Path dstPath = new Path(dst);
    DFSTestUtil.createFile(dfs, srcPath, fileLen, replFactor, 1);
    DFSTestUtil.createFile(dfs, dstPath, fileLen, replFactor, 1);
    LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(
        cluster.getNameNode(), dst, 0, fileLen);
    BlockManager bm = NameNodeAdapter.getNamesystem(
        cluster.getNameNode()).getBlockManager();
    assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock()
        .getLocalBlock()) != null);
    dfs.rename(srcPath, dstPath, Rename.OVERWRITE);
    assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock()
        .getLocalBlock()) == null);
    // Restart NN and check that the rename persisted
    cluster.restartNameNodes();
    assertFalse(dfs.exists(srcPath));
    assertTrue(dfs.exists(dstPath));
  } finally {
    if (dfs != null) {
      dfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
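From a client's point of view, the overwrite rename in the test is a single call; the NameNode then drops the blocks that backed the old destination file, which is why getStoredBlock returns null afterwards. A sketch using the paths from the test:

  // Rename /foo/src onto /foo/dst, replacing the existing destination file.
  dfs.rename(new Path("/foo/src"), new Path("/foo/dst"), Options.Rename.OVERWRITE);
  // The blocks that previously backed /foo/dst are invalidated and
  // disappear from the BlockManager.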