
Example 1 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

From the class TestDecommission, method testUsedCapacity:

@Test
public void testUsedCapacity() throws Exception {
    int numNamenodes = 1;
    int numDatanodes = 2;
    startCluster(numNamenodes, numDatanodes);
    FSNamesystem ns = getCluster().getNamesystem(0);
    BlockManager blockManager = ns.getBlockManager();
    DatanodeStatistics datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
    long initialUsedCapacity = datanodeStatistics.getCapacityUsed();
    long initialTotalCapacity = datanodeStatistics.getCapacityTotal();
    long initialBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList = new ArrayList<>(numNamenodes);
    namenodeDecomList.add(0, new ArrayList<>(numDatanodes));
    ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(0);
    // Decommission one node
    DatanodeInfo decomNode = takeNodeOutofService(0, null, 0, decommissionedNodes, AdminStates.DECOMMISSIONED);
    decommissionedNodes.add(decomNode);
    long newUsedCapacity = datanodeStatistics.getCapacityUsed();
    long newTotalCapacity = datanodeStatistics.getCapacityTotal();
    long newBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
    assertTrue("DfsUsedCapacity should not be the same after a node has " + "been decommissioned!", initialUsedCapacity != newUsedCapacity);
    assertTrue("TotalCapacity should not be the same after a node has " + "been decommissioned!", initialTotalCapacity != newTotalCapacity);
    assertTrue("BlockPoolUsed should not be the same after a node has " + "been decommissioned!", initialBlockPoolUsed != newBlockPoolUsed);
}
Also used: DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo), BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), ArrayList(java.util.ArrayList), DatanodeStatistics(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics), FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test(org.junit.Test)
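
The startCluster and takeNodeOutofService calls above are helpers defined by the surrounding test fixture rather than public HDFS APIs. As a rough standalone sketch of the setup portion using only public MiniDFSCluster methods (an illustration under that assumption, not the fixture's actual code):

Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
    // Wait until both DataNodes have registered before sampling statistics.
    cluster.waitActive();
    FSNamesystem ns = cluster.getNamesystem(0);
    DatanodeStatistics stats = ns.getBlockManager().getDatanodeManager().getDatanodeStatistics();
    long usedBefore = stats.getCapacityUsed();
    // ... decommission a node, then re-read and compare the statistics ...
} finally {
    cluster.shutdown();
}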

Example 2 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

From the class BlockReportTestBase, method testInterleavedBlockReports:

// See HDFS-10301
@Test(timeout = 300000)
public void testInterleavedBlockReports() throws IOException, ExecutionException, InterruptedException {
    int numConcurrentBlockReports = 3;
    DataNode dn = cluster.getDataNodes().get(DN_N0);
    final String poolId = cluster.getNamesystem().getBlockPoolId();
    LOG.info("Block pool id: " + poolId);
    final DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    final StorageBlockReport[] reports = getBlockReports(dn, poolId, true, true);
    // Capture the storages associated with the datanode before the test
    BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
    final DatanodeDescriptor dnDescriptor = bm.getDatanodeManager().getDatanode(dn.getDatanodeId());
    DatanodeStorageInfo[] storageInfos = dnDescriptor.getStorageInfos();
    // Send the block reports concurrently from numConcurrentBlockReports threads
    ExecutorService executorService = Executors.newFixedThreadPool(numConcurrentBlockReports);
    List<Future<Void>> futureList = new ArrayList<>(numConcurrentBlockReports);
    for (int i = 0; i < numConcurrentBlockReports; i++) {
        futureList.add(executorService.submit(new Callable<Void>() {

            @Override
            public Void call() throws IOException {
                sendBlockReports(dnR, poolId, reports);
                return null;
            }
        }));
    }
    for (Future<Void> future : futureList) {
        future.get();
    }
    executorService.shutdown();
    // Verify that the storages match before and after the test
    Assert.assertArrayEquals(storageInfos, dnDescriptor.getStorageInfos());
}
Also used: StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), ArrayList(java.util.ArrayList), Callable(java.util.concurrent.Callable), DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), ExecutorService(java.util.concurrent.ExecutorService), Future(java.util.concurrent.Future), Test(org.junit.Test)
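
Since the Callable does nothing beyond invoking sendBlockReports and returning null, the submission loop can be written more compactly with a Java 8 lambda; the behavior is identical:

for (int i = 0; i < numConcurrentBlockReports; i++) {
    // Callable<Void> lambda: the explicit "return null" preserves the Void signature.
    futureList.add(executorService.submit(() -> {
        sendBlockReports(dnR, poolId, reports);
        return null;
    }));
}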

Example 3 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

From the class TestFSNamesystem, method testReplQueuesActiveAfterStartupSafemode:

@Test
public void testReplQueuesActiveAfterStartupSafemode() throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
    FSImage fsImage = Mockito.mock(FSImage.class);
    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
    FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
    FSNamesystem fsn = Mockito.spy(fsNamesystem);
    BlockManager bm = fsn.getBlockManager();
    Whitebox.setInternalState(bm, "namesystem", fsn);
    // Make shouldPopulateReplQueues return true
    HAContext haContext = Mockito.mock(HAContext.class);
    HAState haState = Mockito.mock(HAState.class);
    Mockito.when(haContext.getState()).thenReturn(haState);
    Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
    Mockito.when(fsn.getHAContext()).thenReturn(haContext);
    // Make NameNode.getNameNodeMetrics() return a non-null value
    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
    fsn.enterSafeMode(false);
    assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
    assertTrue("Replication queues were being populated during very first " + "safemode", !bm.isPopulatingReplQueues());
    fsn.leaveSafeMode(false);
    assertTrue("FSNamesystem didn't leave safemode", !fsn.isInSafeMode());
    assertTrue("Replication queues weren't being populated even after leaving " + "safemode", bm.isPopulatingReplQueues());
    fsn.enterSafeMode(false);
    assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
    assertTrue("Replication queues weren't being populated after entering " + "safemode 2nd time", bm.isPopulatingReplQueues());
}
Also used: Configuration(org.apache.hadoop.conf.Configuration), HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration), BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), HAState(org.apache.hadoop.hdfs.server.namenode.ha.HAState), HAContext(org.apache.hadoop.hdfs.server.namenode.ha.HAContext), Test(org.junit.Test)
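
Whitebox.setInternalState overwrites the BlockManager's private namesystem field by reflection so that its callbacks reach the spied FSNamesystem instead of the original instance. Roughly what that call does under the hood, sketched with plain java.lang.reflect (the field name is taken from the call above; Whitebox also searches superclasses, while this sketch assumes the field is declared directly on BlockManager):

// Approximate equivalent of Whitebox.setInternalState(bm, "namesystem", fsn):
java.lang.reflect.Field f = bm.getClass().getDeclaredField("namesystem");
f.setAccessible(true);
f.set(bm, fsn);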

Example 4 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

From the class TestDFSOutputStream, method testNoLocalWriteFlag:

@Test
public void testNoLocalWriteFlag() throws IOException {
    DistributedFileSystem fs = cluster.getFileSystem();
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.NO_LOCAL_WRITE, CreateFlag.CREATE);
    BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
    DatanodeManager dm = bm.getDatanodeManager();
    try (FSDataOutputStream os = fs.create(new Path("/test-no-local"), FsPermission.getDefault(), flags, 512, (short) 2, 512, null)) {
        // Inject a DatanodeManager that returns one DataNode as local node for
        // the client.
        DatanodeManager spyDm = spy(dm);
        DatanodeDescriptor dn1 = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.LIVE).get(0);
        doReturn(dn1).when(spyDm).getDatanodeByHost("127.0.0.1");
        Whitebox.setInternalState(bm, "datanodeManager", spyDm);
        byte[] buf = new byte[512 * 16];
        new Random().nextBytes(buf);
        os.write(buf);
    } finally {
        Whitebox.setInternalState(bm, "datanodeManager", dm);
    }
    cluster.triggerBlockReports();
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    // Total number of DataNodes is 3.
    assertEquals(3, cluster.getAllBlockReports(bpid).size());
    int numDataNodesWithData = 0;
    for (Map<DatanodeStorage, BlockListAsLongs> dnBlocks : cluster.getAllBlockReports(bpid)) {
        for (BlockListAsLongs blocks : dnBlocks.values()) {
            if (blocks.getNumberOfBlocks() > 0) {
                numDataNodesWithData++;
                break;
            }
        }
    }
    // Exactly one of the three DataNodes (the client-local one) should have no data.
    assertEquals(1, 3 - numDataNodesWithData);
}
Also used: CreateFlag(org.apache.hadoop.fs.CreateFlag), Path(org.apache.hadoop.fs.Path), DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), Random(java.util.Random), BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), Test(org.junit.Test)
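
Note on the flag: CreateFlag.NO_LOCAL_WRITE asks the block placement policy to avoid the client-local DataNode when choosing write targets. The spied DatanodeManager makes the NameNode treat the first live DataNode as the client's local node, so with three DataNodes and a replication factor of 2, exactly that node should finish the test without any blocks, which is what the final assertion checks.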

Example 5 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

From the class TestDFSRename, method testRenameWithOverwrite:

/**
   * Check that the blocks of the dst file are cleaned up after a rename
   * with the overwrite option, and restart the NN to verify that the
   * rename was persisted.
   */
@Test(timeout = 120000)
public void testRenameWithOverwrite() throws Exception {
    final short replFactor = 2;
    final long blockSize = 512;
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replFactor).build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    try {
        long fileLen = blockSize * 3;
        String src = "/foo/src";
        String dst = "/foo/dst";
        Path srcPath = new Path(src);
        Path dstPath = new Path(dst);
        DFSTestUtil.createFile(dfs, srcPath, fileLen, replFactor, 1);
        DFSTestUtil.createFile(dfs, dstPath, fileLen, replFactor, 1);
        LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(cluster.getNameNode(), dst, 0, fileLen);
        BlockManager bm = NameNodeAdapter.getNamesystem(cluster.getNameNode()).getBlockManager();
        assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()) != null);
        dfs.rename(srcPath, dstPath, Rename.OVERWRITE);
        assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()) == null);
        // Restart the NN and verify the rename was persisted
        cluster.restartNameNodes();
        assertFalse(dfs.exists(srcPath));
        assertTrue(dfs.exists(dstPath));
    } finally {
        if (dfs != null) {
            dfs.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path), Configuration(org.apache.hadoop.conf.Configuration), BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks), Test(org.junit.Test)
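
Here bm.getStoredBlock looks the block up in the BlockManager's internal blocks map, so the two assertions around the rename show that the dst file's original block is tracked before the overwriting rename and gone from the map afterwards; the NameNode restart then confirms that the rename was persisted.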

Aggregations

BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 47 usages
Test (org.junit.Test): 33 usages
Path (org.apache.hadoop.fs.Path): 21 usages
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 13 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 12 usages
IOException (java.io.IOException): 11 usages
Configuration (org.apache.hadoop.conf.Configuration): 11 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 11 usages
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 11 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 10 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 8 usages
FileNotFoundException (java.io.FileNotFoundException): 7 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7 usages
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 7 usages
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 7 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 6 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 usages
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 6 usages
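
As the counts suggest, BlockManager is almost always obtained inside a MiniDFSCluster-based JUnit test via FSNamesystem#getBlockManager, with DatanodeManager and DatanodeDescriptor used to reach per-datanode state and BlockInfo/LocatedBlock used to inspect per-block state; the examples above all follow that pattern.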