
Example 46 with FSNamesystem

use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.

the class TestReadStripedFileWithDecoding method testInvalidateBlock.

@Test
public void testInvalidateBlock() throws IOException {
    final Path file = new Path("/invalidate");
    final int length = 10;
    final byte[] bytes = StripedFileTestUtil.generateBytes(length);
    DFSTestUtil.writeFile(fs, file, bytes);
    int dnIndex = findFirstDataNode(file, cellSize * dataBlocks);
    Assert.assertNotEquals(-1, dnIndex);
    LocatedStripedBlock slb = (LocatedStripedBlock) fs.getClient().getLocatedBlocks(file.toString(), 0, cellSize * dataBlocks).get(0);
    final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(slb, cellSize, dataBlocks, parityBlocks);
    final Block b = blks[0].getBlock().getLocalBlock();
    DataNode dn = cluster.getDataNodes().get(dnIndex);
    // disable the heartbeat from the DN so that the invalidated block record is kept
    // in the NameNode until the heartbeat expires and the NN marks the DN as dead
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    try {
        // delete the file
        fs.delete(file, true);
        // check the block is added to invalidateBlocks
        final FSNamesystem fsn = cluster.getNamesystem();
        final BlockManager bm = fsn.getBlockManager();
        DatanodeDescriptor dnd = NameNodeAdapter.getDatanode(fsn, dn.getDatanodeId());
        Assert.assertTrue(bm.containsInvalidateBlock(blks[0].getLocations()[0], b) || dnd.containsInvalidateBlock(b));
    } finally {
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)

Example 47 with FSNamesystem

use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.

the class TestSafeMode method testRbwBlocksNotConsideredUnderReplicated.

/**
   * Test that, when under-replicated blocks are processed at the end of
   * safe-mode, blocks currently under construction are not considered
   * under-replicated or missing. Regression test for HDFS-2822.
   */
@Test
public void testRbwBlocksNotConsideredUnderReplicated() throws IOException {
    List<FSDataOutputStream> stms = Lists.newArrayList();
    try {
        // Create some junk blocks so that the NN doesn't just immediately
        // exit safemode on restart.
        DFSTestUtil.createFile(fs, new Path("/junk-blocks"), BLOCK_SIZE * 4, (short) 1, 1L);
        // Create several files which are left open (RBW blocks). Several are needed
        // because otherwise the first iteration of the replication monitor would pull
        // them off the replication queue and hide this bug from the test!
        for (int i = 0; i < 10; i++) {
            FSDataOutputStream stm = fs.create(new Path("/append-" + i), true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
            stms.add(stm);
            stm.write(1);
            stm.hflush();
        }
        cluster.restartNameNode();
        FSNamesystem ns = cluster.getNameNode(0).getNamesystem();
        BlockManagerTestUtil.updateState(ns.getBlockManager());
        assertEquals(0, ns.getPendingReplicationBlocks());
        assertEquals(0, ns.getCorruptReplicaBlocks());
        assertEquals(0, ns.getMissingBlocksCount());
    } finally {
        for (FSDataOutputStream stm : stms) {
            IOUtils.closeStream(stm);
        }
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)

Example 48 with FSNamesystem

use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.

the class TestBlockManager method testBlockManagerMachinesArray.

@Test
public void testBlockManagerMachinesArray() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    cluster.waitActive();
    BlockManager blockManager = cluster.getNamesystem().getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    final Path filePath = new Path("/tmp.txt");
    final long fileLen = 1L;
    DFSTestUtil.createFile(fs, filePath, fileLen, (short) 3, 1L);
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(4, datanodes.size());
    FSNamesystem ns = cluster.getNamesystem();
    // get the block
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = cluster.getInstanceStorageDir(0, 0);
    File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    assertTrue("Data directory does not exist", dataDir.exists());
    BlockInfo blockInfo = blockManager.blocksMap.getBlocks().iterator().next();
    ExtendedBlock blk = new ExtendedBlock(bpid, blockInfo.getBlockId(), blockInfo.getNumBytes(), blockInfo.getGenerationStamp());
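    // Pick two replica locations: one datanode whose storage will be reported FAILED,
    // and another whose replica will be marked corrupt.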
    DatanodeDescriptor failedStorageDataNode = blockManager.getStoredBlock(blockInfo).getDatanode(0);
    DatanodeDescriptor corruptStorageDataNode = blockManager.getStoredBlock(blockInfo).getDatanode(1);
    ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
    for (int i = 0; i < failedStorageDataNode.getStorageInfos().length; i++) {
        DatanodeStorageInfo storageInfo = failedStorageDataNode.getStorageInfos()[i];
        DatanodeStorage dns = new DatanodeStorage(failedStorageDataNode.getStorageInfos()[i].getStorageID(), DatanodeStorage.State.FAILED, failedStorageDataNode.getStorageInfos()[i].getStorageType());
        while (storageInfo.getBlockIterator().hasNext()) {
            BlockInfo blockInfo1 = storageInfo.getBlockIterator().next();
            if (blockInfo1.equals(blockInfo)) {
                StorageReport report = new StorageReport(dns, true, storageInfo.getCapacity(), storageInfo.getDfsUsed(), storageInfo.getRemaining(), storageInfo.getBlockPoolUsed(), 0L);
                reports.add(report);
                break;
            }
        }
    }
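    // Send a heartbeat carrying the FAILED storage report so the storage holding the
    // block on this node is marked as failed.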
    failedStorageDataNode.updateHeartbeat(reports.toArray(StorageReport.EMPTY_ARRAY), 0L, 0L, 0, 0, null);
    ns.writeLock();
    DatanodeStorageInfo corruptStorageInfo = null;
    for (int i = 0; i < corruptStorageDataNode.getStorageInfos().length; i++) {
        corruptStorageInfo = corruptStorageDataNode.getStorageInfos()[i];
        while (corruptStorageInfo.getBlockIterator().hasNext()) {
            BlockInfo blockInfo1 = corruptStorageInfo.getBlockIterator().next();
            if (blockInfo1.equals(blockInfo)) {
                break;
            }
        }
    }
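    // Mark the replica on the second datanode's storage as corrupt.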
    blockManager.findAndMarkBlockAsCorrupt(blk, corruptStorageDataNode, corruptStorageInfo.getStorageID(), CorruptReplicasMap.Reason.ANY.toString());
    ns.writeUnlock();
    BlockInfo[] blockInfos = new BlockInfo[] { blockInfo };
    ns.readLock();
    LocatedBlocks locatedBlocks = blockManager.createLocatedBlocks(blockInfos, 3L, false, 0L, 3L, false, false, null, null);
    assertTrue("Located Blocks should exclude corrupt" + "replicas and failed storages", locatedBlocks.getLocatedBlocks().size() == 1);
    ns.readUnlock();
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) StorageReport(org.apache.hadoop.hdfs.server.protocol.StorageReport) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) TestINodeFile(org.apache.hadoop.hdfs.server.namenode.TestINodeFile) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) File(java.io.File) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)

Example 49 with FSNamesystem

use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.

the class TestBlockManager method testStorageWithRemainingCapacity.

/**
   * Tests that a namenode doesn't choose a datanode with full disks to 
   * store blocks.
   * @throws Exception
   */
@Test
public void testStorageWithRemainingCapacity() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = FileSystem.get(conf);
    Path file1 = null;
    try {
        cluster.waitActive();
        final FSNamesystem namesystem = cluster.getNamesystem();
        final String poolId = namesystem.getBlockPoolId();
        final DatanodeRegistration nodeReg = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
        final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
        //leave each storage with only 64k remaining, not enough for the 100k block written below
        for (DatanodeStorageInfo storage : dd.getStorageInfos()) {
            storage.setUtilizationForTesting(65536, 0, 65536, 0);
        }
        //sum of the remaining capacity of both the storages
        dd.setRemaining(131072);
        file1 = new Path("testRemainingStorage.dat");
        try {
            DFSTestUtil.createFile(fs, file1, 102400, 102400, 102400, (short) 1, 0x1BAD5EED);
        } catch (RemoteException re) {
            GenericTestUtils.assertExceptionContains("nodes instead of " + "minReplication", re);
        }
    } finally {
        // Clean up
        assertTrue(fs.exists(file1));
        fs.delete(file1, true);
        assertTrue(!fs.exists(file1));
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) RemoteException(org.apache.hadoop.ipc.RemoteException) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)

Example 50 with FSNamesystem

use of org.apache.hadoop.hdfs.server.namenode.FSNamesystem in project hadoop by apache.

the class TestPendingReconstruction method testProcessPendingReconstructions.

/* Test that processPendingReconstructions will use the most recent
 * blockinfo from the blocksmap by placing a larger genstamp into
 * the blocksmap.
 */
@Test
public void testProcessPendingReconstructions() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
    MiniDFSCluster cluster = null;
    Block block;
    BlockInfo blockInfo;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
        cluster.waitActive();
        FSNamesystem fsn = cluster.getNamesystem();
        BlockManager blkManager = fsn.getBlockManager();
        PendingReconstructionBlocks pendingReconstruction = blkManager.pendingReconstruction;
        LowRedundancyBlocks neededReconstruction = blkManager.neededReconstruction;
        BlocksMap blocksMap = blkManager.blocksMap;
        //
        // Add 1 block to pendingReconstructions with GenerationStamp = 0.
        //
        block = new Block(1, 1, 0);
        blockInfo = new BlockInfoContiguous(block, (short) 3);
        pendingReconstruction.increment(blockInfo, DatanodeStorageInfo.toDatanodeDescriptors(DFSTestUtil.createDatanodeStorageInfos(1)));
        BlockCollection bc = Mockito.mock(BlockCollection.class);
        // Place into blocksmap with GenerationStamp = 1
        blockInfo.setGenerationStamp(1);
        blocksMap.addBlockCollection(blockInfo, bc);
        assertEquals("Size of pendingReconstructions ", 1, pendingReconstruction.size());
        // Add a second block to pendingReconstructions that has no
        // corresponding entry in blocksmap
        block = new Block(2, 2, 0);
        blockInfo = new BlockInfoContiguous(block, (short) 3);
        pendingReconstruction.increment(blockInfo, DatanodeStorageInfo.toDatanodeDescriptors(DFSTestUtil.createDatanodeStorageInfos(1)));
        // verify 2 blocks in pendingReconstructions
        assertEquals("Size of pendingReconstructions ", 2, pendingReconstruction.size());
        // Wait for everything to time out.
        while (pendingReconstruction.size() > 0) {
            try {
                Thread.sleep(100);
            } catch (Exception e) {
            }
        }
        // Verify that the block moves to neededReconstruction.
        while (neededReconstruction.size() == 0) {
            try {
                Thread.sleep(100);
            } catch (Exception e) {
            }
        }
        // Verify that the generation stamp we will reconstruct is now 1.
        for (Block b : neededReconstruction) {
            assertEquals("Generation stamp is 1 ", 1, b.getGenerationStamp());
        }
        // Verify size of neededReconstruction is exactly 1.
        assertEquals("size of neededReconstruction is 1 ", 1, neededReconstruction.size());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)

Aggregations

FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem) 77
Test (org.junit.Test) 59
Path (org.apache.hadoop.fs.Path) 51
FileSystem (org.apache.hadoop.fs.FileSystem) 41
Configuration (org.apache.hadoop.conf.Configuration) 37
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 27
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) 25
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 23
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 19
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 14
ArrayList (java.util.ArrayList) 12
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) 12
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 9
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 7
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 7
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID) 6
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) 6
File (java.io.File) 5
IOException (java.io.IOException) 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks) 5
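
Nearly every example above reaches FSNamesystem the same way: build a MiniDFSCluster, call cluster.getNamesystem(), and pull the BlockManager from the namesystem. The following is a minimal sketch of that shared skeleton in isolation; the class name, test name, and datanode count are illustrative, not taken from the Hadoop sources.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.junit.Test;

public class FsNamesystemAccessSketch {

    @Test
    public void testAccessPattern() throws Exception {
        final Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = null;
        try {
            // Start an in-process cluster and wait for the datanodes to register.
            cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
            cluster.waitActive();
            // FSNamesystem comes from the cluster; BlockManager comes from the
            // namesystem, as in Examples 46-50 above.
            FSNamesystem fsn = cluster.getNamesystem();
            BlockManager bm = fsn.getBlockManager();
            // ... exercise fsn / bm here ...
        } finally {
            if (cluster != null) {
                cluster.shutdown();
            }
        }
    }
}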