
Example 31 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

In the class TestFileCreation, the method testFileCreationWithOverwrite:

/**
   * 1. Check the blocks of old file are cleaned after creating with overwrite
   * 2. Restart NN, check the file
   * 3. Save new checkpoint and restart NN, check the file
   */
@Test(timeout = 120000)
public void testFileCreationWithOverwrite() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt("dfs.blocksize", blockSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    try {
        dfs.mkdirs(new Path("/foo/dir"));
        String file = "/foo/dir/file";
        Path filePath = new Path(file);
        // Case 1: Create file with overwrite, check the blocks of old file
        // are cleaned after creating with overwrite
        NameNode nn = cluster.getNameNode();
        FSNamesystem fsn = NameNodeAdapter.getNamesystem(nn);
        BlockManager bm = fsn.getBlockManager();
        FSDataOutputStream out = dfs.create(filePath);
        byte[] oldData = AppendTestUtil.randomBytes(seed, fileSize);
        try {
            out.write(oldData);
        } finally {
            out.close();
        }
        LocatedBlocks oldBlocks = NameNodeAdapter.getBlockLocations(nn, file, 0, fileSize);
        assertBlocks(bm, oldBlocks, true);
        out = dfs.create(filePath, true);
        byte[] newData = AppendTestUtil.randomBytes(seed, fileSize);
        try {
            out.write(newData);
        } finally {
            out.close();
        }
        dfs.deleteOnExit(filePath);
        LocatedBlocks newBlocks = NameNodeAdapter.getBlockLocations(nn, file, 0, fileSize);
        assertBlocks(bm, newBlocks, true);
        assertBlocks(bm, oldBlocks, false);
        FSDataInputStream in = dfs.open(filePath);
        byte[] result = null;
        try {
            result = readAll(in);
        } finally {
            in.close();
        }
        Assert.assertArrayEquals(newData, result);
        // Case 2: Restart NN, check the file
        cluster.restartNameNode();
        nn = cluster.getNameNode();
        in = dfs.open(filePath);
        try {
            result = readAll(in);
        } finally {
            in.close();
        }
        Assert.assertArrayEquals(newData, result);
        // Case 3: Save new checkpoint and restart NN, check the file
        NameNodeAdapter.enterSafeMode(nn, false);
        NameNodeAdapter.saveNamespace(nn);
        cluster.restartNameNode();
        nn = cluster.getNameNode();
        in = dfs.open(filePath);
        try {
            result = readAll(in);
        } finally {
            in.close();
        }
        Assert.assertArrayEquals(newData, result);
    } finally {
        if (dfs != null) {
            dfs.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
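
The helper assertBlocks used above is not included in this excerpt. A minimal sketch of what such a helper could look like, assuming it checks each block against the NameNode's blocks map via BlockManager#getStoredBlock (the actual helper in TestFileCreation may differ):

import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.junit.Assert;

// Hypothetical sketch: assert that every block of the given LocatedBlocks
// is (or is no longer) tracked by the BlockManager.
private void assertBlocks(BlockManager bm, LocatedBlocks lbs, boolean exist) {
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
        BlockInfo stored = bm.getStoredBlock(lb.getBlock().getLocalBlock());
        if (exist) {
            Assert.assertNotNull("block should still be in the blocks map", stored);
        } else {
            Assert.assertNull("block should have been removed", stored);
        }
    }
}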

Example 32 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

In the class TestFileCorruption, the method testCorruptionWithDiskFailure:

@Test
public void testCorruptionWithDiskFailure() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        BlockManager bm = cluster.getNamesystem().getBlockManager();
        FileSystem fs = cluster.getFileSystem();
        final Path FILE_PATH = new Path("/tmp.txt");
        final long FILE_LEN = 1L;
        DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short) 3, 1L);
        // get the block
        final String bpid = cluster.getNamesystem().getBlockPoolId();
        File storageDir = cluster.getInstanceStorageDir(0, 0);
        File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        assertTrue("Data directory does not exist", dataDir.exists());
        ExtendedBlock blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
        if (blk == null) {
            blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
        }
        assertFalse("Data directory does not contain any blocks or there was an" + " " + "IO error", blk == null);
        ArrayList<DataNode> datanodes = cluster.getDataNodes();
        assertEquals(datanodes.size(), 3);
        FSNamesystem ns = cluster.getNamesystem();
        //fail the storage on that node which has the block
        try {
            ns.writeLock();
            updateAllStorages(bm);
        } finally {
            ns.writeUnlock();
        }
        ns.writeLock();
        try {
            markAllBlocksAsCorrupt(bm, blk);
        } finally {
            ns.writeUnlock();
        }
        // open the file
        fs.open(FILE_PATH);
        //clean up
        fs.delete(FILE_PATH, false);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) File(java.io.File) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
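
The helpers updateAllStorages, markAllBlocksAsCorrupt and getFirstBlock are not part of this excerpt. As an illustration, a hedged sketch of how markAllBlocksAsCorrupt might be written, assuming it walks the block's storages via BlockManager#getStorages and reports each replica with BlockManager#findAndMarkBlockAsCorrupt (the real helper in TestFileCorruption may differ):

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;

// Hypothetical sketch: mark every replica of the given block as corrupt.
// Intended to be called while holding the namesystem write lock, as in the test above.
private void markAllBlocksAsCorrupt(BlockManager bm, ExtendedBlock blk)
        throws IOException {
    for (DatanodeStorageInfo storage : bm.getStorages(blk.getLocalBlock())) {
        bm.findAndMarkBlockAsCorrupt(blk, storage.getDatanodeDescriptor(),
                storage.getStorageID(), "TEST_CORRUPTION");
    }
}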

Example 33 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

In the class TestMissingBlocksAlert, the method testMissingBlocksAlert:

@Test
public void testMissingBlocksAlert() throws IOException, InterruptedException, MalformedObjectNameException, AttributeNotFoundException, MBeanException, ReflectionException, InstanceNotFoundException {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new HdfsConfiguration();
        //minimize test delay
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 0);
        conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
        int fileLen = 10 * 1024;
        conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, fileLen / 2);
        //start a cluster with single datanode
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        final BlockManager bm = cluster.getNamesystem().getBlockManager();
        DistributedFileSystem dfs = cluster.getFileSystem();
        // create a normal file
        DFSTestUtil.createFile(dfs, new Path("/testMissingBlocksAlert/file1"), fileLen, (short) 3, 0);
        Path corruptFile = new Path("/testMissingBlocks/corruptFile");
        DFSTestUtil.createFile(dfs, corruptFile, fileLen, (short) 3, 0);
        // Corrupt the block
        ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, corruptFile);
        cluster.corruptReplica(0, block);
        // read the file so that the corrupt block is reported to NN
        FSDataInputStream in = dfs.open(corruptFile);
        try {
            in.readFully(new byte[fileLen]);
        } catch (ChecksumException ignored) {
            // checksum error is expected
        }
        in.close();
        LOG.info("Waiting for missing blocks count to increase...");
        while (dfs.getMissingBlocksCount() <= 0) {
            Thread.sleep(100);
        }
        assertEquals(1, dfs.getMissingBlocksCount());
        assertEquals(4, dfs.getUnderReplicatedBlocksCount());
        assertEquals(3, bm.getUnderReplicatedNotMissingBlocks());
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        Assert.assertEquals(1, (long) (Long) mbs.getAttribute(mxbeanName, "NumberOfMissingBlocks"));
        // now do the reverse: remove the file and expect the number of missing
        // blocks to go to zero
        dfs.delete(corruptFile, true);
        LOG.info("Waiting for missing blocks count to be zero...");
        while (dfs.getMissingBlocksCount() > 0) {
            Thread.sleep(100);
        }
        assertEquals(2, dfs.getUnderReplicatedBlocksCount());
        assertEquals(2, bm.getUnderReplicatedNotMissingBlocks());
        Assert.assertEquals(0, (long) (Long) mbs.getAttribute(mxbeanName, "NumberOfMissingBlocks"));
        Path replOneFile = new Path("/testMissingBlocks/replOneFile");
        DFSTestUtil.createFile(dfs, replOneFile, fileLen, (short) 1, 0);
        ExtendedBlock replOneBlock = DFSTestUtil.getFirstBlock(dfs, replOneFile);
        cluster.corruptReplica(0, replOneBlock);
        // read the file so that the corrupt block is reported to NN
        in = dfs.open(replOneFile);
        try {
            in.readFully(new byte[fileLen]);
        } catch (ChecksumException ignored) {
            // checksum error is expected
        }
        in.close();
        assertEquals(1, dfs.getMissingReplOneBlocksCount());
        Assert.assertEquals(1, (long) (Long) mbs.getAttribute(mxbeanName, "NumberOfMissingBlocksWithReplicationFactorOne"));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) ChecksumException(org.apache.hadoop.fs.ChecksumException) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) Test(org.junit.Test)
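
The two Thread.sleep polling loops above can also be expressed with the GenericTestUtils.waitFor helper shown in Example 35. A hedged sketch of the first wait, assuming dfs is effectively final and the test method also handles the TimeoutException that waitFor can throw; the 100 ms poll interval and 60 s limit are illustrative assumptions, not values from the test:

import java.io.IOException;
import org.apache.hadoop.test.GenericTestUtils;
import com.google.common.base.Supplier;

// Illustrative rewrite of the "wait for missing blocks" loop.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
        try {
            // keep polling until the NameNode reports at least one missing block
            return dfs.getMissingBlocksCount() > 0;
        } catch (IOException e) {
            return false;
        }
    }
}, 100, 60000);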

Example 34 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

In the class TestReadStripedFileWithDecoding, the method testInvalidateBlock:

@Test
public void testInvalidateBlock() throws IOException {
    final Path file = new Path("/invalidate");
    final int length = 10;
    final byte[] bytes = StripedFileTestUtil.generateBytes(length);
    DFSTestUtil.writeFile(fs, file, bytes);
    int dnIndex = findFirstDataNode(file, cellSize * dataBlocks);
    Assert.assertNotEquals(-1, dnIndex);
    LocatedStripedBlock slb = (LocatedStripedBlock) fs.getClient().getLocatedBlocks(file.toString(), 0, cellSize * dataBlocks).get(0);
    final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(slb, cellSize, dataBlocks, parityBlocks);
    final Block b = blks[0].getBlock().getLocalBlock();
    DataNode dn = cluster.getDataNodes().get(dnIndex);
    // disable the heartbeat from the DN so that the invalidated block record is
    // kept in the NameNode until the heartbeat expires and the NN marks the DN as dead
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    try {
        // delete the file
        fs.delete(file, true);
        // check the block is added to invalidateBlocks
        final FSNamesystem fsn = cluster.getNamesystem();
        final BlockManager bm = fsn.getBlockManager();
        DatanodeDescriptor dnd = NameNodeAdapter.getDatanode(fsn, dn.getDatanodeId());
        Assert.assertTrue(bm.containsInvalidateBlock(blks[0].getLocations()[0], b) || dnd.containsInvalidateBlock(b));
    } finally {
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
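
The helper findFirstDataNode is also not shown in this excerpt. A hypothetical sketch, assuming it resolves the first replica location of the file to an index into cluster.getDataNodes() by matching the DataNode's transfer port, and that fs and cluster are fields of the test class as used above (the real helper in TestReadStripedFileWithDecoding may differ):

import java.io.IOException;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

// Hypothetical sketch: return the index (in cluster.getDataNodes()) of the
// DataNode holding the first replica of the file, or -1 if none matches.
private int findFirstDataNode(Path file, long length) throws IOException {
    BlockLocation[] locs = fs.getFileBlockLocations(file, 0, length);
    String name = locs[0].getNames()[0];   // "host:xferPort" of the first replica
    int dnIndex = 0;
    for (DataNode dn : cluster.getDataNodes()) {
        if (name.contains(Integer.toString(dn.getXferPort()))) {
            return dnIndex;
        }
        dnIndex++;
    }
    return -1;
}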

Example 35 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

In the class TestDataNodeVolumeFailure, the method testUnderReplicationAfterVolFailure:

/**
   * Test that there are under-replicated blocks after volume failures.
   */
@Test
public void testUnderReplicationAfterVolFailure() throws Exception {
    // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
    // volume failures which is currently not supported on Windows.
    assumeNotWindows();
    // Bring up one more datanode
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive();
    final BlockManager bm = cluster.getNamesystem().getBlockManager();
    Path file1 = new Path("/test1");
    DFSTestUtil.createFile(fs, file1, 1024, (short) 3, 1L);
    DFSTestUtil.waitReplication(fs, file1, (short) 3);
    // Fail the first volume on both datanodes
    File dn1Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
    File dn2Vol1 = new File(dataDir, "data" + (2 * 1 + 1));
    DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
    Path file2 = new Path("/test2");
    DFSTestUtil.createFile(fs, file2, 1024, (short) 3, 1L);
    DFSTestUtil.waitReplication(fs, file2, (short) 3);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            // underReplicatedBlocks are due to failed volumes
            int underReplicatedBlocks = BlockManagerTestUtil.checkHeartbeatAndGetUnderReplicatedBlocksCount(cluster.getNamesystem(), bm);
            if (underReplicatedBlocks > 0) {
                return true;
            }
            LOG.info("There is no under replicated block after volume failure.");
            return false;
        }
    }, 500, 60000);
}
Also used : Path(org.apache.hadoop.fs.Path) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) File(java.io.File) Test(org.junit.Test)
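
On a lambda-capable JDK the anonymous Supplier above can be written more compactly; an equivalent, illustrative form of the same wait:

// Same wait condition as above, expressed as a lambda (illustrative only).
GenericTestUtils.waitFor(() -> {
    int underReplicated = BlockManagerTestUtil
            .checkHeartbeatAndGetUnderReplicatedBlocksCount(
                    cluster.getNamesystem(), bm);
    return underReplicated > 0;
}, 500, 60000);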

Aggregations

BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 47
Test (org.junit.Test): 33
Path (org.apache.hadoop.fs.Path): 21
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 13
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 12
IOException (java.io.IOException): 11
Configuration (org.apache.hadoop.conf.Configuration): 11
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 11
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 11
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 10
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9
Block (org.apache.hadoop.hdfs.protocol.Block): 8
FileNotFoundException (java.io.FileNotFoundException): 7
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 7
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 7
FileSystem (org.apache.hadoop.fs.FileSystem): 6
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 6