Example 1 with DFSTestUtil

Use of org.apache.hadoop.hdfs.DFSTestUtil in project hadoop by apache.

From the class TestFsck, method testFsckOpenFiles.

@Test
public void testFsckOpenFiles() throws Exception {
    DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").setNumFiles(4).build();
    FileSystem fs = null;
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    String topDir = "/srcdat";
    String randomString = "HADOOP  ";
    fs = cluster.getFileSystem();
    cluster.waitActive();
    util.createFiles(fs, topDir);
    util.waitReplication(fs, topDir, (short) 3);
    String outStr = runFsck(conf, 0, true, "/");
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // Open a file for writing and do not close it for now
    Path openFile = new Path(topDir + "/openFile");
    FSDataOutputStream out = fs.create(openFile);
    int writeCount = 0;
    while (writeCount != 100) {
        out.write(randomString.getBytes());
        writeCount++;
    }
    ((DFSOutputStream) out.getWrappedStream()).hflush();
    // Without -openforwrite, fsck should report HEALTHY and not list the open file
    outStr = runFsck(conf, 0, true, topDir);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertFalse(outStr.contains("OPENFORWRITE"));
    // Use the -openforwrite option to list open files
    outStr = runFsck(conf, 0, true, topDir, "-files", "-blocks", "-locations", "-openforwrite");
    System.out.println(outStr);
    assertTrue(outStr.contains("OPENFORWRITE"));
    assertTrue(outStr.contains("Under Construction Block:"));
    assertTrue(outStr.contains("openFile"));
    // Close the file
    out.close();
    // Now, fsck should show HEALTHY fs and should not show any open files
    outStr = runFsck(conf, 0, true, topDir);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertFalse(outStr.contains("OPENFORWRITE"));
    assertFalse(outStr.contains("Under Construction Block:"));
    util.cleanup(fs, topDir);
}
Also used : Path(org.apache.hadoop.fs.Path) DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Matchers.anyString(org.mockito.Matchers.anyString) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DFSOutputStream(org.apache.hadoop.hdfs.DFSOutputStream) Test(org.junit.Test)
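
All of these assertions go through a shared runFsck helper. Below is a minimal sketch of such a helper, assuming org.apache.hadoop.hdfs.tools.DFSck, org.apache.hadoop.util.ToolRunner, java.io.ByteArrayOutputStream/PrintStream, and JUnit's assertEquals are available; the helper in TestFsck may differ in details such as logging:

// Sketch: run fsck against the given paths, capture its output,
// and optionally assert on the expected exit code.
static String runFsck(Configuration conf, int expectedErrCode,
        boolean checkErrorCode, String... path) throws Exception {
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true);
    int errCode = ToolRunner.run(new DFSck(conf, out), path);
    if (checkErrorCode) {
        assertEquals(expectedErrCode, errCode);
    }
    return bStream.toString();
}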

Example 2 with DFSTestUtil

Use of org.apache.hadoop.hdfs.DFSTestUtil in project hadoop by apache.

From the class TestFsck, method testBlockIdCK.

/**
   * Test for blockIdCK.
   */
@Test
public void testBlockIdCK() throws Exception {
    final short replFactor = 2;
    short numDn = 2;
    final long blockSize = 512;
    String[] racks = { "/rack1", "/rack2" };
    String[] hosts = { "host1", "host2" };
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
    DistributedFileSystem dfs = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts).racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    DFSTestUtil util = new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build();
    // Create a test file
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    util.createFile(dfs, path, 1024, replFactor, 1000L);
    util.waitReplication(dfs, path, replFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb : util.getAllBlocks(dfs, path)) {
        sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
    }
    String[] bIds = sb.toString().split(" ");
    //run fsck
    //illegal input test
    String runFsckResult = runFsck(conf, 0, true, "/", "-blockId", "not_a_block_id");
    assertTrue(runFsckResult.contains("Incorrect blockId format:"));
    //general test
    runFsckResult = runFsck(conf, 0, true, "/", "-blockId", sb.toString());
    assertTrue(runFsckResult.contains(bIds[0]));
    assertTrue(runFsckResult.contains(bIds[1]));
    assertTrue(runFsckResult.contains("Block replica on datanode/rack: host1/rack1 is HEALTHY"));
    assertTrue(runFsckResult.contains("Block replica on datanode/rack: host2/rack2 is HEALTHY"));
}
Also used : Path(org.apache.hadoop.fs.Path) DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Matchers.anyString(org.mockito.Matchers.anyString) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
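
Note a subtlety the general test relies on: sb ends with a trailing space, but Java's String.split(" ") drops trailing empty strings, so bIds contains exactly the two block names (of the form blk_<id>). Passing the whole space-separated string as the single -blockId argument makes fsck check each listed block in turn, which is why both ids are asserted in the output.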

Example 3 with DFSTestUtil

Use of org.apache.hadoop.hdfs.DFSTestUtil in project hadoop by apache.

From the class TestFsck, method testFsckNonExistent.

@Test
public void testFsckNonExistent() throws Exception {
    DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").setNumFiles(20).build();
    FileSystem fs = null;
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    util.waitReplication(fs, "/srcdat", (short) 3);
    String outStr = runFsck(conf, 0, true, "/non-existent");
    assertFalse(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    System.out.println(outStr);
    util.cleanup(fs, "/srcdat");
}
Also used : DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Matchers.anyString(org.mockito.Matchers.anyString) Test(org.junit.Test)

Example 4 with DFSTestUtil

Use of org.apache.hadoop.hdfs.DFSTestUtil in project hadoop by apache.

From the class TestFsck, method testFsckListCorruptFilesBlocks.

/** Check that the -list-corruptfileblocks option of the fsck command works properly. */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    FileSystem fs = null;
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);
    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j <= 1; j++) {
            File storageDir = cluster.getInstanceStorageDir(i, j);
            File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
            List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
            if (metadataFiles == null) {
                continue;
            }
            for (File metadataFile : metadataFiles) {
                File blockFile = Block.metaToBlockFile(metadataFile);
                assertTrue("Cannot remove file.", blockFile.delete());
                assertTrue("Cannot remove file.", metadataFile.delete());
            }
        }
    }
    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
        Thread.sleep(1000);
        corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
        numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));
    // Do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
}
Also used : DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) CorruptFileBlocks(org.apache.hadoop.hdfs.protocol.CorruptFileBlocks) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Matchers.anyString(org.mockito.Matchers.anyString) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)
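
The manual sleep loop above works, but the same wait can be written with GenericTestUtils.waitFor, which the maintenance test below already uses for similar polling. A minimal sketch under the same assumptions (namenode in scope; org.apache.hadoop.test.GenericTestUtils and a Supplier import available):

// Sketch: poll the namenode until it reports at least one corrupt file,
// checking every second and timing out after 60 seconds.
GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
        try {
            return namenode.listCorruptFileBlocks("/corruptData", null)
                    .getFiles().length > 0;
        } catch (IOException e) {
            return false;
        }
    }
}, 1000, 60000);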

Example 5 with DFSTestUtil

Use of org.apache.hadoop.hdfs.DFSTestUtil in project hadoop by apache.

From the class TestFsck, method testBlockIdCKMaintenance.

/**
   * Test for blockIdCK with datanode maintenance.
   */
@Test(timeout = 90000)
public void testBlockIdCKMaintenance() throws Exception {
    final short replFactor = 2;
    short numDn = 2;
    final long blockSize = 512;
    String[] hosts = { "host1", "host2" };
    String[] racks = { "/rack1", "/rack2" };
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, replFactor);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY, replFactor);
    DistributedFileSystem dfs;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts).racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    DFSTestUtil util = new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build();
    // Create a test file
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    util.createFile(dfs, path, 1024, replFactor, 1000L);
    util.waitReplication(dfs, path, replFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb : util.getAllBlocks(dfs, path)) {
        sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
    }
    String[] bIds = sb.toString().split(" ");
    // Make sure the datanode that holds a replica is healthy before maintenance
    String outStr = runFsck(conf, 0, true, "/", "-maintenance", "-blockId", bIds[0]);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    FSNamesystem fsn = cluster.getNameNode().getNamesystem();
    BlockManager bm = fsn.getBlockManager();
    DatanodeManager dnm = bm.getDatanodeManager();
    DatanodeDescriptor dn = dnm.getDatanode(cluster.getDataNodes().get(0).getDatanodeId());
    bm.getDatanodeManager().getDecomManager().startMaintenance(dn, Long.MAX_VALUE);
    final String dnName = dn.getXferAddr();
    //wait for the node to enter maintenance state
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            DatanodeInfo datanodeInfo = null;
            try {
                for (DatanodeInfo info : dfs.getDataNodeStats()) {
                    if (dnName.equals(info.getXferAddr())) {
                        datanodeInfo = info;
                    }
                }
                if (datanodeInfo != null && datanodeInfo.isEnteringMaintenance()) {
                    String fsckOut = runFsck(conf, 5, false, "/", "-maintenance", "-blockId", bIds[0]);
                    assertTrue(fsckOut.contains(NamenodeFsck.ENTERING_MAINTENANCE_STATUS));
                    return true;
                }
            } catch (Exception e) {
                LOG.warn("Unexpected exception: " + e);
                return false;
            }
            return false;
        }
    }, 500, 30000);
    // Start 3rd DataNode
    cluster.startDataNodes(conf, 1, true, null, new String[] { "/rack3" }, new String[] { "host3" }, null, false);
    // Wait for the 1st node to reach the IN_MAINTENANCE state
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            try {
                DatanodeInfo datanodeInfo = null;
                for (DatanodeInfo info : dfs.getDataNodeStats()) {
                    if (dnName.equals(info.getXferAddr())) {
                        datanodeInfo = info;
                    }
                }
                if (datanodeInfo != null && datanodeInfo.isInMaintenance()) {
                    return true;
                }
            } catch (Exception e) {
                LOG.warn("Unexpected exception: " + e);
                return false;
            }
            return false;
        }
    }, 500, 30000);
    // Check that the in-maintenance node is reported when -maintenance is requested
    String fsckOut = runFsck(conf, 4, false, "/", "-maintenance", "-blockId", bIds[0]);
    assertTrue(fsckOut.contains(NamenodeFsck.IN_MAINTENANCE_STATUS));
    // Check that in-maintenance nodes are not printed when not requested
    fsckOut = runFsck(conf, 4, false, "/", "-blockId", bIds[0]);
    assertFalse(fsckOut.contains(NamenodeFsck.IN_MAINTENANCE_STATUS));
}
Also used : Path(org.apache.hadoop.fs.Path) DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Matchers.anyString(org.mockito.Matchers.anyString) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) IOException(java.io.IOException) ChecksumException(org.apache.hadoop.fs.ChecksumException) TimeoutException(java.util.concurrent.TimeoutException) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Test(org.junit.Test)
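
Since GenericTestUtils.waitFor only needs a single-method Supplier<Boolean>, the two anonymous classes above can be written as lambdas on Java 8+. A sketch equivalent to the second wait (dfs and dnName are effectively final, so a lambda can capture them):

// Sketch: lambda form of the IN_MAINTENANCE poll above.
GenericTestUtils.waitFor(() -> {
    try {
        for (DatanodeInfo info : dfs.getDataNodeStats()) {
            if (dnName.equals(info.getXferAddr()) && info.isInMaintenance()) {
                return true;
            }
        }
    } catch (IOException e) {
        LOG.warn("Unexpected exception: " + e);
    }
    return false;
}, 500, 30000);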

Aggregations

DFSTestUtil (org.apache.hadoop.hdfs.DFSTestUtil): 25
Test (org.junit.Test): 25
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 23
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 22
Path (org.apache.hadoop.fs.Path): 17
Matchers.anyString (org.mockito.Matchers.anyString): 17
FileSystem (org.apache.hadoop.fs.FileSystem): 16
File (java.io.File): 10
RandomAccessFile (java.io.RandomAccessFile): 10
Configuration (org.apache.hadoop.conf.Configuration): 8
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 8
IOException (java.io.IOException): 7
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 5
FileNotFoundException (java.io.FileNotFoundException): 4
Random (java.util.Random): 4
TimeoutException (java.util.concurrent.TimeoutException): 4
ChecksumException (org.apache.hadoop.fs.ChecksumException): 4
UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException): 4
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 4
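
To tie the aggregated classes together, here is a minimal, self-contained sketch of a typical DFSTestUtil setup. The method name, path, and counts are illustrative only (not from TestFsck), and it assumes the hadoop-hdfs test artifacts are on the classpath:

// Sketch: spin up a 3-datanode mini cluster, populate it with random
// files via DFSTestUtil, wait for replication, then clean up.
public void minimalDfsTestUtilUsage() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        DFSTestUtil util = new DFSTestUtil.Builder()
            .setName("MinimalExample").setNumFiles(2).build();
        util.createFiles(fs, "/data");
        // Default replication is 3, and three datanodes are available.
        util.waitReplication(fs, "/data", (short) 3);
        util.cleanup(fs, "/data");
    } finally {
        cluster.shutdown();
    }
}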