Example 1 with CorruptFileBlocks

Use of org.apache.hadoop.hdfs.protocol.CorruptFileBlocks in project hadoop by apache.

From the class CorruptFileBlockIterator, the method loadNext:

private void loadNext() throws IOException {
    if (files == null || fileIdx >= files.length) {
        CorruptFileBlocks cfb = dfs.listCorruptFileBlocks(path, cookie);
        files = cfb.getFiles();
        cookie = cfb.getCookie();
        fileIdx = 0;
        callsMade++;
    }
    if (fileIdx >= files.length) {
        // received an empty response
        // there are no more corrupt file blocks
        nextPath = null;
    } else {
        nextPath = string2Path(files[fileIdx]);
        fileIdx++;
    }
}
Also used: CorruptFileBlocks (org.apache.hadoop.hdfs.protocol.CorruptFileBlocks)
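
The iterator above backs the public FileSystem API, so callers normally never handle the cookie themselves. Below is a minimal sketch, not taken from the Hadoop sources above (the class name and the path /data are illustrative), of how the same corrupt-file listing is typically consumed through FileSystem.listCorruptFileBlocks, which returns a RemoteIterator<Path> that DistributedFileSystem backs with the CorruptFileBlockIterator shown above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListCorruptFilesSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // The returned RemoteIterator pages through CorruptFileBlocks responses
        // internally, resubmitting the cookie on each follow-up call.
        RemoteIterator<Path> corrupt = fs.listCorruptFileBlocks(new Path("/data"));
        while (corrupt.hasNext()) {
            System.out.println("corrupt file: " + corrupt.next());
        }
        fs.close();
    }
}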

Example 2 with CorruptFileBlocks

Use of org.apache.hadoop.hdfs.protocol.CorruptFileBlocks in project hadoop by apache.

From the class TestFsck, the method testFsckListCorruptFilesBlocks:

/** Check if the -list-corruptfileblocks option of the fsck command works properly. */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    FileSystem fs = null;
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);
    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j <= 1; j++) {
            File storageDir = cluster.getInstanceStorageDir(i, j);
            File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
            List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
            if (metadataFiles == null) {
                continue;
            }
            for (File metadataFile : metadataFiles) {
                File blockFile = Block.metaToBlockFile(metadataFile);
                assertTrue("Cannot remove file.", blockFile.delete());
                assertTrue("Cannot remove file.", metadataFile.delete());
            }
        }
    }
    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
        Thread.sleep(1000);
        corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
        numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));
    // Do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
}
Also used: DFSTestUtil (org.apache.hadoop.hdfs.DFSTestUtil), NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), CorruptFileBlocks (org.apache.hadoop.hdfs.protocol.CorruptFileBlocks), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Matchers.anyString (org.mockito.Matchers.anyString), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File), Test (org.junit.Test)
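
The test only needs the first batch of results, so it passes a null cookie. The following is a hedged sketch, not part of TestFsck, of paging the same NamenodeProtocols call to completion by following the cookie protocol that loadNext() in Example 1 implements: resubmit the returned cookie until a batch comes back empty.

// Sketch only: counts corrupt files under a path by following the
// CorruptFileBlocks cookie until the NameNode returns an empty batch.
static int countCorruptFiles(NamenodeProtocols namenode, String path) throws IOException {
    String cookie = null;
    int total = 0;
    while (true) {
        CorruptFileBlocks batch = namenode.listCorruptFileBlocks(path, cookie);
        if (batch.getFiles().length == 0) {
            // An empty batch signals that there are no more corrupt files.
            break;
        }
        total += batch.getFiles().length;
        cookie = batch.getCookie();
    }
    return total;
}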

Example 3 with CorruptFileBlocks

Use of org.apache.hadoop.hdfs.protocol.CorruptFileBlocks in project hadoop by apache.

From the class TestFsck, the method testFsckListCorruptSnapshotFiles:

/**
   * Test that corrupted snapshot files are listed with their full directory path.
   */
@Test
public void testFsckListCorruptSnapshotFiles() throws Exception {
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    DistributedFileSystem hdfs = null;
    final short replFactor = 1;
    int numFiles = 3;
    int numSnapshots = 0;
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    hdfs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(numFiles).setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(hdfs, "/corruptData", (short) 1);
    final Path fp = new Path("/corruptData/file");
    util.createFile(hdfs, fp, 1024, replFactor, 1000L);
    numFiles++;
    util.waitReplication(hdfs, "/corruptData", (short) 1);
    hdfs.allowSnapshot(new Path("/corruptData"));
    hdfs.createSnapshot(new Path("/corruptData"), "mySnapShot");
    numSnapshots = numFiles;
    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < numFiles; i++) {
        for (int j = 0; j <= 1; j++) {
            File storageDir = cluster.getInstanceStorageDir(i, j);
            File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
            List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
            if (metadataFiles == null) {
                continue;
            }
            for (File metadataFile : metadataFiles) {
                File blockFile = Block.metaToBlockFile(metadataFile);
                assertTrue("Cannot remove file.", blockFile.delete());
                assertTrue("Cannot remove file.", metadataFile.delete());
            }
        }
    }
    // Delete file when it has a snapshot
    hdfs.delete(fp, false);
    numFiles--;
    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
        Thread.sleep(1000);
        corruptFileBlocks = namenode.listCorruptFileBlocks("/corruptData", null);
        numCorrupt = corruptFileBlocks.getFiles().length;
    }
    // with -includeSnapshots all files are reported
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks", "-includeSnapshots");
    System.out.println("2. bad fsck include snapshot out: " + outStr);
    assertTrue(outStr.contains("has " + (numFiles + numSnapshots) + " CORRUPT files"));
    assertTrue(outStr.contains("/.snapshot/"));
    // without -includeSnapshots only non-snapshots are reported
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("3. bad fsck exclude snapshot out: " + outStr);
    assertTrue(outStr.contains("has " + numFiles + " CORRUPT files"));
    assertFalse(outStr.contains("/.snapshot/"));
}
Also used: Path (org.apache.hadoop.fs.Path), DFSTestUtil (org.apache.hadoop.hdfs.DFSTestUtil), NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), CorruptFileBlocks (org.apache.hadoop.hdfs.protocol.CorruptFileBlocks), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File), Test (org.junit.Test)
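
A minimal sketch, hypothetical and reusing the test's cluster and snapshot names, of why the -includeSnapshots run reports extra paths containing /.snapshot/: once the snapshot exists, each file is also reachable through a read-only path under /corruptData/.snapshot/mySnapShot/, and that copy survives even after the live file is deleted.

// Sketch only: would run inside the test after hdfs.delete(fp, false).
DistributedFileSystem dfs = cluster.getFileSystem();
Path livePath = new Path("/corruptData/file");
Path snapshotPath = new Path("/corruptData/.snapshot/mySnapShot/file");
// The live copy was deleted above, but the snapshot copy still resolves,
// which is why -includeSnapshots reports numFiles + numSnapshots corrupt files.
System.out.println("live copy exists:     " + dfs.exists(livePath));      // false
System.out.println("snapshot copy exists: " + dfs.exists(snapshotPath));  // true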

Aggregations

CorruptFileBlocks (org.apache.hadoop.hdfs.protocol.CorruptFileBlocks): 3 uses
File (java.io.File): 2 uses
RandomAccessFile (java.io.RandomAccessFile): 2 uses
DFSTestUtil (org.apache.hadoop.hdfs.DFSTestUtil): 2 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2 uses
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 2 uses
Test (org.junit.Test): 2 uses
Matchers.anyString (org.mockito.Matchers.anyString): 2 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 1 use
Path (org.apache.hadoop.fs.Path): 1 use