
Example 1 with ReplicationResult

Use of org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult in the Apache Hadoop project.

From the class TestFsck, method testFsckFileNotFound.

/** Test fsck with FileNotFound. */
@Test
public void testFsckFileNotFound() throws Exception {
    // Datanode count handed to the NamenodeFsck constructor;
    // nothing is actually started in this fully mocked test
    final short numReplicas = 1;
    NameNode namenode = mock(NameNode.class);
    NetworkTopology nettop = mock(NetworkTopology.class);
    Map<String, String[]> pmap = new HashMap<>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    FSNamesystem fsName = mock(FSNamesystem.class);
    FSDirectory fsd = mock(FSDirectory.class);
    BlockManager blockManager = mock(BlockManager.class);
    DatanodeManager dnManager = mock(DatanodeManager.class);
    INodesInPath iip = mock(INodesInPath.class);
    when(namenode.getNamesystem()).thenReturn(fsName);
    when(fsName.getBlockManager()).thenReturn(blockManager);
    when(fsName.getFSDirectory()).thenReturn(fsd);
    when(fsd.getFSNamesystem()).thenReturn(fsName);
    when(fsd.resolvePath(anyObject(), anyString(), any(DirOp.class))).thenReturn(iip);
    when(blockManager.getDatanodeManager()).thenReturn(dnManager);
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, numReplicas, remoteAddress);
    String pathString = "/tmp/testFile";
    long length = 123L;
    boolean isDir = false;
    int blockReplication = 1;
    long blockSize = 128 * 1024L;
    long modTime = 123123123L;
    long accessTime = 123123120L;
    FsPermission perms = FsPermission.getDefault();
    String owner = "foo";
    String group = "bar";
    byte[] symlink = null;
    byte[] path = DFSUtil.string2Bytes(pathString);
    long fileId = 312321L;
    int numChildren = 1;
    byte storagePolicy = 0;
    HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication, blockSize, modTime, accessTime, perms, owner, group, symlink, path, fileId, numChildren, null, storagePolicy, null);
    Result replRes = new ReplicationResult(conf);
    Result ecRes = new ErasureCodingResult(conf);
    try {
        // check() should handle the missing file gracefully rather than
        // propagate an exception
        fsck.check(pathString, file, replRes, ecRes);
    } catch (Exception e) {
        fail("Unexpected exception " + e.getMessage());
    }
    // A file that cannot be found must not mark the result unhealthy
    assertTrue(replRes.isHealthy());
}
Also used: DirOp (org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp), HashMap (java.util.HashMap), Matchers.anyString (org.mockito.Matchers.anyString), ReplicationResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult), Result (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result), ErasureCodingResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult), StringWriter (java.io.StringWriter), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), FsPermission (org.apache.hadoop.fs.permission.FsPermission), PrintWriter (java.io.PrintWriter), Writer (java.io.Writer), IOException (java.io.IOException), ChecksumException (org.apache.hadoop.fs.ChecksumException), TimeoutException (java.util.concurrent.TimeoutException), UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException), FileNotFoundException (java.io.FileNotFoundException), AccessControlException (org.apache.hadoop.security.AccessControlException), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), NetworkTopology (org.apache.hadoop.net.NetworkTopology), InetAddress (java.net.InetAddress), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), Test (org.junit.Test)
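All three excerpts reference conf and (in the later examples) cluster without declaring them; they are fields of TestFsck. A minimal sketch of the surrounding JUnit 4 fixture, with names modeled on, but not copied from, the real class:

private Configuration conf;      // org.apache.hadoop.conf.Configuration
private MiniDFSCluster cluster;  // org.apache.hadoop.hdfs.MiniDFSCluster

@Before
public void setUp() throws Exception {
    // HdfsConfiguration preloads the hdfs-default/hdfs-site resources
    conf = new HdfsConfiguration();
}

@After
public void tearDown() {
    // Tests that build a minicluster rely on it being torn down here
    if (cluster != null) {
        cluster.shutdown();
        cluster = null;
    }
}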

Example 2 with ReplicationResult

Use of org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult in the Apache Hadoop project.

From the class TestFsck, method testFsckMissingReplicas.

/**
   * Tests that the number of missing block replicas and the number of
   * expected replicas are reported correctly.
   * @throws IOException
   */
@Test
public void testFsckMissingReplicas() throws IOException {
    // Desired replication factor
    // Set this higher than numReplicas so it's under-replicated
    final short replFactor = 2;
    // Number of replicas to actually start
    final short numReplicas = 1;
    // Number of blocks to write
    final short numBlocks = 3;
    // Set a small-ish blocksize
    final long blockSize = 512;
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    DistributedFileSystem dfs = null;
    // Start up a minicluster
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numReplicas).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    // Create a file that will be intentionally under-replicated
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    long fileLen = blockSize * numBlocks;
    DFSTestUtil.createFile(dfs, path, fileLen, replFactor, 1);
    // Gather the namenode-side objects needed to run fsck directly
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager().getDatanodeManager().getNetworkTopology();
    Map<String, String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, numReplicas, remoteAddress);
    // Run the fsck and check the Result
    final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result replRes = new ReplicationResult(conf);
    Result ecRes = new ErasureCodingResult(conf);
    fsck.check(pathString, file, replRes, ecRes);
    // Also print the output from the fsck, for ex post facto sanity checks
    System.out.println(result.toString());
    assertEquals(replRes.missingReplicas, (numBlocks * replFactor) - (numBlocks * numReplicas));
    assertEquals(replRes.numExpectedReplicas, numBlocks * replFactor);
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HashMap (java.util.HashMap), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), ReplicationResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult), Result (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result), ErasureCodingResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult), StringWriter (java.io.StringWriter), NetworkTopology (org.apache.hadoop.net.NetworkTopology), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), InetAddress (java.net.InetAddress), PrintWriter (java.io.PrintWriter), Writer (java.io.Writer), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), Test (org.junit.Test)
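The two closing assertions are easier to follow with the constants plugged in. A short restatement of the arithmetic (not from the source; just the values above spelled out):

// Each of the 3 blocks wants replFactor = 2 replicas, but only 1 datanode runs
int expected = numBlocks * replFactor;   // 3 * 2 = 6 expected replicas
int live     = numBlocks * numReplicas;  // 3 * 1 = 3 replicas actually stored
int missing  = expected - live;          // 6 - 3 = 3 missing replicas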

Example 3 with ReplicationResult

Use of org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult in the Apache Hadoop project.

From the class TestFsck, method testFsckMisPlacedReplicas.

/**
   * Tests that the number of mis-replicated blocks is reported correctly.
   * @throws IOException
   */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
    // Desired replication factor
    final short replFactor = 2;
    // Number of datanodes to start
    short numDn = 2;
    // Number of blocks to write
    final short numBlocks = 3;
    // Set a small-ish blocksize
    final long blockSize = 512;
    String[] racks = { "/rack1", "/rack1" };
    String[] hosts = { "host1", "host2" };
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    DistributedFileSystem dfs = null;
    // Start up a minicluster
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts).racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    // Create a file that will be intentionally under-replicated
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    long fileLen = blockSize * numBlocks;
    DFSTestUtil.createFile(dfs, path, fileLen, replFactor, 1);
    // Gather the namenode-side objects needed to run fsck directly
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager().getDatanodeManager().getNetworkTopology();
    // Add a new node on different rack, so previous blocks' replicas
    // are considered to be misplaced
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    numDn++;
    Map<String, String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, numDn, remoteAddress);
    // Run the fsck and check the Result
    final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result replRes = new ReplicationResult(conf);
    Result ecRes = new ErasureCodingResult(conf);
    fsck.check(pathString, file, replRes, ecRes);
    // Check the mis-replicated block count: with a second rack now present,
    // every block's single-rack replica set violates the placement policy
    assertEquals(replRes.numMisReplicatedBlocks, numBlocks);
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HashMap (java.util.HashMap), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), ReplicationResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult), Result (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result), ErasureCodingResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult), StringWriter (java.io.StringWriter), NetworkTopology (org.apache.hadoop.net.NetworkTopology), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), InetAddress (java.net.InetAddress), PrintWriter (java.io.PrintWriter), Writer (java.io.Writer), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), Test (org.junit.Test)
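These examples construct NamenodeFsck directly against namenode internals; most other TestFsck cases drive the same check end to end through the command-line tool instead. A hedged sketch of such a helper, modeled on (but not copied from) the class's runFsck utility, assuming org.apache.hadoop.hdfs.tools.DFSck, org.apache.hadoop.util.ToolRunner, java.io.ByteArrayOutputStream, and java.io.PrintStream:

static String runFsck(Configuration conf, int expectedErrCode,
        String... args) throws Exception {
    // Capture the tool's output so callers can assert on its contents
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true);
    int errCode = ToolRunner.run(new DFSck(conf, out), args);
    assertEquals(expectedErrCode, errCode);
    return bStream.toString();
}

For instance, runFsck(conf, 0, "/testfile") would run fsck against the file created above and return the report for inspection.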
