Example 1 with NetworkTopology

Use of org.apache.hadoop.net.NetworkTopology in project hadoop by apache.

From the class TestBlockStoragePolicy, method testChooseTargetWithTopology.

@Test
public void testChooseTargetWithTopology() throws Exception {
    BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
        new StorageType[] { StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE },
        new StorageType[] {}, new StorageType[] {});
    BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
        new StorageType[] { StorageType.DISK, StorageType.SSD, StorageType.ARCHIVE },
        new StorageType[] {}, new StorageType[] {});
    final String[] racks = { "/d1/r1", "/d1/r2", "/d1/r2" };
    final String[] hosts = { "host1", "host2", "host3" };
    final StorageType[] types = { StorageType.DISK, StorageType.SSD, StorageType.ARCHIVE };
    final DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, types);
    final DatanodeDescriptor[] dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath());
    DFSTestUtil.formatNameNode(conf);
    NameNode namenode = new NameNode(conf);
    final BlockManager bm = namenode.getNamesystem().getBlockManager();
    BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
    for (DatanodeDescriptor datanode : dataNodes) {
        cluster.add(datanode);
    }
    DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3, dataNodes[0],
        Collections.<DatanodeStorageInfo>emptyList(), false, new HashSet<Node>(), 0, policy1, null);
    System.out.println(Arrays.asList(targets));
    Assert.assertEquals(3, targets.length);
    targets = replicator.chooseTarget("/foo", 3, dataNodes[0],
        Collections.<DatanodeStorageInfo>emptyList(), false, new HashSet<Node>(), 0, policy2, null);
    System.out.println(Arrays.asList(targets));
    Assert.assertEquals(3, targets.length);
}
Also used : NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), StorageType (org.apache.hadoop.fs.StorageType), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), Node (org.apache.hadoop.net.Node), NetworkTopology (org.apache.hadoop.net.NetworkTopology), File (java.io.File), Test (org.junit.Test)
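
For orientation, here is a minimal, self-contained sketch (not part of the test above) showing how the rack strings in this example map onto a standalone NetworkTopology. NodeBase is the plain Node implementation in org.apache.hadoop.net; add, getNumOfRacks, and getNumOfLeaves are standard NetworkTopology API.

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;

public class TopologySketch {
    public static void main(String[] args) {
        NetworkTopology cluster = new NetworkTopology();
        // Same layout as the test: host1 on /d1/r1, host2 and host3 on /d1/r2.
        cluster.add(new NodeBase("host1", "/d1/r1"));
        cluster.add(new NodeBase("host2", "/d1/r2"));
        cluster.add(new NodeBase("host3", "/d1/r2"));
        // Two racks, three leaves: the shape the placement policy works against.
        System.out.println(cluster.getNumOfRacks());  // 2
        System.out.println(cluster.getNumOfLeaves()); // 3
    }
}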

Example 2 with NetworkTopology

Use of org.apache.hadoop.net.NetworkTopology in project hadoop by apache.

From the class TestFsck, method testFsckFileNotFound.

/** Test fsck with FileNotFound. */
@Test
public void testFsckFileNotFound() throws Exception {
    // Number of replicas to actually start
    final short numReplicas = 1;
    NameNode namenode = mock(NameNode.class);
    NetworkTopology nettop = mock(NetworkTopology.class);
    Map<String, String[]> pmap = new HashMap<>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    FSNamesystem fsName = mock(FSNamesystem.class);
    FSDirectory fsd = mock(FSDirectory.class);
    BlockManager blockManager = mock(BlockManager.class);
    DatanodeManager dnManager = mock(DatanodeManager.class);
    INodesInPath iip = mock(INodesInPath.class);
    when(namenode.getNamesystem()).thenReturn(fsName);
    when(fsName.getBlockManager()).thenReturn(blockManager);
    when(fsName.getFSDirectory()).thenReturn(fsd);
    when(fsd.getFSNamesystem()).thenReturn(fsName);
    when(fsd.resolvePath(anyObject(), anyString(), any(DirOp.class))).thenReturn(iip);
    when(blockManager.getDatanodeManager()).thenReturn(dnManager);
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, numReplicas, remoteAddress);
    String pathString = "/tmp/testFile";
    long length = 123L;
    boolean isDir = false;
    int blockReplication = 1;
    long blockSize = 128 * 1024L;
    long modTime = 123123123L;
    long accessTime = 123123120L;
    FsPermission perms = FsPermission.getDefault();
    String owner = "foo";
    String group = "bar";
    byte[] symlink = null;
    byte[] path = DFSUtil.string2Bytes(pathString);
    long fileId = 312321L;
    int numChildren = 1;
    byte storagePolicy = 0;
    HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication, blockSize,
        modTime, accessTime, perms, owner, group, symlink, path, fileId, numChildren,
        null, storagePolicy, null);
    Result replRes = new ReplicationResult(conf);
    Result ecRes = new ErasureCodingResult(conf);
    try {
        fsck.check(pathString, file, replRes, ecRes);
    } catch (Exception e) {
        fail("Unexpected exception " + e.getMessage());
    }
    assertTrue(replRes.isHealthy());
}
Also used : DirOp (org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp), HashMap (java.util.HashMap), Matchers.anyString (org.mockito.Matchers.anyString), ReplicationResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult), Result (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result), ErasureCodingResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult), StringWriter (java.io.StringWriter), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), FsPermission (org.apache.hadoop.fs.permission.FsPermission), PrintWriter (java.io.PrintWriter), IOException (java.io.IOException), ChecksumException (org.apache.hadoop.fs.ChecksumException), TimeoutException (java.util.concurrent.TimeoutException), UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException), FileNotFoundException (java.io.FileNotFoundException), AccessControlException (org.apache.hadoop.security.AccessControlException), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), NetworkTopology (org.apache.hadoop.net.NetworkTopology), InetAddress (java.net.InetAddress), Writer (java.io.Writer), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), Test (org.junit.Test)
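
One detail worth calling out: pmap is the servlet-style parameter map that NamenodeFsck normally receives from the fsck HTTP endpoint, and this mocked test deliberately leaves it empty. A hedged sketch of what a populated map might look like (the exact keys here are an assumption based on how the fsck servlet forwards its query parameters, not something this test exercises):

// Hypothetical pmap contents when fsck is driven over HTTP rather than
// constructed directly as in the mocked test above.
Map<String, String[]> pmap = new HashMap<>();
pmap.put("path", new String[] { "/tmp/testFile" });  // target path to check
pmap.put("files", new String[] { "1" });             // also list files in the report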

Example 3 with NetworkTopology

Use of org.apache.hadoop.net.NetworkTopology in project hadoop by apache.

From the class TestFsck, method testFsckMissingReplicas.

/**
   * Tests that the number of missing block replicas and the number of
   * expected replicas are correct.
   * @throws IOException
   */
@Test
public void testFsckMissingReplicas() throws IOException {
    // Desired replication factor
    // Set this higher than numReplicas so it's under-replicated
    final short replFactor = 2;
    // Number of replicas to actually start
    final short numReplicas = 1;
    // Number of blocks to write
    final short numBlocks = 3;
    // Set a small-ish blocksize
    final long blockSize = 512;
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    DistributedFileSystem dfs = null;
    // Startup a minicluster
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numReplicas).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    // Create a file that will be intentionally under-replicated
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    long fileLen = blockSize * numBlocks;
    DFSTestUtil.createFile(dfs, path, fileLen, replFactor, 1);
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager().getDatanodeManager().getNetworkTopology();
    Map<String, String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, numReplicas, remoteAddress);
    // Run the fsck and check the Result
    final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result replRes = new ReplicationResult(conf);
    Result ecRes = new ErasureCodingResult(conf);
    fsck.check(pathString, file, replRes, ecRes);
    // Also print the output from the fsck, for ex post facto sanity checks
    System.out.println(result.toString());
    assertEquals((numBlocks * replFactor) - (numBlocks * numReplicas), replRes.missingReplicas);
    assertEquals(numBlocks * replFactor, replRes.numExpectedReplicas);
}
Also used : Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HashMap (java.util.HashMap), Matchers.anyString (org.mockito.Matchers.anyString), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), ReplicationResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult), Result (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result), ErasureCodingResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult), StringWriter (java.io.StringWriter), NetworkTopology (org.apache.hadoop.net.NetworkTopology), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), InetAddress (java.net.InetAddress), PrintWriter (java.io.PrintWriter), Writer (java.io.Writer), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), Test (org.junit.Test)
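
The two assertions follow directly from the setup; a quick worked sketch of the arithmetic, using the same constants as the test:

// 3 blocks at desired replication 2, but only 1 datanode running, so each
// block has 1 live replica and is missing exactly 1.
final short replFactor = 2, numReplicas = 1, numBlocks = 3;
final int missing = numBlocks * replFactor - numBlocks * numReplicas;  // 3
final int expected = numBlocks * replFactor;                           // 6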

Example 4 with NetworkTopology

Use of org.apache.hadoop.net.NetworkTopology in project hadoop by apache.

From the class TestBlockManager, method removeNode.

private void removeNode(DatanodeDescriptor deadNode) {
    // Drop the node from the rack-aware topology first...
    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
    cluster.remove(deadNode);
    // ...then purge every replica the BlockManager still credits to it.
    bm.removeBlocksAssociatedTo(deadNode);
}
Also used : NetworkTopology (org.apache.hadoop.net.NetworkTopology)
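
A short sketch of the observable effect (contains and getNumOfLeaves are standard NetworkTopology queries; dn stands for a hypothetical, already-registered DatanodeDescriptor, not a name from the test):

NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
int before = cluster.getNumOfLeaves();
removeNode(dn);                                 // helper from the example above
assert !cluster.contains(dn);                   // gone from the rack tree
assert cluster.getNumOfLeaves() == before - 1;  // one fewer leaf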

Example 5 with NetworkTopology

Use of org.apache.hadoop.net.NetworkTopology in project hadoop by apache.

From the class BlockManagerSafeMode, method leaveSafeMode.

/**
   * Leave startup safe mode.
   *
   * @param force - true to force exit
   * @return true if safe mode was left successfully, otherwise false
   */
boolean leaveSafeMode(boolean force) {
    assert namesystem.hasWriteLock() : "Leaving safe mode needs write lock!";
    final long bytesInFuture = numberOfBytesInFutureBlocks.get();
    if (bytesInFuture > 0) {
        if (force) {
            LOG.warn("Leaving safe mode due to forceExit. This will cause a data " + "loss of {} byte(s).", bytesInFuture);
            numberOfBytesInFutureBlocks.set(0);
        } else {
            LOG.error("Refusing to leave safe mode without a force flag. " + "Exiting safe mode will cause a deletion of {} byte(s). Please " + "use -forceExit flag to exit safe mode forcefully if data loss is" + " acceptable.", bytesInFuture);
            return false;
        }
    } else if (force) {
        LOG.warn("forceExit used when normal exist would suffice. Treating " + "force exit as normal safe mode exit.");
    }
    // In the standby, do not populate repl queues
    if (!blockManager.isPopulatingReplQueues() && blockManager.shouldPopulateReplQueues()) {
        blockManager.initializeReplQueues();
    }
    if (status != BMSafeModeStatus.OFF) {
        NameNode.stateChangeLog.info("STATE* Safe mode is OFF");
    }
    status = BMSafeModeStatus.OFF;
    final long timeInSafemode = monotonicNow() - startTime;
    NameNode.stateChangeLog.info("STATE* Leaving safe mode after {} secs", timeInSafemode / 1000);
    NameNode.getNameNodeMetrics().setSafeModeTime(timeInSafemode);
    final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology();
    NameNode.stateChangeLog.info("STATE* Network topology has {} racks and {}" + " datanodes", nt.getNumOfRacks(), nt.getNumOfLeaves());
    NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has {} blocks", blockManager.numOfUnderReplicatedBlocks());
    namesystem.startSecretManagerIfNecessary();
    // If startup has not yet completed, end safemode phase.
    StartupProgress prog = NameNode.getStartupProgress();
    if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
        prog.endStep(Phase.SAFEMODE, BlockManagerSafeMode.STEP_AWAITING_REPORTED_BLOCKS);
        prog.endPhase(Phase.SAFEMODE);
    }
    return true;
}
Also used : NetworkTopology (org.apache.hadoop.net.NetworkTopology), StartupProgress (org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress)
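
The interplay of force and bytesInFuture above reduces to a single predicate; a distilled sketch (ours, for illustration, not Hadoop API):

// Distilled from the branches in leaveSafeMode: the only refusal case is
// pending "future" bytes without the force flag.
static boolean mayLeaveSafeMode(long bytesInFuture, boolean force) {
    return bytesInFuture <= 0 || force;
}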

Aggregations

NetworkTopology (org.apache.hadoop.net.NetworkTopology): 12
Test (org.junit.Test): 7
Path (org.apache.hadoop.fs.Path): 4
PrintWriter (java.io.PrintWriter): 3
StringWriter (java.io.StringWriter): 3
Writer (java.io.Writer): 3
InetAddress (java.net.InetAddress): 3
HashMap (java.util.HashMap): 3
StorageType (org.apache.hadoop.fs.StorageType): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 3
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3
ErasureCodingResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult): 3
ReplicationResult (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult): 3
Result (org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result): 3
HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter): 3
Matchers.anyString (org.mockito.Matchers.anyString): 3
File (java.io.File): 2
IOException (java.io.IOException): 2
BlockLocation (org.apache.hadoop.fs.BlockLocation): 2