Example 31 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From class NamenodeWebHdfsMethods, method chooseDatanode.

@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode, final String path,
        final HttpOpParam.Op op, final long openOffset, final long blocksize,
        final String excludeDatanodes, final String remoteAddr) throws IOException {
    FSNamesystem fsn = namenode.getNamesystem();
    if (fsn == null) {
        throw new IOException("Namesystem has not been intialized yet.");
    }
    final BlockManager bm = fsn.getBlockManager();
    HashSet<Node> excludes = new HashSet<Node>();
    if (excludeDatanodes != null) {
        for (String host : StringUtils.getTrimmedStringCollection(excludeDatanodes)) {
            int idx = host.indexOf(":");
            if (idx != -1) {
                excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
            } else {
                excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
            }
        }
    }
    if (op == PutOpParam.Op.CREATE) {
        // choose a datanode near the client
        final DatanodeDescriptor clientNode = bm.getDatanodeManager().getDatanodeByHost(remoteAddr);
        if (clientNode != null) {
            final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(path, clientNode, excludes, blocksize);
            if (storages.length > 0) {
                return storages[0].getDatanodeDescriptor();
            }
        }
    } else if (op == GetOpParam.Op.OPEN || op == GetOpParam.Op.GETFILECHECKSUM || op == PostOpParam.Op.APPEND) {
        // choose a datanode containing a replica
        final NamenodeProtocols np = getRPCServer(namenode);
        final HdfsFileStatus status = np.getFileInfo(path);
        if (status == null) {
            throw new FileNotFoundException("File " + path + " not found.");
        }
        final long len = status.getLen();
        if (op == GetOpParam.Op.OPEN) {
            if (openOffset < 0L || (openOffset >= len && len > 0)) {
                throw new IOException("Offset=" + openOffset + " out of the range [0, " + len + "); " + op + ", path=" + path);
            }
        }
        if (len > 0) {
            final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
            final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
            final int count = locations.locatedBlockCount();
            if (count > 0) {
                return bestNode(locations.get(0).getLocations(), excludes);
            }
        }
    }
    return (DatanodeDescriptor) bm.getDatanodeManager().getNetworkTopology().chooseRandom(NodeBase.ROOT, excludes);
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) Node(org.apache.hadoop.net.Node) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) HashSet(java.util.HashSet) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
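
For context, a minimal sketch of how a test in the same package might drive chooseDatanode against a running MiniDFSCluster. The cluster handle, the /test path, and the client address are illustrative assumptions, not part of the excerpt:

NameNode namenode = cluster.getNameNode();
// ask for a datanode to serve an OPEN at offset 0 of an existing file
DatanodeInfo dn = NamenodeWebHdfsMethods.chooseDatanode(namenode, "/test",
        GetOpParam.Op.OPEN, 0L, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT,
        null, "127.0.0.1");
Assert.assertNotNull("expected a datanode holding a replica of /test", dn);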

Example 32 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From class TestHDFSServerPorts, method testBackupNodePorts.

/**
     * Verify BackupNode port usage.
     */
@Test(timeout = 300000)
public void testBackupNodePorts() throws Exception {
    NameNode nn = null;
    try {
        nn = startNameNode();
        Configuration backup_config = new HdfsConfiguration(config);
        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
        // bind http server to the same port as name-node
        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
        LOG.info("= Starting 1 on: " + backup_config.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
        assertFalse("Backup started on same port as Namenode", // should fail
        canStartBackupNode(backup_config));
        // bind http server to a different port
        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
        LOG.info("= Starting 2 on: " + backup_config.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
        boolean started = canStartBackupNode(backup_config);
        // should start now
        assertTrue("Backup Namenode should've started", started);
    } finally {
        stopNameNode(nn);
    }
}
Also used : NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) Configuration(org.apache.hadoop.conf.Configuration) Test(org.junit.Test)
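
The test leans on a canStartBackupNode helper defined elsewhere in TestHDFSServerPorts. A hedged approximation of what such a helper can look like, assuming port collisions surface as a BindException (the real helper may differ in details):

private static boolean canStartBackupNode(Configuration conf) throws IOException {
    NameNode backup = null;
    try {
        // "-backup" makes createNameNode return a BackupNode instance
        backup = NameNode.createNameNode(
                new String[] { StartupOption.BACKUP.getName() }, conf);
    } catch (BindException e) {
        // the configured rpc or http port is already in use
        return false;
    } finally {
        if (backup != null) {
            backup.stop();
        }
    }
    return true;
}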

Example 33 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From class TestRollingUpgradeRollback, method testRollbackCommand.

@Test
public void testRollbackCommand() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    final Path foo = new Path("/foo");
    final Path bar = new Path("/bar");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final DFSAdmin dfsadmin = new DFSAdmin(conf);
        dfs.mkdirs(foo);
        // start rolling upgrade
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        Assert.assertEquals(0, dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // create new directory
        dfs.mkdirs(bar);
        // check NNStorage
        NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
        // (startSegment, mkdir, endSegment) 
        checkNNStorage(storage, 3, -1);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
    NameNode nn = null;
    try {
        nn = NameNode.createNameNode(new String[] { "-rollingUpgrade", "rollback" }, conf);
        // make sure /foo is still there, but /bar is not
        INode fooNode = nn.getNamesystem().getFSDirectory().getINode4Write(foo.toString());
        Assert.assertNotNull(fooNode);
        INode barNode = nn.getNamesystem().getFSDirectory().getINode4Write(bar.toString());
        Assert.assertNull(barNode);
        // check the details of NNStorage
        NNStorage storage = nn.getNamesystem().getFSImage().getStorage();
        // (startSegment, upgrade marker, mkdir, endSegment)
        checkNNStorage(storage, 3, 7);
    } finally {
        if (nn != null) {
            nn.stop();
            nn.join();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) INode(org.apache.hadoop.hdfs.server.namenode.INode) Configuration(org.apache.hadoop.conf.Configuration) NNStorage(org.apache.hadoop.hdfs.server.namenode.NNStorage) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)
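
As a side note, the prepare/rollback state driven through DFSAdmin above can also be queried programmatically. A minimal sketch, assuming dfs is an open DistributedFileSystem handle:

// QUERY returns null when no rolling upgrade is in progress
RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
System.out.println("Rolling upgrade in progress: " + (info != null));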

Example 34 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From class TestSafeMode, method testInitializeReplQueuesEarly.

/**
   * Test that the NN initializes its under-replicated blocks queue
   * before it is ready to exit safemode (HDFS-1476)
   */
@Test(timeout = 45000)
public void testInitializeReplQueuesEarly() throws Exception {
    LOG.info("Starting testInitializeReplQueuesEarly");
    // Spray the blocks around the cluster when we add DNs instead of
    // concentrating all blocks on the first node.
    BlockManagerTestUtil.setWritingPrefersLocalNode(cluster.getNamesystem().getBlockManager(), false);
    cluster.startDataNodes(conf, 2, true, StartupOption.REGULAR, null);
    cluster.waitActive();
    LOG.info("Creating files");
    DFSTestUtil.createFile(fs, TEST_PATH, 15 * BLOCK_SIZE, (short) 1, 1L);
    LOG.info("Stopping all DataNodes");
    List<DataNodeProperties> dnprops = Lists.newLinkedList();
    dnprops.add(cluster.stopDataNode(0));
    dnprops.add(cluster.stopDataNode(0));
    dnprops.add(cluster.stopDataNode(0));
    cluster.getConfiguration(0).setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 1f / 15f);
    LOG.info("Restarting NameNode");
    cluster.restartNameNode();
    final NameNode nn = cluster.getNameNode();
    String status = nn.getNamesystem().getSafemode();
    assertEquals("Safe mode is ON. The reported blocks 0 needs additional " + "14 blocks to reach the threshold 0.9990 of total blocks 15." + NEWLINE + "The number of live datanodes 0 has reached the minimum number 0. " + "Safe mode will be turned off automatically once the thresholds " + "have been reached.", status);
    assertFalse("Mis-replicated block queues should not be initialized " + "until threshold is crossed", NameNodeAdapter.safeModeInitializedReplQueues(nn));
    LOG.info("Restarting one DataNode");
    cluster.restartDataNode(dnprops.remove(0));
    // Wait for block reports from all attached storages of
    // the restarted DN to come in.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            return getLongCounter("StorageBlockReportOps", getMetrics(NN_METRICS)) == cluster.getStoragesPerDatanode();
        }
    }, 10, 10000);
    final long safe = NameNodeAdapter.getSafeModeSafeBlocks(nn);
    assertTrue("Expected first block report to make some blocks safe.", safe > 0);
    assertTrue("Did not expect first block report to make all blocks safe.", safe < 15);
    assertTrue(NameNodeAdapter.safeModeInitializedReplQueues(nn));
    // Ensure that UnderReplicatedBlocks goes up to 15 - safe. Misreplicated
    // blocks are processed asynchronously so this may take a few seconds.
    // Failure here will manifest as a test timeout.
    BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
    long underReplicatedBlocks = nn.getNamesystem().getUnderReplicatedBlocks();
    while (underReplicatedBlocks != (15 - safe)) {
        LOG.info("UnderReplicatedBlocks expected=" + (15 - safe) + ", actual=" + underReplicatedBlocks);
        Thread.sleep(100);
        BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
        underReplicatedBlocks = nn.getNamesystem().getUnderReplicatedBlocks();
    }
    cluster.restartDataNodes();
}
Also used : NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) DataNodeProperties(org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties) Test(org.junit.Test)
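
The key to this test is the replication-queue threshold set before the NameNode restart. Shown in isolation (the values are the test's own):

Configuration conf = new HdfsConfiguration();
// with 15 blocks total, one reported block (1/15) is enough for the NN to
// initialize its replication queues while still inside safemode
conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 1f / 15f);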

Example 35 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From class TestAvailableSpaceBlockPlacementPolicy, method setupCluster.

@BeforeClass
public static void setupCluster() throws Exception {
    conf = new HdfsConfiguration();
    conf.setFloat(DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY, 0.6f);
    String[] racks = new String[numRacks];
    for (int i = 0; i < numRacks; i++) {
        racks[i] = "/rack" + i;
    }
    String[] ownerRackOfNodes = new String[numRacks * nodesPerRack];
    for (int i = 0; i < nodesPerRack; i++) {
        for (int j = 0; j < numRacks; j++) {
            ownerRackOfNodes[i * numRacks + j] = racks[j];
        }
    }
    storages = DFSTestUtil.createDatanodeStorageInfos(ownerRackOfNodes);
    dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    File baseDir = PathUtils.getTestDir(AvailableSpaceBlockPlacementPolicy.class);
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath());
    conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, AvailableSpaceBlockPlacementPolicy.class.getName());
    DFSTestUtil.formatNameNode(conf);
    namenode = new NameNode(conf);
    final BlockManager bm = namenode.getNamesystem().getBlockManager();
    placementPolicy = bm.getBlockPlacementPolicy();
    cluster = bm.getDatanodeManager().getNetworkTopology();
    for (int i = 0; i < nodesPerRack * numRacks; i++) {
        cluster.add(dataNodes[i]);
    }
    setupDataNodeCapacity();
}
Also used : NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) File(java.io.File) BeforeClass(org.junit.BeforeClass)
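
For reference, the two settings this fixture depends on, shown on their own with a note on what the fraction means (the 60% interpretation follows the policy's documented behavior):

Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        AvailableSpaceBlockPlacementPolicy.class.getName());
// when the policy compares two candidate datanodes, a fraction of 0.6 makes it
// pick the one with more available space 60% of the time
conf.setFloat(DFSConfigKeys
        .DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY,
        0.6f);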

Aggregations

NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode): 65 usages
Test (org.junit.Test): 44 usages
Configuration (org.apache.hadoop.conf.Configuration): 28 usages
Path (org.apache.hadoop.fs.Path): 22 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 17 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 15 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 usages
File (java.io.File): 7 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7 usages
DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB): 7 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 usages
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 6 usages
IOException (java.io.IOException): 5 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4 usages
BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager): 4 usages
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 4 usages
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 4 usages
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 4 usages