
Example 56 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

The class TestHDFSServerPorts, method testBackupNodePorts.

/**
 * Verify BackupNode port usage.
 */
@Test(timeout = 300000)
public void testBackupNodePorts() throws Exception {
    NameNode nn = null;
    try {
        nn = startNameNode();
        Configuration backup_config = new HdfsConfiguration(config);
        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
        // bind http server to the same port as name-node
        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
        LOG.info("= Starting 1 on: " + backup_config.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
        assertFalse("Backup started on same port as Namenode", // should fail
        canStartBackupNode(backup_config));
        // bind http server to a different port
        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
        LOG.info("= Starting 2 on: " + backup_config.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
        boolean started = canStartBackupNode(backup_config);
        // should start now
        assertTrue("Backup Namenode should've started", started);
    } finally {
        stopNameNode(nn);
    }
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), Configuration (org.apache.hadoop.conf.Configuration), Test (org.junit.Test)
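
The helpers startNameNode(), canStartBackupNode(), and stopNameNode() are defined elsewhere in TestHDFSServerPorts and are not shown here. Below is a minimal sketch of what canStartBackupNode might look like; the body, the cast, and the exception handling are assumptions for illustration, not the actual helper:

// Hypothetical sketch of the canStartBackupNode helper (not the real
// TestHDFSServerPorts code): try to bring up a BackupNode with the given
// configuration and report whether it started, i.e. whether its ports bound.
private static boolean canStartBackupNode(Configuration conf) {
    BackupNode bn = null;
    try {
        // A BackupNode is started through the NameNode entry point
        // with the -backup startup option.
        bn = (BackupNode) NameNode.createNameNode(
                new String[] { StartupOption.BACKUP.getName() }, conf);
        return true;
    } catch (IOException e) {
        // A port collision surfaces as an IOException (typically a
        // wrapped BindException) during startup.
        return false;
    } finally {
        if (bn != null) {
            bn.stop();
        }
    }
}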

Example 57 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

The class TestRollingUpgradeRollback, method testRollbackCommand.

@Test
public void testRollbackCommand() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    final Path foo = new Path("/foo");
    final Path bar = new Path("/bar");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final DFSAdmin dfsadmin = new DFSAdmin(conf);
        dfs.mkdirs(foo);
        // start rolling upgrade
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        Assert.assertEquals(0, dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // create new directory
        dfs.mkdirs(bar);
        // check NNStorage
        NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
        // (startSegment, mkdir, endSegment) 
        checkNNStorage(storage, 3, -1);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
    NameNode nn = null;
    try {
        nn = NameNode.createNameNode(new String[] { "-rollingUpgrade", "rollback" }, conf);
        // make sure /foo is still there, but /bar is not
        INode fooNode = nn.getNamesystem().getFSDirectory().getINode4Write(foo.toString());
        Assert.assertNotNull(fooNode);
        INode barNode = nn.getNamesystem().getFSDirectory().getINode4Write(bar.toString());
        Assert.assertNull(barNode);
        // check the details of NNStorage
        NNStorage storage = nn.getNamesystem().getFSImage().getStorage();
        // (startSegment, upgrade marker, mkdir, endSegment)
        checkNNStorage(storage, 3, 7);
    } finally {
        if (nn != null) {
            nn.stop();
            nn.join();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), INode (org.apache.hadoop.hdfs.server.namenode.INode), Configuration (org.apache.hadoop.conf.Configuration), NNStorage (org.apache.hadoop.hdfs.server.namenode.NNStorage), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), Test (org.junit.Test)
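
The test drives only the prepare step through DFSAdmin before rolling back via NameNode.createNameNode. For context, the full rolling-upgrade lifecycle can be driven the same way; a minimal sketch using the documented -rollingUpgrade subcommands (the zero return code convention matches the assertion in the test above):

// Sketch: the full rolling-upgrade lifecycle through DFSAdmin. A return
// value of 0 means the command succeeded, as asserted in the test above.
DFSAdmin dfsadmin = new DFSAdmin(conf);
// create the rollback fsimage (the NN must be in safe mode)
dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" });
// poll until the rollback image has been created
dfsadmin.run(new String[] { "-rollingUpgrade", "query" });
// commit the upgrade instead of rolling back
dfsadmin.run(new String[] { "-rollingUpgrade", "finalize" });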

Example 58 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

The class TestSafeMode, method testInitializeReplQueuesEarly.

/**
 * Test that the NN initializes its under-replicated blocks queue
 * before it is ready to exit safemode (HDFS-1476).
 */
@Test(timeout = 45000)
public void testInitializeReplQueuesEarly() throws Exception {
    LOG.info("Starting testInitializeReplQueuesEarly");
    // Spray the blocks around the cluster when we add DNs instead of
    // concentrating all blocks on the first node.
    BlockManagerTestUtil.setWritingPrefersLocalNode(cluster.getNamesystem().getBlockManager(), false);
    cluster.startDataNodes(conf, 2, true, StartupOption.REGULAR, null);
    cluster.waitActive();
    LOG.info("Creating files");
    DFSTestUtil.createFile(fs, TEST_PATH, 15 * BLOCK_SIZE, (short) 1, 1L);
    LOG.info("Stopping all DataNodes");
    List<DataNodeProperties> dnprops = Lists.newLinkedList();
    dnprops.add(cluster.stopDataNode(0));
    dnprops.add(cluster.stopDataNode(0));
    dnprops.add(cluster.stopDataNode(0));
    cluster.getConfiguration(0).setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 1f / 15f);
    LOG.info("Restarting NameNode");
    cluster.restartNameNode();
    final NameNode nn = cluster.getNameNode();
    String status = nn.getNamesystem().getSafemode();
    assertEquals("Safe mode is ON. The reported blocks 0 needs additional " + "14 blocks to reach the threshold 0.9990 of total blocks 15." + NEWLINE + "The number of live datanodes 0 has reached the minimum number 0. " + "Safe mode will be turned off automatically once the thresholds " + "have been reached.", status);
    assertFalse("Mis-replicated block queues should not be initialized " + "until threshold is crossed", NameNodeAdapter.safeModeInitializedReplQueues(nn));
    LOG.info("Restarting one DataNode");
    cluster.restartDataNode(dnprops.remove(0));
    // Wait for block reports from all attached storages of
    // the restarted DN to come in.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            return getLongCounter("StorageBlockReportOps", getMetrics(NN_METRICS)) == cluster.getStoragesPerDatanode();
        }
    }, 10, 10000);
    final long safe = NameNodeAdapter.getSafeModeSafeBlocks(nn);
    assertTrue("Expected first block report to make some blocks safe.", safe > 0);
    assertTrue("Did not expect first block report to make all blocks safe.", safe < 15);
    assertTrue(NameNodeAdapter.safeModeInitializedReplQueues(nn));
    // Ensure that UnderReplicatedBlocks goes up to 15 - safe. Misreplicated
    // blocks are processed asynchronously so this may take a few seconds.
    // Failure here will manifest as a test timeout.
    BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
    long underReplicatedBlocks = nn.getNamesystem().getUnderReplicatedBlocks();
    while (underReplicatedBlocks != (15 - safe)) {
        LOG.info("UnderReplicatedBlocks expected=" + (15 - safe) + ", actual=" + underReplicatedBlocks);
        Thread.sleep(100);
        BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
        underReplicatedBlocks = nn.getNamesystem().getUnderReplicatedBlocks();
    }
    cluster.restartDataNodes();
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), DataNodeProperties (org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties), Test (org.junit.Test)
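
The anonymous Supplier above targets pre-Java-8 Hadoop; on Java 8+ the same wait can be written as a lambda. A sketch of the equivalent call, with the same polling interval and timeout:

// Equivalent wait written as a lambda: poll every 10 ms, fail after
// 10 s if block reports from all storages of the restarted DN have
// not yet been counted in the NN metrics.
GenericTestUtils.waitFor(
        () -> getLongCounter("StorageBlockReportOps", getMetrics(NN_METRICS))
                == cluster.getStoragesPerDatanode(),
        10, 10000);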

Example 59 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

The class TestAvailableSpaceBlockPlacementPolicy, method setupCluster.

@BeforeClass
public static void setupCluster() throws Exception {
    conf = new HdfsConfiguration();
    // Balanced-space preference fraction: with 0.6f the policy favors the
    // candidate datanode with more available space 60% of the time.
    conf.setFloat(DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY, 0.6f);
    String[] racks = new String[numRacks];
    for (int i = 0; i < numRacks; i++) {
        racks[i] = "/rack" + i;
    }
    String[] ownerRackOfNodes = new String[numRacks * nodesPerRack];
    // Assign a rack to every datanode, interleaving so that consecutive
    // nodes land on different racks.
    for (int i = 0; i < nodesPerRack; i++) {
        for (int j = 0; j < numRacks; j++) {
            ownerRackOfNodes[i * numRacks + j] = racks[j];
        }
    }
    storages = DFSTestUtil.createDatanodeStorageInfos(ownerRackOfNodes);
    dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    File baseDir = PathUtils.getTestDir(AvailableSpaceBlockPlacementPolicy.class);
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath());
    conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, AvailableSpaceBlockPlacementPolicy.class.getName());
    DFSTestUtil.formatNameNode(conf);
    namenode = new NameNode(conf);
    final BlockManager bm = namenode.getNamesystem().getBlockManager();
    placementPolicy = bm.getBlockPlacementPolicy();
    cluster = bm.getDatanodeManager().getNetworkTopology();
    for (int i = 0; i < nodesPerRack * numRacks; i++) {
        cluster.add(dataNodes[i]);
    }
    setupDataNodeCapacity();
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), File (java.io.File), BeforeClass (org.junit.BeforeClass)
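
A @BeforeClass setup like this usually pairs with an @AfterClass teardown that stops the in-process NameNode so its ports and storage directories are released. A minimal sketch, assuming the same static namenode field (the method name is hypothetical):

// Hypothetical matching teardown: stop the NameNode started in
// setupCluster() once all tests in the class have run.
@AfterClass
public static void teardownCluster() {
    if (namenode != null) {
        namenode.stop();
    }
}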

Example 60 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

The class BlockReportTestBase, method testOneReplicaRbwReportArrivesAfterBlockCompleted.

/**
 * Test for the case where one of the DNs in the pipeline is in the
 * process of doing a block report exactly when the block is closed.
 * In this case, the block report becomes delayed until after the
 * block is marked completed on the NN, and hence it reports an RBW
 * replica for a COMPLETE block. Such a report should not be marked
 * corrupt.
 * This is a regression test for HDFS-2791.
 */
@Test(timeout = 300000)
public void testOneReplicaRbwReportArrivesAfterBlockCompleted() throws Exception {
    final CountDownLatch brFinished = new CountDownLatch(1);
    DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG) {

        @Override
        protected Object passThrough(InvocationOnMock invocation) throws Throwable {
            try {
                return super.passThrough(invocation);
            } finally {
                // inform the test that our block report went through.
                brFinished.countDown();
            }
        }
    };
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path filePath = new Path("/" + METHOD_NAME + ".dat");
    // Start a second DN for this test -- we're checking
    // what happens when one of the DNs is slowed for some reason.
    REPL_FACTOR = 2;
    startDNandWait(null, false);
    NameNode nn = cluster.getNameNode();
    FSDataOutputStream out = fs.create(filePath, REPL_FACTOR);
    try {
        AppendTestUtil.write(out, 0, 10);
        out.hflush();
        // Set up a spy so that we can delay the block report coming
        // from this node.
        DataNode dn = cluster.getDataNodes().get(0);
        DatanodeProtocolClientSideTranslatorPB spy = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
        Mockito.doAnswer(delayer).when(spy).blockReport(Mockito.<DatanodeRegistration>anyObject(), Mockito.anyString(), Mockito.<StorageBlockReport[]>anyObject(), Mockito.<BlockReportContext>anyObject());
        // Force a block report to be generated. The block report will have
        // an RBW replica in it. Wait for the RPC to be sent, but block
        // it before it gets to the NN.
        dn.scheduleAllBlockReport(0);
        delayer.waitForCall();
    } finally {
        IOUtils.closeStream(out);
    }
    // Now that the stream is closed, the NN will have the block in COMPLETE
    // state.
    delayer.proceed();
    brFinished.await();
    // Verify that no replicas are marked corrupt, and that the
    // file is still readable.
    BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
    assertEquals(0, nn.getNamesystem().getCorruptReplicaBlocks());
    DFSTestUtil.readFile(fs, filePath);
    // Ensure that the file is readable even from the DN that we futzed with.
    cluster.stopDataNode(1);
    DFSTestUtil.readFile(fs, filePath);
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), InvocationOnMock (org.mockito.invocation.InvocationOnMock), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), DelayAnswer (org.apache.hadoop.test.GenericTestUtils.DelayAnswer), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), CountDownLatch (java.util.concurrent.CountDownLatch), Test (org.junit.Test)
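
The delayed-RPC pattern above is reusable for any spied protocol method. A condensed sketch of the DelayAnswer handshake between the test thread and the held call; triggerSpiedCall() is a hypothetical placeholder for whatever makes the spied method fire (here, dn.scheduleAllBlockReport(0)):

// Condensed DelayAnswer handshake (GenericTestUtils.DelayAnswer).
// triggerSpiedCall() is a hypothetical stand-in for the action that
// makes the spied method fire on another thread.
DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(spy).blockReport(
        Mockito.<DatanodeRegistration>anyObject(), Mockito.anyString(),
        Mockito.<StorageBlockReport[]>anyObject(),
        Mockito.<BlockReportContext>anyObject());
triggerSpiedCall();      // e.g. dn.scheduleAllBlockReport(0)
delayer.waitForCall();   // test blocks until the RPC reaches the spy
// ... change cluster state while the report is held ...
delayer.proceed();       // release the held call so it reaches the NN
delayer.waitForResult(); // wait for the spied call to fully complete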

Aggregations

NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode): 65 uses
Test (org.junit.Test): 44 uses
Configuration (org.apache.hadoop.conf.Configuration): 28 uses
Path (org.apache.hadoop.fs.Path): 22 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 17 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 15 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 uses
File (java.io.File): 7 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7 uses
DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB): 7 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 6 uses
IOException (java.io.IOException): 5 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5 uses
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4 uses
BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager): 4 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 4 uses
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 4 uses
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 4 uses