Example 41 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestStorageReport, method testStorageReportHasStorageTypeAndState.

/**
   * Ensure that storage type and storage state are propagated
   * in Storage Reports.
   */
@Test
public void testStorageReportHasStorageTypeAndState() throws IOException {
    // Make sure we are not testing with the default storage type; that
    // would not be a very good test.
    assertNotSame(storageType, StorageType.DEFAULT);
    NameNode nn = cluster.getNameNode();
    DataNode dn = cluster.getDataNodes().get(0);
    // Insert a spy object for the NN RPC.
    DatanodeProtocolClientSideTranslatorPB nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
    // Trigger a heartbeat so there is an interaction with the spy
    // object.
    DataNodeTestUtils.triggerHeartbeat(dn);
    // Verify that the callback passed in the expected parameters.
    ArgumentCaptor<StorageReport[]> captor = ArgumentCaptor.forClass(StorageReport[].class);
    Mockito.verify(nnSpy).sendHeartbeat(
        any(DatanodeRegistration.class), captor.capture(),
        anyLong(), anyLong(), anyInt(), anyInt(), anyInt(),
        Mockito.any(VolumeFailureSummary.class), Mockito.anyBoolean(),
        Mockito.any(SlowPeerReports.class));
    StorageReport[] reports = captor.getValue();
    for (StorageReport report : reports) {
        assertThat(report.getStorage().getStorageType(), is(storageType));
        assertThat(report.getStorage().getState(), is(DatanodeStorage.State.NORMAL));
    }
}
Also used: DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport), SlowPeerReports (org.apache.hadoop.hdfs.server.protocol.SlowPeerReports), VolumeFailureSummary (org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary), Test (org.junit.Test)
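
The verification above leans on Mockito's spy-and-capture pattern: wrap a real object so calls pass through, then verify the interaction and capture its arguments. A minimal, self-contained sketch of that pattern outside HDFS (the Greeter types below are hypothetical, invented for illustration):

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import org.junit.Test;
import org.mockito.ArgumentCaptor;

public class SpyAndCaptureSketch {

    interface Greeter {
        void greet(String[] names);
    }

    static class GreeterImpl implements Greeter {
        public void greet(String[] names) {
            // Real work would happen here; the spy delegates to this code.
        }
    }

    @Test
    public void testSpyAndCapture() {
        // Wrap the real object: calls still execute, but are also recorded.
        Greeter greeterSpy = spy(new GreeterImpl());
        greeterSpy.greet(new String[] { "alice", "bob" });
        // Capture the argument the spy actually received, then assert on it,
        // just as the HDFS test captures the StorageReport[] heartbeat payload.
        ArgumentCaptor<String[]> captor = ArgumentCaptor.forClass(String[].class);
        verify(greeterSpy).greet(captor.capture());
        assertEquals(2, captor.getValue().length);
    }
}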

Example 42 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestHASafeMode, method testIsInSafemode.

/**
   * DFS#isInSafeMode should check the Active NN's safe mode state in an
   * HA-enabled cluster. See HDFS-3507.
   */
@Test
public void testIsInSafemode() throws Exception {
    // Check for the standby nn without client failover.
    NameNode nn2 = cluster.getNameNode(1);
    assertTrue("nn2 should be in standby state", nn2.isStandbyState());
    InetSocketAddress nameNodeAddress = nn2.getNameNodeAddress();
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = new DistributedFileSystem();
    try {
        dfs.initialize(URI.create("hdfs://" + nameNodeAddress.getHostName() + ":" + nameNodeAddress.getPort()), conf);
        dfs.isInSafeMode();
        fail("StandBy should throw exception for isInSafeMode");
    } catch (IOException e) {
        if (e instanceof RemoteException) {
            assertEquals("RPC Error code should indicate app failure.", RpcErrorCodeProto.ERROR_APPLICATION, ((RemoteException) e).getErrorCode());
            IOException sbException = ((RemoteException) e).unwrapRemoteException();
            assertTrue("Standby NN should not support isInSafeMode", sbException instanceof StandbyException);
        } else {
            throw e;
        }
    } finally {
        if (null != dfs) {
            dfs.close();
        }
    }
    // Check again with client failover configured.
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    DistributedFileSystem dfsWithFailOver = (DistributedFileSystem) fs;
    assertTrue("ANN should be in SafeMode", dfsWithFailOver.isInSafeMode());
    cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    assertFalse("ANN should be out of SafeMode", dfsWithFailOver.isInSafeMode());
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), Configuration (org.apache.hadoop.conf.Configuration), StandbyException (org.apache.hadoop.ipc.StandbyException), InetSocketAddress (java.net.InetSocketAddress), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test)
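
Outside of MiniDFSCluster, the client failover exercised in the second half of the test comes from HA client configuration. A minimal sketch, assuming a logical nameservice named mycluster and placeholder hosts (all values below are illustrative, not taken from the test):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class HaClientConfigSketch {
    public static FileSystem haFileSystem() throws IOException {
        Configuration conf = new Configuration();
        // The logical URI hdfs://mycluster resolves to whichever NN is active.
        conf.set("fs.defaultFS", "hdfs://mycluster");
        conf.set("dfs.nameservices", "mycluster");
        conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
        conf.set("dfs.namenode.rpc-address.mycluster.nn1", "host1.example.com:8020");
        conf.set("dfs.namenode.rpc-address.mycluster.nn2", "host2.example.com:8020");
        // Proxy provider that fails over to the other NN on StandbyException.
        conf.set("dfs.client.failover.proxy.provider.mycluster",
            "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
        return FileSystem.get(conf);
    }
}

With a configuration like this, isInSafeMode() is answered by whichever NN is active, which is what the failover half of the test asserts.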

Example 43 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestEditLogTailer, method testTailer.

@Test
public void testTailer() throws IOException, InterruptedException, ServiceFailedException {
    Configuration conf = getConf();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ALL_NAMESNODES_RETRY_KEY, 100);
    HAUtil.setAllowStandbyReads(conf, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
    cluster.waitActive();
    cluster.transitionToActive(0);
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    try {
        for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
            NameNodeAdapter.mkdirs(nn1, getDirPath(i), new PermissionStatus("test", "test", new FsPermission((short) 00755)), true);
        }
        HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
        for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
            assertTrue(NameNodeAdapter.getFileInfo(nn2, getDirPath(i), false).isDir());
        }
        for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
            NameNodeAdapter.mkdirs(nn1, getDirPath(i), new PermissionStatus("test", "test", new FsPermission((short) 00755)), true);
        }
        HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
        for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
            assertTrue(NameNodeAdapter.getFileInfo(nn2, getDirPath(i), false).isDir());
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FsPermission (org.apache.hadoop.fs.permission.FsPermission), PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus), Test (org.junit.Test)
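
The snippet references DIRS_TO_MAKE and getDirPath, which are defined elsewhere in TestEditLogTailer. A plausible shape for those helpers, stated as an assumption rather than the verbatim source:

// Assumed shapes; the real definitions live elsewhere in TestEditLogTailer.
private static final int DIRS_TO_MAKE = 20;

private static String getDirPath(int suffix) {
    return "/testdir" + suffix;
}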

Example 44 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestStandbyBlockManagement, method testInvalidateBlock.

@Test(timeout = 60000)
public void testInvalidateBlock() throws Exception {
    Configuration conf = new Configuration();
    HAUtil.setAllowStandbyReads(conf, true);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        NameNode nn1 = cluster.getNameNode(0);
        NameNode nn2 = cluster.getNameNode(1);
        FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
        // Brief pause to let the freshly started HA pair settle before writing.
        Thread.sleep(1000);
        LOG.info("==================================");
        DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
        // Have to force an edit log roll so that the standby catches up
        nn1.getRpcServer().rollEditLog();
        LOG.info("==================================");
        // delete the file
        fs.delete(TEST_FILE_PATH, false);
        BlockManagerTestUtil.computeAllPendingWork(nn1.getNamesystem().getBlockManager());
        nn1.getRpcServer().rollEditLog();
        // The standby NN should not queue any block invalidations.
        assertEquals(0, nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());
        cluster.triggerHeartbeats();
        cluster.triggerBlockReports();
        // Even after heartbeats and block reports, the standby still has no pending deletions.
        assertEquals(0, nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());
    } finally {
        cluster.shutdown();
    }
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), Test (org.junit.Test)
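
TEST_FILE_PATH and TEST_FILE_DATA are class-level fixtures that the snippet does not show. A plausible shape, stated as an assumption (the names mirror the test; the values are invented):

// Assumed fixtures; the real values live elsewhere in the test class.
// Requires: import org.apache.hadoop.fs.Path;
private static final String TEST_FILE = "/testStandbyBlockManagement";
private static final Path TEST_FILE_PATH = new Path(TEST_FILE);
private static final String TEST_FILE_DATA = "hello highly available world";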

Example 45 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

From the class TestStandbyIsHot, method testStandbyIsHot.

@Test(timeout = 60000)
public void testStandbyIsHot() throws Exception {
    Configuration conf = new Configuration();
    // We read from the standby to watch block locations
    HAUtil.setAllowStandbyReads(conf, true);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        NameNode nn1 = cluster.getNameNode(0);
        NameNode nn2 = cluster.getNameNode(1);
        FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
        // Brief pause to let the freshly started HA pair settle before writing.
        Thread.sleep(1000);
        System.err.println("==================================");
        DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
        // Have to force an edit log roll so that the standby catches up
        nn1.getRpcServer().rollEditLog();
        System.err.println("==================================");
        // Block locations should show up on standby.
        LOG.info("Waiting for block locations to appear on standby node");
        waitForBlockLocations(cluster, nn2, TEST_FILE, 3);
        // Trigger immediate heartbeats and block reports so
        // that the active "trusts" all of the DNs
        cluster.triggerHeartbeats();
        cluster.triggerBlockReports();
        // Change replication
        LOG.info("Changing replication to 1");
        fs.setReplication(TEST_FILE_PATH, (short) 1);
        BlockManagerTestUtil.computeAllPendingWork(nn1.getNamesystem().getBlockManager());
        waitForBlockLocations(cluster, nn1, TEST_FILE, 1);
        nn1.getRpcServer().rollEditLog();
        LOG.info("Waiting for lowered replication to show up on standby");
        waitForBlockLocations(cluster, nn2, TEST_FILE, 1);
        // Change back to 3
        LOG.info("Changing replication to 3");
        fs.setReplication(TEST_FILE_PATH, (short) 3);
        BlockManagerTestUtil.computeAllPendingWork(nn1.getNamesystem().getBlockManager());
        nn1.getRpcServer().rollEditLog();
        LOG.info("Waiting for higher replication to show up on standby");
        waitForBlockLocations(cluster, nn2, TEST_FILE, 3);
    } finally {
        cluster.shutdown();
    }
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), Test (org.junit.Test)
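
waitForBlockLocations is a helper defined in TestStandbyIsHot that the snippet does not show. A hedged sketch of what such a polling helper could look like, built on GenericTestUtils.waitFor and NameNodeAdapter.getBlockLocations; treat the exact shape, the Guava Supplier (newer Hadoop versions use java.util.function.Supplier), and the timeouts as assumptions:

import java.io.IOException;
import com.google.common.base.Supplier;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.GenericTestUtils;

// cluster is unused here but kept so the signature matches the call sites above.
static void waitForBlockLocations(final MiniDFSCluster cluster, final NameNode nn,
        final String path, final int expectedReplicas) throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            try {
                // Poll the NN's view of the file's last block until the
                // expected number of replica locations shows up.
                LocatedBlocks locs = NameNodeAdapter.getBlockLocations(nn, path, 0, 1000);
                if (locs == null || locs.getLastLocatedBlock() == null) {
                    return false;
                }
                DatanodeInfo[] dnis = locs.getLastLocatedBlock().getLocations();
                return dnis != null && dnis.length == expectedReplicas;
            } catch (IOException e) {
                return false;
            }
        }
    }, 500, 20000);
}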

Aggregations

NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode): 65
Test (org.junit.Test): 44
Configuration (org.apache.hadoop.conf.Configuration): 28
Path (org.apache.hadoop.fs.Path): 22
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 17
FileSystem (org.apache.hadoop.fs.FileSystem): 15
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8
File (java.io.File): 7
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7
DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB): 7
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 6
IOException (java.io.IOException): 5
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4
BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager): 4
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 4
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 4
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 4