
Example 6 with NNHAStatusHeartbeat

Use of org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat in project hadoop by apache.

The class TestBPOfferService, method testTrySendErrorReportWhenStandbyNNTimesOut.

/**
   * This test case tests the {@link BPOfferService#trySendErrorReport} method:
   * if the call to the standby namenode times out, that should not affect the
   * active namenode's heartbeat processing, since this method runs under the
   * writeLock.
   * @throws Exception
   */
@Test
public void testTrySendErrorReportWhenStandbyNNTimesOut() throws Exception {
    BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
    bpos.start();
    try {
        waitForInitialization(bpos);
        // Should start with neither NN as active.
        assertNull(bpos.getActiveNN());
        // Have NN1 claim active at txid 1
        mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
        bpos.triggerHeartbeatForTests();
        // Now mockNN1 is acting like active namenode and mockNN2 as Standby
        assertSame(mockNN1, bpos.getActiveNN());
        Mockito.doAnswer(new BPOfferServiceSynchronousCallAnswer(0)).when(mockNN1).errorReport(Mockito.any(DatanodeRegistration.class), Mockito.anyInt(), Mockito.anyString());
        Mockito.doAnswer(new BPOfferServiceSynchronousCallAnswer(1)).when(mockNN2).errorReport(Mockito.any(DatanodeRegistration.class), Mockito.anyInt(), Mockito.anyString());
        String errorString = "Can't send invalid block " + FAKE_BLOCK;
        bpos.trySendErrorReport(DatanodeProtocol.INVALID_BLOCK, errorString);
        bpos.trySendErrorReport(DatanodeProtocol.INVALID_BLOCK, errorString);
        Thread.sleep(10000);
        long difference = secondCallTime - firstCallTime;
        assertTrue("Active namenode trySendErrorReport processing " + "should be independent of standby namenode trySendErrorReport" + " processing ", difference < 5000);
    } finally {
        bpos.stop();
        bpos.join();
    }
}
Also used : DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) NNHAStatusHeartbeat(org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat) Test(org.junit.Test)
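The timing assertion above depends on BPOfferServiceSynchronousCallAnswer, a custom Mockito Answer defined elsewhere in TestBPOfferService and not shown here. As a general illustration of the technique only (not the test's actual class), a minimal Answer that makes a stubbed namenode RPC appear to hang, the way an unreachable standby would, might look like the following; the class name and delay are purely hypothetical:

import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

// Hypothetical illustration, not BPOfferServiceSynchronousCallAnswer: an Answer
// that delays a stubbed RPC to simulate a namenode that does not respond promptly.
class SlowRpcAnswer implements Answer<Void> {
    private final long delayMs;

    SlowRpcAnswer(long delayMs) {
        this.delayMs = delayMs;
    }

    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
        Thread.sleep(delayMs); // pretend the remote namenode is hanging
        return null;
    }
}

It would be wired up the same way as the stubs above, e.g. Mockito.doAnswer(new SlowRpcAnswer(5000)).when(mockNN2).errorReport(Mockito.any(DatanodeRegistration.class), Mockito.anyInt(), Mockito.anyString());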

Example 7 with NNHAStatusHeartbeat

Use of org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat in project hadoop by apache.

The class TestBPOfferService, method testReportBadBlocksWhenNNThrowsStandbyException.

/**
   * This test verifies that the reportBadBlock request is not added to
   * {@link BPServiceActor#bpThreadEnqueue} when the Standby namenode throws
   * a {@link StandbyException}.
   * @throws Exception
   */
@Test
public void testReportBadBlocksWhenNNThrowsStandbyException() throws Exception {
    BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
    bpos.start();
    try {
        waitForInitialization(bpos);
        // Should start with neither NN as active.
        assertNull(bpos.getActiveNN());
        // Have NN1 claim active at txid 1
        mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
        bpos.triggerHeartbeatForTests();
        // Now mockNN1 is acting like active namenode and mockNN2 as Standby
        assertSame(mockNN1, bpos.getActiveNN());
        // Do nothing when reportBadBlocks is called on the active namenode (mockNN1)
        Mockito.doNothing().when(mockNN1).reportBadBlocks(Mockito.any(LocatedBlock[].class));
        RemoteException re = new RemoteException(StandbyException.class.getName(), "Operation category WRITE is not supported in state " + "standby", RpcErrorCodeProto.ERROR_APPLICATION);
        // Throw a StandbyException wrapped in a RemoteException when
        // reportBadBlocks is called on the standby namenode (mockNN2)
        Mockito.doThrow(re).when(mockNN2).reportBadBlocks(Mockito.any(LocatedBlock[].class));
        bpos.reportBadBlocks(FAKE_BLOCK, mockFSDataset.getVolume(FAKE_BLOCK).getStorageID(), mockFSDataset.getVolume(FAKE_BLOCK).getStorageType());
        // Send a heartbeat so that the BPServiceActor can report the bad block
        // to the namenode
        bpos.triggerHeartbeatForTests();
        Mockito.verify(mockNN2, Mockito.times(1)).reportBadBlocks(Mockito.any(LocatedBlock[].class));
        // Trigger another heartbeat; if the bad block report were still queued,
        // it would be sent again and the times(1) verification below would fail.
        bpos.triggerHeartbeatForTests();
        Mockito.verify(mockNN2, Mockito.times(1)).reportBadBlocks(Mockito.any(LocatedBlock[].class));
    } finally {
        bpos.stop();
        bpos.join();
    }
}
Also used : NNHAStatusHeartbeat(org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat) StandbyException(org.apache.hadoop.ipc.StandbyException) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)
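On the wire, the standby's rejection reaches the DataNode as a RemoteException wrapping a StandbyException, which is why the test builds the RemoteException from StandbyException.class.getName(). A minimal sketch of how a caller can recognize that case, assuming only the public RemoteException#unwrapRemoteException API; the helper class and method names are hypothetical and are not the actual BPServiceActor code:

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;

// Hypothetical helper: report bad blocks and tell a standby rejection apart
// from a real failure, so the caller knows not to re-enqueue the report.
final class BadBlockReporterSketch {
    static boolean reportTo(DatanodeProtocolClientSideTranslatorPB nn,
                            LocatedBlock[] blocks) throws IOException {
        try {
            nn.reportBadBlocks(blocks);
            return true;                   // accepted by an active namenode
        } catch (RemoteException re) {
            IOException unwrapped = re.unwrapRemoteException(StandbyException.class);
            if (unwrapped instanceof StandbyException) {
                return false;              // standby namenode: drop, do not re-enqueue
            }
            throw re;                      // anything else is a genuine error
        }
    }
}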

Example 8 with NNHAStatusHeartbeat

Use of org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat in project hadoop by apache.

The class TestBPOfferService, method setupNNMock.

/**
   * Set up a mock NN with the bare minimum for a DN to register to it.
   */
private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx) throws Exception {
    DatanodeProtocolClientSideTranslatorPB mock = Mockito.mock(DatanodeProtocolClientSideTranslatorPB.class);
    Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0)).when(mock).versionRequest();
    Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration()).when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
    Mockito.doAnswer(new HeartbeatAnswer(nnIdx)).when(mock).sendHeartbeat(
        Mockito.any(DatanodeRegistration.class),
        Mockito.any(StorageReport[].class),
        Mockito.anyLong(), Mockito.anyLong(),
        Mockito.anyInt(), Mockito.anyInt(), Mockito.anyInt(),
        Mockito.any(VolumeFailureSummary.class),
        Mockito.anyBoolean(),
        Mockito.any(SlowPeerReports.class));
    mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
    datanodeCommands[nnIdx] = new DatanodeCommand[0];
    return mock;
}
Also used : DatanodeProtocolClientSideTranslatorPB(org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) NNHAStatusHeartbeat(org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat) SlowPeerReports(org.apache.hadoop.hdfs.server.protocol.SlowPeerReports) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) VolumeFailureSummary(org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary)
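The HeartbeatAnswer stubbed onto sendHeartbeat above is defined elsewhere in TestBPOfferService. Judging from how the tests flip mockHaStatuses[nnIdx] and then call triggerHeartbeatForTests(), it presumably echoes the staged HA status back in a HeartbeatResponse built like the one in Example 10 below. A self-contained sketch under that assumption; the class name is illustrative, and the static arrays stand in for fields of the real test class:

import java.util.concurrent.ThreadLocalRandom;

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

// Hypothetical sketch of a HeartbeatAnswer-style stub: answer each heartbeat
// with whatever HA status and commands the test has staged for this NN index.
class HeartbeatAnswerSketch implements Answer<HeartbeatResponse> {
    static final NNHAStatusHeartbeat[] mockHaStatuses = new NNHAStatusHeartbeat[2];
    static final DatanodeCommand[][] datanodeCommands = new DatanodeCommand[2][];

    private final int nnIdx;

    HeartbeatAnswerSketch(int nnIdx) {
        this.nnIdx = nnIdx;
    }

    @Override
    public HeartbeatResponse answer(InvocationOnMock invocation) throws Throwable {
        // Same constructor shape as the response built in Example 10.
        return new HeartbeatResponse(
            datanodeCommands[nnIdx],
            mockHaStatuses[nnIdx],
            null,                                         // no rolling upgrade status
            ThreadLocalRandom.current().nextLong() | 1L); // non-zero lease id
    }
}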

Example 9 with NNHAStatusHeartbeat

Use of org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat in project hadoop by apache.

The class TestBPOfferService, method testPickActiveNameNode.

/**
   * Test that the DataNode determines the active NameNode correctly
   * based on the HA-related information in heartbeat responses.
   * See HDFS-2627.
   */
@Test
public void testPickActiveNameNode() throws Exception {
    BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
    bpos.start();
    try {
        waitForInitialization(bpos);
        // Should start with neither NN as active.
        assertNull(bpos.getActiveNN());
        // Have NN1 claim active at txid 1
        mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
        bpos.triggerHeartbeatForTests();
        assertSame(mockNN1, bpos.getActiveNN());
        // NN2 claims active at a higher txid
        mockHaStatuses[1] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 2);
        bpos.triggerHeartbeatForTests();
        assertSame(mockNN2, bpos.getActiveNN());
        // Even after another heartbeat from the first NN, it should
        // think NN2 is active, since it claimed a higher txid
        bpos.triggerHeartbeatForTests();
        assertSame(mockNN2, bpos.getActiveNN());
        // Even if NN2 goes to standby, DN shouldn't reset to talking to NN1,
        // because NN1's txid is lower than the last active txid. Instead,
        // it should consider neither active.
        mockHaStatuses[1] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 2);
        bpos.triggerHeartbeatForTests();
        assertNull(bpos.getActiveNN());
        // Now if NN1 goes back to a higher txid, it should be considered active
        mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 3);
        bpos.triggerHeartbeatForTests();
        assertSame(mockNN1, bpos.getActiveNN());
    } finally {
        bpos.stop();
        bpos.join();
    }
}
Also used : NNHAStatusHeartbeat(org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat) Test(org.junit.Test)
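The sequence above boils down to a simple rule: an ACTIVE claim is accepted only if its transaction id is at least as recent as the last accepted claim, and when the trusted NN stops qualifying, the DataNode trusts neither NN rather than falling back to a staler claimant. A standalone illustration of that rule with hypothetical names; the real decision is made inside BPOfferService when it processes heartbeat responses (see HDFS-2627), and may differ in detail:

import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;

// Hypothetical illustration of the txid-based selection exercised by the test.
class ActiveNnTracker {
    private Integer activeNnIdx = null;   // index of the NN currently believed active
    private long lastActiveClaimTxId = 0; // highest txid of an accepted ACTIVE claim

    void onHeartbeat(int nnIdx, NNHAStatusHeartbeat status) {
        long txid = status.getTxId();
        if (status.getState() == HAServiceState.ACTIVE
                && txid >= lastActiveClaimTxId) {
            // A fresher (or equally fresh) ACTIVE claim wins.
            activeNnIdx = nnIdx;
            lastActiveClaimTxId = txid;
        } else if (activeNnIdx != null && activeNnIdx == nnIdx) {
            // The NN we trusted no longer claims active at a usable txid:
            // trust neither, rather than falling back to a stale claimant.
            activeNnIdx = null;
        }
    }

    Integer getActiveNnIdx() {
        return activeNnIdx;
    }
}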

Example 10 with NNHAStatusHeartbeat

Use of org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat in project hadoop by apache.

The class TestBlockRecovery, method startUp.

/**
   * Starts an instance of DataNode.
   * @throws IOException
   * @throws URISyntaxException
   */
@Before
public void startUp() throws IOException, URISyntaxException {
    tearDownDone = false;
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
    conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
    if (currentTestName.getMethodName().contains("DoesNotHoldLock")) {
        // This test requires a very long value for the xceiver stop timeout.
        conf.setLong(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, TEST_STOP_WORKER_XCEIVER_STOP_TIMEOUT_MILLIS);
    }
    conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
    FileSystem.setDefaultUri(conf, "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
    ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
    File dataDir = new File(DATA_DIR);
    FileUtil.fullyDelete(dataDir);
    dataDir.mkdirs();
    StorageLocation location = StorageLocation.parse(dataDir.getPath());
    locations.add(location);
    final DatanodeProtocolClientSideTranslatorPB namenode = mock(DatanodeProtocolClientSideTranslatorPB.class);
    Mockito.doAnswer(new Answer<DatanodeRegistration>() {

        @Override
        public DatanodeRegistration answer(InvocationOnMock invocation) throws Throwable {
            return (DatanodeRegistration) invocation.getArguments()[0];
        }
    }).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class));
    when(namenode.versionRequest()).thenReturn(new NamespaceInfo(1, CLUSTER_ID, POOL_ID, 1L));
    when(namenode.sendHeartbeat(
        Mockito.any(DatanodeRegistration.class),
        Mockito.any(StorageReport[].class),
        Mockito.anyLong(), Mockito.anyLong(),
        Mockito.anyInt(), Mockito.anyInt(), Mockito.anyInt(),
        Mockito.any(VolumeFailureSummary.class),
        Mockito.anyBoolean(),
        Mockito.any(SlowPeerReports.class)))
        .thenReturn(new HeartbeatResponse(
            new DatanodeCommand[0],
            new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1),
            null,
            ThreadLocalRandom.current().nextLong() | 1L));
    dn = new DataNode(conf, locations, null, null) {

        @Override
        DatanodeProtocolClientSideTranslatorPB connectToNN(InetSocketAddress nnAddr) throws IOException {
            Assert.assertEquals(NN_ADDR, nnAddr);
            return namenode;
        }
    };
    // Trigger a heartbeat so that it acknowledges the NN as active.
    dn.getAllBpOs().get(0).triggerHeartbeatForTests();
    waitForActiveNN();
    spyDN = spy(dn);
    recoveryWorker = new BlockRecoveryWorker(spyDN);
}
Also used : HeartbeatResponse(org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse) InetSocketAddress(java.net.InetSocketAddress) ArrayList(java.util.ArrayList) IOException(java.io.IOException) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) VolumeFailureSummary(org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary) DatanodeProtocolClientSideTranslatorPB(org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeCommand(org.apache.hadoop.hdfs.server.protocol.DatanodeCommand) NNHAStatusHeartbeat(org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat) InvocationOnMock(org.mockito.invocation.InvocationOnMock) SlowPeerReports(org.apache.hadoop.hdfs.server.protocol.SlowPeerReports) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) File(java.io.File) Before(org.junit.Before)
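waitForActiveNN() is another TestBlockRecovery helper whose body is not shown here. A minimal sketch of how such a wait could be written with GenericTestUtils.waitFor, assuming it lives in the test class where dn is the DataNode field initialized above; the poll interval and timeout are illustrative and the actual helper may differ:

import java.util.concurrent.TimeoutException;

import org.apache.hadoop.test.GenericTestUtils;

// Hypothetical sketch: poll until the DataNode's first block pool has accepted
// an active namenode, or fail after ten seconds.
private void waitForActiveNN() throws TimeoutException, InterruptedException {
    GenericTestUtils.waitFor(
        () -> dn.getAllBpOs().get(0).getActiveNN() != null,
        // check every 100 ms, give up after 10 seconds
        100, 10000);
}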

Aggregations

NNHAStatusHeartbeat (org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat): 12 uses
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 7 uses
Test (org.junit.Test): 7 uses
HeartbeatResponse (org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse): 5 uses
SlowPeerReports (org.apache.hadoop.hdfs.server.protocol.SlowPeerReports): 5 uses
InvocationOnMock (org.mockito.invocation.InvocationOnMock): 5 uses
IOException (java.io.IOException): 4 uses
DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB): 4 uses
NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo): 4 uses
VolumeFailureSummary (org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary): 4 uses
InetSocketAddress (java.net.InetSocketAddress): 3 uses
DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand): 3 uses
File (java.io.File): 2 uses
ArrayList (java.util.ArrayList): 2 uses
EOFException (java.io.EOFException): 1 use
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 1 use
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1 use
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 1 use
RegisterCommand (org.apache.hadoop.hdfs.server.protocol.RegisterCommand): 1 use
StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport): 1 use