Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
The class TestBPOfferService, method testNNsFromDifferentClusters.
/**
 * Ensure that, if the two NNs configured for a block pool
 * report different cluster IDs, the DN will refuse to
 * register with both.
 */
@Test
public void testNNsFromDifferentClusters() throws Exception {
  Mockito.doReturn(new NamespaceInfo(1, "fake foreign cluster", FAKE_BPID, 0))
      .when(mockNN1).versionRequest();
  BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
  bpos.start();
  try {
    waitForOneToFail(bpos);
  } finally {
    bpos.stop();
    bpos.join();
  }
}
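The waitForOneToFail helper is referenced but not shown here. A minimal sketch of what such a wait might look like, polling the actors with a plain timeout loop; the loop shape, the 10-second timeout, and the reliance on BPServiceActor.isAlive() are assumptions, not the exact Hadoop test code.

// Sketch only: poll until exactly one of the two BPServiceActors has died,
// i.e. the actor talking to the foreign-cluster NN gave up registering.
private void waitForOneToFail(BPOfferService bpos) throws Exception {
  long deadline = System.currentTimeMillis() + 10000;   // assumed 10s timeout
  while (System.currentTimeMillis() < deadline) {
    int dead = 0;
    for (BPServiceActor actor : bpos.getBPServiceActors()) {
      if (!actor.isAlive()) {
        dead++;
      }
    }
    if (dead == 1) {
      return;
    }
    Thread.sleep(100);
  }
  Assert.fail("Timed out waiting for one BPServiceActor to fail");
}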
Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
The class TestBPOfferService, method testNNHAStateUpdateFromVersionRequest.
/**
 * Test that the DN does not consider a NameNode active until the
 * HAServiceState arrives via versionRequest (getNamespaceInfo), and that
 * it records the NN as active once the state is included.
 */
@Test
public void testNNHAStateUpdateFromVersionRequest() throws Exception {
  final BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
  Mockito.doReturn(true).when(mockDn).areHeartbeatsDisabledForTests();
  BPServiceActor actor = bpos.getBPServiceActors().get(0);
  bpos.start();
  waitForInitialization(bpos);
  // Should start with neither NN as active.
  assertNull(bpos.getActiveNN());
  // getNamespaceInfo() will not include HAServiceState
  NamespaceInfo nsInfo = mockNN1.versionRequest();
  bpos.verifyAndSetNamespaceInfo(actor, nsInfo);
  assertNull(bpos.getActiveNN());
  // Change mock so getNamespaceInfo() will include HAServiceState
  Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0, HAServiceState.ACTIVE))
      .when(mockNN1).versionRequest();
  // Update the bpos NamespaceInfo
  nsInfo = mockNN1.versionRequest();
  bpos.verifyAndSetNamespaceInfo(actor, nsInfo);
  assertNotNull(bpos.getActiveNN());
}
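The waitForInitialization helper is likewise not shown. A minimal sketch under the assumption that BPOfferService exposes isAlive() and isInitialized() checks; the polling interval and timeout are illustrative.

// Sketch only: wait until the BPOfferService has come up and finished
// handshaking before the test starts asserting on its state.
private void waitForInitialization(BPOfferService bpos) throws Exception {
  long deadline = System.currentTimeMillis() + 10000;   // assumed 10s timeout
  while (!(bpos.isAlive() && bpos.isInitialized())) {
    if (System.currentTimeMillis() > deadline) {
      Assert.fail("Timed out waiting for BPOfferService initialization");
    }
    Thread.sleep(100);
  }
}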
Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
The class TestBPOfferService, method setupNNMock.
/**
 * Set up a mock NN with the bare minimum for a DN to register to it.
 */
private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx) throws Exception {
  DatanodeProtocolClientSideTranslatorPB mock =
      Mockito.mock(DatanodeProtocolClientSideTranslatorPB.class);
  Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0))
      .when(mock).versionRequest();
  Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
      .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
  Mockito.doAnswer(new HeartbeatAnswer(nnIdx))
      .when(mock).sendHeartbeat(
          Mockito.any(DatanodeRegistration.class), Mockito.any(StorageReport[].class),
          Mockito.anyLong(), Mockito.anyLong(), Mockito.anyInt(), Mockito.anyInt(),
          Mockito.anyInt(), Mockito.any(VolumeFailureSummary.class),
          Mockito.anyBoolean(), Mockito.any(SlowPeerReports.class));
  mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
  datanodeCommands[nnIdx] = new DatanodeCommand[0];
  return mock;
}
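This helper is invoked once per NameNode. A minimal sketch of how the mockNN1/mockNN2 fields used in the tests above might be populated during setup; the field names mirror the snippets here, but the exact fixture code is an assumption.

private DatanodeProtocolClientSideTranslatorPB mockNN1;
private DatanodeProtocolClientSideTranslatorPB mockNN2;

@Before
public void setupMocks() throws Exception {
  // One mock NN per index; each index also gets its own slot in
  // mockHaStatuses and datanodeCommands.
  mockNN1 = setupNNMock(0);
  mockNN2 = setupNNMock(1);
}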
Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
The class TestBlockRecovery, method startUp.
/**
 * Starts an instance of DataNode
 * @throws IOException
 */
@Before
public void startUp() throws IOException, URISyntaxException {
  tearDownDone = false;
  conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
  conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  if (currentTestName.getMethodName().contains("DoesNotHoldLock")) {
    // This test requires a very long value for the xceiver stop timeout.
    conf.setLong(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,
        TEST_STOP_WORKER_XCEIVER_STOP_TIMEOUT_MILLIS);
  }
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
  FileSystem.setDefaultUri(conf,
      "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
  ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
  File dataDir = new File(DATA_DIR);
  FileUtil.fullyDelete(dataDir);
  dataDir.mkdirs();
  StorageLocation location = StorageLocation.parse(dataDir.getPath());
  locations.add(location);
  final DatanodeProtocolClientSideTranslatorPB namenode =
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  Mockito.doAnswer(new Answer<DatanodeRegistration>() {
    @Override
    public DatanodeRegistration answer(InvocationOnMock invocation) throws Throwable {
      return (DatanodeRegistration) invocation.getArguments()[0];
    }
  }).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class));
  when(namenode.versionRequest())
      .thenReturn(new NamespaceInfo(1, CLUSTER_ID, POOL_ID, 1L));
  when(namenode.sendHeartbeat(
      Mockito.any(DatanodeRegistration.class), Mockito.any(StorageReport[].class),
      Mockito.anyLong(), Mockito.anyLong(), Mockito.anyInt(), Mockito.anyInt(),
      Mockito.anyInt(), Mockito.any(VolumeFailureSummary.class),
      Mockito.anyBoolean(), Mockito.any(SlowPeerReports.class)))
      .thenReturn(new HeartbeatResponse(new DatanodeCommand[0],
          new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1), null,
          ThreadLocalRandom.current().nextLong() | 1L));
  dn = new DataNode(conf, locations, null, null) {
    @Override
    DatanodeProtocolClientSideTranslatorPB connectToNN(InetSocketAddress nnAddr)
        throws IOException {
      Assert.assertEquals(NN_ADDR, nnAddr);
      return namenode;
    }
  };
  // Trigger a heartbeat so that it acknowledges the NN as active.
  dn.getAllBpOs().get(0).triggerHeartbeatForTests();
  waitForActiveNN();
  spyDN = spy(dn);
  recoveryWorker = new BlockRecoveryWorker(spyDN);
}
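waitForActiveNN is referenced but not shown. A minimal sketch of such a wait, polling the first BPOfferService until it reports an active NN; the loop shape and timeout are assumptions.

// Sketch only: block until the triggered heartbeat has been processed and
// the BPOfferService has recorded the mocked NN as active.
private void waitForActiveNN() throws InterruptedException {
  long deadline = System.currentTimeMillis() + 100000;  // assumed generous timeout
  while (dn.getAllBpOs().get(0).getActiveNN() == null) {
    if (System.currentTimeMillis() > deadline) {
      Assert.fail("Timed out waiting for the active NN to be set");
    }
    Thread.sleep(100);
  }
}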
Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
The class TestDatanodeProtocolRetryPolicy, method testDatanodeRegistrationRetry.
/**
 * Verify the following scenario.
 * 1. The initial DatanodeProtocol.registerDatanode succeeds.
 * 2. DN starts heartbeat process.
 * 3. In the first heartbeat, NN asks DN to reregister.
 * 4. DN calls DatanodeProtocol.registerDatanode.
 * 5. DatanodeProtocol.registerDatanode throws EOFException.
 * 6. DN retries.
 * 7. DatanodeProtocol.registerDatanode succeeds.
 */
@Test(timeout = 60000)
public void testDatanodeRegistrationRetry() throws Exception {
  final DatanodeProtocolClientSideTranslatorPB namenode =
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  Mockito.doAnswer(new Answer<DatanodeRegistration>() {
    int i = 0;
    @Override
    public DatanodeRegistration answer(InvocationOnMock invocation) throws Throwable {
      i++;
      if (i > 1 && i < 5) {
        LOG.info("mockito exception " + i);
        throw new EOFException("TestDatanodeProtocolRetryPolicy");
      } else {
        DatanodeRegistration dr = (DatanodeRegistration) invocation.getArguments()[0];
        datanodeRegistration = new DatanodeRegistration(dr.getDatanodeUuid(), dr);
        LOG.info("mockito succeeded " + datanodeRegistration);
        return datanodeRegistration;
      }
    }
  }).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class));
  when(namenode.versionRequest())
      .thenReturn(new NamespaceInfo(1, CLUSTER_ID, POOL_ID, 1L));
  Mockito.doAnswer(new Answer<HeartbeatResponse>() {
    int i = 0;
    @Override
    public HeartbeatResponse answer(InvocationOnMock invocation) throws Throwable {
      i++;
      HeartbeatResponse heartbeatResponse;
      if (i == 1) {
        LOG.info("mockito heartbeatResponse registration " + i);
        heartbeatResponse = new HeartbeatResponse(
            new DatanodeCommand[] { RegisterCommand.REGISTER },
            new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1), null,
            ThreadLocalRandom.current().nextLong() | 1L);
      } else {
        LOG.info("mockito heartbeatResponse " + i);
        heartbeatResponse = new HeartbeatResponse(new DatanodeCommand[0],
            new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1), null,
            ThreadLocalRandom.current().nextLong() | 1L);
      }
      return heartbeatResponse;
    }
  }).when(namenode).sendHeartbeat(
      Mockito.any(DatanodeRegistration.class), Mockito.any(StorageReport[].class),
      Mockito.anyLong(), Mockito.anyLong(), Mockito.anyInt(), Mockito.anyInt(),
      Mockito.anyInt(), Mockito.any(VolumeFailureSummary.class),
      Mockito.anyBoolean(), Mockito.any(SlowPeerReports.class));
  dn = new DataNode(conf, locations, null, null) {
    @Override
    DatanodeProtocolClientSideTranslatorPB connectToNN(InetSocketAddress nnAddr)
        throws IOException {
      Assert.assertEquals(NN_ADDR, nnAddr);
      return namenode;
    }
  };
  // Trigger a heartbeat so that it acknowledges the NN as active.
  dn.getAllBpOs().get(0).triggerHeartbeatForTests();
  waitForBlockReport(namenode);
}
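waitForBlockReport is not shown above. A minimal sketch of how such a wait might be written, repeatedly asking Mockito whether the mock NN has received a block report yet; the blockReport argument matchers, the timeout, and the polling interval below are assumptions rather than the exact Hadoop test code.

// Sketch only: succeed once Mockito can verify that the DN, after retrying
// registration, went on to send a block report to the mock NN.
private void waitForBlockReport(
    final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
  long deadline = System.currentTimeMillis() + 100000;  // assumed generous timeout
  while (true) {
    try {
      Mockito.verify(mockNN).blockReport(
          Mockito.any(DatanodeRegistration.class),
          Mockito.eq(POOL_ID),
          Mockito.any(StorageBlockReport[].class),
          Mockito.any(BlockReportContext.class));
      return;
    } catch (Throwable t) {
      if (System.currentTimeMillis() > deadline) {
        throw new AssertionError("Timed out waiting for block report", t);
      }
      Thread.sleep(500);
    }
  }
}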