Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache:
the class TestBlockRecovery, method startUp.
/**
 * Starts an instance of DataNode.
 * @throws IOException
 * @throws URISyntaxException
 */
@Before
public void startUp() throws IOException, URISyntaxException {
  tearDownDone = false;
  conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
  conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  if (currentTestName.getMethodName().contains("DoesNotHoldLock")) {
    // This test requires a very long value for the xceiver stop timeout.
    conf.setLong(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,
        TEST_STOP_WORKER_XCEIVER_STOP_TIMEOUT_MILLIS);
  }
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
  FileSystem.setDefaultUri(conf,
      "hdfs://" + NN_ADDR.getHostName() + ":" + NN_ADDR.getPort());
  ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
  File dataDir = new File(DATA_DIR);
  FileUtil.fullyDelete(dataDir);
  dataDir.mkdirs();
  StorageLocation location = StorageLocation.parse(dataDir.getPath());
  locations.add(location);
  final DatanodeProtocolClientSideTranslatorPB namenode =
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  // Echo back whatever registration the DataNode sends.
  Mockito.doAnswer(new Answer<DatanodeRegistration>() {
    @Override
    public DatanodeRegistration answer(InvocationOnMock invocation)
        throws Throwable {
      return (DatanodeRegistration) invocation.getArguments()[0];
    }
  }).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class));
  when(namenode.versionRequest())
      .thenReturn(new NamespaceInfo(1, CLUSTER_ID, POOL_ID, 1L));
  // Every heartbeat gets an empty command list from an active NN.
  when(namenode.sendHeartbeat(
      Mockito.any(DatanodeRegistration.class),
      Mockito.any(StorageReport[].class),
      Mockito.anyLong(), Mockito.anyLong(),
      Mockito.anyInt(), Mockito.anyInt(), Mockito.anyInt(),
      Mockito.any(VolumeFailureSummary.class),
      Mockito.anyBoolean(),
      Mockito.any(SlowPeerReports.class)))
      .thenReturn(new HeartbeatResponse(
          new DatanodeCommand[0],
          new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1),
          null,
          ThreadLocalRandom.current().nextLong() | 1L));
  dn = new DataNode(conf, locations, null, null) {
    @Override
    DatanodeProtocolClientSideTranslatorPB connectToNN(InetSocketAddress nnAddr)
        throws IOException {
      Assert.assertEquals(NN_ADDR, nnAddr);
      return namenode;
    }
  };
  // Trigger a heartbeat so that it acknowledges the NN as active.
  dn.getAllBpOs().get(0).triggerHeartbeatForTests();
  waitForActiveNN();
  spyDN = spy(dn);
  recoveryWorker = new BlockRecoveryWorker(spyDN);
}
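The snippet does not include the matching @After hook, but the tearDownDone flag set on the first line implies one exists. A minimal sketch, assuming only the dn and DATA_DIR fields used above (the real test's tearDown may do more, such as checking for leaked threads):

@After
public void tearDown() throws IOException {
  // Hypothetical companion to startUp(); not part of the original snippet.
  if (!tearDownDone && dn != null) {
    try {
      dn.shutdown();
    } finally {
      // Remove the same data directory that startUp() created.
      FileUtil.fullyDelete(new File(DATA_DIR));
      tearDownDone = true;
    }
  }
}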
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache:
the class TestDatanodeProtocolRetryPolicy, method testDatanodeRegistrationRetry.
/**
* Verify the following scenario.
* 1. The initial DatanodeProtocol.registerDatanode succeeds.
* 2. DN starts heartbeat process.
* 3. In the first heartbeat, NN asks DN to reregister.
* 4. DN calls DatanodeProtocol.registerDatanode.
* 5. DatanodeProtocol.registerDatanode throws EOFException.
* 6. DN retries.
* 7. DatanodeProtocol.registerDatanode succeeds.
*/
@Test(timeout = 60000)
public void testDatanodeRegistrationRetry() throws Exception {
  final DatanodeProtocolClientSideTranslatorPB namenode =
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  // Fail the 2nd through 4th registration attempts with EOFException,
  // then succeed again (steps 5-7 of the scenario above).
  Mockito.doAnswer(new Answer<DatanodeRegistration>() {
    int i = 0;

    @Override
    public DatanodeRegistration answer(InvocationOnMock invocation)
        throws Throwable {
      i++;
      if (i > 1 && i < 5) {
        LOG.info("mockito exception " + i);
        throw new EOFException("TestDatanodeProtocolRetryPolicy");
      } else {
        DatanodeRegistration dr =
            (DatanodeRegistration) invocation.getArguments()[0];
        datanodeRegistration =
            new DatanodeRegistration(dr.getDatanodeUuid(), dr);
        LOG.info("mockito succeeded " + datanodeRegistration);
        return datanodeRegistration;
      }
    }
  }).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class));
  when(namenode.versionRequest())
      .thenReturn(new NamespaceInfo(1, CLUSTER_ID, POOL_ID, 1L));
  // Ask the DataNode to re-register on the first heartbeat only.
  Mockito.doAnswer(new Answer<HeartbeatResponse>() {
    int i = 0;

    @Override
    public HeartbeatResponse answer(InvocationOnMock invocation)
        throws Throwable {
      i++;
      HeartbeatResponse heartbeatResponse;
      if (i == 1) {
        LOG.info("mockito heartbeatResponse registration " + i);
        heartbeatResponse = new HeartbeatResponse(
            new DatanodeCommand[] { RegisterCommand.REGISTER },
            new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1),
            null,
            ThreadLocalRandom.current().nextLong() | 1L);
      } else {
        LOG.info("mockito heartbeatResponse " + i);
        heartbeatResponse = new HeartbeatResponse(
            new DatanodeCommand[0],
            new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1),
            null,
            ThreadLocalRandom.current().nextLong() | 1L);
      }
      return heartbeatResponse;
    }
  }).when(namenode).sendHeartbeat(
      Mockito.any(DatanodeRegistration.class),
      Mockito.any(StorageReport[].class),
      Mockito.anyLong(), Mockito.anyLong(),
      Mockito.anyInt(), Mockito.anyInt(), Mockito.anyInt(),
      Mockito.any(VolumeFailureSummary.class),
      Mockito.anyBoolean(),
      Mockito.any(SlowPeerReports.class));
  dn = new DataNode(conf, locations, null, null) {
    @Override
    DatanodeProtocolClientSideTranslatorPB connectToNN(InetSocketAddress nnAddr)
        throws IOException {
      Assert.assertEquals(NN_ADDR, nnAddr);
      return namenode;
    }
  };
  // Trigger a heartbeat so that it acknowledges the NN as active.
  dn.getAllBpOs().get(0).triggerHeartbeatForTests();
  waitForBlockReport(namenode);
}
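The helper waitForBlockReport is likewise not shown here. A minimal sketch, assuming Hadoop's GenericTestUtils.waitFor (org.apache.hadoop.test), Guava's Supplier, and this Hadoop version's DatanodeProtocol.blockReport signature, could poll until Mockito can verify that the mocked NameNode received a block report:

private void waitForBlockReport(
    final DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {
  // Hypothetical sketch; the real test's helper may match more precisely
  // (e.g. on the captured registration instead of any()).
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockNN, Mockito.atLeastOnce()).blockReport(
            Mockito.any(DatanodeRegistration.class),
            Mockito.eq(POOL_ID),
            Mockito.any(StorageBlockReport[].class),
            Mockito.any(BlockReportContext.class));
        return true;
      } catch (Throwable t) {
        // Not reported yet; keep polling.
        return false;
      }
    }
  }, 500, 100000);
}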
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache:
the class TestTransferRbw, method testTransferRbw.
@Test
public void testTransferRbw() throws Exception {
  final HdfsConfiguration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();
    // Create a file, write some data and leave it open.
    final Path p = new Path("/foo");
    final int size = (1 << 16) + RAN.nextInt(1 << 16);
    LOG.info("size = " + size);
    final FSDataOutputStream out = fs.create(p, REPLICATION);
    final byte[] bytes = new byte[1024];
    for (int remaining = size; remaining > 0; ) {
      RAN.nextBytes(bytes);
      final int len = bytes.length < remaining ? bytes.length : remaining;
      out.write(bytes, 0, len);
      out.hflush();
      remaining -= len;
    }
    // Get the RBW replica.
    final ReplicaBeingWritten oldrbw;
    final DataNode newnode;
    final DatanodeInfo newnodeinfo;
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    {
      final DataNode oldnode = cluster.getDataNodes().get(0);
      oldrbw = getRbw(oldnode, bpid);
      LOG.info("oldrbw = " + oldrbw);
      // Add a datanode.
      cluster.startDataNodes(conf, 1, true, null, null);
      newnode = cluster.getDataNodes().get(REPLICATION);
      final DatanodeInfo oldnodeinfo;
      {
        final DatanodeInfo[] datanodeinfos =
            cluster.getNameNodeRpc().getDatanodeReport(DatanodeReportType.LIVE);
        Assert.assertEquals(2, datanodeinfos.length);
        // Find the index of the new datanode in the report.
        int i = 0;
        for (DatanodeRegistration dnReg = newnode.getDNRegistrationForBP(bpid);
             i < datanodeinfos.length && !datanodeinfos[i].equals(dnReg); i++) {
          // Empty body: the loop only advances i.
        }
        Assert.assertTrue(i < datanodeinfos.length);
        newnodeinfo = datanodeinfos[i];
        oldnodeinfo = datanodeinfos[1 - i];
      }
      // Transfer the RBW replica from the old datanode to the new one.
      final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(),
          oldrbw.getBytesAcked(), oldrbw.getGenerationStamp());
      final BlockOpResponseProto s = DFSTestUtil.transferRbw(
          b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
      Assert.assertEquals(Status.SUCCESS, s.getStatus());
    }
    // Check that the new RBW replica matches the old one.
    final ReplicaBeingWritten newrbw = getRbw(newnode, bpid);
    LOG.info("newrbw = " + newrbw);
    Assert.assertEquals(oldrbw.getBlockId(), newrbw.getBlockId());
    Assert.assertEquals(oldrbw.getGenerationStamp(), newrbw.getGenerationStamp());
    Assert.assertEquals(oldrbw.getVisibleLength(), newrbw.getVisibleLength());
    LOG.info("DONE");
  } finally {
    cluster.shutdown();
  }
}
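Both calls to getRbw above rely on a helper that is not part of this snippet. A minimal sketch, assuming the FsDatasetTestUtil test utility and that each DataNode holds exactly one replica of the block pool (a robust helper would also wait for the replica to appear):

private static ReplicaBeingWritten getRbw(DataNode datanode, String bpid) {
  // Hypothetical sketch; the real test's helper may differ.
  final Collection<ReplicaInfo> replicas =
      FsDatasetTestUtil.getReplicas(datanode.getFSDataset(), bpid);
  Assert.assertEquals(1, replicas.size());
  final ReplicaInfo r = replicas.iterator().next();
  // The replica must still be in the replica-being-written state.
  Assert.assertEquals(ReplicaState.RBW, r.getState());
  return (ReplicaBeingWritten) r;
}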