Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From the class BlockReportTestBase, method testOneReplicaRbwReportArrivesAfterBlockCompleted.
/**
 * Test for the case where one of the DNs in the pipeline is in the
 * process of doing a block report exactly when the block is closed.
 * In this case, the block report becomes delayed until after the
 * block is marked completed on the NN, and hence it reports an RBW
 * replica for a COMPLETE block. Such a report should not be marked
 * corrupt.
 * This is a regression test for HDFS-2791.
 */
@Test(timeout = 300000)
public void testOneReplicaRbwReportArrivesAfterBlockCompleted() throws Exception {
  final CountDownLatch brFinished = new CountDownLatch(1);
  DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG) {
    @Override
    protected Object passThrough(InvocationOnMock invocation) throws Throwable {
      try {
        return super.passThrough(invocation);
      } finally {
        // inform the test that our block report went through.
        brFinished.countDown();
      }
    }
  };
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  // Start a second DN for this test -- we're checking
  // what happens when one of the DNs is slowed for some reason.
  REPL_FACTOR = 2;
  startDNandWait(null, false);
  NameNode nn = cluster.getNameNode();
  FSDataOutputStream out = fs.create(filePath, REPL_FACTOR);
  try {
    AppendTestUtil.write(out, 0, 10);
    out.hflush();
    // Set up a spy so that we can delay the block report coming
    // from this node.
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeProtocolClientSideTranslatorPB spy =
        InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
    Mockito.doAnswer(delayer).when(spy).blockReport(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.anyString(),
        Mockito.<StorageBlockReport[]>anyObject(),
        Mockito.<BlockReportContext>anyObject());
    // Force a block report to be generated. The block report will have
    // an RBW replica in it. Wait for the RPC to be sent, but block
    // it before it gets to the NN.
    dn.scheduleAllBlockReport(0);
    delayer.waitForCall();
  } finally {
    IOUtils.closeStream(out);
  }
  // Now that the stream is closed, the NN will have the block in COMPLETE
  // state.
  delayer.proceed();
  brFinished.await();
  // Verify that no replicas are marked corrupt, and that the
  // file is still readable.
  BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
  assertEquals(0, nn.getNamesystem().getCorruptReplicaBlocks());
  DFSTestUtil.readFile(fs, filePath);
  // Ensure that the file is readable even from the DN that we futzed with.
  cluster.stopDataNode(1);
  DFSTestUtil.readFile(fs, filePath);
}
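The delay is driven by GenericTestUtils.DelayAnswer, which parks a stubbed call until the test releases it. Below is a minimal sketch of that gating pattern, reduced to a hypothetical Reporter interface instead of the real DatanodeProtocol translator; note that the DelayAnswer constructor takes a commons-logging Log in Hadoop releases of this vintage and an slf4j Logger in later ones.

import java.util.concurrent.CountDownLatch;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;

// Hypothetical RPC interface, for illustration only.
interface Reporter {
  String report(String payload) throws Exception;
}

class DelayAnswerSketch {
  void demo(org.apache.commons.logging.Log log, Reporter real) throws Exception {
    final CountDownLatch finished = new CountDownLatch(1);
    Reporter spy = Mockito.spy(real);
    DelayAnswer delayer = new DelayAnswer(log) {
      @Override
      protected Object passThrough(InvocationOnMock invocation) throws Throwable {
        try {
          return super.passThrough(invocation); // run the real call
        } finally {
          finished.countDown();                 // signal that it completed
        }
      }
    };
    Mockito.doAnswer(delayer).when(spy).report(Mockito.anyString());
    // Some other thread calls spy.report(...); this thread then:
    delayer.waitForCall(); // returns once the call has been issued and parked
    // ...change cluster state while the call is frozen...
    delayer.proceed();     // release the parked call
    finished.await();      // wait for the real call to finish
  }
}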
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From the class TestBlockTokenWithDFS, method testAppend.
/**
 * Tests that the APPEND operation can handle token expiration when
 * the pipeline needs to be re-established.
 */
@Test
public void testAppend() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    final NameNode nn = cluster.getNameNode();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
    // set a short token lifetime (1 second)
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToAppend = new Path(FILE_TO_APPEND);
    FileSystem fs = cluster.getFileSystem();
    byte[] expected = generateBytes(FILE_SIZE);
    // write a one-byte file
    FSDataOutputStream stm = writeFile(fs, fileToAppend, (short) numDataNodes, BLOCK_SIZE);
    stm.write(expected, 0, 1);
    stm.close();
    // open the file again for append
    stm = fs.append(fileToAppend);
    int mid = expected.length - 1;
    stm.write(expected, 1, mid - 1);
    stm.hflush();
    /*
     * wait till the token used in stm expires
     */
    Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
    // remove a datanode to force re-establishing the pipeline
    cluster.stopDataNode(0);
    // append the rest of the file
    stm.write(expected, mid, expected.length - mid);
    stm.close();
    // check that the append was successful
    FSDataInputStream in5 = fs.open(fileToAppend);
    assertTrue(checkFile1(in5, expected));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
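The expiry wait above is an unbounded sleep loop. A bounded alternative, sketched here assuming the same token variable is in scope and effectively final, is GenericTestUtils.waitFor, which polls a condition and throws TimeoutException on failure; the Supplier type is Guava's in Hadoop of this era, while newer releases accept a java.util.function.Supplier lambda.

import java.io.IOException;
import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

// ...inside the test, replacing the while loop:
GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    try {
      return SecurityTestUtil.isBlockTokenExpired(token);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}, 10, 10000); // poll every 10 ms, give up after 10 seconds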
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From the class TestDataNodeMultipleRegistrations, method testFedSingleNN.
/**
 * Starts a single NN and a single DN, and verifies registration and handshake.
 *
 * @throws IOException
 */
@Test
public void testFedSingleNN() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nameNodePort(9927).build();
  try {
    NameNode nn1 = cluster.getNameNode();
    assertNotNull("cannot create nn1", nn1);
    String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
    String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
    int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
    LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1
        + ";uri=" + nn1.getNameNodeAddress());
    // check number of volumes in fsdataset
    DataNode dn = cluster.getDataNodes().get(0);
    final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
    Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
    int i = 0;
    for (Map.Entry<String, Object> e : volInfos.entrySet()) {
      LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
    }
    // number of volumes should be 2 - [data1, data2]
    assertEquals("number of volumes is wrong",
        cluster.getFsDatasetTestUtils(0).getDefaultNumOfDataDirs(), volInfos.size());
    for (BPOfferService bpos : dn.getAllBpOs()) {
      LOG.info("reg: bpid=" + bpos.getBlockPoolId() + "; name=" + bpos.bpRegistration
          + "; sid=" + bpos.bpRegistration.getDatanodeUuid()
          + "; nna=" + getNNSocketAddress(bpos));
    }
    // try block report
    BPOfferService bpos1 = dn.getAllBpOs().get(0);
    bpos1.triggerBlockReportForTests();
    assertEquals("wrong nn address", getNNSocketAddress(bpos1), nn1.getNameNodeAddress());
    assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
    assertEquals("wrong cid", dn.getClusterId(), cid1);
    cluster.shutdown();
    // Ensure all the BPOfferService threads are shutdown
    assertEquals(0, dn.getAllBpOs().size());
    cluster = null;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
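Every example on this page shares the same MiniDFSCluster lifecycle: build, waitActive, exercise, shutdown. A self-contained minimal sketch of that lifecycle follows; the class name and path are illustrative, not taken from the tests.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

class MiniClusterLifecycleSketch {
  void demo() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();                 // block until the NN and DN have registered
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/demo"));         // exercise the running cluster
    } finally {
      cluster.shutdown();                   // always tear the mini cluster down
    }
  }
}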
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From the class TestDataNodeMultipleRegistrations, method test2NNRegistration.
/**
 * Starts multiple NNs and a single DN, and verifies per-BP registrations and
 * handshakes.
 *
 * @throws IOException
 */
@Test
public void test2NNRegistration() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
  try {
    cluster.waitActive();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    assertNotNull("cannot create nn1", nn1);
    assertNotNull("cannot create nn2", nn2);
    String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
    String bpid2 = FSImageTestUtil.getFSImage(nn2).getBlockPoolID();
    String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
    String cid2 = FSImageTestUtil.getFSImage(nn2).getClusterID();
    int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
    int lv2 = FSImageTestUtil.getFSImage(nn2).getLayoutVersion();
    int ns1 = FSImageTestUtil.getFSImage(nn1).getNamespaceID();
    int ns2 = FSImageTestUtil.getFSImage(nn2).getNamespaceID();
    assertNotSame("namespace ids should be different", ns1, ns2);
    LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1
        + ";uri=" + nn1.getNameNodeAddress());
    LOG.info("nn2: lv=" + lv2 + ";cid=" + cid2 + ";bpid=" + bpid2
        + ";uri=" + nn2.getNameNodeAddress());
    // check number of volumes in fsdataset
    DataNode dn = cluster.getDataNodes().get(0);
    final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
    Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
    int i = 0;
    for (Map.Entry<String, Object> e : volInfos.entrySet()) {
      LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
    }
    // number of volumes should be 2 - [data1, data2]
    assertEquals("number of volumes is wrong",
        cluster.getFsDatasetTestUtils(0).getDefaultNumOfDataDirs(), volInfos.size());
    for (BPOfferService bpos : dn.getAllBpOs()) {
      LOG.info("BP: " + bpos);
    }
    BPOfferService bpos1 = dn.getAllBpOs().get(0);
    BPOfferService bpos2 = dn.getAllBpOs().get(1);
    // The order of bpos is not guaranteed, so fix the order
    if (getNNSocketAddress(bpos1).equals(nn2.getNameNodeAddress())) {
      BPOfferService tmp = bpos1;
      bpos1 = bpos2;
      bpos2 = tmp;
    }
    assertEquals("wrong nn address", getNNSocketAddress(bpos1), nn1.getNameNodeAddress());
    assertEquals("wrong nn address", getNNSocketAddress(bpos2), nn2.getNameNodeAddress());
    assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
    assertEquals("wrong bpid", bpos2.getBlockPoolId(), bpid2);
    assertEquals("wrong cid", dn.getClusterId(), cid1);
    assertEquals("cid should be same", cid2, cid1);
    assertEquals("namespace should be same", bpos1.bpNSInfo.namespaceID, ns1);
    assertEquals("namespace should be same", bpos2.bpNSInfo.namespaceID, ns2);
  } finally {
    cluster.shutdown();
  }
}
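A stripped-down sketch of the federated setup this test exercises, assuming the same conf, LOG, and static imports as the test above: simpleFederatedTopology(2) builds two independent namespaces that share one cluster ID, and each DataNode runs one BPOfferService per namespace.

MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
    .build();
try {
  cluster.waitActive();
  NameNode nn0 = cluster.getNameNode(0); // namespace 0
  NameNode nn1 = cluster.getNameNode(1); // namespace 1
  DataNode dn = cluster.getDataNodes().get(0);
  // One BPOfferService per block pool, i.e. two for this topology.
  assertEquals(2, dn.getAllBpOs().size());
} finally {
  cluster.shutdown();
}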
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From the class TestDnRespectsBlockReportSplitThreshold, method testAlwaysSplit.
/**
 * Test that if splitThreshold is zero, then we always get a separate
 * call per storage.
 */
@Test(timeout = 300000)
public void testAlwaysSplit() throws IOException, InterruptedException {
  startUpCluster(0);
  NameNode nn = cluster.getNameNode();
  DataNode dn = cluster.getDataNodes().get(0);
  // Create a file with a few blocks.
  createFile(GenericTestUtils.getMethodName(), BLOCKS_IN_FILE);
  // Insert a spy object for the NN RPC.
  DatanodeProtocolClientSideTranslatorPB nnSpy =
      InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
  // Trigger a block report so there is an interaction with the spy
  // object.
  DataNodeTestUtils.triggerBlockReport(dn);
  ArgumentCaptor<StorageBlockReport[]> captor =
      ArgumentCaptor.forClass(StorageBlockReport[].class);
  Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(
      any(DatanodeRegistration.class),
      anyString(),
      captor.capture(),
      Mockito.<BlockReportContext>anyObject());
  verifyCapturedArguments(captor, 1, BLOCKS_IN_FILE);
}
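The verification leans on Mockito's ArgumentCaptor to collect the payload of every per-storage blockReport() call. A self-contained sketch of that capture-and-verify pattern follows, with a hypothetical StorageReporter interface standing in for the real NN RPC translator.

import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;

// Hypothetical interface, for illustration only.
interface StorageReporter {
  void send(String storageId, long[] blocks);
}

class CaptorSketch {
  void demo() {
    StorageReporter mock = Mockito.mock(StorageReporter.class);
    mock.send("storage-0", new long[] { 1L });
    mock.send("storage-1", new long[] { 2L, 3L });

    ArgumentCaptor<long[]> captor = ArgumentCaptor.forClass(long[].class);
    // Expect exactly one send() per storage (two here), capturing each payload.
    Mockito.verify(mock, Mockito.times(2))
        .send(Mockito.anyString(), captor.capture());
    for (long[] blocks : captor.getAllValues()) {
      System.out.println("captured a report of " + blocks.length + " blocks");
    }
  }
}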