Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hadoop by apache.
The class TestNameNodeMetrics, method testTransactionSinceLastCheckpointMetrics.
/**
 * Testing TransactionsSinceLastCheckpoint. Need a new cluster as
 * the other tests in here don't use HA. See HDFS-7501.
 */
@Test(timeout = 300000)
public void testTransactionSinceLastCheckpointMetrics() throws Exception {
  Random random = new Random();
  int retryCount = 0;
  while (retryCount < 5) {
    try {
      int basePort = 10060 + random.nextInt(100) * 2;
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
      HdfsConfiguration conf2 = new HdfsConfiguration();
      // Lower the checkpoint condition for purpose of testing.
      conf2.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 100);
      // Check for checkpoint condition very often, for purpose of testing.
      conf2.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
      // Poll and follow ANN txns very often, for purpose of testing.
      conf2.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
      MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf2)
          .nnTopology(topology).numDataNodes(1).build();
      cluster2.waitActive();
      DistributedFileSystem fs2 = cluster2.getFileSystem(0);
      NameNode nn0 = cluster2.getNameNode(0);
      NameNode nn1 = cluster2.getNameNode(1);
      cluster2.transitionToActive(0);
      fs2.mkdirs(new Path("/tmp-t1"));
      fs2.mkdirs(new Path("/tmp-t2"));
      HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
      // Test to ensure tracking works before the first-ever checkpoint.
      // 2 txns are added further when catch-up is called.
      assertEquals("SBN failed to track 2 transactions pre-checkpoint.", 4L,
          cluster2.getNameNode(1).getNamesystem()
              .getTransactionsSinceLastCheckpoint());
      // Rounded at 100, as 4 + 94 + 2 (catch-up call) = 100.
      for (int i = 1; i <= 94; i++) {
        fs2.mkdirs(new Path("/tmp-" + i));
      }
      HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
      // Assert 100 transactions in checkpoint.
      HATestUtil.waitForCheckpoint(cluster2, 1, ImmutableList.of(100));
      // Test to ensure the number tracks the right state of
      // uncheckpointed edits, and does not go negative
      // (as fixed in HDFS-7501).
      assertEquals("Should be zero right after the checkpoint.", 0L,
          cluster2.getNameNode(1).getNamesystem()
              .getTransactionsSinceLastCheckpoint());
      fs2.mkdirs(new Path("/tmp-t3"));
      fs2.mkdirs(new Path("/tmp-t4"));
      HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
      // Test to ensure we track the right numbers after
      // the checkpoint resets it to zero again.
      assertEquals("SBN failed to track 2 added txns after the ckpt.", 4L,
          cluster2.getNameNode(1).getNamesystem()
              .getTransactionsSinceLastCheckpoint());
      cluster2.shutdown();
      break;
    } catch (Exception e) {
      LOG.warn("Unable to set up HA cluster, exception thrown: " + e);
      retryCount++;
    }
  }
}
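Building the topology by hand, as above, is only needed to pin the HTTP ports for the retry loop. For tests that can live with automatically chosen ports, a minimal sketch using the stock MiniDFSNNTopology.simpleHATopology() factory (which produces a one-nameservice, two-NameNode layout with ports left unset so the cluster picks free ones) would look like this:

// Sketch: same two-NN HA shape without hand-picked ports.
MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
    .nnTopology(topology)
    .numDataNodes(1)
    .build();
cluster.waitActive();
// Neither NN starts active; the test must promote one, as above.
cluster.transitionToActive(0);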
Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hadoop by apache.
The class TestStandbyCheckpoints, method setupCluster.
@SuppressWarnings("rawtypes")
@Before
public void setupCluster() throws Exception {
  Configuration conf = setupCommonConfig();
  // Dial down the retention of extra edits and checkpoints. This is to
  // help catch regressions of HDFS-4238 (SBN should not purge shared edits).
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
  int retryCount = 0;
  while (true) {
    try {
      int basePort = 10060 + random.nextInt(100) * 2;
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1))
              .addNN(new MiniDFSNNTopology.NNConf("nn3").setHttpPort(basePort + 2)));
      cluster = new MiniDFSCluster.Builder(conf)
          .nnTopology(topology).numDataNodes(1).build();
      cluster.waitActive();
      setNNs();
      fs = HATestUtil.configureFailoverFs(cluster, conf);
      cluster.transitionToActive(0);
      ++retryCount;
      break;
    } catch (BindException e) {
      LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
          + retryCount + " times");
    }
  }
}
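The random basePort in the 10060-10258 range keeps parallel runs from colliding most of the time, but the BindException retry loop is still needed because nothing actually reserves the ports. An illustrative alternative (a sketch, not code from the Hadoop test; findFreePort is a hypothetical helper) is to probe the OS for ephemeral ports before building the topology:

// Illustrative helper (imports: java.io.IOException, java.net.ServerSocket).
// Note there is still a small race window between closing the probe socket
// and the NameNode binding the port, so keeping a retry loop around cluster
// startup remains a sensible belt-and-braces measure.
static int findFreePort() throws IOException {
  try (ServerSocket socket = new ServerSocket(0)) {
    return socket.getLocalPort();
  }
}
// ...then, when building the topology:
// .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(findFreePort()))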
Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hadoop by apache.
The class TestDataNodeMultipleRegistrations, method testClusterIdMismatchAtStartupWithHA.
@Test(timeout = 20000)
public void testClusterIdMismatchAtStartupWithHA() throws Exception {
  MiniDFSNNTopology top = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn0"))
          .addNN(new MiniDFSNNTopology.NNConf("nn1")))
      .addNameservice(new MiniDFSNNTopology.NSConf("ns2")
          .addNN(new MiniDFSNNTopology.NNConf("nn2").setClusterId("bad-cid"))
          .addNN(new MiniDFSNNTopology.NNConf("nn3").setClusterId("bad-cid")));
  top.setFederation(true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(top).numDataNodes(0).build();
  try {
    cluster.startDataNodes(conf, 1, true, null, null);
    // Let the initialization complete.
    Thread.sleep(10000);
    DataNode dn = cluster.getDataNodes().get(0);
    assertTrue("Datanode should be running", dn.isDatanodeUp());
    assertEquals("Only one BPOfferService should be running", 1,
        dn.getAllBpOs().size());
  } finally {
    cluster.shutdown();
  }
}
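The topology is built by hand here because NNConf.setClusterId lets the second nameservice deliberately advertise a mismatched cluster ID. For an ordinary federated layout without bad IDs, a sketch using the simpleFederatedTopology factory in MiniDFSNNTopology (which, as far as we know, creates one NameNode per nameservice rather than the HA pairs used above) is shorter:

// Sketch: two healthy nameservices, federation flag already set by the factory.
MiniDFSNNTopology top = MiniDFSNNTopology.simpleFederatedTopology(2);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(top)
    .numDataNodes(0)
    .build();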
Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hadoop by apache.
The class TestDataNodeMultipleRegistrations, method testDNWithInvalidStorageWithHA.
@Test
public void testDNWithInvalidStorageWithHA() throws Exception {
  MiniDFSNNTopology top = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn0").setClusterId("cluster-1"))
          .addNN(new MiniDFSNNTopology.NNConf("nn1").setClusterId("cluster-1")));
  top.setFederation(true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(top).numDataNodes(0).build();
  try {
    cluster.startDataNodes(conf, 1, true, null, null);
    // Let the initialization complete.
    Thread.sleep(10000);
    DataNode dn = cluster.getDataNodes().get(0);
    assertTrue("Datanode should be running", dn.isDatanodeUp());
    assertEquals("BPOfferService should be running", 1, dn.getAllBpOs().size());
    DataNodeProperties dnProp = cluster.stopDataNode(0);
    cluster.getNameNode(0).stop();
    cluster.getNameNode(1).stop();
    Configuration nn1 = cluster.getConfiguration(0);
    Configuration nn2 = cluster.getConfiguration(1);
    // Set up an invalid cluster by reformatting nn0 with a new cluster ID.
    StartupOption.FORMAT.setClusterId("cluster-2");
    DFSTestUtil.formatNameNode(nn1);
    MiniDFSCluster.copyNameDirs(FSNamesystem.getNamespaceDirs(nn1),
        FSNamesystem.getNamespaceDirs(nn2), nn2);
    cluster.restartNameNode(0, false);
    cluster.restartNameNode(1, false);
    cluster.restartDataNode(dnProp);
    // Let the initialization complete.
    Thread.sleep(10000);
    dn = cluster.getDataNodes().get(0);
    assertFalse("Datanode should have shutdown as only service failed",
        dn.isDatanodeUp());
  } finally {
    cluster.shutdown();
  }
}
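The two fixed Thread.sleep(10000) waits put a 20-second floor on this test and can still race on slow machines. A sketch of a polling alternative for the final assertion, assuming org.apache.hadoop.test.GenericTestUtils and Guava's Supplier are on the test classpath (the cluster variable must not be reassigned so the anonymous class can capture it):

GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    // The DN should shut itself down once its only block-pool service fails.
    return !cluster.getDataNodes().get(0).isDatanodeUp();
  }
}, 500, 30000);  // poll every 500 ms, give up after 30 s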
Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hadoop by apache.
The class TestBootstrapStandby, method setupCluster.
@Before
public void setupCluster() throws IOException {
  Configuration conf = new Configuration();
  // This duplicates code in MiniQJMHACluster#createDefaultTopology, but we
  // don't want to cross dependencies or munge too much code to support it
  // all correctly.
  MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf("ns1");
  for (int i = 0; i < maxNNCount; i++) {
    nameservice.addNN(new MiniDFSNNTopology.NNConf("nn" + i)
        .setHttpPort(STARTING_PORT + i + 1));
  }
  MiniDFSNNTopology topology = new MiniDFSNNTopology().addNameservice(nameservice);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology).numDataNodes(0).build();
  cluster.waitActive();
  nn0 = cluster.getNameNode(0);
  cluster.transitionToActive(0);
  // Shut down the other NNs.
  for (int i = 1; i < maxNNCount; i++) {
    cluster.shutdownNameNode(i);
  }
}
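With the standbys shut down, each test case can then exercise the bootstrapStandby flow against the active nn0. A minimal sketch of such a case, assuming the BootstrapStandby.run(String[], Configuration) entry point used by the HA test suite ("-force" overwrites any existing storage directories):

// Re-initialize NN1's storage from the active NN0, then bring it back up.
int rc = BootstrapStandby.run(new String[] { "-force" },
    cluster.getConfiguration(1));
assertEquals("bootstrapStandby should succeed", 0, rc);
cluster.restartNameNode(1, false);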