Example usage of org.apache.hadoop.hdfs.MiniDFSNNTopology from the Apache Hadoop project:
the setUpCluster method of the TestFailureToReadEdits class.
@Before
public void setUpCluster() throws Exception {
conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 10);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
HAUtil.setAllowStandbyReads(conf, true);
if (clusterType == TestType.SHARED_DIR_HA) {
int basePort = 10000;
int retryCount = 0;
while (true) {
try {
basePort = 10000 + RANDOM.nextInt(1000) * 4;
LOG.info("Set SHARED_DIR_HA cluster's basePort to " + basePort);
MiniDFSNNTopology topology = MiniQJMHACluster.createDefaultTopology(basePort);
cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology).numDataNodes(0).checkExitOnShutdown(false).build();
break;
} catch (BindException e) {
if (cluster != null) {
cluster.shutdown(true);
cluster = null;
}
++retryCount;
LOG.info("SHARED_DIR_HA: MiniQJMHACluster port conflicts, retried " + retryCount + " times " + e);
}
}
} else {
Builder builder = new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0).checkExitOnShutdown(false);
miniQjmHaCluster = builder.build();
cluster = miniQjmHaCluster.getDfsCluster();
}
cluster.waitActive();
nn0 = cluster.getNameNode(0);
nn1 = cluster.getNameNode(1);
cluster.transitionToActive(0);
fs = HATestUtil.configureFailoverFs(cluster, conf);
}
Example usage of org.apache.hadoop.hdfs.MiniDFSNNTopology from the Apache Hadoop project:
the setupCluster method of the TestInitializeSharedEdits class.
/**
 * Starts a simple two-NameNode HA mini-cluster (no DataNodes) with fast
 * log-roll/tail intervals and standby reads enabled, then shuts it down and
 * removes the shared edits directory so each test begins from a clean state.
 */
@Before
public void setupCluster() throws IOException {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0)
      .build();
  cluster.waitActive();
  shutdownClusterAndRemoveSharedEditsDir();
}
Example usage of org.apache.hadoop.hdfs.MiniDFSNNTopology from the Apache Hadoop project:
the testParseMultipleNameNodes method of the TestRemoteNameNodeInfo class.
/**
 * Verifies that {@code RemoteNameNodeInfo.getRemoteNameNodes} resolves the
 * same set of remote NameNodes whether or not the nameservice is supplied
 * explicitly, for a topology with three NameNodes in a single nameservice.
 */
@Test
public void testParseMultipleNameNodes() throws Exception {
  // Start from an empty configuration (no defaults loaded).
  Configuration conf = new Configuration(false);
  final String nameservice = "ns1";
  // Build a single nameservice containing three NameNodes on distinct ports.
  MiniDFSNNTopology.NSConf nsConf = new MiniDFSNNTopology.NSConf(nameservice)
      .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
      .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002))
      .addNN(new MiniDFSNNTopology.NNConf("nn3").setIpcPort(10003));
  MiniDFSNNTopology topology = new MiniDFSNNTopology().addNameservice(nsConf);
  // Write the NN settings into the conf so they can be parsed back out.
  MiniDFSCluster.configureNameNodes(topology, false, conf);
  // Designate nn1 as the "local" NameNode.
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
  List<RemoteNameNodeInfo> nns = RemoteNameNodeInfo.getRemoteNameNodes(conf);
  // Passing the nameservice explicitly must yield the same remote NNs.
  List<RemoteNameNodeInfo> nns2 =
      RemoteNameNodeInfo.getRemoteNameNodes(conf, nameservice);
  assertEquals(nns, nns2);
}
Aggregations