Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hadoop by apache.
The class TestStandbyCheckpoints, method setupCluster:
@SuppressWarnings("rawtypes")
@Before
public void setupCluster() throws Exception {
  Configuration conf = setupCommonConfig();
  // Dial down the retention of extra edits and checkpoints. This is to
  // help catch regressions of HDFS-4238 (SBN should not purge shared edits)
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
  int retryCount = 0;
  while (true) {
    try {
      int basePort = 10060 + random.nextInt(100) * 2;
      MiniDFSNNTopology topology = new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1))
              .addNN(new MiniDFSNNTopology.NNConf("nn3").setHttpPort(basePort + 2)));
      cluster = new MiniDFSCluster.Builder(conf)
          .nnTopology(topology)
          .numDataNodes(1)
          .build();
      cluster.waitActive();
      setNNs();
      fs = HATestUtil.configureFailoverFs(cluster, conf);
      cluster.transitionToActive(0);
      ++retryCount;
      break;
    } catch (BindException e) {
      LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
          + retryCount + " times");
    }
  }
}
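When a test does not need fixed HTTP ports, the hand-rolled topology and the retry-on-BindException loop above can be avoided: leaving NNConf ports unset makes MiniDFSCluster bind free ephemeral ports. A minimal sketch, assuming only a default Configuration in place of this test's setupCommonConfig():

// Sketch: simpleHATopology() builds one nameservice ("ns1") with two
// NameNodes; unset ports mean the cluster picks free ephemeral ports,
// so no retry loop is required.
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
cluster.waitActive();
cluster.transitionToActive(0);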
Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hadoop by apache.
The class TestDataNodeMultipleRegistrations, method testClusterIdMismatchAtStartupWithHA:
@Test(timeout = 20000)
public void testClusterIdMismatchAtStartupWithHA() throws Exception {
  MiniDFSNNTopology top = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn0"))
          .addNN(new MiniDFSNNTopology.NNConf("nn1")))
      .addNameservice(new MiniDFSNNTopology.NSConf("ns2")
          .addNN(new MiniDFSNNTopology.NNConf("nn2").setClusterId("bad-cid"))
          .addNN(new MiniDFSNNTopology.NNConf("nn3").setClusterId("bad-cid")));
  top.setFederation(true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(top)
      .numDataNodes(0)
      .build();
  try {
    cluster.startDataNodes(conf, 1, true, null, null);
    // let the initialization be complete
    Thread.sleep(10000);
    DataNode dn = cluster.getDataNodes().get(0);
    assertTrue("Datanode should be running", dn.isDatanodeUp());
    assertEquals("Only one BPOfferService should be running", 1,
        dn.getAllBpOs().size());
  } finally {
    cluster.shutdown();
  }
}
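When per-NameNode settings such as setClusterId(...) are not needed, the same two-nameservice, two-NN-each layout is available from a factory method. A sketch, assuming MiniDFSNNTopology.simpleHAFederatedTopology(int) is available in your Hadoop version:

// Sketch of the equivalent federated-HA layout without per-NN cluster IDs;
// the factory already marks the topology as federated, so no explicit
// top.setFederation(true) call is needed.
MiniDFSNNTopology top = MiniDFSNNTopology.simpleHAFederatedTopology(2);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(top)
    .numDataNodes(0)
    .build();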
Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hadoop by apache.
The class TestDataNodeMultipleRegistrations, method testDNWithInvalidStorageWithHA:
@Test
public void testDNWithInvalidStorageWithHA() throws Exception {
  MiniDFSNNTopology top = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn0").setClusterId("cluster-1"))
          .addNN(new MiniDFSNNTopology.NNConf("nn1").setClusterId("cluster-1")));
  top.setFederation(true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(top)
      .numDataNodes(0)
      .build();
  try {
    cluster.startDataNodes(conf, 1, true, null, null);
    // let the initialization be complete
    Thread.sleep(10000);
    DataNode dn = cluster.getDataNodes().get(0);
    assertTrue("Datanode should be running", dn.isDatanodeUp());
    assertEquals("BPOfferService should be running", 1, dn.getAllBpOs().size());
    DataNodeProperties dnProp = cluster.stopDataNode(0);
    cluster.getNameNode(0).stop();
    cluster.getNameNode(1).stop();
    Configuration nn1 = cluster.getConfiguration(0);
    Configuration nn2 = cluster.getConfiguration(1);
    // setting up invalid cluster
    StartupOption.FORMAT.setClusterId("cluster-2");
    DFSTestUtil.formatNameNode(nn1);
    MiniDFSCluster.copyNameDirs(FSNamesystem.getNamespaceDirs(nn1),
        FSNamesystem.getNamespaceDirs(nn2), nn2);
    cluster.restartNameNode(0, false);
    cluster.restartNameNode(1, false);
    cluster.restartDataNode(dnProp);
    // let the initialization be complete
    Thread.sleep(10000);
    dn = cluster.getDataNodes().get(0);
    assertFalse("Datanode should have shutdown as only service failed",
        dn.isDatanodeUp());
  } finally {
    cluster.shutdown();
  }
}
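The fixed Thread.sleep(10000) waits in these two tests are a common source of flakiness. A sketch of a polling alternative using Hadoop's GenericTestUtils.waitFor (note: depending on the Hadoop version, the Supplier argument is Guava's or java.util.function's):

// Sketch: poll every 500 ms for up to 30 s instead of sleeping a fixed 10 s.
// A separate final local avoids lambda-capture issues with the reassigned dn.
final DataNode node = cluster.getDataNodes().get(0);
GenericTestUtils.waitFor(
    () -> node.isDatanodeUp() && node.getAllBpOs().size() == 1,
    500, 30000);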
Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hadoop by apache.
The class TestBootstrapStandby, method setupCluster:
@Before
public void setupCluster() throws IOException {
  Configuration conf = new Configuration();
  // duplicate code with MiniQJMHACluster#createDefaultTopology, but don't want to cross
  // dependencies or munge too much code to support it all correctly
  MiniDFSNNTopology.NSConf nameservice = new MiniDFSNNTopology.NSConf("ns1");
  for (int i = 0; i < maxNNCount; i++) {
    nameservice.addNN(new MiniDFSNNTopology.NNConf("nn" + i)
        .setHttpPort(STARTING_PORT + i + 1));
  }
  MiniDFSNNTopology topology = new MiniDFSNNTopology().addNameservice(nameservice);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .build();
  cluster.waitActive();
  nn0 = cluster.getNameNode(0);
  cluster.transitionToActive(0);
  // shutdown the other NNs
  for (int i = 1; i < maxNNCount; i++) {
    cluster.shutdownNameNode(i);
  }
}
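The NameNodes shut down here are what the test cases in this class later re-bootstrap from the active. A sketch of that step, assuming BootstrapStandby's static run(String[], Configuration) entry point:

// Sketch: bootstrap one of the NameNodes shut down above from the active,
// then restart it. rc == 0 means the standby copied the namespace.
int rc = BootstrapStandby.run(new String[] { "-nonInteractive" },
    cluster.getConfiguration(1));
assertEquals("bootstrapStandby should succeed", 0, rc);
cluster.restartNameNode(1, false);  // restart nn1 without waiting for active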
Use of org.apache.hadoop.hdfs.MiniDFSNNTopology in project hive by apache.
The class Hadoop23Shims, method getMiniDfs:
// Don't move this code to the parent class. There's a binary
// incompatibility between hadoop 1 and 2 wrt MiniDFSCluster and we
// need to have two different shim classes even though they are
// exactly the same.
@Override
public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf, int numDataNodes,
    boolean format, String[] racks, boolean isHA) throws IOException {
  configureImpersonation(conf);
  MiniDFSCluster miniDFSCluster;
  if (isHA) {
    MiniDFSNNTopology topo = new MiniDFSNNTopology()
        .addNameservice(new MiniDFSNNTopology.NSConf("minidfs")
            .addNN(new MiniDFSNNTopology.NNConf("nn1"))
            .addNN(new MiniDFSNNTopology.NNConf("nn2")));
    miniDFSCluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDataNodes)
        .format(format)
        .racks(racks)
        .nnTopology(topo)
        .build();
    miniDFSCluster.waitActive();
    miniDFSCluster.transitionToActive(0);
  } else {
    miniDFSCluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDataNodes)
        .format(format)
        .racks(racks)
        .build();
  }
  // Need to set the client's KeyProvider to the NN's for JKS,
  // else the updates do not get flushed properly
  KeyProviderCryptoExtension keyProvider =
      miniDFSCluster.getNameNode(0).getNamesystem().getProvider();
  if (keyProvider != null) {
    try {
      setKeyProvider(miniDFSCluster.getFileSystem(0).getClient(), keyProvider);
    } catch (Exception err) {
      throw new IOException(err);
    }
  }
  cluster = new MiniDFSShim(miniDFSCluster);
  return cluster;
}
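A sketch of how a caller might drive this shim, assuming ShimLoader.getHadoopShims() and the getFileSystem()/shutdown() accessors declared on Hive's HadoopShims.MiniDFSShim:

// Sketch: spin up an HA mini-DFS through the shim, use it, tear it down.
Configuration conf = new Configuration();
HadoopShims.MiniDFSShim dfs =
    ShimLoader.getHadoopShims().getMiniDfs(conf, 1, true, null, true);
try {
  FileSystem fs = dfs.getFileSystem();
  fs.mkdirs(new Path("/tmp/shim-test"));
} finally {
  dfs.shutdown();
}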