Use of org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf in project hadoop by apache.
The class MiniDFSCluster, method configureNameService.
/**
 * Do the rest of the NN configuration, such as shared edits and
 * directory formatting, for a single nameservice.
 * @param nnCounter the number of namenodes already configured/started; also
 * acts as the <i>index</i> of the next NN to start (since indices start at 0)
 * @throws IOException
 */
private void configureNameService(MiniDFSNNTopology.NSConf nameservice, int nsCounter,
    boolean manageNameDfsSharedDirs, boolean manageNameDfsDirs,
    boolean enableManagedDfsDirsRedundancy, boolean format, StartupOption operation,
    String clusterId, final int nnCounter) throws IOException {
  String nsId = nameservice.getId();
  String lastDefaultFileSystem = null;
  // If HA is enabled on this nameservice, enumerate all the namenodes
  // in the configuration. Also need to set a shared edits dir.
  int numNNs = nameservice.getNNs().size();
  if (numNNs > 1 && manageNameDfsSharedDirs) {
    URI sharedEditsUri = getSharedEditsDir(nnCounter, nnCounter + numNNs - 1);
    conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY, sharedEditsUri.toString());
    // Clean out the shared edits dir completely, including all subdirectories.
    FileUtil.fullyDelete(new File(sharedEditsUri));
  }
  // Now format the first NN and copy the storage directory from that node to the others.
  int nnIndex = nnCounter;
  Collection<URI> prevNNDirs = null;
  for (NNConf nn : nameservice.getNNs()) {
    initNameNodeConf(conf, nsId, nsCounter, nn.getNnId(), manageNameDfsDirs,
        manageNameDfsDirs, nnIndex);
    Collection<URI> namespaceDirs = FSNamesystem.getNamespaceDirs(conf);
    if (format) {
      // Delete the existing namespace directories.
      for (URI nameDirUri : namespaceDirs) {
        File nameDir = new File(nameDirUri);
        if (nameDir.exists() && !FileUtil.fullyDelete(nameDir)) {
          throw new IOException("Could not fully delete " + nameDir);
        }
      }
      // Delete the checkpoint directories, if they exist.
      Collection<URI> checkpointDirs = Util.stringCollectionAsURIs(
          conf.getTrimmedStringCollection(DFS_NAMENODE_CHECKPOINT_DIR_KEY));
      for (URI checkpointDirUri : checkpointDirs) {
        File checkpointDir = new File(checkpointDirUri);
        if (checkpointDir.exists() && !FileUtil.fullyDelete(checkpointDir)) {
          throw new IOException("Could not fully delete " + checkpointDir);
        }
      }
    }
    boolean formatThisOne = format;
    // If we are looking at an NN other than the first...
    if (nnIndex++ > nnCounter && format) {
      // Don't format the second, third, etc NN in an HA setup - that
      // would result in it having a different clusterID,
      // block pool ID, etc. Instead, copy the name dirs
      // from the previous one.
      formatThisOne = false;
      assert (null != prevNNDirs);
      copyNameDirs(prevNNDirs, namespaceDirs, conf);
    }
    if (formatThisOne) {
      // Allow overriding the clusterID for specific NNs to test
      // misconfiguration.
      if (nn.getClusterId() == null) {
        StartupOption.FORMAT.setClusterId(clusterId);
      } else {
        StartupOption.FORMAT.setClusterId(nn.getClusterId());
      }
      DFSTestUtil.formatNameNode(conf);
    }
    prevNNDirs = namespaceDirs;
  }
  // Create all the namenodes in the namespace.
  nnIndex = nnCounter;
  for (NNConf nn : nameservice.getNNs()) {
    Configuration hdfsConf = new Configuration(conf);
    initNameNodeConf(hdfsConf, nsId, nsCounter, nn.getNnId(), manageNameDfsDirs,
        enableManagedDfsDirsRedundancy, nnIndex++);
    createNameNode(hdfsConf, false, operation, clusterId, nsId, nn.getNnId());
    // Record the last namenode uri.
    lastDefaultFileSystem = hdfsConf.get(FS_DEFAULT_NAME_KEY);
  }
  if (!federation && lastDefaultFileSystem != null) {
    // Set the default file system to the actual bind address of NN.
    conf.set(FS_DEFAULT_NAME_KEY, lastDefaultFileSystem);
  }
}
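If it helps to see the call path end to end, here is a minimal, hedged sketch: a hand-built two-NN HA topology (the IDs "ns1", "nn1", and "nn2" are illustrative) that the MiniDFSCluster builder feeds through configureNameService, which formats only the first NN and copies its name dirs to the second. MiniDFSNNTopology.simpleHATopology() is the usual shortcut for this shape.

// A minimal sketch; the nameservice/NN IDs are illustrative.
MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
        .addNN(new MiniDFSNNTopology.NNConf("nn1"))
        .addNN(new MiniDFSNNTopology.NNConf("nn2")));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
    .nnTopology(topology)
    .numDataNodes(1)
    .build();
// Behind build(), configureNameService formats nn1 and copies its storage to
// nn2, so both NNs share one clusterID and block pool ID.
cluster.shutdown();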
Use of org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf in project hadoop by apache.
The class MiniDFSCluster, method configureNameNodes.
/**
 * Do the basic NN configuration for the topology. Does not configure
 * things like the shared edits directories.
 * @param nnTopology the namenode topology describing nameservices and NNs
 * @param federation whether the cluster is federated
 * @param conf the configuration to populate
 * @throws IOException
 */
public static void configureNameNodes(MiniDFSNNTopology nnTopology, boolean federation,
    Configuration conf) throws IOException {
  Preconditions.checkArgument(nnTopology.countNameNodes() > 0,
      "empty NN topology: no namenodes specified!");
  if (!federation && nnTopology.countNameNodes() == 1) {
    NNConf onlyNN = nnTopology.getOnlyNameNode();
    // We only have one NN; set DEFAULT_NAME for it. If not explicitly
    // specified initially, the port will be 0 to make NN bind to any
    // available port. It will be set to the right address after
    // NN is started.
    conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:" + onlyNN.getIpcPort());
  }
  List<String> allNsIds = Lists.newArrayList();
  for (MiniDFSNNTopology.NSConf nameservice : nnTopology.getNameservices()) {
    if (nameservice.getId() != null) {
      allNsIds.add(nameservice.getId());
    }
  }
  if (!allNsIds.isEmpty()) {
    conf.set(DFS_NAMESERVICES, Joiner.on(",").join(allNsIds));
  }
  for (MiniDFSNNTopology.NSConf nameservice : nnTopology.getNameservices()) {
    String nsId = nameservice.getId();
    Preconditions.checkArgument(!federation || nsId != null,
        "if there is more than one NS, they must have names");
    // First set up the configuration which all of the NNs
    // need to have - have to do this a priori before starting
    // *any* of the NNs, so they know to come up in standby.
    List<String> nnIds = Lists.newArrayList();
    // Iterate over the NNs in this nameservice.
    for (NNConf nn : nameservice.getNNs()) {
      nnIds.add(nn.getNnId());
      initNameNodeAddress(conf, nameservice.getId(), nn);
    }
    // If HA is enabled on this nameservice, enumerate all the namenodes
    // in the configuration.
    if (nnIds.size() > 1) {
      conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, nameservice.getId()),
          Joiner.on(",").join(nnIds));
    }
  }
}
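To make the resulting configuration concrete, here is a hedged sketch of the keys this method would populate for a federated topology with one HA and one non-HA nameservice (all IDs are illustrative assumptions):

// A minimal sketch; nameservice/NN IDs are illustrative.
Configuration conf = new Configuration();
MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .setFederation(true)
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
        .addNN(new MiniDFSNNTopology.NNConf("nn1"))
        .addNN(new MiniDFSNNTopology.NNConf("nn2")))
    .addNameservice(new MiniDFSNNTopology.NSConf("ns2")
        .addNN(new MiniDFSNNTopology.NNConf("nn3")));
MiniDFSCluster.configureNameNodes(topology, true, conf);
// Expected, per the logic above:
//   dfs.nameservices     = "ns1,ns2"
//   dfs.ha.namenodes.ns1 = "nn1,nn2"   (set only because ns1 has more than one NN)
// plus per-NN addresses written by initNameNodeAddress.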
Use of org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf in project hadoop by apache.
The class TestBalancerWithHANameNodes, method testBalancerWithHANameNodes.
/**
 * Test a cluster that starts with an even distribution and then has a new
 * empty node added to it. The test starts a cluster with the specified number
 * of nodes and fills it to 30% full (with a single file replicated identically
 * to all datanodes); it then adds one new empty node and starts balancing.
 */
@Test(timeout = 60000)
public void testBalancerWithHANameNodes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  TestBalancer.initConf(conf);
  // new node's capacity
  long newNodeCapacity = TestBalancer.CAPACITY;
  // new node's rack
  String newNodeRack = TestBalancer.RACK2;
  // array of racks for original nodes in cluster
  String[] racks = new String[] { TestBalancer.RACK0, TestBalancer.RACK1 };
  // array of capacities of original nodes in cluster
  long[] capacities = new long[] { TestBalancer.CAPACITY, TestBalancer.CAPACITY };
  assertEquals(capacities.length, racks.length);
  int numOfDatanodes = capacities.length;
  NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
  nn1Conf.setIpcPort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
  Configuration copiedConf = new Configuration(conf);
  cluster = new MiniDFSCluster.Builder(copiedConf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  HATestUtil.setFailoverConfigurations(cluster, conf);
  try {
    cluster.waitActive();
    cluster.transitionToActive(1);
    Thread.sleep(500);
    client = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
        ClientProtocol.class).getProxy();
    long totalCapacity = TestBalancer.sum(capacities);
    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, TestBalancer.filePath,
        totalUsedSpace / numOfDatanodes, (short) numOfDatanodes, 1);
    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[] { newNodeRack },
        new long[] { newNodeCapacity });
    totalCapacity += newNodeCapacity;
    TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
    Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    assertEquals(1, namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
    final int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client, cluster,
        BalancerParameters.DEFAULT);
  } finally {
    cluster.shutdown();
  }
}
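The HA-specific detail here is that the balancer is handed the single logical nameservice URI rather than a physical NameNode address. For reference, a hedged sketch of the client-side failover settings that HATestUtil.setFailoverConfigurations is expected to leave in conf; the key names are real HDFS configuration keys, but the "minidfs-ns" logical name (MiniDFSCluster's default for simpleHATopology()) and the ports are assumptions:

// A hedged sketch; "minidfs-ns" and the ports are assumptions.
Configuration conf = new Configuration();
conf.set("dfs.nameservices", "minidfs-ns");
conf.set("dfs.ha.namenodes.minidfs-ns", "nn1,nn2");
conf.set("dfs.namenode.rpc-address.minidfs-ns.nn1", "127.0.0.1:10001"); // assumed port
conf.set("dfs.namenode.rpc-address.minidfs-ns.nn2", "127.0.0.1:10002"); // assumed port
conf.set("dfs.client.failover.proxy.provider.minidfs-ns",
    "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
conf.set("fs.defaultFS", "hdfs://minidfs-ns");
// With these set, DFSUtil.getInternalNsRpcUris(conf) resolves to the one
// logical URI hdfs://minidfs-ns, so Balancer.run talks to whichever NN is active.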
Use of org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf in project hadoop by apache.
The class MiniDFSCluster, method addNameNode.
/**
 * Add a namenode to a federated cluster and start it. Configuration of
 * datanodes in the cluster is refreshed to register with the new namenode.
 */
public void addNameNode(Configuration conf, int namenodePort) throws IOException {
  if (!federation) {
    throw new IOException("cannot add namenode to non-federated cluster");
  }
  int nameServiceIndex = namenodes.keys().size();
  String nameserviceId = NAMESERVICE_ID_PREFIX + (namenodes.keys().size() + 1);
  String nameserviceIds = conf.get(DFS_NAMESERVICES);
  nameserviceIds += "," + nameserviceId;
  conf.set(DFS_NAMESERVICES, nameserviceIds);
  String nnId = null;
  initNameNodeAddress(conf, nameserviceId, new NNConf(nnId).setIpcPort(namenodePort));
  // figure out the current number of NNs
  NameNodeInfo[] infos = this.getNameNodeInfos(nameserviceId);
  int nnIndex = infos == null ? 0 : infos.length;
  initNameNodeConf(conf, nameserviceId, nameServiceIndex, nnId, true, true, nnIndex);
  createNameNode(conf, true, null, null, nameserviceId, nnId);
  // Refresh datanodes with the newly started namenode
  for (DataNodeProperties dn : dataNodes) {
    DataNode datanode = dn.datanode;
    datanode.refreshNamenodes(conf);
  }
  // Wait for new namenode to get registrations from all the datanodes
  waitActive(nnIndex);
}
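A hedged usage sketch, assuming an initial single-NN federated cluster and illustrative port numbers; TestRefreshNamenodes below exercises the same path with real test ports:

// A minimal sketch; ports 20001/20002 are illustrative.
Configuration conf = new Configuration();
MiniDFSNNTopology topology = new MiniDFSNNTopology()
    .setFederation(true)
    .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
        .addNN(new MiniDFSNNTopology.NNConf(null).setIpcPort(20001)));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .build();
// Appends a generated nameservice ID to dfs.nameservices, starts the new NN,
// and refreshes every datanode so it registers with the new block pool.
cluster.addNameNode(conf, 20002);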
Use of org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf in project hadoop by apache.
The class TestRefreshNamenodes, method testRefreshNamenodes.
@Test
public void testRefreshNamenodes() throws IOException {
  // Start cluster with a single NN and DN
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    MiniDFSNNTopology topology = new MiniDFSNNTopology()
        .addNameservice(new NSConf("ns1")
            .addNN(new NNConf(null).setIpcPort(nnPort1)))
        .setFederation(true);
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology).build();
    DataNode dn = cluster.getDataNodes().get(0);
    assertEquals(1, dn.getAllBpOs().size());
    cluster.addNameNode(conf, nnPort2);
    assertEquals(2, dn.getAllBpOs().size());
    cluster.addNameNode(conf, nnPort3);
    assertEquals(3, dn.getAllBpOs().size());
    cluster.addNameNode(conf, nnPort4);
    // Ensure a BPOfferService in the datanodes corresponds to
    // a namenode in the cluster
    Set<InetSocketAddress> nnAddrsFromCluster = Sets.newHashSet();
    for (int i = 0; i < 4; i++) {
      assertTrue(nnAddrsFromCluster.add(cluster.getNameNode(i).getNameNodeAddress()));
    }
    Set<InetSocketAddress> nnAddrsFromDN = Sets.newHashSet();
    for (BPOfferService bpos : dn.getAllBpOs()) {
      for (BPServiceActor bpsa : bpos.getBPServiceActors()) {
        assertTrue(nnAddrsFromDN.add(bpsa.getNNSocketAddress()));
      }
    }
    assertEquals("", Joiner.on(",")
        .join(Sets.symmetricDifference(nnAddrsFromCluster, nnAddrsFromDN)));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
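One note on the final assertion: joining the symmetric difference of the two sets and comparing against the empty string makes a failure print exactly the addresses present on one side but not the other. A plain set comparison would have equivalent pass/fail behavior but a less readable failure message:

// Equivalent outcome, but on failure JUnit dumps both full sets instead of
// just the mismatched addresses.
assertEquals(nnAddrsFromCluster, nnAddrsFromDN);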