use of org.apache.hadoop.hdfs.server.namenode.NNStorage in project hadoop by apache.
the class UpgradeUtilities method createNameNodeVersionFile.
/**
 * Create a <code>version</code> file for namenode inside the specified parent
 * directory. If such a file already exists, it will be overwritten.
 * The given version string will be written to the file as the layout
 * version. None of the parameters may be null.
 *
 * @param parent directory where namenode VERSION file is stored
 * @param version StorageInfo to create VERSION file from
 * @param bpid Block pool Id
 *
 * @return the created version files
 */
public static File[] createNameNodeVersionFile(Configuration conf, File[] parent,
    StorageInfo version, String bpid) throws IOException {
  Storage storage = new NNStorage(conf, Collections.<URI>emptyList(),
      Collections.<URI>emptyList());
  storage.setStorageInfo(version);
  File[] versionFiles = new File[parent.length];
  for (int i = 0; i < parent.length; i++) {
    versionFiles[i] = new File(parent[i], "VERSION");
    StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
    storage.writeProperties(versionFiles[i], sd);
  }
  return versionFiles;
}
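A minimal usage sketch for this helper, assuming a running MiniDFSCluster from which the StorageInfo and block pool ID are taken (NNStorage itself extends StorageInfo); the parent paths below are hypothetical test directories, not part of the original snippet:

  // hypothetical storage "current" directories that should receive VERSION files
  File[] parents = { new File("/tmp/name1/current"), new File("/tmp/name2/current") };
  NNStorage nnStorage = cluster.getNameNode().getFSImage().getStorage();
  File[] versionFiles = UpgradeUtilities.createNameNodeVersionFile(
      conf, parents, nnStorage, nnStorage.getBlockPoolID());
  for (File v : versionFiles) {
    Assert.assertTrue(v.getPath() + " was not written", v.exists());
  }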
use of org.apache.hadoop.hdfs.server.namenode.NNStorage in project hadoop by apache.
the class TestRollingUpgrade method verifyNNCheckpoint.
/**
 * Verify that the namenode at the given index has an FSImage with a TxId up to txid-1
 */
private void verifyNNCheckpoint(MiniDFSCluster dfsCluster, long txid, int nnIndex)
    throws InterruptedException {
  int retries = 0;
  while (++retries < 5) {
    NNStorage storage = dfsCluster.getNamesystem(nnIndex).getFSImage().getStorage();
    if (storage.getFsImageName(txid - 1) != null) {
      return;
    }
    Thread.sleep(1000);
  }
  Assert.fail("new checkpoint does not exist");
}
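A hedged usage sketch, assuming an HA MiniDFSCluster in which namenode 0 is active and namenode 1 is expected to have produced a checkpoint; the way the transaction id is obtained here is illustrative, not taken from the original test:

  long txid = dfsCluster.getNamesystem(0).getFSImage().getLastAppliedOrWrittenTxId();
  // assert that nn1 has persisted an fsimage covering transactions up to txid - 1
  verifyNNCheckpoint(dfsCluster, txid, 1);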
use of org.apache.hadoop.hdfs.server.namenode.NNStorage in project hadoop by apache.
the class TestRollingUpgradeDowngrade method testRejectNewFsImage.
/**
 * Ensure that restarting the namenode with the downgrade option throws an
 * exception, because that startup option has become obsolete.
 */
@Test(expected = IllegalArgumentException.class)
public void testRejectNewFsImage() throws IOException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    NNStorage storage = spy(cluster.getNameNode().getFSImage().getStorage());
    int futureVersion = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1;
    doReturn(futureVersion).when(storage).getServiceLayoutVersion();
    storage.writeAll();
    cluster.restartNameNode(0, true, "-rollingUpgrade", "downgrade");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
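NameNode layout versions are negative and decrease as the layout evolves, so CURRENT_LAYOUT_VERSION - 1 stands in for an fsimage written by a newer release. A small sketch, under that assumption, of locating the VERSION file that writeAll() rewrote; the use of storage directory index 0 is illustrative:

  NNStorage onDisk = cluster.getNameNode().getFSImage().getStorage();
  // each storage directory's VERSION file now advertises the spoofed layout,
  // which the subsequent "-rollingUpgrade downgrade" restart rejects
  File versionFile = onDisk.getStorageDir(0).getVersionFile();
  Assert.assertTrue(versionFile.exists());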
use of org.apache.hadoop.hdfs.server.namenode.NNStorage in project hadoop by apache.
the class BootstrapStandby method doRun.
private int doRun() throws IOException {
  // find the active NN
  NamenodeProtocol proxy = null;
  NamespaceInfo nsInfo = null;
  boolean isUpgradeFinalized = false;
  RemoteNameNodeInfo proxyInfo = null;
  for (int i = 0; i < remoteNNs.size(); i++) {
    proxyInfo = remoteNNs.get(i);
    InetSocketAddress otherIpcAddress = proxyInfo.getIpcAddress();
    proxy = createNNProtocolProxy(otherIpcAddress);
    try {
      // Get the namespace from any active NN. If you just formatted the primary NN and are
      // bootstrapping the other NNs from that layout, it will only contact the single NN.
      // However, if the cluster is already running and you are adding a NN later (e.g.
      // replacing a failed NN), then this will bootstrap from any node in the cluster.
      nsInfo = proxy.versionRequest();
      isUpgradeFinalized = proxy.isUpgradeFinalized();
      break;
    } catch (IOException ioe) {
      LOG.warn("Unable to fetch namespace information from remote NN at "
          + otherIpcAddress + ": " + ioe.getMessage());
      if (LOG.isDebugEnabled()) {
        LOG.debug("Full exception trace", ioe);
      }
    }
  }
  if (nsInfo == null) {
    LOG.fatal("Unable to fetch namespace information from any remote NN. Possible NameNodes: "
        + remoteNNs);
    return ERR_CODE_FAILED_CONNECT;
  }
  if (!checkLayoutVersion(nsInfo)) {
    LOG.fatal("Layout version on remote node (" + nsInfo.getLayoutVersion()
        + ") does not match this node's layout version ("
        + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + ")");
    return ERR_CODE_INVALID_VERSION;
  }
  System.out.println("=====================================================\n"
      + "About to bootstrap Standby ID " + nnId + " from:\n"
      + " Nameservice ID: " + nsId + "\n"
      + " Other Namenode ID: " + proxyInfo.getNameNodeID() + "\n"
      + " Other NN's HTTP address: " + proxyInfo.getHttpAddress() + "\n"
      + " Other NN's IPC address: " + proxyInfo.getIpcAddress() + "\n"
      + " Namespace ID: " + nsInfo.getNamespaceID() + "\n"
      + " Block pool ID: " + nsInfo.getBlockPoolID() + "\n"
      + " Cluster ID: " + nsInfo.getClusterID() + "\n"
      + " Layout version: " + nsInfo.getLayoutVersion() + "\n"
      + " isUpgradeFinalized: " + isUpgradeFinalized + "\n"
      + "=====================================================");
  NNStorage storage = new NNStorage(conf, dirsToFormat, editUrisToFormat);
  if (!isUpgradeFinalized) {
    // the remote NameNode is in upgrade state, this NameNode should also
    // create the previous directory. First prepare the upgrade and rename
    // the current dir to previous.tmp.
    LOG.info("The active NameNode is in Upgrade. "
        + "Prepare the upgrade for the standby NameNode as well.");
    if (!doPreUpgrade(storage, nsInfo)) {
      return ERR_CODE_ALREADY_FORMATTED;
    }
  } else if (!format(storage, nsInfo)) {
    // prompt the user to format storage
    return ERR_CODE_ALREADY_FORMATTED;
  }
  // download the fsimage from active namenode
  int download = downloadImage(storage, proxy, proxyInfo);
  if (download != 0) {
    return download;
  }
  // finish the upgrade: rename previous.tmp to previous
  if (!isUpgradeFinalized) {
    doUpgrade(storage);
  }
  return 0;
}
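For context, dirsToFormat and editUrisToFormat are the locally configured name and edits directories. A hedged sketch of how they might be resolved from the configuration before constructing the NNStorage above; the FSNamesystem helpers and the exclusion of the shared edits directory are assumptions about this code path, not shown in the snippet:

  Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  // exclude the shared edits directory, which is owned by the active NameNode
  List<URI> editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(conf, false);
  NNStorage storage = new NNStorage(conf, dirsToFormat, editUrisToFormat);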
use of org.apache.hadoop.hdfs.server.namenode.NNStorage in project hadoop by apache.
the class TestRollingUpgradeRollback method testRollbackCommand.
@Test
public void testRollbackCommand() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  final Path foo = new Path("/foo");
  final Path bar = new Path("/bar");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final DFSAdmin dfsadmin = new DFSAdmin(conf);
    dfs.mkdirs(foo);
    // start rolling upgrade
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    Assert.assertEquals(0,
        dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    // create new directory
    dfs.mkdirs(bar);
    // check NNStorage
    NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
    // (startSegment, mkdir, endSegment)
    checkNNStorage(storage, 3, -1);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
  NameNode nn = null;
  try {
    nn = NameNode.createNameNode(new String[] { "-rollingUpgrade", "rollback" }, conf);
    // make sure /foo is still there, but /bar is not
    INode fooNode = nn.getNamesystem().getFSDirectory().getINode4Write(foo.toString());
    Assert.assertNotNull(fooNode);
    INode barNode = nn.getNamesystem().getFSDirectory().getINode4Write(bar.toString());
    Assert.assertNull(barNode);
    // check the details of NNStorage
    NNStorage storage = nn.getNamesystem().getFSImage().getStorage();
    // (startSegment, upgrade marker, mkdir, endSegment)
    checkNNStorage(storage, 3, 7);
  } finally {
    if (nn != null) {
      nn.stop();
      nn.join();
    }
  }
}
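The checkNNStorage helper is not shown in this snippet. A hedged sketch of the kind of assertion it might make, using only the NNStorage accessors seen above; the helper name and the exact checks are illustrative:

  private static void assertFsImageExists(NNStorage storage, long imageTxId) {
    // verify that an fsimage checkpoint exists for the expected transaction id
    Assert.assertNotNull("no fsimage found for txid " + imageTxId,
        storage.getFsImageName(imageTxId));
  }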