Use of org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo in the Apache Hadoop project.
From the class TestRollingUpgrade, method testCheckpoint.
/**
 * Starts an HA QJM cluster with {@code nnCount} NameNodes, begins a rolling
 * upgrade, rolls the edit log, and verifies that every standby NameNode
 * checkpoints up to the rolled transaction id.
 *
 * @param nnCount total number of NameNodes in the HA cluster
 * @throws IOException on cluster or filesystem failures
 * @throws InterruptedException if interrupted while waiting for preparation
 */
public void testCheckpoint(int nnCount) throws IOException, InterruptedException {
  final Configuration conf = new Configuration();
  // Tail edits and checkpoint aggressively so standbys catch up quickly.
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
  MiniQJMHACluster qjmCluster = null;
  final Path foo = new Path("/foo");
  try {
    qjmCluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build();
    final MiniDFSCluster miniCluster = qjmCluster.getDfsCluster();
    miniCluster.waitActive();
    miniCluster.transitionToActive(0);
    final DistributedFileSystem fs = miniCluster.getFileSystem(0);

    // Begin the rolling upgrade and wait until the rollback image exists.
    final RollingUpgradeInfo upgradeInfo = fs.rollingUpgrade(RollingUpgradeAction.PREPARE);
    Assert.assertTrue(upgradeInfo.isStarted());
    queryForPreparation(fs);

    fs.mkdirs(foo);
    final long txid = fs.rollEdits();
    Assert.assertTrue(txid > 0);

    // Every standby NameNode must have checkpointed up to txid.
    for (int nn = 1; nn < nnCount; nn++) {
      verifyNNCheckpoint(miniCluster, txid, nn);
    }
  } finally {
    if (qjmCluster != null) {
      qjmCluster.shutdown();
    }
  }
}
Use of org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo in the Apache Hadoop project.
From the class TestRollingUpgrade, method queryForPreparation.
/**
 * Polls the NameNode with {@link RollingUpgradeAction#QUERY} until the
 * rollback fsimage has been created, failing the test if it does not
 * appear within the retry budget.
 *
 * Fixes an off-by-one in the original loop: {@code while (++retries < 10)}
 * performed only nine query attempts although the intent (and the failure
 * path) was a budget of ten.
 *
 * @param dfs client connected to the active NameNode
 * @throws IOException if the rolling-upgrade query RPC fails
 * @throws InterruptedException if interrupted while sleeping between polls
 */
static void queryForPreparation(DistributedFileSystem dfs) throws IOException, InterruptedException {
  // Retry budget and delay between polls.
  final int maxRetries = 10;
  final long sleepMs = 1000;
  for (int retry = 0; retry < maxRetries; retry++) {
    final RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
    if (info.createdRollbackImages()) {
      return;
    }
    // Rollback image not ready yet; wait before polling again.
    Thread.sleep(sleepMs);
  }
  Assert.fail("Query return false");
}
Use of org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo in the Apache Hadoop project.
From the class TestRollingUpgrade, method testRollingUpgradeWithQJM.
/**
 * Tests a rolling upgrade across two NameNodes sharing one quorum journal:
 * the first NN prepares the upgrade; a second NN pointed at the same quorum
 * (with a copied image dir) must see both the edits and the upgrade status,
 * survive restarts, reject an "-upgrade" startup mid-rolling-upgrade, and
 * finalize the upgrade.
 *
 * Fixes two resource leaks in the original: the MiniJournalCluster was
 * never shut down, and the second MiniDFSCluster leaked whenever a failure
 * occurred before its in-line shutdown call.
 */
@Test(timeout = 30000)
public void testRollingUpgradeWithQJM() throws Exception {
  String nnDirPrefix = MiniDFSCluster.getBaseDirectory() + "/nn/";
  final File nn1Dir = new File(nnDirPrefix + "image1");
  final File nn2Dir = new File(nnDirPrefix + "image2");
  LOG.info("nn1Dir=" + nn1Dir);
  LOG.info("nn2Dir=" + nn2Dir);

  final Configuration conf = new HdfsConfiguration();
  final MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build();
  MiniDFSCluster cluster = null;
  MiniDFSCluster cluster2 = null;
  try {
    mjc.waitActive();
    setConf(conf, nn1Dir, mjc);

    {
      // Start the cluster once to generate the dfs dirs
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
          .manageNameDfsDirs(false).checkExitOnShutdown(false).build();
      // Shutdown the cluster before making a copy of the namenode dir to
      // release all file locks, otherwise, the copy will fail on some
      // platforms.
      cluster.shutdown();
      cluster = null;
    }

    // Start a second NN pointed to the same quorum.
    // We need to copy the image dir from the first NN -- or else
    // the new NN will just be rejected because of Namespace mismatch.
    FileUtil.fullyDelete(nn2Dir);
    FileUtil.copy(nn1Dir, FileSystem.getLocal(conf).getRaw(),
        new Path(nn2Dir.getAbsolutePath()), false, conf);

    // Start the cluster again
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
        .manageNameDfsDirs(false).checkExitOnShutdown(false).build();

    final Path foo = new Path("/foo");
    final Path bar = new Path("/bar");
    final Path baz = new Path("/baz");

    final RollingUpgradeInfo info1;
    {
      final DistributedFileSystem dfs = cluster.getFileSystem();
      dfs.mkdirs(foo);

      // start rolling upgrade
      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      info1 = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
      LOG.info("START\n" + info1);

      // query rolling upgrade
      assertEquals(info1, dfs.rollingUpgrade(RollingUpgradeAction.QUERY));

      dfs.mkdirs(bar);
      cluster.shutdown();
      cluster = null;
    }

    // cluster2 takes over QJM
    final Configuration conf2 = setConf(new Configuration(), nn2Dir, mjc);
    cluster2 = new MiniDFSCluster.Builder(conf2).numDataNodes(0)
        .format(false).manageNameDfsDirs(false).build();
    final DistributedFileSystem dfs2 = cluster2.getFileSystem();

    // Check that cluster2 sees the edits made on cluster1
    Assert.assertTrue(dfs2.exists(foo));
    Assert.assertTrue(dfs2.exists(bar));
    Assert.assertFalse(dfs2.exists(baz));

    // query rolling upgrade in cluster2
    assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));

    dfs2.mkdirs(baz);

    LOG.info("RESTART cluster 2");
    cluster2.restartNameNode();
    assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
    Assert.assertTrue(dfs2.exists(foo));
    Assert.assertTrue(dfs2.exists(bar));
    Assert.assertTrue(dfs2.exists(baz));

    // restart cluster with -upgrade should fail.
    // NOTE(review): if restartNameNode("-upgrade") does NOT throw, the test
    // silently continues; consider Assert.fail() here — left as-is to
    // preserve the original behavior.
    try {
      cluster2.restartNameNode("-upgrade");
    } catch (IOException e) {
      LOG.info("The exception is expected.", e);
    }

    LOG.info("RESTART cluster 2 again");
    cluster2.restartNameNode();
    assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
    Assert.assertTrue(dfs2.exists(foo));
    Assert.assertTrue(dfs2.exists(bar));
    Assert.assertTrue(dfs2.exists(baz));

    // finalize rolling upgrade
    final RollingUpgradeInfo finalize = dfs2.rollingUpgrade(RollingUpgradeAction.FINALIZE);
    Assert.assertTrue(finalize.isFinalized());

    LOG.info("RESTART cluster 2 with regular startup option");
    cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
    cluster2.restartNameNode();
    Assert.assertTrue(dfs2.exists(foo));
    Assert.assertTrue(dfs2.exists(bar));
    Assert.assertTrue(dfs2.exists(baz));
  } finally {
    // Release every mini-cluster resource even on failure; the journal
    // cluster in particular was never shut down by the original code.
    if (cluster != null) {
      cluster.shutdown();
    }
    if (cluster2 != null) {
      cluster2.shutdown();
    }
    mjc.shutdown();
  }
}
Use of org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo in the Apache Hadoop project.
From the class TestRollingUpgrade, method testQuery.
/**
 * Verifies that a rolling upgrade can be prepared while every standby
 * NameNode is down, that QUERY reports no rollback image until a standby
 * creates one, and that the active NameNode eventually holds a rollback
 * fsimage once the standbys return.
 *
 * @param nnCount total number of NameNodes in the HA cluster
 * @throws Exception on any cluster or filesystem failure
 */
private void testQuery(int nnCount) throws Exception {
  final Configuration conf = new Configuration();
  MiniQJMHACluster qjmCluster = null;
  try {
    qjmCluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build();
    final MiniDFSCluster miniCluster = qjmCluster.getDfsCluster();
    miniCluster.waitActive();
    miniCluster.transitionToActive(0);
    final DistributedFileSystem fs = miniCluster.getFileSystem(0);

    // Take every standby NameNode offline before preparing the upgrade.
    for (int nn = 1; nn < nnCount; nn++) {
      miniCluster.shutdownNameNode(nn);
    }

    // PREPARE succeeds with only the active NN running.
    RollingUpgradeInfo status = fs.rollingUpgrade(RollingUpgradeAction.PREPARE);
    Assert.assertTrue(status.isStarted());

    // No standby is up, so no rollback image can exist yet.
    status = fs.rollingUpgrade(RollingUpgradeAction.QUERY);
    Assert.assertFalse(status.createdRollbackImages());

    // Bring the standbys back; one of them should create the rollback
    // image and upload it.
    for (int nn = 1; nn < nnCount; nn++) {
      miniCluster.restartNameNode(nn);
    }
    queryForPreparation(fs);

    // The NN should have a copy of the fsimage in case of rollbacks.
    Assert.assertTrue(miniCluster.getNamesystem(0).getFSImage().hasRollbackFSImage());
  } finally {
    if (qjmCluster != null) {
      qjmCluster.shutdown();
    }
  }
}
Use of org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo in the Apache Hadoop project.
From the class TestRollingUpgradeDowngrade, method testDowngrade.
/**
 * Verifies that the obsolete rolling-upgrade "downgrade" startup option is
 * rejected: restarting the NameNode with {@code -rollingUpgrade downgrade}
 * must throw IllegalArgumentException, which is declared as the test's
 * expected exception. The assertions after that restart call would only run
 * if the option were (wrongly) still accepted.
 * @throws Exception on cluster or filesystem failure
 */
@Test(timeout = 300000, expected = IllegalArgumentException.class)
public void testDowngrade() throws Exception {
final Configuration conf = new HdfsConfiguration();
MiniQJMHACluster cluster = null;
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
try {
cluster = new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster = cluster.getDfsCluster();
dfsCluster.waitActive();
// let NN1 tail editlog every 1s
dfsCluster.getConfiguration(1).setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
dfsCluster.restartNameNode(1);
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
dfs.mkdirs(foo);
// start rolling upgrade
RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
// /bar is created after PREPARE so it lives in the post-upgrade edits.
dfs.mkdirs(bar);
// Block until the rollback fsimage has been created.
TestRollingUpgrade.queryForPreparation(dfs);
dfs.close();
// Expected to throw IllegalArgumentException: the "downgrade" startup
// option is obsolete. Everything below is effectively unreachable.
dfsCluster.restartNameNode(0, true, "-rollingUpgrade", "downgrade");
// Once downgraded, there should be no more fsimage for rollbacks.
Assert.assertFalse(dfsCluster.getNamesystem(0).getFSImage().hasRollbackFSImage());
// shutdown NN1
dfsCluster.shutdownNameNode(1);
dfsCluster.transitionToActive(0);
dfs = dfsCluster.getFileSystem(0);
// Data written before and after PREPARE must both survive the downgrade.
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
Aggregations