Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.
From the class TestCheckPointForSecurityTokens, method testSaveNamespace.
/**
 * Tests save namespace.
 */
@Test
public void testSaveNamespace() throws IOException {
  DistributedFileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    FSNamesystem namesystem = cluster.getNamesystem();
    String renewer = UserGroupInformation.getLoginUser().getUserName();
    Token<DelegationTokenIdentifier> token1 = namesystem.getDelegationToken(new Text(renewer));
    Token<DelegationTokenIdentifier> token2 = namesystem.getDelegationToken(new Text(renewer));
    // Saving image without safe mode should fail
    DFSAdmin admin = new DFSAdmin(conf);
    String[] args = new String[] { "-saveNamespace" };
    // verify that the edits file is NOT empty
    NameNode nn = cluster.getNameNode();
    for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.scanLog(Long.MAX_VALUE, true);
      long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
      assertEquals("In-progress log " + log + " should have 5 transactions", 5, numTransactions);
    }
    // Saving image in safe mode should succeed
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      admin.run(args);
    } catch (Exception e) {
      throw new IOException(e.getMessage());
    }
    // verify that the edits file is empty except for the START txn
    for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.scanLog(Long.MAX_VALUE, true);
      long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
      assertEquals("In-progress log " + log + " should only have START txn", 1, numTransactions);
    }
    // restart cluster
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    // Should be able to renew & cancel the delegation tokens after cluster restart
    try {
      renewToken(token1);
      renewToken(token2);
    } catch (IOException e) {
      fail("Could not renew the token");
    }
    namesystem = cluster.getNamesystem();
    Token<DelegationTokenIdentifier> token3 = namesystem.getDelegationToken(new Text(renewer));
    Token<DelegationTokenIdentifier> token4 = namesystem.getDelegationToken(new Text(renewer));
    // restart cluster again
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    namesystem = cluster.getNamesystem();
    Token<DelegationTokenIdentifier> token5 = namesystem.getDelegationToken(new Text(renewer));
    try {
      renewToken(token1);
      renewToken(token2);
      renewToken(token3);
      renewToken(token4);
      renewToken(token5);
    } catch (IOException e) {
      fail("Could not renew the token");
    }
    // restart cluster again
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    namesystem = cluster.getNamesystem();
    try {
      renewToken(token1);
      cancelToken(token1);
      renewToken(token2);
      cancelToken(token2);
      renewToken(token3);
      cancelToken(token3);
      renewToken(token4);
      cancelToken(token4);
      renewToken(token5);
      cancelToken(token5);
    } catch (IOException e) {
      fail("Could not renew or cancel the token");
    }
  } finally {
    if (fs != null)
      fs.close();
    if (cluster != null)
      cluster.shutdown();
  }
}
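Both verification loops above use the same EditLogFile inspection pattern: for each storage directory, find the latest edit log segment, confirm it is still in progress, validate it with scanLog, and compute the transaction count from the first and last txids. A minimal sketch of that pattern as a helper, assuming it sits in the same test class (the name countTransactions is hypothetical; the EditLogFile calls are exactly the ones used above):

// Hypothetical helper distilled from the loops above: locate the latest
// edit log in a storage directory, check that it is the in-progress
// segment, validate it, and return its transaction count.
private static long countTransactions(StorageDirectory sd) throws IOException {
  EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
  assertTrue("Latest log should be in progress", log.isInProgress());
  // scanLog validates the segment and fills in the last txid it finds
  log.scanLog(Long.MAX_VALUE, true);
  return (log.getLastTxId() - log.getFirstTxId()) + 1;
}

With such a helper, each loop body reduces to a single assertEquals(expected, countTransactions(sd)) call.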
Use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.
From the class TestBackupNode, method testBackupNodeTailsEdits.
/**
 * Ensure that the backup node will tail edits from the NN
 * and keep in sync, even while the NN rolls, checkpoints
 * occur, etc.
 */
@Test
public void testBackupNodeTailsEdits() throws Exception {
  Configuration conf = new HdfsConfiguration();
  HAUtil.setAllowStandbyReads(conf, true);
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  BackupNode backup = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    fileSys = cluster.getFileSystem();
    backup = startBackupNode(conf, StartupOption.BACKUP, 1);
    BackupImage bnImage = (BackupImage) backup.getFSImage();
    testBNInSync(cluster, backup, 1);
    // Force a roll -- BN should roll with NN.
    NameNode nn = cluster.getNameNode();
    NamenodeProtocols nnRpc = nn.getRpcServer();
    nnRpc.rollEditLog();
    assertEquals(bnImage.getEditLog().getCurSegmentTxId(), nn.getFSImage().getEditLog().getCurSegmentTxId());
    // BN should stay in sync after roll
    testBNInSync(cluster, backup, 2);
    long nnImageBefore = nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
    // BN checkpoint
    backup.doCheckpoint();
    // NN should have received a new image
    long nnImageAfter = nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
    assertTrue("NN should have received a new checkpoint. before: " + nnImageBefore + " after: " + nnImageAfter, nnImageAfter > nnImageBefore);
    // BN should stay in sync after checkpoint
    testBNInSync(cluster, backup, 3);
    // Stop BN
    StorageDirectory sd = bnImage.getStorage().getStorageDir(0);
    backup.stop();
    backup = null;
    // When shutting down the BN, it shouldn't finalize logs that are
    // still open on the NN
    EditLogFile editsLog = FSImageTestUtil.findLatestEditsLog(sd);
    assertEquals(editsLog.getFirstTxId(), nn.getFSImage().getEditLog().getCurSegmentTxId());
    assertTrue("Should not have finalized " + editsLog, editsLog.isInProgress());
    // do some edits
    assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down")));
    // start a new backup node
    backup = startBackupNode(conf, StartupOption.BACKUP, 1);
    testBNInSync(cluster, backup, 4);
    assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down", false));
    // Trigger an unclean shutdown of the backup node. The backup node will
    // not unregister from the active when this is done, simulating a node
    // crash.
    backup.stop(false);
    // Do some edits on the active. These should go through without failing,
    // verifying that the active is still up and can append entries to the
    // master edit log.
    assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down-2")));
  } finally {
    LOG.info("Shutting down...");
    if (backup != null)
      backup.stop();
    if (fileSys != null)
      fileSys.close();
    if (cluster != null)
      cluster.shutdown();
  }
  assertStorageDirsMatch(cluster.getNameNode(), backup);
}
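The shutdown check in the middle of this test is the other recurring use of EditLogFile here: after stopping the backup node, the latest segment in its storage directory must still be the NameNode's current, in-progress segment, i.e. the BN must not have finalized a log the NN still has open. A sketch of that check factored into a helper (the name assertSegmentStillOpen is hypothetical; the calls mirror the assertions above):

// Hypothetical helper mirroring the check above: the BN's latest edit log
// must start at the NN's current segment txid and must not be finalized.
private static void assertSegmentStillOpen(StorageDirectory sd, NameNode nn) throws IOException {
  EditLogFile editsLog = FSImageTestUtil.findLatestEditsLog(sd);
  assertEquals(nn.getFSImage().getEditLog().getCurSegmentTxId(), editsLog.getFirstTxId());
  assertTrue("Should not have finalized " + editsLog, editsLog.isInProgress());
}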