Search in sources :

Example 11 with EditLogFile

use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.

From the class TestCheckPointForSecurityTokens, the method testSaveNamespace.

/**
 * Tests save namespace.
 *
 * Verifies that delegation tokens issued before a saveNamespace (performed
 * in safe mode) survive repeated cluster restarts: every token issued
 * before and between restarts must remain renewable and cancellable, and
 * the edit log must be truncated to just the START transaction after the
 * namespace is saved.
 *
 * @throws IOException if saveNamespace fails or cluster setup fails
 */
@Test
public void testSaveNamespace() throws IOException {
    DistributedFileSystem fs = null;
    try {
        Configuration conf = new HdfsConfiguration();
        // Force delegation token support even without Kerberos security.
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        FSNamesystem namesystem = cluster.getNamesystem();
        String renewer = UserGroupInformation.getLoginUser().getUserName();
        Token<DelegationTokenIdentifier> token1 = namesystem.getDelegationToken(new Text(renewer));
        Token<DelegationTokenIdentifier> token2 = namesystem.getDelegationToken(new Text(renewer));
        // Saving image without safe mode should fail
        DFSAdmin admin = new DFSAdmin(conf);
        String[] args = new String[] { "-saveNamespace" };
        // verify that the edits file is NOT empty
        NameNode nn = cluster.getNameNode();
        for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
            EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
            assertTrue(log.isInProgress());
            log.scanLog(Long.MAX_VALUE, true);
            // Txids are inclusive on both ends, hence the +1.
            long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
            assertEquals("In-progress log " + log + " should have 5 transactions", 5, numTransactions);
        }
        // Saving image in safe mode should succeed
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        try {
            admin.run(args);
        } catch (Exception e) {
            // Wrap with the original exception as the cause so the full
            // stack trace is preserved (not just the message).
            throw new IOException(e);
        }
        // verify that the edits file is empty except for the START txn
        for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
            EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
            assertTrue(log.isInProgress());
            log.scanLog(Long.MAX_VALUE, true);
            long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
            assertEquals("In-progress log " + log + " should only have START txn", 1, numTransactions);
        }
        // restart cluster
        cluster.shutdown();
        cluster = null;
        // format(false) keeps the saved image so the tokens persist.
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        //Should be able to renew & cancel the delegation token after cluster restart
        try {
            renewToken(token1);
            renewToken(token2);
        } catch (IOException e) {
            fail("Could not renew or cancel the token: " + e);
        }
        namesystem = cluster.getNamesystem();
        Token<DelegationTokenIdentifier> token3 = namesystem.getDelegationToken(new Text(renewer));
        Token<DelegationTokenIdentifier> token4 = namesystem.getDelegationToken(new Text(renewer));
        // restart cluster again
        cluster.shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        namesystem = cluster.getNamesystem();
        Token<DelegationTokenIdentifier> token5 = namesystem.getDelegationToken(new Text(renewer));
        try {
            renewToken(token1);
            renewToken(token2);
            renewToken(token3);
            renewToken(token4);
            renewToken(token5);
        } catch (IOException e) {
            fail("Could not renew or cancel the token: " + e);
        }
        // restart cluster again
        cluster.shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        namesystem = cluster.getNamesystem();
        try {
            // All five tokens, including those issued before earlier
            // restarts, must still be renewable and cancellable.
            renewToken(token1);
            cancelToken(token1);
            renewToken(token2);
            cancelToken(token2);
            renewToken(token3);
            cancelToken(token3);
            renewToken(token4);
            cancelToken(token4);
            renewToken(token5);
            cancelToken(token5);
        } catch (IOException e) {
            fail("Could not renew or cancel the token: " + e);
        }
    } finally {
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) DelegationTokenIdentifier(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) Text(org.apache.hadoop.io.Text) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) IOException(java.io.IOException) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)

Example 12 with EditLogFile

use of org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile in project hadoop by apache.

From the class TestBackupNode, the method testBackupNodeTailsEdits.

/**
   * Ensure that the backupnode will tail edits from the NN
   * and keep in sync, even while the NN rolls, checkpoints
   * occur, etc.
   *
   * Also verifies that stopping the BN does not finalize edit log
   * segments still open on the NN, and that an unclean BN shutdown
   * leaves the active NN able to accept edits.
   */
@Test
public void testBackupNodeTailsEdits() throws Exception {
    Configuration conf = new HdfsConfiguration();
    HAUtil.setAllowStandbyReads(conf, true);
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    BackupNode backup = null;
    try {
        // No datanodes needed — this test exercises only namespace edits.
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        fileSys = cluster.getFileSystem();
        backup = startBackupNode(conf, StartupOption.BACKUP, 1);
        BackupImage bnImage = (BackupImage) backup.getFSImage();
        testBNInSync(cluster, backup, 1);
        // Force a roll -- BN should roll with NN.
        NameNode nn = cluster.getNameNode();
        NamenodeProtocols nnRpc = nn.getRpcServer();
        nnRpc.rollEditLog();
        // After the roll, BN and NN must be on the same edit segment.
        assertEquals(bnImage.getEditLog().getCurSegmentTxId(), nn.getFSImage().getEditLog().getCurSegmentTxId());
        // BN should stay in sync after roll
        testBNInSync(cluster, backup, 2);
        long nnImageBefore = nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
        // BN checkpoint
        backup.doCheckpoint();
        // NN should have received a new image
        long nnImageAfter = nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
        assertTrue("nn should have received new checkpoint. before: " + nnImageBefore + " after: " + nnImageAfter, nnImageAfter > nnImageBefore);
        // BN should stay in sync after checkpoint
        testBNInSync(cluster, backup, 3);
        // Stop BN
        // Grab the storage dir BEFORE stopping, so we can inspect the
        // on-disk edit log the BN leaves behind.
        StorageDirectory sd = bnImage.getStorage().getStorageDir(0);
        backup.stop();
        backup = null;
        // When shutting down the BN, it shouldn't finalize logs that are
        // still open on the NN
        EditLogFile editsLog = FSImageTestUtil.findLatestEditsLog(sd);
        assertEquals(editsLog.getFirstTxId(), nn.getFSImage().getEditLog().getCurSegmentTxId());
        assertTrue("Should not have finalized " + editsLog, editsLog.isInProgress());
        // do some edits
        assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down")));
        // start a new backup node
        backup = startBackupNode(conf, StartupOption.BACKUP, 1);
        testBNInSync(cluster, backup, 4);
        // The restarted BN must have caught up on the edit made while it
        // was down.
        assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down", false));
        // Trigger an unclean shutdown of the backup node. Backup node will not
        // unregister from the active when this is done simulating a node crash.
        backup.stop(false);
        // do some edits on the active. This should go through without failing.
        // This will verify that active is still up and can add entries to
        // master editlog.
        assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down-2")));
    } finally {
        LOG.info("Shutting down...");
        // NOTE(review): backup is still non-null here after stop(false),
        // so stop() runs a second time in the clean-exit path — presumably
        // a no-op on an already-stopped node; confirm before restyling.
        if (backup != null)
            backup.stop();
        if (fileSys != null)
            fileSys.close();
        if (cluster != null)
            cluster.shutdown();
    }
    // Runs after shutdown: compares the on-disk storage dirs of NN and BN.
    assertStorageDirsMatch(cluster.getNameNode(), backup);
}
Also used : Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)

Aggregations

EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile)12 File (java.io.File)5 Configuration (org.apache.hadoop.conf.Configuration)4 IOException (java.io.IOException)3 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)3 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)3 Test (org.junit.Test)3 RandomAccessFile (java.io.RandomAccessFile)2 FileSystem (org.apache.hadoop.fs.FileSystem)2 Path (org.apache.hadoop.fs.Path)2 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)2 StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)2 FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile)2 VisibleForTesting (com.google.common.annotations.VisibleForTesting)1 FileInputStream (java.io.FileInputStream)1 ServletContext (javax.servlet.ServletContext)1 NewEpochResponseProto (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)1 SegmentStateProto (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)1 DelegationTokenIdentifier (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier)1 FileJournalManager (org.apache.hadoop.hdfs.server.namenode.FileJournalManager)1