
Example 61 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From the class TestSaveNamespace, method testCancelSaveNamespace:

@Test(timeout = 20000)
public void testCancelSaveNamespace() throws Exception {
    Configuration conf = getConf();
    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
    DFSTestUtil.formatNameNode(conf);
    FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
    // Replace the FSImage with a spy
    final FSImage image = fsn.getFSImage();
    NNStorage storage = image.getStorage();
    // unlock any directories that
    // FSNamesystem's initialization may have locked
    storage.close();
    storage.setStorageDirectories(FSNamesystem.getNamespaceDirs(conf), FSNamesystem.getNamespaceEditsDirs(conf));
    FSNamesystem spyFsn = spy(fsn);
    final FSNamesystem finalFsn = spyFsn;
    DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
    BlockIdManager bid = spy(spyFsn.getBlockManager().getBlockIdManager());
    Whitebox.setInternalState(finalFsn.getBlockManager(), "blockIdManager", bid);
    doAnswer(delayer).when(bid).getGenerationStamp();
    ExecutorService pool = Executors.newFixedThreadPool(2);
    try {
        doAnEdit(fsn, 1);
        final Canceler canceler = new Canceler();
        // Enter safe mode so that saveNamespace is permitted to run
        fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        try {
            Future<Void> saverFuture = pool.submit(new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    image.saveNamespace(finalFsn, NameNodeFile.IMAGE, canceler);
                    return null;
                }
            });
            // Wait until saveNamespace calls getGenerationStamp
            delayer.waitForCall();
            // then cancel the saveNamespace
            Future<Void> cancelFuture = pool.submit(new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    canceler.cancel("cancelled");
                    return null;
                }
            });
            // give the cancel call time to run
            Thread.sleep(500);
            // allow saveNamespace to proceed - it should check the cancel flag
            // after this point and throw an exception
            delayer.proceed();
            cancelFuture.get();
            saverFuture.get();
            fail("saveNamespace did not fail even though cancelled!");
        } catch (Throwable t) {
            GenericTestUtils.assertExceptionContains("SaveNamespaceCancelledException", t);
        }
        LOG.info("Successfully cancelled a saveNamespace");
        // Check that we have only the original image and not any
        // cruft left over from half-finished images
        FSImageTestUtil.logStorageContents(LOG, storage);
        for (StorageDirectory sd : storage.dirIterable(null)) {
            File curDir = sd.getCurrentDir();
            GenericTestUtils.assertGlobEquals(curDir, "fsimage_.*", NNStorage.getImageFileName(0), NNStorage.getImageFileName(0) + MD5FileUtils.MD5_SUFFIX);
        }
    } finally {
        fsn.close();
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Canceler (org.apache.hadoop.hdfs.util.Canceler), DelayAnswer (org.apache.hadoop.test.GenericTestUtils.DelayAnswer), StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), IOException (java.io.IOException), ExecutorService (java.util.concurrent.ExecutorService), BlockIdManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager), File (java.io.File), NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile), Test (org.junit.Test)
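The heart of this test is the DelayAnswer/Canceler handshake: a spied call is held at waitForCall() so the test can issue canceler.cancel() before letting the call proceed. Below is a minimal sketch of that pattern in isolation; the MyService class and method names are hypothetical stand-ins, while DelayAnswer, waitForCall(), proceed() and the Mockito calls mirror the test above.

import org.apache.commons.logging.Log;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

public class DelayAnswerSketch {

    // Hypothetical collaborator; stands in for the spied BlockIdManager above.
    static class MyService {
        void doWork() {
            // real work would happen here
        }
    }

    void pauseThenResume(Log log) throws Exception {
        MyService service = spy(new MyService());
        DelayAnswer delayer = new GenericTestUtils.DelayAnswer(log);
        // Hold every doWork() call until the test says proceed()
        doAnswer(delayer).when(service).doWork();

        Thread worker = new Thread(() -> service.doWork());
        worker.start();

        // Block until doWork() has actually been entered ...
        delayer.waitForCall();
        // ... perform the concurrent action under test here,
        // e.g. canceler.cancel("cancelled") in the example above ...
        // ... then release the held call and let the worker finish.
        delayer.proceed();
        worker.join();
    }
}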

Example 62 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From the class TestParallelImageWrite, method checkImages:

/**
   * Confirm that the FSImage files in all StorageDirectories are identical
   * and non-empty, and that there are the expected number of them.
   * @param fsn - the FSNamesystem being checked.
   * @param numImageDirs - the configured number of StorageDirectories of type IMAGE.
   * @return - the MD5 hash of the most recent FSImage files, which must all be the same.
   * @throws AssertionError if the image files are empty or differ,
   *     if fewer than two StorageDirectories are provided, or if the
   *     actual number of StorageDirectories is less than configured.
   */
public static String checkImages(FSNamesystem fsn, int numImageDirs) throws Exception {
    NNStorage stg = fsn.getFSImage().getStorage();
    //any failed StorageDirectory is removed from the storageDirs list
    assertEquals("Some StorageDirectories failed Upgrade", numImageDirs, stg.getNumStorageDirs(NameNodeDirType.IMAGE));
    assertTrue("Not enough fsimage copies in MiniDFSCluster " + "to test parallel write", numImageDirs > 1);
    // List of "current/" directory from each SD
    List<File> dirs = FSImageTestUtil.getCurrentDirs(stg, NameNodeDirType.IMAGE);
    // across directories, all files with same names should be identical hashes   
    FSImageTestUtil.assertParallelFilesAreIdentical(dirs, Collections.<String>emptySet());
    FSImageTestUtil.assertSameNewestImage(dirs);
    // Return the hash of the newest image file
    StorageDirectory firstSd = stg.dirIterator(NameNodeDirType.IMAGE).next();
    File latestImage = FSImageTestUtil.findLatestImageFile(firstSd);
    String md5 = FSImageTestUtil.getImageFileMD5IgnoringTxId(latestImage);
    System.err.println("md5 of " + latestImage + ": " + md5);
    return md5;
}
Also used: StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), File (java.io.File)
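checkImages leans on FSImageTestUtil helpers, but the underlying iteration is just NNStorage.dirIterator over IMAGE-type directories. A minimal sketch of that walk, assuming only the dirIterator() and getCurrentDir() calls already seen in these examples; the class and method names here are ours.

import java.io.File;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;

class StorageDirWalk {

    // Collect the "current/" subdirectory of every IMAGE-type StorageDirectory,
    // roughly what FSImageTestUtil.getCurrentDirs(stg, NameNodeDirType.IMAGE) returns above.
    static List<File> listCurrentImageDirs(NNStorage storage) {
        List<File> currentDirs = new ArrayList<>();
        for (Iterator<StorageDirectory> it = storage.dirIterator(NameNodeDirType.IMAGE); it.hasNext(); ) {
            StorageDirectory sd = it.next();
            currentDirs.add(sd.getCurrentDir());
        }
        return currentDirs;
    }
}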

Example 63 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From the class TestSecurityTokenEditLog, method testEditLog:

/**
   * Tests transaction logging in dfs.
   */
@Test
public void testEditLog() throws IOException {
    // start a cluster 
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    try {
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
        cluster.waitActive();
        fileSys = cluster.getFileSystem();
        final FSNamesystem namesystem = cluster.getNamesystem();
        for (Iterator<URI> it = cluster.getNameDirs(0).iterator(); it.hasNext(); ) {
            File dir = new File(it.next().getPath());
            System.out.println(dir);
        }
        FSImage fsimage = namesystem.getFSImage();
        FSEditLog editLog = fsimage.getEditLog();
        // set small size of flush buffer
        editLog.setOutputBufferCapacity(2048);
        // Create threads and make them run transactions concurrently.
        Thread[] threadId = new Thread[NUM_THREADS];
        for (int i = 0; i < NUM_THREADS; i++) {
            Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS);
            threadId[i] = new Thread(trans, "TransactionThread-" + i);
            threadId[i].start();
        }
        // wait for all transactions to finish
        for (int i = 0; i < NUM_THREADS; i++) {
            try {
                threadId[i].join();
            } catch (InterruptedException e) {
                // retry 
                i--;
            }
        }
        editLog.close();
        // Verify that we can read in all the transactions that we have written.
        // If there were any corruptions, it is likely that the reading in
        // of these transactions will throw an exception.
        //
        namesystem.getDelegationTokenSecretManager().stopThreads();
        int numKeys = namesystem.getDelegationTokenSecretManager().getNumberOfKeys();
        // + 2 for BEGIN and END txns
        int expectedTransactions = NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys + 2;
        for (StorageDirectory sd : fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
            File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 1 + expectedTransactions - 1);
            System.out.println("Verifying file: " + editFile);
            FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
            long numEdits = loader.loadFSEdits(new EditLogFileInputStream(editFile), 1);
            assertEquals("Verification for " + editFile, expectedTransactions, numEdits);
        }
    } finally {
        if (fileSys != null)
            fileSys.close();
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Configuration (org.apache.hadoop.conf.Configuration), StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), URI (java.net.URI), FileSystem (org.apache.hadoop.fs.FileSystem), File (java.io.File), Test (org.junit.Test)
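The verification loop above is worth isolating: for each EDITS-type StorageDirectory it resolves the finalized segment covering txids 1..expectedTransactions and replays it with FSEditLogLoader. A minimal sketch of just that step, assuming it sits in the same package as the test (some of these helpers are package-level); the class and method names are ours, the calls are the same ones used above.

package org.apache.hadoop.hdfs.server.namenode;

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

class EditLogReplayCheck {

    // Replay the finalized edits segment covering txids 1..expectedTxns found in
    // one StorageDirectory and return how many edits were loaded.
    static long countEditsInDir(FSNamesystem fsn, StorageDirectory sd, long expectedTxns) throws IOException {
        File editFile = NNStorage.getFinalizedEditsFile(sd, 1, expectedTxns);
        FSEditLogLoader loader = new FSEditLogLoader(fsn, 0);
        return loader.loadFSEdits(new EditLogFileInputStream(editFile), 1);
    }
}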

Example 64 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From the class TestStorageRestore, method invalidateStorage:

/**
   * Invalidate storage by reporting errors on the given storage directories
   * and injecting write faults on the second and third directories.
   */
public void invalidateStorage(FSImage fi, Set<File> filesToInvalidate) throws IOException {
    ArrayList<StorageDirectory> al = new ArrayList<StorageDirectory>(2);
    Iterator<StorageDirectory> it = fi.getStorage().dirIterator();
    while (it.hasNext()) {
        StorageDirectory sd = it.next();
        if (filesToInvalidate.contains(sd.getRoot())) {
            LOG.info("causing IO error on " + sd.getRoot());
            al.add(sd);
        }
    }
    // simulate an error
    fi.getStorage().reportErrorsOnDirectories(al);
    for (JournalAndStream j : fi.getEditLog().getJournals()) {
        if (j.getManager() instanceof FileJournalManager) {
            FileJournalManager fm = (FileJournalManager) j.getManager();
            if (fm.getStorageDirectory().getRoot().equals(path2) || fm.getStorageDirectory().getRoot().equals(path3)) {
                EditLogOutputStream mockStream = spy(j.getCurrentStream());
                j.setCurrentStreamForTests(mockStream);
                doThrow(new IOException("Injected fault: write")).when(mockStream).write(Mockito.<FSEditLogOp>anyObject());
            }
        }
    }
}
Also used: JournalAndStream (org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream), ArrayList (java.util.ArrayList), StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), IOException (java.io.IOException)
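The interesting part of invalidateStorage is the fault injection: the journal's current edit stream is replaced by a Mockito spy whose write() throws. A minimal sketch of that pattern against a hypothetical Sink interface; everything here except doThrow(), spy() and any() is made up for illustration.

import java.io.IOException;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;

class WriteFaultInjection {

    // Hypothetical stand-in for EditLogOutputStream in the example above.
    interface Sink {
        void write(Object record) throws IOException;
    }

    // Wrap a real sink in a spy and make every write() fail as if the disk went bad.
    // realSink must be an instance of a non-final class so Mockito can spy it.
    static Sink injectWriteFault(Sink realSink) throws IOException {
        Sink spySink = spy(realSink);
        doThrow(new IOException("Injected fault: write")).when(spySink).write(any());
        return spySink;
    }
}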

Example 65 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From the class TestHAStateTransitions, method createEmptyInProgressEditLog:

private static void createEmptyInProgressEditLog(MiniDFSCluster cluster, NameNode nn, boolean writeHeader) throws IOException {
    long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
    URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
    File sharedEditsDir = new File(sharedEditsUri.getPath());
    StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
    File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir, txid + 1);
    assertTrue("Failed to create in-progress edits file", inProgressFile.createNewFile());
    if (writeHeader) {
        DataOutputStream out = new DataOutputStream(new FileOutputStream(inProgressFile));
        EditLogFileOutputStream.writeHeader(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
        out.close();
    }
}
Also used: FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DataOutputStream (java.io.DataOutputStream), EditLogFileOutputStream (org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream), FileOutputStream (java.io.FileOutputStream), StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), URI (java.net.URI), File (java.io.File)
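The example builds a StorageDirectory straight over the shared edits directory and writes only the edit-log header. A minimal sketch of the same steps on a plain directory; the file name used here is a simplified stand-in for the NameNode's zero-padded edits_inprogress_<txid> convention, which the real test resolves through NameNodeAdapter, and the class and method names are ours.

import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;

class InProgressEditsSketch {

    static File createWithHeader(File editsRoot, long nextTxId) throws IOException {
        StorageDirectory sd = new StorageDirectory(editsRoot);
        // Simplified name; the real layout zero-pads the transaction id.
        File inProgress = new File(sd.getCurrentDir(), "edits_inprogress_" + nextTxId);
        if (!inProgress.createNewFile()) {
            throw new IOException("could not create " + inProgress);
        }
        try (DataOutputStream out = new DataOutputStream(new FileOutputStream(inProgress))) {
            // Same header call as in the example above.
            EditLogFileOutputStream.writeHeader(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
        }
        return inProgress;
    }
}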

Aggregations

StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 83
File (java.io.File): 59
Test (org.junit.Test): 45
RandomAccessFile (java.io.RandomAccessFile): 29
IOException (java.io.IOException): 24
Configuration (org.apache.hadoop.conf.Configuration): 22
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 21
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 20
EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile): 19
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 15
URI (java.net.URI): 11
FileSystem (org.apache.hadoop.fs.FileSystem): 11
Path (org.apache.hadoop.fs.Path): 10
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9
FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile): 7
FileJournalManager.getLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile): 6
InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException): 5
AbortSpec (org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec): 5
ArrayList (java.util.ArrayList): 4
StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState): 4