
Example 16 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class FSImageTestUtil method logStorageContents.

public static void logStorageContents(Log LOG, NNStorage storage) {
    LOG.info("current storages and corresponding sizes:");
    for (StorageDirectory sd : storage.dirIterable(null)) {
        File curDir = sd.getCurrentDir();
        LOG.info("In directory " + curDir);
        File[] files = curDir.listFiles();
        Arrays.sort(files);
        for (File f : files) {
            LOG.info("  file " + f.getAbsolutePath() + "; len = " + f.length());
        }
    }
}
Also used: StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), RandomAccessFile (java.io.RandomAccessFile), EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile), FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile), File (java.io.File)
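
A minimal usage sketch, not taken from the Hadoop sources: the helper can be pointed at the NNStorage of a running MiniDFSCluster so the on-disk fsimage and edits files show up in the test log. The wrapper name dumpNameNodeStorage and its parameters are assumptions chosen to match the test style of the examples below.

// Hypothetical wrapper (illustrative only): log every file under each of the
// NameNode's storage directories, e.g. after forcing a checkpoint.
private static void dumpNameNodeStorage(MiniDFSCluster cluster, Log log) {
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    FSImageTestUtil.logStorageContents(log, storage);
}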

Example 17 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class FSImageTestUtil method mockStorageDirectory.

public static StorageDirectory mockStorageDirectory(File currentDir, NameNodeDirType type) {
    // Mock the StorageDirectory interface to just point to this file
    StorageDirectory sd = Mockito.mock(StorageDirectory.class);
    Mockito.doReturn(type).when(sd).getStorageDirType();
    Mockito.doReturn(currentDir).when(sd).getCurrentDir();
    Mockito.doReturn(currentDir).when(sd).getRoot();
    Mockito.doReturn(mockFile(true)).when(sd).getVersionFile();
    Mockito.doReturn(mockFile(false)).when(sd).getPreviousDir();
    return sd;
}
Also used: StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)
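
A minimal sketch of how such a mock might be consumed; the temp-directory path and the assertions are illustrative, and they assume that the mockFile(exists) helper (not shown here) stubs File#exists() accordingly.

// Illustrative only: stub an IMAGE-type directory around an arbitrary folder
// and check that the accessors answer the way calling code would expect.
File fakeCurrent = new File(System.getProperty("java.io.tmpdir"), "fake-name-dir");
StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(fakeCurrent, NameNodeDirType.IMAGE);
assertEquals(fakeCurrent, sd.getCurrentDir());
assertTrue(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE));
assertTrue(sd.getVersionFile().exists());     // mockFile(true) assumed to stub an existing file
assertFalse(sd.getPreviousDir().exists());    // mockFile(false) assumed to stub a missing one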

Example 18 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class TestStartup method verifyDifferentDirs.

/**
   * Verify that the edits log and fsimage live in different directories and have the expected sizes.
   */
private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
    StorageDirectory sd = null;
    for (Iterator<StorageDirectory> it = img.getStorage().dirIterator(); it.hasNext(); ) {
        sd = it.next();
        if (sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
            File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
            LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length() + "; expected = " + expectedImgSize);
            assertEquals(expectedImgSize, imf.length());
        } else if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
            File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
            LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " + edf.length() + "; expected = " + expectedEditsSize);
            assertEquals(expectedEditsSize, edf.length());
        } else {
            fail("Image/Edits directories are not different");
        }
    }
}
Also used: StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile), File (java.io.File)
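
This helper is exercised by testSNNStartup in Example 19 below, which passes the lengths of the primary NameNode's image and edits files as the expected sizes when checking the secondary's checkpoint storage.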

Example 19 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class TestStartup method testSNNStartup.

/**
   * secnn-7
   * The secondary NameNode copies the fsimage and edits into the correct, separate directories.
   * @throws IOException
   */
@Test
public void testSNNStartup() throws IOException {
    //setUpConfig();
    LOG.info("--starting SecondNN startup test");
    // different name dirs
    config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, fileAsURI(new File(hdfsDir, "name")).toString());
    config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, fileAsURI(new File(hdfsDir, "name")).toString());
    // same checkpoint dirs
    config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, fileAsURI(new File(hdfsDir, "chkpt_edits")).toString());
    config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, fileAsURI(new File(hdfsDir, "chkpt")).toString());
    LOG.info("--starting NN ");
    MiniDFSCluster cluster = null;
    SecondaryNameNode sn = null;
    NameNode nn = null;
    try {
        cluster = new MiniDFSCluster.Builder(config).manageDataDfsDirs(false).manageNameDfsDirs(false).build();
        cluster.waitActive();
        nn = cluster.getNameNode();
        assertNotNull(nn);
        // start secondary node
        LOG.info("--starting SecondNN");
        sn = new SecondaryNameNode(config);
        assertNotNull(sn);
        LOG.info("--doing checkpoint");
        // this shouldn't fail
        sn.doCheckpoint();
        LOG.info("--done checkpoint");
        // now verify that image and edits are created in the different directories
        FSImage image = nn.getFSImage();
        //only one
        StorageDirectory sd = image.getStorage().getStorageDir(0);
        assertEquals(sd.getStorageDirType(), NameNodeDirType.IMAGE_AND_EDITS);
        File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
        File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
        LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length());
        LOG.info("--edits file " + edf.getAbsolutePath() + "; len = " + edf.length());
        FSImage chkpImage = sn.getFSImage();
        verifyDifferentDirs(chkpImage, imf.length(), edf.length());
    } catch (IOException e) {
        System.err.println("checkpoint failed");
        fail(StringUtils.stringifyException(e));
        throw e;
    } finally {
        if (sn != null)
            sn.shutdown();
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), IOException (java.io.IOException), NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile), File (java.io.File), Test (org.junit.Test)
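
Note the setup: the name and edits directories are deliberately pointed at the same "name" path, so the NameNode ends up with a single IMAGE_AND_EDITS storage directory (hence the assertion on sd.getStorageDirType()), while the checkpoint image and checkpoint edits directories are distinct; verifyDifferentDirs then confirms the secondary wrote its image and edits into those separate directories at the sizes observed on the primary.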

Example 20 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class TestEditLog method testEditLog.

/**
   * Test edit log with different initial buffer size
   * 
   * @param initialSize initial edit log buffer size
   * @throws IOException
   */
private void testEditLog(int initialSize) throws IOException {
    // start a cluster 
    Configuration conf = getConf();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
        cluster.waitActive();
        fileSys = cluster.getFileSystem();
        final FSNamesystem namesystem = cluster.getNamesystem();
        for (Iterator<URI> it = cluster.getNameDirs(0).iterator(); it.hasNext(); ) {
            File dir = new File(it.next().getPath());
            System.out.println(dir);
        }
        FSImage fsimage = namesystem.getFSImage();
        FSEditLog editLog = fsimage.getEditLog();
        // set small size of flush buffer
        editLog.setOutputBufferCapacity(initialSize);
        // Roll log so new output buffer size takes effect
        // we should now be writing to edits_inprogress_3
        fsimage.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
        // Remember the current lastInodeId; it will be reset later to test
        // loading edit log segments. The transactions below allocate new inode
        // ids to write to the edit log but don't create inodes in the namespace.
        long originalLastInodeId = namesystem.dir.getLastInodeId();
        // Create threads and make them run transactions concurrently.
        Thread[] threadId = new Thread[NUM_THREADS];
        for (int i = 0; i < NUM_THREADS; i++) {
            Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS, i * NUM_TRANSACTIONS);
            threadId[i] = new Thread(trans, "TransactionThread-" + i);
            threadId[i].start();
        }
        // wait for all transactions to get over
        for (int i = 0; i < NUM_THREADS; i++) {
            try {
                threadId[i].join();
            } catch (InterruptedException e) {
                // retry 
                i--;
            }
        }
        // Reopen some files, as if for append
        Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS, NUM_TRANSACTIONS / 2);
        trans.run();
        // Roll another time to finalize edits_inprogress_3
        fsimage.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
        // +2 for start/end txns
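        // (NUM_THREADS concurrent Transactions runs plus the extra sequential
        // run above give NUM_THREADS + 1 runs of NUM_TRANSACTIONS transactions;
        // each transaction appears to log two edit ops, hence the factor of 2)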
        long expectedTxns = ((NUM_THREADS + 1) * 2 * NUM_TRANSACTIONS) + 2;
        // Verify that we can read in all the transactions that we have written.
        // If there were any corruptions, it is likely that the reading in
        // of these transactions will throw an exception.
        //
        namesystem.dir.resetLastInodeIdWithoutChecking(originalLastInodeId);
        for (Iterator<StorageDirectory> it = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext(); ) {
            FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
            File editFile = NNStorage.getFinalizedEditsFile(it.next(), 3, 3 + expectedTxns - 1);
            assertTrue("Expect " + editFile + " exists", editFile.exists());
            System.out.println("Verifying file: " + editFile);
            long numEdits = loader.loadFSEdits(new EditLogFileInputStream(editFile), 3);
            int numLeases = namesystem.leaseManager.countLease();
            System.out.println("Number of outstanding leases " + numLeases);
            assertEquals(0, numLeases);
            assertTrue("Verification for " + editFile + " failed. " + "Expected " + expectedTxns + " transactions. " + "Found " + numEdits + " transactions.", numEdits == expectedTxns);
        }
    } finally {
        try {
            if (fileSys != null)
                fileSys.close();
            if (cluster != null)
                cluster.shutdown();
        } catch (Throwable t) {
            LOG.error("Couldn't shut down cleanly", t);
        }
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), URI (java.net.URI), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File)

Aggregations

StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 83 uses
File (java.io.File): 59 uses
Test (org.junit.Test): 45 uses
RandomAccessFile (java.io.RandomAccessFile): 29 uses
IOException (java.io.IOException): 24 uses
Configuration (org.apache.hadoop.conf.Configuration): 22 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 21 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 20 uses
EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile): 19 uses
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 15 uses
URI (java.net.URI): 11 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 11 uses
Path (org.apache.hadoop.fs.Path): 10 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9 uses
FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile): 7 uses
FileJournalManager.getLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile): 6 uses
InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException): 5 uses
AbortSpec (org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec): 5 uses
ArrayList (java.util.ArrayList): 4 uses
StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState): 4 uses