
Example 51 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class TestEditLog method mockStorageWithEdits.

/**
   * Create a mock NNStorage object with several directories, each directory
   * holding edit logs according to a specification. Each directory
   * is specified by a pipe-separated string. For example:
   * <code>[1,100]|[101,200]</code> specifies a directory which
   * includes two finalized segments, one from 1-100, and one from 101-200.
   * The syntax <code>[1,]</code> specifies an in-progress log starting at
   * txid 1.
   */
private NNStorage mockStorageWithEdits(String... editsDirSpecs) throws IOException {
    List<StorageDirectory> sds = Lists.newArrayList();
    List<URI> uris = Lists.newArrayList();
    NNStorage storage = Mockito.mock(NNStorage.class);
    for (String dirSpec : editsDirSpecs) {
        List<String> files = Lists.newArrayList();
        String[] logSpecs = dirSpec.split("\\|");
        for (String logSpec : logSpecs) {
            Matcher m = Pattern.compile("\\[(\\d+),(\\d+)?\\]").matcher(logSpec);
            assertTrue("bad spec: " + logSpec, m.matches());
            if (m.group(2) == null) {
                files.add(NNStorage.getInProgressEditsFileName(Long.parseLong(m.group(1))));
            } else {
                files.add(NNStorage.getFinalizedEditsFileName(Long.parseLong(m.group(1)), Long.parseLong(m.group(2))));
            }
        }
        StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(NameNodeDirType.EDITS, false, files.toArray(new String[0]));
        sds.add(sd);
        URI u = URI.create("file:///storage" + Math.random());
        Mockito.doReturn(sd).when(storage).getStorageDirectory(u);
        uris.add(u);
    }
    Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS);
    Mockito.doReturn(uris).when(storage).getEditsDirectories();
    return storage;
}
Also used : Matcher(java.util.regex.Matcher) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) URI(java.net.URI)
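
For context, a hypothetical call to the helper above could look like the following; the spec strings are illustrative and follow the syntax described in the Javadoc (the first directory holds two finalized segments plus an in-progress log starting at txid 201, the second holds a single finalized segment):

// Hypothetical usage of mockStorageWithEdits with illustrative spec strings.
NNStorage mocked = mockStorageWithEdits(
    "[1,100]|[101,200]|[201,]",
    "[1,200]");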

Example 52 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class TestEditLogRace method verifyEditLogs.

private long verifyEditLogs(FSNamesystem namesystem, FSImage fsimage, String logFileName, long startTxId) throws IOException {
    long numEdits = -1;
    // Verify that every transaction we wrote can be read back; if any were
    // corrupted, loading these transactions will throw an exception.
    for (StorageDirectory sd : fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
        File editFile = new File(sd.getCurrentDir(), logFileName);
        System.out.println("Verifying file: " + editFile);
        FSEditLogLoader loader = new FSEditLogLoader(namesystem, startTxId);
        long numEditsThisLog = loader.loadFSEdits(new EditLogFileInputStream(editFile), startTxId);
        System.out.println("Number of edits: " + numEditsThisLog);
        assertTrue(numEdits == -1 || numEditsThisLog == numEdits);
        numEdits = numEditsThisLog;
    }
    assertTrue(numEdits != -1);
    return numEdits;
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) File(java.io.File)
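
A hypothetical caller of verifyEditLogs, assuming the FSImage.getEditLog() and FSEditLog.getLastWrittenTxId() accessors, would first derive the finalized segment's file name and then check that every edits directory agrees on the edit count (the txid range shown is illustrative):

// Hypothetical usage: verify the finalized segment covering txids 1..lastTxId
// is readable and holds the same number of edits in every edits directory.
long lastTxId = fsimage.getEditLog().getLastWrittenTxId();
String logName = NNStorage.getFinalizedEditsFileName(1, lastTxId);
long numEdits = verifyEditLogs(namesystem, fsimage, logName, 1);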

Example 53 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class TestClusterId method getClusterId.

private String getClusterId(Configuration config) throws IOException {
    // see if cluster id not empty.
    Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(config);
    List<URI> editsToFormat = FSNamesystem.getNamespaceEditsDirs(config);
    FSImage fsImage = new FSImage(config, dirsToFormat, editsToFormat);
    Iterator<StorageDirectory> sdit = fsImage.getStorage().dirIterator(NNStorage.NameNodeDirType.IMAGE);
    StorageDirectory sd = sdit.next();
    Properties props = Storage.readPropertiesFile(sd.getVersionFile());
    String cid = props.getProperty("clusterID");
    LOG.info("successfully formated : sd=" + sd.getCurrentDir() + ";cid=" + cid);
    return cid;
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) Properties(java.util.Properties) URI(java.net.URI)
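
The Storage.readPropertiesFile call works because the VERSION file is an ordinary java.util.Properties file. As an illustration only (plain JDK, not the Hadoop Storage helper), the clusterID could equally be read back like this:

// Illustrative sketch: load the storage directory's VERSION file as a
// standard properties file and look up the clusterID key.
Properties props = new Properties();
try (FileInputStream in = new FileInputStream(sd.getVersionFile())) {
    props.load(in);
}
String cid = props.getProperty("clusterID");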

Example 54 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class TestFileJournalManager method testGetRemoteEditLog.

@Test
public void testGetRemoteEditLog() throws IOException {
    StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
        NameNodeDirType.EDITS, false,
        NNStorage.getFinalizedEditsFileName(1, 100),
        NNStorage.getFinalizedEditsFileName(101, 200),
        NNStorage.getInProgressEditsFileName(201),
        NNStorage.getFinalizedEditsFileName(1001, 1100));
    // passing null for NNStorage because this unit test will not use it
    FileJournalManager fjm = new FileJournalManager(conf, sd, null);
    assertEquals("[1,100],[101,200],[1001,1100]", getLogsAsString(fjm, 1));
    assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 101));
    assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 150));
    assertEquals("[1001,1100]", getLogsAsString(fjm, 201));
    assertEquals("Asking for a newer log than exists should return empty list", "", getLogsAsString(fjm, 9999));
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) Test(org.junit.Test)
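
The getLogsAsString helper is not shown on this page. A plausible sketch, assuming FileJournalManager.getRemoteEditLogs(firstTxId, false) (the exact accessor signature varies between Hadoop versions) and relying on RemoteEditLog printing itself as [startTxId,endTxId], would be:

// Plausible sketch of the helper used in the assertions above: collect the
// finalized segments visible from firstTxId and join their string forms.
private static String getLogsAsString(FileJournalManager fjm, long firstTxId)
    throws IOException {
    List<String> segments = new ArrayList<>();
    for (RemoteEditLog log : fjm.getRemoteEditLogs(firstTxId, false)) {
        segments.add(log.toString());
    }
    return String.join(",", segments);
}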

Example 55 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class TestFileJournalManager method testReadFromMiddleOfEditLog.

/**
   * Make sure that we start reading the correct op when we request a stream
   * with a txid in the middle of an edit log file.
   */
@Test
public void testReadFromMiddleOfEditLog() throws CorruptionException, IOException {
    File f = new File(TestEditLog.TEST_DIR + "/readfrommiddleofeditlog");
    NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10);
    StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
    EditLogInputStream elis = getJournalInputStream(jm, 5, true);
    try {
        FSEditLogOp op = elis.readOp();
        assertEquals("read unexpected op", op.getTransactionId(), 5);
    } finally {
        IOUtils.cleanup(LOG, elis);
    }
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)
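
The getJournalInputStream helper is also not shown on this page. A plausible sketch, assuming the JournalManager.selectInputStreams(streams, fromTxId, inProgressOk) and EditLogInputStream.skipUntil(txId) APIs (the real test helper may differ), is:

// Plausible sketch: gather candidate streams, keep the one whose txid range
// covers the requested txid, fast-forward it to that txid, close the rest.
static EditLogInputStream getJournalInputStream(FileJournalManager jm,
        long txId, boolean inProgressOk) throws IOException {
    List<EditLogInputStream> streams = new ArrayList<>();
    jm.selectInputStreams(streams, txId, inProgressOk);
    EditLogInputStream selected = null;
    for (EditLogInputStream elis : streams) {
        if (selected == null && elis.getFirstTxId() <= txId
                && txId <= elis.getLastTxId()) {
            elis.skipUntil(txId);
            selected = elis;
        } else {
            elis.close();
        }
    }
    return selected;
}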

Aggregations

StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 83 usages
File (java.io.File): 59 usages
Test (org.junit.Test): 45 usages
RandomAccessFile (java.io.RandomAccessFile): 29 usages
IOException (java.io.IOException): 24 usages
Configuration (org.apache.hadoop.conf.Configuration): 22 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 21 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 20 usages
EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile): 19 usages
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 15 usages
URI (java.net.URI): 11 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 11 usages
Path (org.apache.hadoop.fs.Path): 10 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9 usages
FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile): 7 usages
FileJournalManager.getLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile): 6 usages
InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException): 5 usages
AbortSpec (org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec): 5 usages
ArrayList (java.util.ArrayList): 4 usages
StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState): 4 usages