Search in sources :

Example 41 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project.

From the class TestEditLog, method testEditChecksum.

@Test
public void testEditChecksum() throws Exception {
    // Start a cluster, create an edit, corrupt the checksum trailer of the
    // finalized edit log in every edits directory, and verify that a restart
    // fails with ChecksumException as the root cause.
    Configuration conf = getConf();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();
    FSImage fsimage = namesystem.getFSImage();
    final FSEditLog editLog = fsimage.getEditLog();
    fileSys.mkdirs(new Path("/tmp"));
    // Snapshot the edits directories before shutdown; the storage object is
    // not usable once the cluster has stopped.
    Iterator<StorageDirectory> iter = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS);
    LinkedList<StorageDirectory> sds = new LinkedList<StorageDirectory>();
    while (iter.hasNext()) {
        sds.add(iter.next());
    }
    editLog.close();
    cluster.shutdown();
    for (StorageDirectory sd : sds) {
        File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
        assertTrue(editFile.exists());
        long fileLen = editFile.length();
        LOG.debug("Corrupting Log File: " + editFile + " len: " + fileLen);
        RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
        try {
            // The last 4 bytes of the log are the checksum; perturb them so
            // the log no longer validates.
            rwf.seek(fileLen - 4);
            int b = rwf.readInt();
            rwf.seek(fileLen - 4);
            rwf.writeInt(b + 1);
        } finally {
            // Close even if a seek/read throws, so the file handle never leaks.
            rwf.close();
        }
    }
    // Reset so the finally block only shuts down a cluster started below.
    cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
        fail("should not be able to start");
    } catch (IOException e) {
        // expected
        assertNotNull("Cause of exception should be ChecksumException", e.getCause());
        assertEquals("Cause of exception should be ChecksumException", ChecksumException.class, e.getCause().getClass());
    } finally {
        // If the cluster unexpectedly started, fail() throws AssertionError and
        // would otherwise leave the cluster running; clean it up here.
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ChecksumException(org.apache.hadoop.fs.ChecksumException) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) LinkedList(java.util.LinkedList) RandomAccessFile(java.io.RandomAccessFile) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)

Example 42 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project.

From the class TestEditLog, method mockStorageWithEdits.

/**
 * Create a mock NNStorage object with several directories, each directory
 * holding edit logs according to a specification. Each directory
 * is specified by a pipe-separated string. For example:
 * <code>[1,100]|[101,200]</code> specifies a directory which
 * includes two finalized segments, one from 1-100, and one from 101-200.
 * The syntax <code>[1,]</code> specifies an in-progress log starting at
 * txid 1.
 */
private NNStorage mockStorageWithEdits(String... editsDirSpecs) throws IOException {
    NNStorage mockedStorage = Mockito.mock(NNStorage.class);
    List<StorageDirectory> mockDirs = Lists.newArrayList();
    List<URI> dirUris = Lists.newArrayList();
    for (String spec : editsDirSpecs) {
        // Translate each "[start,end]" / "[start,]" segment into a file name.
        List<String> fileNames = Lists.newArrayList();
        for (String segment : spec.split("\\|")) {
            Matcher matcher = Pattern.compile("\\[(\\d+),(\\d+)?\\]").matcher(segment);
            assertTrue("bad spec: " + segment, matcher.matches());
            long firstTxId = Long.parseLong(matcher.group(1));
            if (matcher.group(2) != null) {
                fileNames.add(NNStorage.getFinalizedEditsFileName(firstTxId, Long.parseLong(matcher.group(2))));
            } else {
                // No end txid means the segment is still in progress.
                fileNames.add(NNStorage.getInProgressEditsFileName(firstTxId));
            }
        }
        StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(NameNodeDirType.EDITS, false, fileNames.toArray(new String[0]));
        mockDirs.add(mockDir);
        // Each mock directory gets a unique file: URI for lookup by the storage.
        URI dirUri = URI.create("file:///storage" + Math.random());
        Mockito.doReturn(mockDir).when(mockedStorage).getStorageDirectory(dirUri);
        dirUris.add(dirUri);
    }
    Mockito.doReturn(mockDirs).when(mockedStorage).dirIterable(NameNodeDirType.EDITS);
    Mockito.doReturn(dirUris).when(mockedStorage).getEditsDirectories();
    return mockedStorage;
}
Also used : Matcher(java.util.regex.Matcher) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) URI(java.net.URI)

Example 43 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project.

From the class TestEditLogRace, method verifyEditLogs.

private long verifyEditLogs(FSNamesystem namesystem, FSImage fsimage, String logFileName, long startTxId) throws IOException {
    // Load the named log from every EDITS storage directory and check that
    // each replica contains the same number of transactions. Loading a
    // corrupt or inconsistent log will throw an exception.
    long expectedEdits = -1;
    for (StorageDirectory dir : fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
        File logFile = new File(dir.getCurrentDir(), logFileName);
        System.out.println("Verifying file: " + logFile);
        FSEditLogLoader logLoader = new FSEditLogLoader(namesystem, startTxId);
        long editsInThisLog = logLoader.loadFSEdits(new EditLogFileInputStream(logFile), startTxId);
        System.out.println("Number of edits: " + editsInThisLog);
        // Every directory must agree with the first one we examined.
        assertTrue(expectedEdits == -1 || editsInThisLog == expectedEdits);
        expectedEdits = editsInThisLog;
    }
    // At least one edits directory must have been present.
    assertTrue(expectedEdits != -1);
    return expectedEdits;
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) File(java.io.File)

Example 44 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project.

From the class TestClusterId, method getClusterId.

/**
 * Reads the "clusterID" property from the VERSION file of the first IMAGE
 * storage directory configured in {@code config}.
 *
 * @param config configuration naming the namespace/edits directories to inspect
 * @return the cluster ID recorded in the VERSION file (may be null if absent)
 * @throws IOException if no IMAGE directory exists or the VERSION file cannot be read
 */
private String getClusterId(Configuration config) throws IOException {
    // see if cluster id not empty.
    Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(config);
    List<URI> editsToFormat = FSNamesystem.getNamespaceEditsDirs(config);
    FSImage fsImage = new FSImage(config, dirsToFormat, editsToFormat);
    Iterator<StorageDirectory> sdit = fsImage.getStorage().dirIterator(NNStorage.NameNodeDirType.IMAGE);
    // Fail with a meaningful IOException instead of NoSuchElementException
    // when the configuration yields no IMAGE storage directory.
    if (!sdit.hasNext()) {
        throw new IOException("No IMAGE storage directory found for configuration");
    }
    StorageDirectory sd = sdit.next();
    Properties props = Storage.readPropertiesFile(sd.getVersionFile());
    String cid = props.getProperty("clusterID");
    LOG.info("successfully formatted : sd=" + sd.getCurrentDir() + ";cid=" + cid);
    return cid;
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) Properties(java.util.Properties) URI(java.net.URI)

Example 45 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project.

From the class TestFileJournalManager, method testGetRemoteEditLog.

@Test
public void testGetRemoteEditLog() throws IOException {
    // Mock a storage directory holding three finalized segments ([1,100],
    // [101,200], [1001,1100]) and one in-progress segment starting at 201.
    StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(NameNodeDirType.EDITS, false, NNStorage.getFinalizedEditsFileName(1, 100), NNStorage.getFinalizedEditsFileName(101, 200), NNStorage.getInProgressEditsFileName(201), NNStorage.getFinalizedEditsFileName(1001, 1100));
    // passing null for NNStorage because this unit test will not use it
    FileJournalManager fjm = new FileJournalManager(conf, sd, null);
    // Only finalized segments are returned; the in-progress [201,...] is skipped.
    assertEquals("[1,100],[101,200],[1001,1100]", getLogsAsString(fjm, 1));
    // Asking from txid 101 drops the first segment entirely.
    assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 101));
    // A txid inside a segment (150) still includes that whole segment.
    assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 150));
    assertEquals("[1001,1100]", getLogsAsString(fjm, 201));
    assertEquals("Asking for a newer log than exists should return empty list", "", getLogsAsString(fjm, 9999));
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) Test(org.junit.Test)

Aggregations

StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)83 File (java.io.File)59 Test (org.junit.Test)45 RandomAccessFile (java.io.RandomAccessFile)29 IOException (java.io.IOException)24 Configuration (org.apache.hadoop.conf.Configuration)22 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)21 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)20 EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile)19 NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile)15 URI (java.net.URI)11 FileSystem (org.apache.hadoop.fs.FileSystem)11 Path (org.apache.hadoop.fs.Path)10 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)9 FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile)7 FileJournalManager.getLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile)6 InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)5 AbortSpec (org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec)5 ArrayList (java.util.ArrayList)4 StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState)4