Search in sources:

Example 46 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From class FSImage, method confirmFormat:

/**
 * Check that the storage directories and non-file journals are ready to
 * be formatted. When a directory already exists and interactive mode is
 * on, the user is prompted per directory; otherwise formatting is only
 * approved when 'force' is set.
 *
 * @param force if true, format regardless of whether dirs exist
 * @param interactive prompt the user when a dir exists
 * @return true if formatting should proceed
 * @throws IOException if some storage cannot be accessed
 */
boolean confirmFormat(boolean force, boolean interactive) throws IOException {
    final List<FormatConfirmable> toConfirm = Lists.newArrayList();
    // Every storage directory must be confirmed individually.
    for (StorageDirectory dir : storage.dirIterable(null)) {
        toConfirm.add(dir);
    }
    // Non-file journals (e.g. shared edits) also participate in the check.
    for (FormatConfirmable journal : editLog.getFormatConfirmables()) {
        toConfirm.add(journal);
    }
    return Storage.confirmFormat(toConfirm, force, interactive);
}
Also used : FormatConfirmable(org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)

Example 47 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From class FSNamesystem, method getNameDirStatuses:

// NameNodeMXBean
@Override
public String getNameDirStatuses() {
    Map<String, Map<File, StorageDirType>> statusMap =
        new HashMap<String, Map<File, StorageDirType>>();

    // Collect the storage directories that are currently in service.
    Map<File, StorageDirType> activeDirs = new HashMap<File, StorageDirType>();
    Iterator<StorageDirectory> activeIt = getFSImage().getStorage().dirIterator();
    while (activeIt.hasNext()) {
        StorageDirectory sd = activeIt.next();
        activeDirs.put(sd.getRoot(), sd.getStorageDirType());
    }
    statusMap.put("active", activeDirs);

    // Collect the storage directories that have been removed after failures.
    Map<File, StorageDirType> failedDirs = new HashMap<File, StorageDirType>();
    for (Storage.StorageDirectory sd : getFSImage().getStorage().getRemovedStorageDirs()) {
        failedDirs.put(sd.getRoot(), sd.getStorageDirType());
    }
    statusMap.put("failed", failedDirs);

    // Serialize the active/failed breakdown as a JSON string for the MXBean.
    return JSON.toString(statusMap);
}
Also used : HashMap(java.util.HashMap) StorageDirType(org.apache.hadoop.hdfs.server.common.Storage.StorageDirType) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) File(java.io.File) Map(java.util.Map) TreeMap(java.util.TreeMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)

Example 48 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From class BootstrapStandby, method doUpgrade:

/**
 * Run the upgrade procedure on every storage directory of the given
 * NNStorage (shared edits directories excluded via dirIterator(false)).
 *
 * @param storage the NameNode storage whose directories are upgraded
 * @throws IOException if upgrading any directory fails
 */
private void doUpgrade(NNStorage storage) throws IOException {
    Iterator<StorageDirectory> dirs = storage.dirIterator(false);
    while (dirs.hasNext()) {
        NNUpgradeUtil.doUpgrade(dirs.next(), storage);
    }
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)

Example 49 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From class TestJournalNodeSync, method testMultipleJournalsMultipleMissingLogs:

@Test(timeout = 60000)
public void testMultipleJournalsMultipleMissingLogs() throws Exception {
    // Resolve the current/ directory of each of the three journal nodes.
    File journal0Current =
        new StorageDirectory(jCluster.getJournalDir(0, jid)).getCurrentDir();
    File journal1Current =
        new StorageDirectory(jCluster.getJournalDir(1, jid)).getCurrentDir();
    File journal2Current =
        new StorageDirectory(jCluster.getJournalDir(2, jid)).getCurrentDir();

    // Generate some edit logs and delete multiple logs in multiple journals.
    long txId1 = generateEditLog();
    long txId2 = generateEditLog();
    long txId3 = generateEditLog();

    List<File> missingLogs = Lists.newArrayList();
    missingLogs.add(deleteEditLog(journal0Current, txId1));
    missingLogs.add(deleteEditLog(journal1Current, txId1));
    missingLogs.add(deleteEditLog(journal1Current, txId2));
    missingLogs.add(deleteEditLog(journal2Current, txId3));

    // The sync service should restore every deleted log within the timeout.
    GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) File(java.io.File) FileJournalManager.getLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) Test(org.junit.Test)

Example 50 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

From class TestJournalNodeSync, method testSyncForDiscontinuousMissingLogs:

@Test(timeout = 30000)
public void testSyncForDiscontinuousMissingLogs() throws Exception {
    File journalCurrent =
        new StorageDirectory(jCluster.getJournalDir(0, jid)).getCurrentDir();

    // Generate some edit logs and delete two discontinuous logs:
    // the first and the third, leaving the middle one intact.
    long firstTxId = generateEditLog();
    generateEditLog();
    long thirdTxId = generateEditLog();

    List<File> missingLogs = Lists.newArrayList();
    missingLogs.add(deleteEditLog(journalCurrent, firstTxId));
    missingLogs.add(deleteEditLog(journalCurrent, thirdTxId));

    // The sync service should restore both deleted logs within the timeout.
    GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 10000);
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) File(java.io.File) FileJournalManager.getLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) Test(org.junit.Test)

Aggregations

StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)83 File (java.io.File)59 Test (org.junit.Test)45 RandomAccessFile (java.io.RandomAccessFile)29 IOException (java.io.IOException)24 Configuration (org.apache.hadoop.conf.Configuration)22 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)21 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)20 EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile)19 NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile)15 URI (java.net.URI)11 FileSystem (org.apache.hadoop.fs.FileSystem)11 Path (org.apache.hadoop.fs.Path)10 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)9 FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile)7 FileJournalManager.getLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile)6 InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)5 AbortSpec (org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec)5 ArrayList (java.util.ArrayList)4 StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState)4