Example use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project: the confirmFormat method of class FSImage.
/**
 * Check whether the storage directories and non-file journals exist.
 * If running in interactive mode, will prompt the user for each
 * directory to allow them to format anyway. Otherwise, returns
 * false, unless 'force' is specified.
 *
 * @param force if true, format regardless of whether dirs exist
 * @param interactive prompt the user when a dir exists
 * @return true if formatting should proceed
 * @throws IOException if some storage cannot be accessed
 */
boolean confirmFormat(boolean force, boolean interactive) throws IOException {
List<FormatConfirmable> toConfirm = Lists.newArrayList();
// Each storage directory is itself a FormatConfirmable.
for (StorageDirectory dir : storage.dirIterable(null)) {
toConfirm.add(dir);
}
// Non-file journals managed by the edit log contribute their own confirmables.
toConfirm.addAll(editLog.getFormatConfirmables());
return Storage.confirmFormat(toConfirm, force, interactive);
}
Example use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project: the getNameDirStatuses method of class FSNamesystem.
// NameNodeMXBean
/**
 * Report the status of the name directories as a JSON string, keyed by
 * "active" (currently in use) and "failed" (previously removed), each
 * mapping a directory root to its storage directory type.
 */
@Override
public String getNameDirStatuses() {
Map<String, Map<File, StorageDirType>> statusMap =
    new HashMap<String, Map<File, StorageDirType>>();

// Collect the currently-active storage directories.
Map<File, StorageDirType> activeDirs = new HashMap<File, StorageDirType>();
Iterator<StorageDirectory> dirs = getFSImage().getStorage().dirIterator();
while (dirs.hasNext()) {
StorageDirectory dir = dirs.next();
activeDirs.put(dir.getRoot(), dir.getStorageDirType());
}
statusMap.put("active", activeDirs);

// Collect directories that have been removed (e.g. due to failure).
Map<File, StorageDirType> failedDirs = new HashMap<File, StorageDirType>();
for (Storage.StorageDirectory dir : getFSImage().getStorage().getRemovedStorageDirs()) {
failedDirs.put(dir.getRoot(), dir.getStorageDirType());
}
statusMap.put("failed", failedDirs);

return JSON.toString(statusMap);
}
Example use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project: the doUpgrade method of class BootstrapStandby.
/**
 * Run the upgrade procedure on each of the given storage's directories.
 *
 * @param storage the NNStorage whose directories are upgraded
 * @throws IOException if upgrading any directory fails
 */
private void doUpgrade(NNStorage storage) throws IOException {
// NOTE(review): the boolean passed to dirIterator filters which directories
// are returned — confirm its meaning against NNStorage.dirIterator.
Iterator<StorageDirectory> dirs = storage.dirIterator(false);
while (dirs.hasNext()) {
NNUpgradeUtil.doUpgrade(dirs.next(), storage);
}
}
Example use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project: the testMultipleJournalsMultipleMissingLogs method of class TestJournalNodeSync.
/**
 * Delete edit log segments across several journals (two from one journal,
 * one each from the others) and verify the sync process restores them all.
 */
@Test(timeout = 60000)
public void testMultipleJournalsMultipleMissingLogs() throws Exception {
// Resolve the "current" directory of each of the three journal nodes.
File[] journalCurrentDirs = new File[3];
for (int i = 0; i < 3; i++) {
journalCurrentDirs[i] = new StorageDirectory(jCluster.getJournalDir(i, jid)).getCurrentDir();
}
// Generate some edit logs and delete multiple logs in multiple journals.
long txId1 = generateEditLog();
long txId2 = generateEditLog();
long txId3 = generateEditLog();
List<File> missingLogs = Lists.newArrayList();
missingLogs.add(deleteEditLog(journalCurrentDirs[0], txId1));
missingLogs.add(deleteEditLog(journalCurrentDirs[1], txId1));
missingLogs.add(deleteEditLog(journalCurrentDirs[1], txId2));
missingLogs.add(deleteEditLog(journalCurrentDirs[2], txId3));
// Poll every 500ms, up to 30s, until every deleted segment reappears.
GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
}
Example use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project: the testSyncForDiscontinuousMissingLogs method of class TestJournalNodeSync.
/**
 * Delete two non-consecutive edit log segments from a single journal and
 * verify the sync process restores both.
 */
@Test(timeout = 30000)
public void testSyncForDiscontinuousMissingLogs() throws Exception {
File journalCurrentDir =
    new StorageDirectory(jCluster.getJournalDir(0, jid)).getCurrentDir();
// Generate some edit logs and delete two discontinuous logs.
long earlierTxId = generateEditLog();
generateEditLog();  // this middle segment is left intact
long laterTxId = generateEditLog();
List<File> missingLogs = Lists.newArrayList();
missingLogs.add(deleteEditLog(journalCurrentDir, earlierTxId));
missingLogs.add(deleteEditLog(journalCurrentDir, laterTxId));
// Poll every 500ms, up to 10s, until both deleted segments reappear.
GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 10000);
}
Aggregations