Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
From the class FSImage, method doRollback.
void doRollback(FSNamesystem fsns) throws IOException {
  // Rollback is allowed only if there is a previous fs state
  // in at least one of the storage directories.
  // Directories that don't have a previous state are not rolled back.
  boolean canRollback = false;
  FSImage prevState = new FSImage(conf);
  try {
    prevState.getStorage().layoutVersion =
        HdfsServerConstants.NAMENODE_LAYOUT_VERSION;
    for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext(); ) {
      StorageDirectory sd = it.next();
      if (!NNUpgradeUtil.canRollBack(sd, storage, prevState.getStorage(),
          HdfsServerConstants.NAMENODE_LAYOUT_VERSION)) {
        continue;
      }
      LOG.info("Can perform rollback for " + sd);
      canRollback = true;
    }

    if (fsns.isHaEnabled()) {
      // If HA is enabled, check if the shared log can be rolled back as well.
      editLog.initJournalsForWrite();
      boolean canRollBackSharedEditLog = editLog.canRollBackSharedLog(
          prevState.getStorage(), HdfsServerConstants.NAMENODE_LAYOUT_VERSION);
      if (canRollBackSharedEditLog) {
        LOG.info("Can perform rollback for shared edit log.");
        canRollback = true;
      }
    }

    if (!canRollback) {
      throw new IOException("Cannot rollback. None of the storage "
          + "directories contain previous fs state.");
    }

    // Do rollback for each directory containing previous state.
    for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext(); ) {
      StorageDirectory sd = it.next();
      LOG.info("Rolling back storage directory " + sd.getRoot()
          + ".\n   new LV = " + prevState.getStorage().getLayoutVersion()
          + "; new CTime = " + prevState.getStorage().getCTime());
      NNUpgradeUtil.doRollBack(sd);
    }

    if (fsns.isHaEnabled()) {
      // If HA is enabled, try to roll back the shared log as well.
      editLog.doRollback();
    }

    isUpgradeFinalized = true;
  } finally {
    prevState.close();
  }
}
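Note the two-phase shape of doRollback: it first verifies that at least one storage directory (or, under HA, the shared edit log) has previous state to restore, and only then performs the destructive rollback. Below is a minimal, self-contained sketch of that check-then-act pattern; the RollbackTarget interface is a hypothetical stand-in for StorageDirectory and the shared log, not a Hadoop type.

import java.io.IOException;
import java.util.List;

// Hypothetical stand-in for a StorageDirectory or the shared edit log.
interface RollbackTarget {
  boolean canRollBack();                  // non-destructive eligibility check
  void doRollBack() throws IOException;   // destructive restore of previous state
}

final class TwoPhaseRollback {
  static void rollBackAll(List<RollbackTarget> targets) throws IOException {
    // Phase 1: check every target before touching anything.
    boolean canRollback = false;
    for (RollbackTarget t : targets) {
      if (t.canRollBack()) {
        canRollback = true;
      }
    }
    if (!canRollback) {
      throw new IOException("Cannot rollback: no target has previous state.");
    }
    // Phase 2: only now perform the destructive step.
    for (RollbackTarget t : targets) {
      if (t.canRollBack()) {
        t.doRollBack();
      }
    }
  }
}

Failing before any target is mutated is what prevents a half-rolled-back NameNode when no previous state exists anywhere.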
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
From the class FSImage, method renameCheckpoint.
/**
 * Rename all fsimage files of the given NameNodeFile type. The
 * associated checksum files will also be renamed.
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
    throws IOException {
  ArrayList<StorageDirectory> al = null;
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (al == null) {
        al = Lists.newArrayList();
      }
      al.add(image.sd);
    }
  }
  if (al != null) {
    storage.reportErrorsOnDirectories(al);
  }
}
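Two details of renameCheckpoint are worth copying: the failure list al is allocated lazily, so the common all-renames-succeed path allocates nothing, and failed directories are reported in one batch at the end rather than aborting on the first error. A small sketch of the same pattern, assuming the hypothetical names Dir, tryRename, and reportErrors:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

interface Dir { void tryRename() throws IOException; }  // hypothetical stand-in

final class BatchErrorReporting {
  static void renameAll(List<Dir> dirs) {
    List<Dir> failed = null;             // allocated lazily, like `al` above
    for (Dir d : dirs) {
      try {
        d.tryRename();
      } catch (IOException ioe) {
        if (failed == null) {
          failed = new ArrayList<>();    // common case never allocates this
        }
        failed.add(d);                   // keep going; other dirs may succeed
      }
    }
    if (failed != null) {
      reportErrors(failed);              // one batch report, cf. reportErrorsOnDirectories
    }
  }

  static void reportErrors(List<Dir> failed) {
    for (Dir d : failed) {
      System.err.println("Storage directory failed: " + d);
    }
  }
}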
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
From the class FSImage, method finalizeUpgrade.
void finalizeUpgrade(boolean finalizeEditLog) throws IOException {
  LOG.info("Finalizing upgrade for local dirs. "
      + (storage.getLayoutVersion() == 0 ? ""
          : "\n   cur LV = " + storage.getLayoutVersion()
              + "; cur CTime = " + storage.getCTime()));
  for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext(); ) {
    StorageDirectory sd = it.next();
    NNUpgradeUtil.doFinalize(sd);
  }
  if (finalizeEditLog) {
    // We only do this in the case that HA is enabled and we're active. In any
    // other case the NN will have done the upgrade of the edits directories
    // already by virtue of the fact that they're local.
    editLog.doFinalizeOfSharedLog();
  }
  isUpgradeFinalized = true;
}
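In the standard NameNode storage layout, an upgrade leaves the pre-upgrade state in a "previous" directory beside "current"; finalizing discards "previous", after which rollback is no longer possible. The following is a simplified, illustrative sketch of that step for a single directory using only java.nio.file; it is an assumption-laden stand-in, not NNUpgradeUtil.doFinalize itself.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Comparator;
import java.util.stream.Stream;

final class FinalizeSketch {
  static void finalizeDir(Path storageRoot) throws IOException {
    Path previous = storageRoot.resolve("previous");
    if (!Files.exists(previous)) {
      return;  // nothing to finalize: already finalized or never upgraded
    }
    // Rename first so a crash mid-delete cannot leave a directory that
    // looks like a valid rollback target.
    Path tmp = storageRoot.resolve("finalized.tmp");
    Files.move(previous, tmp, StandardCopyOption.ATOMIC_MOVE);
    try (Stream<Path> walk = Files.walk(tmp)) {
      // Delete children before parents.
      walk.sorted(Comparator.reverseOrder()).forEach(p -> {
        try {
          Files.delete(p);
        } catch (IOException e) {
          throw new UncheckedIOException(e);
        }
      });
    }
  }
}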
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
From the class FSImage, method loadFSImageFile.
void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery,
    FSImageFile imageFile, StartupOption startupOption) throws IOException {
  LOG.info("Planning to load image: " + imageFile);
  StorageDirectory sdForProperties = imageFile.sd;
  storage.readProperties(sdForProperties, startupOption);
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
    // For txid-based layout, we should have a .md5 file
    // next to the image file.
    boolean isRollingRollback = RollingUpgradeStartupOption.ROLLBACK
        .matches(startupOption);
    loadFSImage(imageFile.getFile(), target, recovery, isRollingRollback);
  } else if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.FSIMAGE_CHECKSUM, getLayoutVersion())) {
    // In 0.22, we have the checksum stored in the VERSION file.
    String md5 = storage.getDeprecatedProperty(
        NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY);
    if (md5 == null) {
      throw new InconsistentFSStateException(sdForProperties.getRoot(),
          "Message digest property "
              + NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY
              + " not set for storage directory "
              + sdForProperties.getRoot());
    }
    loadFSImage(imageFile.getFile(), new MD5Hash(md5), target, recovery, false);
  } else {
    // We don't have any record of the md5sum.
    loadFSImage(imageFile.getFile(), null, target, recovery, false);
  }
}
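The three branches differ only in where the expected MD5 digest comes from: a .md5 sidecar next to the image, a deprecated property in the VERSION file, or nowhere at all. For reference, verifying a file against a stored hex digest needs nothing beyond the JDK; the sketch below is illustrative and is not Hadoop's MD5FileUtils.

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

final class Md5Check {
  // Returns true if the file's MD5 matches the expected hex digest.
  static boolean matches(Path image, String expectedHex)
      throws IOException, NoSuchAlgorithmException {
    MessageDigest md = MessageDigest.getInstance("MD5");
    try (InputStream in = Files.newInputStream(image)) {
      byte[] buf = new byte[8192];
      int n;
      while ((n = in.read(buf)) != -1) {
        md.update(buf, 0, n);
      }
    }
    StringBuilder hex = new StringBuilder();
    for (byte b : md.digest()) {
      hex.append(String.format("%02x", b));
    }
    return hex.toString().equalsIgnoreCase(expectedHex);
  }
}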
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
From the class FSImage, method deleteCancelledCheckpoint.
/**
 * Deletes the checkpoint file in every storage directory,
 * since the checkpoint was cancelled.
 */
private void deleteCancelledCheckpoint(long txid) throws IOException {
  ArrayList<StorageDirectory> al = Lists.newArrayList();
  for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
    File ckpt = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
    if (ckpt.exists() && !ckpt.delete()) {
      LOG.warn("Unable to delete cancelled checkpoint in " + sd);
      al.add(sd);
    }
  }
  storage.reportErrorsOnDirectories(al);
}
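The test ckpt.exists() && !ckpt.delete() treats an absent checkpoint file as success (nothing was ever written there) and only a failed delete of an existing file as an error worth flagging. A self-contained sketch of the same cleanup logic, with a hypothetical file-name convention standing in for NNStorage.getStorageFile:

import java.io.File;
import java.util.ArrayList;
import java.util.List;

final class CancelledCheckpointCleanup {
  // Returns the directories whose checkpoint file could not be removed.
  static List<File> deleteCheckpoints(List<File> imageDirs, long txid) {
    List<File> badDirs = new ArrayList<>();
    for (File dir : imageDirs) {
      File ckpt = new File(dir, "fsimage.ckpt_" + txid);  // hypothetical name
      if (ckpt.exists() && !ckpt.delete()) {
        badDirs.add(dir);  // a failed delete is an error; absence is not
      }
    }
    return badDirs;  // caller reports these, cf. reportErrorsOnDirectories
  }
}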