Use of org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile in project hadoop by apache.
Class FSImage, method renameCheckpoint.
/**
 * Rename all the fsimage files of the given NameNodeFile type. The
 * associated checksum files will also be renamed.
 */
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf) throws IOException {
  ArrayList<StorageDirectory> al = null;
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
  storage.inspectStorageDirs(inspector);
  for (FSImageFile image : inspector.getFoundImages()) {
    try {
      renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
    } catch (IOException ioe) {
      LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
      if (al == null) {
        al = Lists.newArrayList();
      }
      al.add(image.sd);
    }
  }
  if (al != null) {
    storage.reportErrorsOnDirectories(al);
  }
}
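This is the collect-failures-and-report-once pattern: the inspector locates every image of the fromNnf type across all storage directories, each is renamed in place, and directories that fail are accumulated so a single bad disk does not abort the whole pass. Below is a minimal standalone sketch of the same pattern using plain java.io; the fsimage.ckpt_/fsimage_ prefixes follow the usual NNStorage naming for NameNodeFile.IMAGE_NEW and NameNodeFile.IMAGE, but the directory layout and helper names here are hypothetical.

// Standalone sketch, not Hadoop's implementation: rename finished
// checkpoints and collect the directories where the rename failed.
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class RenameCheckpointSketch {
  static void renameCheckpoints(List<File> storageDirs) throws IOException {
    List<File> failedDirs = new ArrayList<>();
    for (File dir : storageDirs) {
      File[] candidates = dir.listFiles(
          (d, name) -> name.startsWith("fsimage.ckpt_"));
      if (candidates == null) {
        failedDirs.add(dir);    // unreadable directory counts as a failure
        continue;
      }
      for (File from : candidates) {
        // fsimage.ckpt_<txid> -> fsimage_<txid>
        String txId = from.getName().substring("fsimage.ckpt_".length());
        File to = new File(dir, "fsimage_" + txId);
        if (!from.renameTo(to)) {
          System.err.println("Unable to rename checkpoint in " + dir);
          failedDirs.add(dir);  // report the directory once, like the original
          break;
        }
      }
    }
    if (!failedDirs.isEmpty()) {
      // stand-in for storage.reportErrorsOnDirectories(failedDirs)
      throw new IOException("Failed storage directories: " + failedDirs);
    }
  }
}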
Use of org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile in project hadoop by apache.
Class FSImage, method saveNamespace.
/**
 * @param timeWindow a checkpoint is done if the latest checkpoint
 * was done more than this number of seconds ago.
 * @param txGap a checkpoint is also done if the gap between the latest tx id
 * and the latest checkpoint is greater than this number.
 * @return true if a checkpoint has been made
 * @see #saveNamespace(FSNamesystem, NameNodeFile, Canceler)
 */
public synchronized boolean saveNamespace(long timeWindow, long txGap,
    FSNamesystem source) throws IOException {
  if (timeWindow > 0 || txGap > 0) {
    final FSImageStorageInspector inspector = storage.readAndInspectDirs(
        EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK),
        StartupOption.REGULAR);
    FSImageFile image = inspector.getLatestImages().get(0);
    File imageFile = image.getFile();
    final long checkpointTxId = image.getCheckpointTxId();
    final long checkpointAge = Time.now() - imageFile.lastModified();
    if (checkpointAge <= timeWindow * 1000 &&
        checkpointTxId >= this.getCorrectLastAppliedOrWrittenTxId() - txGap) {
      return false;
    }
  }
  saveNamespace(source, NameNodeFile.IMAGE, null);
  return true;
}
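The method skips the checkpoint only when the latest image is both recent enough (younger than timeWindow seconds) and close enough to the current state (within txGap transactions of the last applied transaction); exceeding either threshold triggers a save. A minimal sketch of just that decision, with the storage inspection replaced by plain parameters; all names here are hypothetical and only the two-condition predicate mirrors saveNamespace.

// Standalone sketch of the age/gap checkpoint policy above.
public class CheckpointPolicySketch {
  /** Returns true when a new checkpoint should be saved. */
  static boolean shouldCheckpoint(long timeWindowSec, long txGap,
      long nowMillis, long imageMTimeMillis,
      long checkpointTxId, long lastAppliedTxId) {
    if (timeWindowSec <= 0 && txGap <= 0) {
      return true;                        // no policy: always checkpoint
    }
    long checkpointAge = nowMillis - imageMTimeMillis;
    boolean recentEnough = checkpointAge <= timeWindowSec * 1000;
    boolean closeEnough = checkpointTxId >= lastAppliedTxId - txGap;
    // Skip only when the image is both recent AND close to the latest txid.
    return !(recentEnough && closeEnough);
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    // image written 2h ago, 500 txids behind; window 1h, gap 1000 -> checkpoint
    System.out.println(shouldCheckpoint(3600, 1000,
        now, now - 2 * 3600 * 1000L, 4500, 5000));  // prints true
  }
}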
Use of org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile in project hadoop by apache.
Class NNStorageRetentionManager, method getImageTxIdToRetain.
/**
* @param inspector inspector that has already inspected all storage dirs
* @return the transaction ID corresponding to the oldest checkpoint
* that should be retained.
*/
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
  List<FSImageFile> images = inspector.getFoundImages();
  TreeSet<Long> imageTxIds = Sets.newTreeSet();
  for (FSImageFile image : images) {
    imageTxIds.add(image.getCheckpointTxId());
  }
  List<Long> imageTxIdsList = Lists.newArrayList(imageTxIds);
  if (imageTxIdsList.isEmpty()) {
    return 0;
  }
  Collections.reverse(imageTxIdsList);
  int toRetain = Math.min(numCheckpointsToRetain, imageTxIdsList.size());
  long minTxId = imageTxIdsList.get(toRetain - 1);
  LOG.info("Going to retain " + toRetain + " images with txid >= " + minTxId);
  return minTxId;
}
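The computation keeps the numCheckpointsToRetain highest checkpoint txids and returns the smallest of them; anything older than that txid becomes eligible for purging. A self-contained sketch of the same logic follows; the Hadoop version reads txids from FSImageFile objects, while this one takes a plain list.

// Standalone sketch of the retention computation above.
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.TreeSet;

public class RetentionSketch {
  static long imageTxIdToRetain(List<Long> imageTxIds, int numCheckpointsToRetain) {
    // TreeSet dedupes and sorts ascending, matching Sets.newTreeSet()
    List<Long> sorted = new ArrayList<>(new TreeSet<>(imageTxIds));
    if (sorted.isEmpty()) {
      return 0;
    }
    Collections.reverse(sorted);          // now descending: newest first
    int toRetain = Math.min(numCheckpointsToRetain, sorted.size());
    return sorted.get(toRetain - 1);      // txid of the oldest retained image
  }

  public static void main(String[] args) {
    // keep the 2 newest of {10, 20, 30, 30} -> retain txids >= 20
    System.out.println(imageTxIdToRetain(List.of(10L, 20L, 30L, 30L), 2));
  }
}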
Use of org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile in project hadoop by apache.
Class FSImageTestUtil, method assertSameNewestImage.
/**
 * Assert that all of the given directories have the same newest fsimage
 * filename and that those images hold the same data.
 */
public static void assertSameNewestImage(List<File> dirs) throws Exception {
  if (dirs.size() < 2) {
    return;
  }
  long imageTxId = -1;
  List<File> imageFiles = new ArrayList<File>();
  for (File dir : dirs) {
    FSImageTransactionalStorageInspector inspector =
        inspectStorageDirectory(dir, NameNodeDirType.IMAGE);
    List<FSImageFile> latestImages = inspector.getLatestImages();
    assert (!latestImages.isEmpty());
    long thisTxId = latestImages.get(0).getCheckpointTxId();
    if (imageTxId != -1 && thisTxId != imageTxId) {
      fail("Storage directory " + dir + " does not have the same " +
          "last image index " + imageTxId + " as another");
    }
    imageTxId = thisTxId;
    imageFiles.add(inspector.getLatestImages().get(0).getFile());
  }
  assertFileContentsSame(imageFiles.toArray(new File[0]));
}
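The test checks agreement on two levels: every directory's newest image must carry the same checkpoint txid, and the image files themselves must hold identical bytes. Below is a plausible sketch of the content half, comparing files by SHA-256 digest; the helper name and the digest approach are assumptions, not FSImageTestUtil's actual implementation.

// Standalone sketch: assert a set of files are byte-identical via digests.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;

public class FileContentsSketch {
  static void assertFileContentsSame(Path... files)
      throws IOException, NoSuchAlgorithmException {
    byte[] expected = null;
    for (Path f : files) {
      byte[] digest = MessageDigest.getInstance("SHA-256")
          .digest(Files.readAllBytes(f));
      if (expected == null) {
        expected = digest;                // first file sets the baseline
      } else if (!Arrays.equals(expected, digest)) {
        throw new AssertionError("File " + f + " differs from " + files[0]);
      }
    }
  }
}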