Usage of org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot in the Apache Hadoop project: class FSDirSnapshotOp, method getSnapshotFiles.
/**
 * Get a collection of full snapshot paths given file and snapshot dir.
 * @param fsd the FSDirectory used to check whether each candidate
 *            snapshot path actually exists
 * @param lsf a list of snapshottable features
 * @param file full path of the file
 * @return collection of full paths of snapshot of the file
 * @throws IOException if looking up a candidate path's file info fails
 */
static Collection<String> getSnapshotFiles(FSDirectory fsd, List<DirectorySnapshottableFeature> lsf, String file) throws IOException {
  ArrayList<String> snaps = new ArrayList<>();
  for (DirectorySnapshottableFeature sf : lsf) {
    // for each snapshottable dir e.g. /dir1, /dir2
    final ReadOnlyList<Snapshot> lsnap = sf.getSnapshotList();
    for (Snapshot s : lsnap) {
      // for each snapshot name under snapshottable dir
      // e.g. /dir1/.snapshot/s1, /dir1/.snapshot/s2
      final String dirName = s.getRoot().getRootFullPathName();
      // Require a component boundary after dirName: a bare prefix test
      // would wrongly match e.g. file /dir10/f against dir /dir1. The
      // root dir ("/") already ends with the separator.
      final String dirPrefix = dirName.endsWith(Path.SEPARATOR) ? dirName : dirName + Path.SEPARATOR;
      if (!file.equals(dirName) && !file.startsWith(dirPrefix)) {
        // file not under this snapshot root dir; every snapshot of the
        // same snapshottable dir shares this root, so skip them all
        break;
      }
      String snapname = s.getRoot().getFullPathName();
      if (dirName.equals(Path.SEPARATOR)) {
        // handle rootDir
        snapname += Path.SEPARATOR;
      }
      // file is known to start with dirName, so the remainder begins at
      // dirName.length() (the original indexOf scan always returned 0)
      snapname += file.substring(dirName.length());
      if (fsd.getFSNamesystem().getFileInfo(snapname, true) != null) {
        snaps.add(snapname);
      }
    }
  }
  return snaps;
}
Usage of org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot in the Apache Hadoop project: class FSDirStatAndListingOp, method getSnapshotsListing.
/**
 * Build a directory listing of all the snapshots of a snapshottable
 * directory, resuming just after the entry named by {@code startAfter}.
 */
private static DirectoryListing getSnapshotsListing(FSDirectory fsd, INodesInPath iip, byte[] startAfter) throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(iip.isDotSnapshotDir(), "%s does not end with %s", iip.getPath(), HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
  // strip the trailing null ".snapshot" component
  iip = iip.getParentINodesInPath();
  final String parentPath = iip.getPath();
  final INodeDirectory parentDir = INodeDirectory.valueOf(iip.getLastINode(), parentPath);
  final DirectorySnapshottableFeature feature = parentDir.getDirectorySnapshottableFeature();
  if (feature == null) {
    throw new SnapshotException("Directory is not a snapshottable directory: " + parentPath);
  }
  final ReadOnlyList<Snapshot> snapshotList = feature.getSnapshotList();
  // Locate the resume position: just past startAfter when it exists,
  // otherwise at the binary-search insertion point.
  int pos = ReadOnlyList.Util.binarySearch(snapshotList, startAfter);
  pos = (pos >= 0) ? pos + 1 : -pos - 1;
  // cap the page size at the configured ls limit
  final int pageSize = Math.min(snapshotList.size() - pos, fsd.getLsLimit());
  final HdfsFileStatus[] statuses = new HdfsFileStatus[pageSize];
  for (int i = 0; i < pageSize; i++) {
    final Snapshot.Root root = snapshotList.get(pos + i).getRoot();
    statuses[i] = createFileStatus(fsd, iip, root, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
  }
  // remaining entries = total - skipped - returned
  return new DirectoryListing(statuses, snapshotList.size() - pos - pageSize);
}
Usage of org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot in the Apache Hadoop project: class INodesInPath, method resolve.
/**
 * Resolve the given path {@code components} starting from
 * {@code startingDir}, recording the inode visited at each step. When a
 * ".snapshot" component is encountered under a snapshottable directory,
 * resolution switches into that snapshot's view of the tree, and the
 * ".snapshot" and snapshot-name components are merged into a single
 * component so that the components and inodes arrays stay in 1-to-1
 * correspondence.
 *
 * @param startingDir directory to resolve from; must match components[0]
 * @param components the path split into byte[] components
 * @param isRaw whether this is a raw path (flag carried through to the
 *              resulting INodesInPath)
 * @return an INodesInPath holding the resolved inodes (null entries where
 *         resolution failed), the possibly-rewritten components, and the
 *         snapshot id in effect
 */
static INodesInPath resolve(final INodeDirectory startingDir, byte[][] components, final boolean isRaw) {
Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);
INode curNode = startingDir;
// index of the component currently being resolved
int count = 0;
// number of inodes recorded so far
int inodeNum = 0;
INode[] inodes = new INode[components.length];
boolean isSnapshot = false;
// snapshot id in effect for the rest of the path; CURRENT_STATE_ID means
// the current (non-snapshot) state
int snapshotId = CURRENT_STATE_ID;
while (count < components.length && curNode != null) {
final boolean lastComp = (count == components.length - 1);
inodes[inodeNum++] = curNode;
final boolean isRef = curNode.isReference();
final boolean isDir = curNode.isDirectory();
final INodeDirectory dir = isDir ? curNode.asDirectory() : null;
if (!isRef && isDir && dir.isWithSnapshot()) {
//if the path is a non-snapshot path, update the latest snapshot.
if (!isSnapshot && shouldUpdateLatestId(dir.getDirectoryWithSnapshotFeature().getLastSnapshotId(), snapshotId)) {
snapshotId = dir.getDirectoryWithSnapshotFeature().getLastSnapshotId();
}
} else if (isRef && isDir && !lastComp) {
// NOTE(review): this comment appears truncated from the upstream
// source; the block handles a reference (renamed) directory in the
// middle of the path, deciding which snapshot id applies — compare
// with the recordModification method.
if (!isSnapshot) {
int dstSnapshotId = curNode.asReference().getDstSnapshotId();
if (// no snapshot in dst tree of rename
snapshotId == CURRENT_STATE_ID || (dstSnapshotId != CURRENT_STATE_ID && dstSnapshotId >= snapshotId)) {
// the above scenario: take the last snapshot id of the referenced
// directory itself, if it has the snapshot feature
int lastSnapshot = CURRENT_STATE_ID;
DirectoryWithSnapshotFeature sf;
if (curNode.isDirectory() && (sf = curNode.asDirectory().getDirectoryWithSnapshotFeature()) != null) {
lastSnapshot = sf.getLastSnapshotId();
}
snapshotId = lastSnapshot;
}
}
}
// stop when the whole path is consumed or a non-directory is reached
// before the last component (remaining inode slots stay null)
if (lastComp || !isDir) {
break;
}
final byte[] childName = components[++count];
// check if the next byte[] in components is for ".snapshot"
if (isDotSnapshotDir(childName) && dir.isSnapshottable()) {
isSnapshot = true;
// check if ".snapshot" is the last element of components
if (count == components.length - 1) {
break;
}
// Resolve snapshot root
final Snapshot s = dir.getSnapshot(components[count + 1]);
if (s == null) {
// snapshot not found
curNode = null;
} else {
curNode = s.getRoot();
snapshotId = s.getId();
}
// combine .snapshot & name into 1 component element to ensure
// 1-to-1 correspondence between components and inodes arrays is
// preserved so a path can be reconstructed.
byte[][] componentsCopy = Arrays.copyOf(components, components.length - 1);
componentsCopy[count] = DFSUtil.string2Bytes(DFSUtil.byteArray2PathString(components, count, 2));
// shift the remaining components after snapshot name
int start = count + 2;
System.arraycopy(components, start, componentsCopy, count + 1, components.length - start);
components = componentsCopy;
// reduce the inodes array to compensate for reduction in components
inodes = Arrays.copyOf(inodes, components.length);
} else {
// normal case, and also for resolving file/dir under snapshot root
curNode = dir.getChild(childName, isSnapshot ? snapshotId : CURRENT_STATE_ID);
}
}
return new INodesInPath(inodes, components, isRaw, isSnapshot, snapshotId);
}
Aggregations