Use of org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature in project hadoop by apache.
The class INodeDirectory, method undoRename4DstParent:
/**
 * Undo the rename operation for the dst tree, i.e., if the rename operation
 * (with OVERWRITE option) removes a file/dir from the dst tree, add it back
 * and delete possible record in the deleted list.
 */
public void undoRename4DstParent(final BlockStoragePolicySuite bsps,
    final INode deletedChild, int latestSnapshotId)
    throws QuotaExceededException {
  DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  assert sf != null : "Directory does not have snapshot feature";
  boolean removeDeletedChild =
      sf.getDiffs().removeChild(ListType.DELETED, deletedChild);
  int sid = removeDeletedChild ? Snapshot.CURRENT_STATE_ID : latestSnapshotId;
  final boolean added = addChild(deletedChild, true, sid);
  // update quota usage only if the add succeeded and the old child had not
  // been stored in the deleted list before
  if (added && !removeDeletedChild) {
    final QuotaCounts counts = deletedChild.computeQuotaUsage(bsps);
    addSpaceConsumed(counts, false);
  }
}
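A minimal call sketch with hypothetical names (dstParent for the INodeDirectory that lost the child, removedDst for the inode the OVERWRITE rename removed, bsps, and dstLatestSnapshotId); the real rollback path in the rename code is more involved, this only shows the shape of the call:

// Hypothetical rollback fragment: re-attach the overwritten child to its
// original parent; the method also reconciles the snapshot deleted-list and
// the space-consumed bookkeeping when needed.
dstParent.undoRename4DstParent(bsps, removedDst, dstLatestSnapshotId);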
Use of org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature in project hadoop by apache.
The class INodeDirectory, method addSnapshotFeature:
public DirectoryWithSnapshotFeature addSnapshotFeature(DirectoryDiffList diffs) {
  Preconditions.checkState(!isWithSnapshot(),
      "Directory is already with snapshot");
  DirectoryWithSnapshotFeature sf = new DirectoryWithSnapshotFeature(diffs);
  addFeature(sf);
  return sf;
}
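The feature is typically attached lazily, on the first snapshot-relevant change. A minimal sketch of that pattern, assuming a hypothetical dir variable of type INodeDirectory (the same lazy-creation idiom appears inside addChild below):

DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
if (sf == null) {
  // no feature yet: attach one; passing null is the same pattern addChild
  // uses below when it needs the feature for the first time
  sf = dir.addSnapshotFeature(null);
}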
Use of org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature in project hadoop by apache.
The class INodeDirectory, method addChild:
/**
 * Add a child inode to the directory.
 *
 * @param node INode to insert
 * @param setModTime set modification time for the parent node
 *                   not needed when replaying the addition and
 *                   the parent already has the proper mod time
 * @return false if the child with this name already exists;
 *         otherwise, return true;
 */
public boolean addChild(INode node, final boolean setModTime,
    final int latestSnapshotId) throws QuotaExceededException {
  final int low = searchChildren(node.getLocalNameBytes());
  if (low >= 0) {
    return false;
  }
  if (isInLatestSnapshot(latestSnapshotId)) {
    // create snapshot feature if necessary
    DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
    if (sf == null) {
      sf = this.addSnapshotFeature(null);
    }
    return sf.addChild(this, node, setModTime, latestSnapshotId);
  }
  addChild(node, low);
  if (setModTime) {
    // update modification time of the parent directory
    updateModificationTime(node.getModificationTime(), latestSnapshotId);
  }
  return true;
}
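The low >= 0 test reads as the standard java.util binary-search convention (searchChildren itself is not shown here): a non-negative result is the index of an existing child, a negative result encodes the insertion point as -(insertionPoint) - 1, which is why the negative low can be passed straight to the private addChild(node, low) overload. A standalone illustration of that convention, not HDFS code:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class BinarySearchConvention {
  public static void main(String[] args) {
    List<String> names = Arrays.asList("a", "c", "e"); // sorted child names
    System.out.println(Collections.binarySearch(names, "c")); // 1  -> already present
    int miss = Collections.binarySearch(names, "d");          // -3 -> absent
    System.out.println(-miss - 1);                            // 2  -> insertion point
  }
}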
Use of org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature in project hadoop by apache.
The class INodeDirectory, method cleanSubtree:
@Override
public void cleanSubtree(ReclaimContext reclaimContext, final int snapshotId,
    int priorSnapshotId) {
  DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  if (sf != null) {
    // there is snapshot data
    sf.cleanDirectory(reclaimContext, this, snapshotId, priorSnapshotId);
  } else {
    // there is no snapshot data
    if (priorSnapshotId == Snapshot.NO_SNAPSHOT_ID
        && snapshotId == Snapshot.CURRENT_STATE_ID) {
      // destroy the whole subtree and collect blocks that should be deleted
      destroyAndCollectBlocks(reclaimContext);
    } else {
      // make a copy of the quota delta
      QuotaCounts old = reclaimContext.quotaDelta().getCountsCopy();
      // process recursively down the subtree
      cleanSubtreeRecursively(reclaimContext, snapshotId, priorSnapshotId, null);
      QuotaCounts current = reclaimContext.quotaDelta().getCountsCopy();
      current.subtract(old);
      if (isQuotaSet()) {
        reclaimContext.quotaDelta().addQuotaDirUpdate(this, current);
      }
    }
  }
}
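The else branch uses a before/after pattern: copy the accumulated quota delta, recurse, copy again, and subtract so that only this subtree's contribution remains. A minimal sketch of that subtraction with hypothetical numbers; the Builder method names (nameSpace, storageSpace) are assumed from the QuotaCounts API, while subtract is the same call used above:

// delta accumulated before recursing into this directory (hypothetical values)
QuotaCounts before = new QuotaCounts.Builder()
    .nameSpace(10).storageSpace(1024L).build();
// delta accumulated after recursing
QuotaCounts after = new QuotaCounts.Builder()
    .nameSpace(14).storageSpace(5120L).build();
// subtracting leaves only this subtree's own contribution:
// 4 namespace items and 4096 bytes of storage space
after.subtract(before);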
Use of org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature in project hadoop by apache.
The class INodesInPath, method resolve:
static INodesInPath resolve(final INodeDirectory startingDir,
    byte[][] components, final boolean isRaw) {
  Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);
  INode curNode = startingDir;
  int count = 0;
  int inodeNum = 0;
  INode[] inodes = new INode[components.length];
  boolean isSnapshot = false;
  int snapshotId = CURRENT_STATE_ID;
  while (count < components.length && curNode != null) {
    final boolean lastComp = (count == components.length - 1);
    inodes[inodeNum++] = curNode;
    final boolean isRef = curNode.isReference();
    final boolean isDir = curNode.isDirectory();
    final INodeDirectory dir = isDir ? curNode.asDirectory() : null;
    if (!isRef && isDir && dir.isWithSnapshot()) {
      // if the path is a non-snapshot path, update the latest snapshot.
      if (!isSnapshot && shouldUpdateLatestId(
          dir.getDirectoryWithSnapshotFeature().getLastSnapshotId(),
          snapshotId)) {
        snapshotId = dir.getDirectoryWithSnapshotFeature().getLastSnapshotId();
      }
    } else if (isRef && isDir && !lastComp) {
      // The current node is a reference directory created by a rename. For a
      // non-snapshot path, check the reference's dstSnapshotId: if there is
      // no snapshot in the dst tree of the rename, or the dst snapshot is not
      // earlier than the snapshot id resolved so far, fall back to the latest
      // snapshot id recorded in the referred directory's own subtree.
      if (!isSnapshot) {
        int dstSnapshotId = curNode.asReference().getDstSnapshotId();
        if (snapshotId == CURRENT_STATE_ID // no snapshot in dst tree of rename
            || (dstSnapshotId != CURRENT_STATE_ID
                && dstSnapshotId >= snapshotId)) { // dst snapshot not earlier
          int lastSnapshot = CURRENT_STATE_ID;
          DirectoryWithSnapshotFeature sf;
          if (curNode.isDirectory() && (sf = curNode.asDirectory()
              .getDirectoryWithSnapshotFeature()) != null) {
            lastSnapshot = sf.getLastSnapshotId();
          }
          snapshotId = lastSnapshot;
        }
      }
    }
    if (lastComp || !isDir) {
      break;
    }
    final byte[] childName = components[++count];
    // check if the next byte[] in components is for ".snapshot"
    if (isDotSnapshotDir(childName) && dir.isSnapshottable()) {
      isSnapshot = true;
      // check if ".snapshot" is the last element of components
      if (count == components.length - 1) {
        break;
      }
      // Resolve snapshot root
      final Snapshot s = dir.getSnapshot(components[count + 1]);
      if (s == null) {
        // snapshot not found
        curNode = null;
      } else {
        curNode = s.getRoot();
        snapshotId = s.getId();
      }
      // combine .snapshot & name into 1 component element to ensure
      // 1-to-1 correspondence between components and inodes arrays is
      // preserved so a path can be reconstructed.
      byte[][] componentsCopy =
          Arrays.copyOf(components, components.length - 1);
      componentsCopy[count] = DFSUtil.string2Bytes(
          DFSUtil.byteArray2PathString(components, count, 2));
      // shift the remaining components after snapshot name
      int start = count + 2;
      System.arraycopy(components, start, componentsCopy, count + 1,
          components.length - start);
      components = componentsCopy;
      // reduce the inodes array to compensate for reduction in components
      inodes = Arrays.copyOf(inodes, components.length);
    } else {
      // normal case, and also for resolving file/dir under snapshot root
      curNode = dir.getChild(childName,
          isSnapshot ? snapshotId : CURRENT_STATE_ID);
    }
  }
  return new INodesInPath(inodes, components, isRaw, isSnapshot, snapshotId);
}
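To make the ".snapshot" component merging concrete, here is a small sketch for a hypothetical path /dir/.snapshot/s1/file, reusing the same DFSUtil calls as resolve; the layout shown in the final comment follows from the copy and shift above:

// components for the hypothetical path /dir/.snapshot/s1/file
byte[][] components = new byte[][] {
    DFSUtil.string2Bytes(""), DFSUtil.string2Bytes("dir"),
    DFSUtil.string2Bytes(".snapshot"), DFSUtil.string2Bytes("s1"),
    DFSUtil.string2Bytes("file") };
int count = 2; // index of the ".snapshot" component
byte[][] componentsCopy = Arrays.copyOf(components, components.length - 1);
// merge ".snapshot" and "s1" into a single ".snapshot/s1" component
componentsCopy[count] = DFSUtil.string2Bytes(
    DFSUtil.byteArray2PathString(components, count, 2));
// shift "file" left so components and inodes stay in 1-to-1 correspondence
System.arraycopy(components, count + 2, componentsCopy, count + 1,
    components.length - (count + 2));
// componentsCopy now represents { "", "dir", ".snapshot/s1", "file" }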