use of org.apache.hadoop.hdfs.protocol.SnapshotException in project hadoop by apache.
the class FSDirSnapshotOp method checkSnapshot.
/**
 * Check if the given INode (or one of its descendants) is snapshottable and
 * already has snapshots.
 *
 * @param target The given INode
 * @param snapshottableDirs The list of directories that are snapshottable
 *                          but do not have snapshots yet
 */
private static void checkSnapshot(INode target,
    List<INodeDirectory> snapshottableDirs) throws SnapshotException {
  if (target.isDirectory()) {
    INodeDirectory targetDir = target.asDirectory();
    DirectorySnapshottableFeature sf =
        targetDir.getDirectorySnapshottableFeature();
    if (sf != null) {
      if (sf.getNumSnapshots() > 0) {
        String fullPath = targetDir.getFullPathName();
        throw new SnapshotException("The directory " + fullPath
            + " cannot be deleted since " + fullPath
            + " is snapshottable and already has snapshots");
      } else if (snapshottableDirs != null) {
        snapshottableDirs.add(targetDir);
      }
    }
    for (INode child : targetDir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
      checkSnapshot(child, snapshottableDirs);
    }
  }
}
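For context, this guard is what rejects a recursive delete of a tree that still holds snapshots. A minimal client-side sketch, assuming a running HDFS cluster reachable through the default Configuration; the /data paths are hypothetical:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DeleteSnapshottableDirExample {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at a running HDFS NameNode.
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    Path dir = new Path("/data/projects");  // hypothetical path
    dfs.mkdirs(dir);
    dfs.allowSnapshot(dir);                 // requires admin privileges
    dfs.createSnapshot(dir, "s0");
    try {
      // The recursive delete of /data reaches checkSnapshot on the
      // NameNode, which finds /data/projects holding snapshot s0.
      dfs.delete(new Path("/data"), true);
    } catch (IOException e) {
      // The server-side SnapshotException surfaces here (possibly
      // still wrapped in a RemoteException).
      System.out.println("Delete rejected: " + e.getMessage());
    }
  }
}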
use of org.apache.hadoop.hdfs.protocol.SnapshotException in project hadoop by apache.
the class FSDirStatAndListingOp method getSnapshotsListing.
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, INodesInPath iip, byte[] startAfter) throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(iip.isDotSnapshotDir(),
      "%s does not end with %s",
      iip.getPath(), HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
  // drop off the null .snapshot component
  iip = iip.getParentINodesInPath();
  final String dirPath = iip.getPath();
  final INode node = iip.getLastINode();
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf =
      dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(fsd, iip, sRoot,
        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
  }
  return new DirectoryListing(listing,
      snapshots.size() - skipSize - numOfListing);
}
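From a client, this listing is reached through the virtual .snapshot directory. A minimal sketch, assuming a running cluster and a hypothetical snapshottable directory /data/projects:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListSnapshotsExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Listing the virtual ".snapshot" child of a snapshottable directory
    // is served by getSnapshotsListing on the NameNode.
    for (FileStatus st : fs.listStatus(new Path("/data/projects/.snapshot"))) {
      System.out.println(st.getPath().getName());  // one line per snapshot
    }
  }
}

The client fetches this listing in pages of at most fsd.getLsLimit() entries, resuming from the last returned name, which is what the binary-search skip over startAfter supports.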
use of org.apache.hadoop.hdfs.protocol.SnapshotException in project hadoop by apache.
the class FSNamesystem method listCorruptFileBlocksWithSnapshot.
/**
 * Get the list of corrupt blocks and corresponding full file path
 * including snapshots in given snapshottable directories.
 * @param path Restrict corrupt files to this portion of namespace.
 * @param snapshottableDirs Snapshottable directories. Passing in null
 *                          will return only corrupt blocks outside of
 *                          snapshots.
 * @param cookieTab Support for continuation; cookieTab tells where
 *                  to start from.
 * @return a list in which each entry describes a corrupt file/block
 * @throws IOException
 */
List<String> listCorruptFileBlocksWithSnapshot(String path,
    List<String> snapshottableDirs, String[] cookieTab) throws IOException {
  final Collection<CorruptFileBlockInfo> corruptFileBlocks =
      listCorruptFileBlocks(path, cookieTab);
  List<String> list = new ArrayList<String>();
  // Precalculate the snapshottableFeature list once, so each corrupt
  // block is checked against every requested snapshot root below.
  List<DirectorySnapshottableFeature> lsf = new ArrayList<>();
  if (snapshottableDirs != null) {
    for (String snap : snapshottableDirs) {
      final INode isnap = getFSDirectory().getINode(snap, DirOp.READ_LINK);
      final DirectorySnapshottableFeature sf =
          isnap.asDirectory().getDirectorySnapshottableFeature();
      if (sf == null) {
        throw new SnapshotException(
            "Directory is not a snapshottable directory: " + snap);
      }
      lsf.add(sf);
    }
  }
  for (CorruptFileBlockInfo c : corruptFileBlocks) {
    if (getFileInfo(c.path, true) != null) {
      list.add(c.toString());
    }
    final Collection<String> snaps =
        FSDirSnapshotOp.getSnapshotFiles(getFSDirectory(), lsf, c.path);
    if (snaps != null) {
      for (String snap : snaps) {
        // follow the syntax of CorruptFileBlockInfo#toString()
        list.add(c.block.getBlockName() + "\t" + snap);
      }
    }
  }
  return list;
}
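The snapshot-aware variant is internal to the NameNode (it appears to back fsck's snapshot-inclusive corrupt-file listing); the plain corrupt-block listing is available to clients. A minimal sketch, with /data as a hypothetical path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class CorruptFileBlocksExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Iterates the paths of files with corrupt blocks under /data; the
    // iterator refetches with a continuation cookie across NameNode
    // calls, mirroring the cookieTab mechanism above.
    RemoteIterator<Path> it = fs.listCorruptFileBlocks(new Path("/data"));
    while (it.hasNext()) {
      System.out.println("corrupt file: " + it.next());
    }
  }
}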
use of org.apache.hadoop.hdfs.protocol.SnapshotException in project hadoop by apache.
the class DirectorySnapshottableFeature method renameSnapshot.
/**
 * Rename a snapshot.
 * @param path The directory path where the snapshot was taken. Used for
 *             generating exception messages.
 * @param oldName Old name of the snapshot
 * @param newName New name the snapshot will be renamed to
 * @throws SnapshotException Thrown when either the snapshot with the old
 *                           name does not exist or a snapshot with the
 *                           new name already exists
 */
public void renameSnapshot(String path, String oldName, String newName)
    throws SnapshotException {
  if (newName.equals(oldName)) {
    return;
  }
  final int indexOfOld = searchSnapshot(DFSUtil.string2Bytes(oldName));
  if (indexOfOld < 0) {
    throw new SnapshotException("The snapshot " + oldName
        + " does not exist for directory " + path);
  } else {
    final byte[] newNameBytes = DFSUtil.string2Bytes(newName);
    int indexOfNew = searchSnapshot(newNameBytes);
    if (indexOfNew >= 0) {
      throw new SnapshotException("The snapshot " + newName
          + " already exists for directory " + path);
    }
    // remove the one with old name from snapshotsByNames
    Snapshot snapshot = snapshotsByNames.remove(indexOfOld);
    final INodeDirectory ssRoot = snapshot.getRoot();
    ssRoot.setLocalName(newNameBytes);
    // searchSnapshot returned (-insertionPoint - 1) for the missing new
    // name; recover the insertion point and adjust for the removal above.
    indexOfNew = -indexOfNew - 1;
    if (indexOfNew <= indexOfOld) {
      snapshotsByNames.add(indexOfNew, snapshot);
    } else {
      // indexOfNew > indexOfOld
      snapshotsByNames.add(indexOfNew - 1, snapshot);
    }
  }
}
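Clients reach this method through DistributedFileSystem#renameSnapshot. A minimal sketch, assuming a running cluster; the directory and snapshot names are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RenameSnapshotExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    Path dir = new Path("/data/projects");  // hypothetical snapshottable dir
    dfs.allowSnapshot(dir);
    dfs.createSnapshot(dir, "daily-old");
    // Reaches DirectorySnapshottableFeature.renameSnapshot on the
    // NameNode; fails with a SnapshotException if "daily-old" is
    // missing or "daily-new" already exists.
    dfs.renameSnapshot(dir, "daily-old", "daily-new");
  }
}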
use of org.apache.hadoop.hdfs.protocol.SnapshotException in project hive by apache.
the class SnapshotUtils method isSnapshotAvailable.
/**
 * Checks whether a given snapshot exists or not.
 * @param dfs DistributedFileSystem.
 * @param path path of the snapshottable directory.
 * @param snapshotPrefix snapshot name prefix.
 * @param snapshotName name of the snapshot.
 * @param conf Hive configuration.
 * @return true if the snapshot exists.
 * @throws IOException in case of any error.
 */
public static boolean isSnapshotAvailable(DistributedFileSystem dfs, Path path,
    String snapshotPrefix, String snapshotName, HiveConf conf)
    throws IOException {
  AtomicBoolean isSnapAvlb = new AtomicBoolean(false);
  Retryable retryable = Retryable.builder()
      .withHiveConf(conf)
      .withRetryOnException(IOException.class)
      .withFailOnException(SnapshotException.class)
      .build();
  try {
    retryable.executeCallable(() -> {
      isSnapAvlb.set(dfs.exists(new Path(path,
          HdfsConstants.DOT_SNAPSHOT_DIR + "/" + snapshotPrefix + snapshotName)));
      LOG.debug("Snapshot for path {} is {}", path,
          isSnapAvlb.get() ? "available" : "unavailable");
      return null;
    });
  } catch (Exception e) {
    throw new SnapshotException(
        "Failed to check if snapshot is available on " + path, e);
  }
  return isSnapAvlb.get();
}
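Stripped of Hive's Retryable wrapper, the check reduces to an existence test under the directory's .snapshot subtree. A minimal plain-HDFS sketch, with a hypothetical path and snapshot name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SnapshotExistsExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    Path dir = new Path("/warehouse/tbl");  // hypothetical directory
    String snapshotName = "repl-bootstrap"; // hypothetical snapshot name
    // A snapshot named N of directory D lives at D/.snapshot/N, so a
    // plain exists() on that path answers the availability question.
    boolean exists = dfs.exists(
        new Path(dir, HdfsConstants.DOT_SNAPSHOT_DIR + "/" + snapshotName));
    System.out.println("snapshot exists: " + exists);
  }
}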