Use of alluxio.exception.DirectoryNotEmptyException in the project alluxio by Alluxio.
The class DefaultFileSystemMaster, method unmountInternal.
/**
 * Unmounts a UFS path previously mounted onto an Alluxio path.
 *
 * This method does not delete blocks. Instead, it adds the blocks to the passed-in block
 * deletion context so that the blocks can be deleted after the inode deletion journal entry has
 * been written. We cannot delete blocks earlier because the inode deletion may fail, leaving us
 * with inode containing deleted blocks.
 *
 * @param rpcContext the rpc context
 * @param inodePath the Alluxio path to unmount, must be a mount point
 * @throws FileDoesNotExistException if the Alluxio path does not exist
 * @throws InvalidPathException if the path is not an existing mount point, or is the root mount
 */
private void unmountInternal(RpcContext rpcContext, LockedInodePath inodePath)
    throws InvalidPathException, FileDoesNotExistException, IOException {
  if (!inodePath.fullPathExists()) {
    throw new FileDoesNotExistException(
        "Failed to unmount: Path " + inodePath.getUri() + " does not exist");
  }
  MountInfo mountInfo = mMountTable.getMountTable().get(inodePath.getUri().getPath());
  if (mountInfo == null) {
    throw new InvalidPathException("Failed to unmount " + inodePath.getUri() + ". Please ensure"
        + " the path is an existing mount point.");
  }
  // Stop active sync for this mount before removing it, so no sync task races with the unmount.
  mSyncManager.stopSyncForMount(mountInfo.getMountId());
  if (!mMountTable.delete(rpcContext, inodePath.getUri(), true)) {
    throw new InvalidPathException("Failed to unmount " + inodePath.getUri() + ". Please ensure"
        + " the path is an existing mount point and not root.");
  }
  try {
    // Use the internal delete API, setting {@code alluxioOnly} to true to prevent the delete
    // operations from being persisted in the UFS.
    deleteInternal(rpcContext, inodePath, DeleteContext.mergeFrom(
        DeletePOptions.newBuilder().setRecursive(true).setAlluxioOnly(true)), true);
  } catch (DirectoryNotEmptyException e) {
    // Recursive deletes never throw DirectoryNotEmptyException; chain the cause so the full
    // stack trace is preserved if this invariant is ever violated.
    throw new RuntimeException(String.format(
        "We should never see this exception because %s should never be thrown when recursive "
            + "is true.", e.getClass()), e);
  }
}
Use of alluxio.exception.DirectoryNotEmptyException in the project alluxio by Alluxio.
The class DefaultFileSystemMaster, method deleteInternal.
/**
 * Implements file deletion.
 * <p>
 * This method does not delete blocks. Instead, it returns deleted inodes so that their blocks can
 * be deleted after the inode deletion journal entry has been written. We cannot delete blocks
 * earlier because the inode deletion may fail, leaving us with inode containing deleted blocks.
 *
 * This method is used at:
 * (1) delete()
 * (2) unmount()
 * (3) metadata sync (when a file/dir has been removed in UFS)
 * Permission check should be skipped in (2) and (3).
 *
 * @param rpcContext the rpc context
 * @param inodePath the file {@link LockedInodePath}
 * @param deleteContext the method options
 * @param bypassPermCheck whether the permission check has been done before entering this call
 */
@VisibleForTesting
public void deleteInternal(RpcContext rpcContext, LockedInodePath inodePath, DeleteContext deleteContext, boolean bypassPermCheck) throws FileDoesNotExistException, IOException, DirectoryNotEmptyException, InvalidPathException {
Preconditions.checkState(inodePath.getLockPattern() == LockPattern.WRITE_EDGE);
// NOTE(review): a crash after UFS objects are deleted but before the deletes are
// journaled will result in an inconsistency between Alluxio and UFS.
// Deleting a path that no longer exists is a no-op, not an error.
if (!inodePath.fullPathExists()) {
return;
}
long opTimeMs = System.currentTimeMillis();
Inode inode = inodePath.getInode();
if (inode == null) {
return;
}
boolean recursive = deleteContext.getOptions().getRecursive();
if (inode.isDirectory() && !recursive && mInodeStore.hasChildren(inode.asDirectory())) {
// A non-recursive delete of a non-empty directory is rejected.
throw new DirectoryNotEmptyException(ExceptionMessage.DELETE_NONEMPTY_DIRECTORY_NONRECURSIVE, inode.getName());
}
if (mInodeTree.isRootId(inode.getId())) {
// The root cannot be deleted.
throw new InvalidPathException(ExceptionMessage.DELETE_ROOT_DIRECTORY.getMessage());
}
// Inodes for which deletion will be attempted
List<Pair<AlluxioURI, LockedInodePath>> inodesToDelete;
if (inode.isDirectory()) {
inodesToDelete = new ArrayList<>((int) inode.asDirectory().getChildCount());
} else {
inodesToDelete = new ArrayList<>(1);
}
// Add root of sub-tree to delete
inodesToDelete.add(new Pair<>(inodePath.getUri(), inodePath));
// Inodes that are not safe for recursive deletes
// Issues#15266: This can be replaced by a Trie<Long> using prefix matching
Set<Long> unsafeInodes = new HashSet<>();
// Unsafe parents due to containing a child which cannot be deleted
// are initially contained in a separate set, allowing their children
// to be deleted for which the user has permissions
Set<Long> unsafeParentInodes = new HashSet<>();
// Alluxio URIs (and the reason for failure) which could not be deleted
List<Pair<String, String>> failedUris = new ArrayList<>();
try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
// Descendants are walked top-down: we first see a parent, then all its children.
for (LockedInodePath childPath : descendants) {
if (bypassPermCheck) {
inodesToDelete.add(new Pair<>(mInodeTree.getPath(childPath.getInode()), childPath));
} else {
try {
// Because we first see the parent then all its children
if (unsafeInodes.contains(childPath.getAncestorInode().getId())) {
// We still need to add this child to the unsafe set because we are going to
// walk over this child's children.
unsafeInodes.add(childPath.getInode().getId());
continue;
}
mPermissionChecker.checkPermission(Mode.Bits.WRITE, childPath);
inodesToDelete.add(new Pair<>(mInodeTree.getPath(childPath.getInode()), childPath));
} catch (AccessControlException e) {
// If we do not have permission to delete the inode, then add to unsafe set
Inode inodeToDelete = childPath.getInode();
unsafeInodes.add(inodeToDelete.getId());
// Propagate 'unsafe-ness' to parent as one of its descendants can't be deleted
unsafeParentInodes.add(inodeToDelete.getParentId());
// All this node's children will be skipped in the failure message
failedUris.add(new Pair<>(childPath.toString(), e.getMessage()));
}
}
}
// Parents with an undeletable child are now unsafe too; merged only after the walk so
// their other (permitted) children were still collected above.
unsafeInodes.addAll(unsafeParentInodes);
// Prepare to delete persisted inodes
UfsDeleter ufsDeleter = NoopUfsDeleter.INSTANCE;
if (!deleteContext.getOptions().getAlluxioOnly()) {
ufsDeleter = new SafeUfsDeleter(mMountTable, mInodeStore, inodesToDelete, deleteContext.getOptions().build());
}
// Pre-process the delete list bottom-up (children before parents); for each persisted
// inode, the corresponding UFS object or mount point is handled here as well.
for (int i = inodesToDelete.size() - 1; i >= 0; i--) {
rpcContext.throwIfCancelled();
Pair<AlluxioURI, LockedInodePath> inodePairToDelete = inodesToDelete.get(i);
AlluxioURI alluxioUriToDelete = inodePairToDelete.getFirst();
Inode inodeToDelete = inodePairToDelete.getSecond().getInode();
String failureReason = null;
if (unsafeInodes.contains(inodeToDelete.getId())) {
failureReason = ExceptionMessage.DELETE_FAILED_DIR_NONEMPTY.getMessage();
} else if (inodeToDelete.isPersisted()) {
// TODO(calvin): Add tests (ALLUXIO-1831)
if (mMountTable.isMountPoint(alluxioUriToDelete)) {
mMountTable.delete(rpcContext, alluxioUriToDelete, true);
} else {
if (!deleteContext.getOptions().getAlluxioOnly()) {
try {
checkUfsMode(alluxioUriToDelete, OperationType.WRITE);
// Attempt to delete node if all children were deleted successfully
ufsDeleter.delete(alluxioUriToDelete, inodeToDelete);
} catch (AccessControlException | IOException e) {
// In case ufs is not writable, we will still attempt to delete other entries
// if any as they may be from a different mount point
LOG.warn("Failed to delete {}: {}", alluxioUriToDelete, e.toString());
failureReason = e.getMessage();
}
}
}
}
if (failureReason == null) {
if (inodeToDelete.isFile()) {
long fileId = inodeToDelete.getId();
// Remove the file from the set of files to persist.
mPersistRequests.remove(fileId);
// Cancel any ongoing jobs.
PersistJob job = mPersistJobs.get(fileId);
if (job != null) {
job.setCancelState(PersistJob.CancelState.TO_BE_CANCELED);
}
}
} else {
unsafeInodes.add(inodeToDelete.getId());
// Propagate 'unsafe-ness' to parent as one of its descendants can't be deleted
unsafeInodes.add(inodeToDelete.getParentId());
failedUris.add(new Pair<>(alluxioUriToDelete.toString(), failureReason));
// Something went wrong with this path so it cannot be removed normally
// Remove the path from further processing
inodesToDelete.set(i, null);
}
}
// Deleting a sync point also tears down its active-sync configuration.
if (mSyncManager.isSyncPoint(inodePath.getUri())) {
mSyncManager.stopSyncAndJournal(RpcContext.NOOP, inodePath.getUri());
}
// Delete Inodes from children to parents
for (int i = inodesToDelete.size() - 1; i >= 0; i--) {
Pair<AlluxioURI, LockedInodePath> delInodePair = inodesToDelete.get(i);
// The entry is null because an error is met from the pre-processing
if (delInodePair == null) {
continue;
}
LockedInodePath tempInodePath = delInodePair.getSecond();
MountTable.Resolution resolution = mMountTable.resolve(tempInodePath.getUri());
mInodeTree.deleteInode(rpcContext, tempInodePath, opTimeMs);
if (deleteContext.getOptions().getAlluxioOnly()) {
// Alluxio-only deletes skip the UFS, so count them as saved UFS operations.
Metrics.getUfsOpsSavedCounter(resolution.getUfsMountPointUri(), Metrics.UFSOps.DELETE_FILE).inc();
}
}
// Surface every path that could not be deleted in a single aggregated exception.
if (!failedUris.isEmpty()) {
throw new FailedPreconditionException(buildDeleteFailureMessage(failedUris));
}
}
// Only reached when no entry failed, so the list contains no null placeholders.
Metrics.PATHS_DELETED.inc(inodesToDelete.size());
}
Aggregations