Use of alluxio.master.file.meta.LockedInodePath in the Alluxio project.
The class FileSystemMaster, method rename.
/**
 * Renames a file to a destination.
 * <p>
 * This operation requires users to have
 * {@link Mode.Bits#WRITE} permission on the parent of the src path, and
 * {@link Mode.Bits#WRITE} permission on the parent of the dst path.
 *
 * @param srcPath the source path to rename
 * @param dstPath the destination path to rename the file to
 * @param options method options
 * @throws FileDoesNotExistException if a non-existent file is encountered
 * @throws InvalidPathException if an invalid path is encountered
 * @throws IOException if an I/O error occurs
 * @throws AccessControlException if permission checking fails
 * @throws FileAlreadyExistsException if the file already exists
 */
public void rename(AlluxioURI srcPath, AlluxioURI dstPath, RenameOptions options) throws FileAlreadyExistsException, FileDoesNotExistException, InvalidPathException, IOException, AccessControlException {
  Metrics.RENAME_PATH_OPS.inc();
  // Lock the source path under WRITE and the destination under READ; modify operations on the
  // parent inodes are thread safe, so a WRITE lock on the destination is not required.
  try (JournalContext journal = createJournalContext();
      InodePathPair lockedPair = mInodeTree.lockInodePathPair(srcPath, InodeTree.LockMode.WRITE, dstPath, InodeTree.LockMode.READ)) {
    LockedInodePath lockedSrc = lockedPair.getFirst();
    LockedInodePath lockedDst = lockedPair.getSecond();
    // The caller needs WRITE access on both parents, and both paths must live under
    // writable mount points.
    mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, lockedSrc);
    mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, lockedDst);
    mMountTable.checkUnderWritableMountPoint(srcPath);
    mMountTable.checkUnderWritableMountPoint(dstPath);
    // Perform the rename and record it in the journal.
    renameAndJournal(lockedSrc, lockedDst, options, journal);
    LOG.debug("Renamed {} to {}", srcPath, dstPath);
  }
}
Use of alluxio.master.file.meta.LockedInodePath in the Alluxio project.
The class FileSystemMasterIntegrationTest, method ttlCreateFile.
@Test
public void ttlCreateFile() throws Exception {
  mFsMaster.createDirectory(new AlluxioURI("/testFolder"), CreateDirectoryOptions.defaults());
  long ttlMs = 100;
  CreateFileOptions createOptions = CreateFileOptions.defaults().setTtl(ttlMs);
  createOptions.setTtlAction(TtlAction.FREE);
  // Create the file directly through the inode tree while holding a WRITE lock on its path.
  try (LockedInodePath lockedPath = mInodeTree.lockInodePath(new AlluxioURI("/testFolder/testFile"), InodeTree.LockMode.WRITE)) {
    mFsMaster.createFileInternal(lockedPath, createOptions);
  }
  // The created file must report both the configured TTL and the FREE TTL action.
  FileInfo info = mFsMaster.getFileInfo(mFsMaster.getFileId(new AlluxioURI("/testFolder/testFile")));
  Assert.assertEquals(ttlMs, info.getTtl());
  Assert.assertEquals(TtlAction.FREE, info.getTtlAction());
}
Use of alluxio.master.file.meta.LockedInodePath in the Alluxio project.
The class DefaultFileSystemMaster, method renameInternal.
/**
 * Renames a file to a destination.
 * <p>
 * The caller must supply already-locked paths for both the source and the destination.
 *
 * @param rpcContext the rpc context
 * @param srcInodePath the source path to rename
 * @param dstInodePath the destination path to rename the file to
 * @param context method options
 * @throws FileDoesNotExistException if the source path does not exist
 * @throws InvalidPathException if the rename is structurally invalid (renaming the root,
 *         renaming across mounts, onto a mount point, or into the source's own subtree)
 * @throws FileAlreadyExistsException if the destination path already exists
 * @throws IOException if an I/O error occurs
 * @throws AccessControlException if permission checking fails
 */
private void renameInternal(RpcContext rpcContext, LockedInodePath srcInodePath, LockedInodePath dstInodePath, RenameContext context) throws InvalidPathException, FileDoesNotExistException, FileAlreadyExistsException, IOException, AccessControlException {
  if (!srcInodePath.fullPathExists()) {
    throw new FileDoesNotExistException(ExceptionMessage.PATH_DOES_NOT_EXIST.getMessage(srcInodePath.getUri()));
  }
  Inode srcInode = srcInodePath.getInode();
  // Renaming a path to itself is a no-op.
  if (srcInodePath.getUri().equals(dstInodePath.getUri())) {
    return;
  }
  // Renaming the root is not allowed.
  if (srcInodePath.getUri().isRoot()) {
    throw new InvalidPathException(ExceptionMessage.ROOT_CANNOT_BE_RENAMED.getMessage());
  }
  if (dstInodePath.getUri().isRoot()) {
    throw new InvalidPathException(ExceptionMessage.RENAME_CANNOT_BE_TO_ROOT.getMessage());
  }
  // Renaming across mount points is not allowed.
  String srcMount = mMountTable.getMountPoint(srcInodePath.getUri());
  String dstMount = mMountTable.getMountPoint(dstInodePath.getUri());
  if ((srcMount == null && dstMount != null) || (srcMount != null && dstMount == null) || (srcMount != null && dstMount != null && !srcMount.equals(dstMount))) {
    throw new InvalidPathException(ExceptionMessage.RENAME_CANNOT_BE_ACROSS_MOUNTS.getMessage(srcInodePath.getUri(), dstInodePath.getUri()));
  }
  // Renaming onto a mount point is not allowed.
  if (mMountTable.isMountPoint(dstInodePath.getUri())) {
    throw new InvalidPathException(ExceptionMessage.RENAME_CANNOT_BE_ONTO_MOUNT_POINT.getMessage(dstInodePath.getUri()));
  }
  // The source must not be a prefix of the destination (cannot rename into own subtree).
  if (PathUtils.hasPrefix(dstInodePath.getUri().getPath(), srcInodePath.getUri().getPath())) {
    throw new InvalidPathException(ExceptionMessage.RENAME_CANNOT_BE_TO_SUBDIRECTORY.getMessage(srcInodePath.getUri(), dstInodePath.getUri()));
  }
  // Get the inodes of the src and dst parents.
  Inode srcParentInode = srcInodePath.getParentInodeDirectory();
  if (!srcParentInode.isDirectory()) {
    throw new InvalidPathException(ExceptionMessage.PATH_MUST_HAVE_VALID_PARENT.getMessage(srcInodePath.getUri()));
  }
  Inode dstParentInode = dstInodePath.getParentInodeDirectory();
  if (!dstParentInode.isDirectory()) {
    throw new InvalidPathException(ExceptionMessage.PATH_MUST_HAVE_VALID_PARENT.getMessage(dstInodePath.getUri()));
  }
  // Make sure destination path does not exist
  if (dstInodePath.fullPathExists()) {
    throw new FileAlreadyExistsException(String.format("Cannot rename because destination already exists. src: %s dst: %s", srcInodePath.getUri(), dstInodePath.getUri()));
  }
  // Now we remove srcInode from its parent and insert it into dstPath's parent
  renameInternal(rpcContext, srcInodePath, dstInodePath, false, context);
  // Optionally schedule async persistence for a renamed, not-yet-persisted file; this supports
  // frameworks that use rename as a commit operation.
  if (context.getPersist() && srcInode.isFile() && !srcInode.isPersisted() && shouldPersistPath(dstInodePath.toString())) {
    LOG.debug("Schedule Async Persist on rename for File {}", srcInodePath);
    schedulePersistOnRename(rpcContext, srcInode);
  }
  // If a directory is being renamed with persist on rename, attempt to persist children
  if (srcInode.isDirectory() && context.getPersist() && shouldPersistPath(dstInodePath.toString())) {
    LOG.debug("Schedule Async Persist on rename for Dir: {}", dstInodePath);
    try (LockedInodePathList descendants = mInodeTree.getDescendants(srcInodePath)) {
      for (LockedInodePath childPath : descendants) {
        Inode childInode = childPath.getInode();
        // TODO(apc999): Resolve the child path legitimately
        if (childInode.isFile() && !childInode.isPersisted() && shouldPersistPath(childPath.toString().substring(srcInodePath.toString().length()))) {
          LOG.debug("Schedule Async Persist on rename for Child File: {}", childPath);
          schedulePersistOnRename(rpcContext, childInode);
        }
      }
    }
  }
}

/**
 * Marks the given file inode as TO_BE_PERSISTED and enqueues an async persist request for it.
 * Shared by the file and per-child persist-on-rename paths above.
 *
 * @param rpcContext the rpc context
 * @param inode the file inode to schedule for persistence (must be a file)
 */
private void schedulePersistOnRename(RpcContext rpcContext, Inode inode) {
  mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder().setId(inode.getId()).setPersistenceState(PersistenceState.TO_BE_PERSISTED.name()).build());
  long shouldPersistTime = inode.asFile().getShouldPersistTime();
  // NO_AUTO_PERSIST means persist immediately (zero wait); otherwise compute the wait time.
  long persistenceWaitTime = shouldPersistTime == Constants.NO_AUTO_PERSIST ? 0 : getPersistenceWaitTime(shouldPersistTime);
  mPersistRequests.put(inode.getId(), new alluxio.time.ExponentialTimer(ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS), ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS), persistenceWaitTime, ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS)));
}
Use of alluxio.master.file.meta.LockedInodePath in the Alluxio project.
The class DefaultFileSystemMaster, method stopSync.
@Override
public void stopSync(AlluxioURI syncPoint) throws IOException, InvalidPathException, AccessControlException {
  try (RpcContext rpcContext = createRpcContext()) {
    // Determine whether the caller is a super user; that decides when the sync is stopped.
    boolean calledBySuperUser;
    try {
      mPermissionChecker.checkSuperUser();
      calledBySuperUser = true;
    } catch (AccessControlException e) {
      calledBySuperUser = false;
    }
    if (calledBySuperUser) {
      // Stop sync w/o acquiring an inode lock to terminate an initial full scan (if running)
      // TODO(AM): Remove once we don't require a write lock on the sync point during a full sync
      mSyncManager.stopSyncAndJournal(rpcContext, syncPoint);
    }
    LockingScheme scheme = new LockingScheme(syncPoint, LockPattern.READ, false);
    try (LockedInodePath lockedPath = mInodeTree.lockInodePath(scheme.getPath(), scheme.getPattern());
        FileSystemMasterAuditContext auditContext = createAuditContext("stopSync", syncPoint, null, lockedPath.getParentInodeOrNull())) {
      // Verify write permission on the parent; record the denial in the audit log on failure.
      try {
        mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, lockedPath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        throw e;
      }
      if (!calledBySuperUser) {
        // Non-super users stop the sync only here, while holding the inode lock.
        mSyncManager.stopSyncAndJournal(rpcContext, syncPoint);
      }
      auditContext.setSucceeded(true);
    }
  }
}
Use of alluxio.master.file.meta.LockedInodePath in the Alluxio project.
The class DefaultFileSystemMaster, method deleteInternal.
/**
 * Implements file deletion.
 * <p>
 * This method does not delete blocks. Instead, it returns deleted inodes so that their blocks can
 * be deleted after the inode deletion journal entry has been written. We cannot delete blocks
 * earlier because the inode deletion may fail, leaving us with inode containing deleted blocks.
 *
 * @param rpcContext the rpc context
 * @param inodePath the file {@link LockedInodePath}; the caller must hold a WRITE_EDGE lock on it
 * @param deleteContext the method options
 * @throws FileDoesNotExistException if a non-existent file is encountered
 * @throws IOException if an I/O error occurs
 * @throws DirectoryNotEmptyException if a non-empty directory is deleted non-recursively
 * @throws InvalidPathException if the root directory is being deleted
 */
@VisibleForTesting
public void deleteInternal(RpcContext rpcContext, LockedInodePath inodePath, DeleteContext deleteContext) throws FileDoesNotExistException, IOException, DirectoryNotEmptyException, InvalidPathException {
  // A WRITE_EDGE lock is required so the subtree cannot change while it is being deleted.
  Preconditions.checkState(inodePath.getLockPattern() == LockPattern.WRITE_EDGE);
  // No-op if the path is already gone; journaling a delete for a path that was never
  // journaled will result in an inconsistency between Alluxio and UFS.
  if (!inodePath.fullPathExists()) {
    return;
  }
  long opTimeMs = System.currentTimeMillis();
  Inode inode = inodePath.getInode();
  if (inode == null) {
    return;
  }
  boolean recursive = deleteContext.getOptions().getRecursive();
  if (inode.isDirectory() && !recursive && mInodeStore.hasChildren(inode.asDirectory())) {
    // Deleting a non-empty directory without the recursive flag is an error.
    throw new DirectoryNotEmptyException(ExceptionMessage.DELETE_NONEMPTY_DIRECTORY_NONRECURSIVE, inode.getName());
  }
  if (mInodeTree.isRootId(inode.getId())) {
    // The root cannot be deleted.
    throw new InvalidPathException(ExceptionMessage.DELETE_ROOT_DIRECTORY.getMessage());
  }
  // Inodes for which deletion will be attempted
  List<Pair<AlluxioURI, LockedInodePath>> inodesToDelete = new ArrayList<>();
  // Add root of sub-tree to delete
  inodesToDelete.add(new Pair<>(inodePath.getUri(), inodePath));
  try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
    for (LockedInodePath childPath : descendants) {
      inodesToDelete.add(new Pair<>(mInodeTree.getPath(childPath.getInode()), childPath));
    }
    // Prepare to delete persisted inodes
    UfsDeleter ufsDeleter = NoopUfsDeleter.INSTANCE;
    if (!deleteContext.getOptions().getAlluxioOnly()) {
      ufsDeleter = new SafeUfsDeleter(mMountTable, mInodeStore, inodesToDelete, deleteContext.getOptions().build());
    }
    // Inodes to delete from tree after attempting to delete from UFS
    List<Pair<AlluxioURI, LockedInodePath>> revisedInodesToDelete = new ArrayList<>();
    // Inodes that are not safe for recursive deletes
    Set<Long> unsafeInodes = new HashSet<>();
    // Alluxio URIs (and the reason for failure) which could not be deleted
    List<Pair<String, String>> failedUris = new ArrayList<>();
    // Iterate in reverse so descendants are visited before their parents; for each persisted
    // file, we deal with the checkpoints and blocks as well.
    for (int i = inodesToDelete.size() - 1; i >= 0; i--) {
      rpcContext.throwIfCancelled();
      Pair<AlluxioURI, LockedInodePath> inodePairToDelete = inodesToDelete.get(i);
      AlluxioURI alluxioUriToDelete = inodePairToDelete.getFirst();
      Inode inodeToDelete = inodePairToDelete.getSecond().getInode();
      String failureReason = null;
      if (unsafeInodes.contains(inodeToDelete.getId())) {
        // A descendant of this inode failed to delete, so this directory cannot be removed.
        failureReason = ExceptionMessage.DELETE_FAILED_DIR_NONEMPTY.getMessage();
      } else if (inodeToDelete.isPersisted()) {
        // TODO(calvin): Add tests (ALLUXIO-1831)
        if (mMountTable.isMountPoint(alluxioUriToDelete)) {
          mMountTable.delete(rpcContext, alluxioUriToDelete, true);
        } else {
          if (!deleteContext.getOptions().getAlluxioOnly()) {
            try {
              checkUfsMode(alluxioUriToDelete, OperationType.WRITE);
              // Attempt to delete node if all children were deleted successfully
              ufsDeleter.delete(alluxioUriToDelete, inodeToDelete);
            } catch (AccessControlException | IOException e) {
              // In case ufs is not writable, we will still attempt to delete other entries
              // if any as they may be from a different mount point
              LOG.warn("Failed to delete {}: {}", alluxioUriToDelete, e.toString());
              failureReason = e.getMessage();
            }
          }
        }
      }
      if (failureReason == null) {
        if (inodeToDelete.isFile()) {
          long fileId = inodeToDelete.getId();
          // Remove the file from the set of files to persist.
          mPersistRequests.remove(fileId);
          // Cancel any ongoing jobs.
          PersistJob job = mPersistJobs.get(fileId);
          if (job != null) {
            job.setCancelState(PersistJob.CancelState.TO_BE_CANCELED);
          }
        }
        revisedInodesToDelete.add(new Pair<>(alluxioUriToDelete, inodePairToDelete.getSecond()));
      } else {
        unsafeInodes.add(inodeToDelete.getId());
        // Propagate 'unsafe-ness' to parent as one of its descendants can't be deleted
        unsafeInodes.add(inodeToDelete.getParentId());
        failedUris.add(new Pair<>(alluxioUriToDelete.toString(), failureReason));
      }
    }
    // If the deleted root is a sync point, stop active syncing on it.
    if (mSyncManager.isSyncPoint(inodePath.getUri())) {
      mSyncManager.stopSyncAndJournal(RpcContext.NOOP, inodePath.getUri());
    }
    // Delete Inodes
    for (Pair<AlluxioURI, LockedInodePath> delInodePair : revisedInodesToDelete) {
      LockedInodePath tempInodePath = delInodePair.getSecond();
      MountTable.Resolution resolution = mMountTable.resolve(tempInodePath.getUri());
      mInodeTree.deleteInode(rpcContext, tempInodePath, opTimeMs);
      if (deleteContext.getOptions().getAlluxioOnly()) {
        Metrics.getUfsOpsSavedCounter(resolution.getUfsMountPointUri(), Metrics.UFSOps.DELETE_FILE).inc();
      }
    }
    if (!failedUris.isEmpty()) {
      // Surface all UFS deletion failures in a single exception message.
      Collection<String> messages = failedUris.stream().map(pair -> String.format("%s (%s)", pair.getFirst(), pair.getSecond())).collect(Collectors.toList());
      throw new FailedPreconditionException(ExceptionMessage.DELETE_FAILED_UFS.getMessage(StringUtils.join(messages, ", ")));
    }
  }
  Metrics.PATHS_DELETED.inc(inodesToDelete.size());
}
Aggregations