Use of alluxio.master.file.InodeSyncStream.SyncStatus.FAILED in project alluxio by Alluxio.
The class DefaultFileSystemMaster, method deleteInternal.
/**
* Implements file deletion.
* <p>
* This method does not delete blocks. Instead, it returns deleted inodes so that their blocks can
* be deleted after the inode deletion journal entry has been written. We cannot delete blocks
* earlier because the inode deletion may fail, leaving us with an inode containing deleted blocks.
*
* @param rpcContext the rpc context
* @param inodePath the file {@link LockedInodePath}
* @param deleteContext the method options
*/
@VisibleForTesting
public void deleteInternal(RpcContext rpcContext, LockedInodePath inodePath, DeleteContext deleteContext) throws FileDoesNotExistException, IOException, DirectoryNotEmptyException, InvalidPathException {
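// The caller must hold an exclusive WRITE_EDGE lock on the edge leading to the inode, so no concurrent operation can create or remove paths under the target while it is being deleted.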
Preconditions.checkState(inodePath.getLockPattern() == LockPattern.WRITE_EDGE);
// NOTE: a crash after any UFS object is deleted and before the delete operation is journaled will result in an inconsistency between Alluxio and UFS.
if (!inodePath.fullPathExists()) {
return;
}
long opTimeMs = System.currentTimeMillis();
Inode inode = inodePath.getInode();
if (inode == null) {
return;
}
boolean recursive = deleteContext.getOptions().getRecursive();
if (inode.isDirectory() && !recursive && mInodeStore.hasChildren(inode.asDirectory())) {
// The inode is nonempty, and we don't want to delete a nonempty directory unless recursive is true.
throw new DirectoryNotEmptyException(ExceptionMessage.DELETE_NONEMPTY_DIRECTORY_NONRECURSIVE, inode.getName());
}
if (mInodeTree.isRootId(inode.getId())) {
// The root cannot be deleted.
throw new InvalidPathException(ExceptionMessage.DELETE_ROOT_DIRECTORY.getMessage());
}
// Inodes for which deletion will be attempted
List<Pair<AlluxioURI, LockedInodePath>> inodesToDelete = new ArrayList<>();
// Add root of sub-tree to delete
inodesToDelete.add(new Pair<>(inodePath.getUri(), inodePath));
try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
for (LockedInodePath childPath : descendants) {
inodesToDelete.add(new Pair<>(mInodeTree.getPath(childPath.getInode()), childPath));
}
// Prepare to delete persisted inodes
UfsDeleter ufsDeleter = NoopUfsDeleter.INSTANCE;
if (!deleteContext.getOptions().getAlluxioOnly()) {
ufsDeleter = new SafeUfsDeleter(mMountTable, mInodeStore, inodesToDelete, deleteContext.getOptions().build());
}
// Inodes to delete from tree after attempting to delete from UFS
List<Pair<AlluxioURI, LockedInodePath>> revisedInodesToDelete = new ArrayList<>();
// Inodes that are not safe for recursive deletes
Set<Long> unsafeInodes = new HashSet<>();
// Alluxio URIs (and the reason for failure) which could not be deleted
List<Pair<String, String>> failedUris = new ArrayList<>();
// Traverse the inodes bottom-up (children before their parents), attempting to delete each from the UFS where required; for a file, we deal with its pending persist requests and blocks as well.
for (int i = inodesToDelete.size() - 1; i >= 0; i--) {
rpcContext.throwIfCancelled();
Pair<AlluxioURI, LockedInodePath> inodePairToDelete = inodesToDelete.get(i);
AlluxioURI alluxioUriToDelete = inodePairToDelete.getFirst();
Inode inodeToDelete = inodePairToDelete.getSecond().getInode();
String failureReason = null;
if (unsafeInodes.contains(inodeToDelete.getId())) {
failureReason = ExceptionMessage.DELETE_FAILED_DIR_NONEMPTY.getMessage();
} else if (inodeToDelete.isPersisted()) {
// TODO(calvin): Add tests (ALLUXIO-1831)
if (mMountTable.isMountPoint(alluxioUriToDelete)) {
mMountTable.delete(rpcContext, alluxioUriToDelete, true);
} else {
if (!deleteContext.getOptions().getAlluxioOnly()) {
try {
checkUfsMode(alluxioUriToDelete, OperationType.WRITE);
// Attempt to delete node if all children were deleted successfully
ufsDeleter.delete(alluxioUriToDelete, inodeToDelete);
} catch (AccessControlException | IOException e) {
// In case ufs is not writable, we will still attempt to delete other entries
// if any as they may be from a different mount point
LOG.warn("Failed to delete {}: {}", alluxioUriToDelete, e.toString());
failureReason = e.getMessage();
}
}
}
}
if (failureReason == null) {
if (inodeToDelete.isFile()) {
long fileId = inodeToDelete.getId();
// Remove the file from the set of files to persist.
mPersistRequests.remove(fileId);
// Cancel any ongoing jobs.
PersistJob job = mPersistJobs.get(fileId);
if (job != null) {
job.setCancelState(PersistJob.CancelState.TO_BE_CANCELED);
}
}
revisedInodesToDelete.add(new Pair<>(alluxioUriToDelete, inodePairToDelete.getSecond()));
} else {
unsafeInodes.add(inodeToDelete.getId());
// Propagate 'unsafe-ness' to parent as one of its descendants can't be deleted
unsafeInodes.add(inodeToDelete.getParentId());
failedUris.add(new Pair<>(alluxioUriToDelete.toString(), failureReason));
}
}
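// If the deleted path was registered as an active-sync point, stop syncing it and journal that change.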
if (mSyncManager.isSyncPoint(inodePath.getUri())) {
mSyncManager.stopSyncAndJournal(RpcContext.NOOP, inodePath.getUri());
}
// Delete Inodes
for (Pair<AlluxioURI, LockedInodePath> delInodePair : revisedInodesToDelete) {
LockedInodePath tempInodePath = delInodePair.getSecond();
MountTable.Resolution resolution = mMountTable.resolve(tempInodePath.getUri());
mInodeTree.deleteInode(rpcContext, tempInodePath, opTimeMs);
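// For a metadata-only delete, record that a UFS delete operation was avoided.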
if (deleteContext.getOptions().getAlluxioOnly()) {
Metrics.getUfsOpsSavedCounter(resolution.getUfsMountPointUri(), Metrics.UFSOps.DELETE_FILE).inc();
}
}
if (!failedUris.isEmpty()) {
Collection<String> messages = failedUris.stream().map(pair -> String.format("%s (%s)", pair.getFirst(), pair.getSecond())).collect(Collectors.toList());
throw new FailedPreconditionException(ExceptionMessage.DELETE_FAILED_UFS.getMessage(StringUtils.join(messages, ", ")));
}
}
Metrics.PATHS_DELETED.inc(inodesToDelete.size());
}
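A minimal sketch of how a caller might drive deleteInternal. DeleteContext.mergeFrom, DeletePOptions, and InodeTree.lockFullInodePath follow Alluxio's 2.x API, but the master, rpcContext, and uri variables are illustrative, not the exact call site in DefaultFileSystemMaster.delete:
// Build a recursive delete that also removes the paths from the UFS.
DeleteContext deleteContext = DeleteContext.mergeFrom(DeletePOptions.newBuilder().setRecursive(true).setAlluxioOnly(false));
// deleteInternal requires a WRITE_EDGE-locked path, so lock the full path first;
// the try-with-resources releases the lock when the delete completes or fails.
try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(uri, LockPattern.WRITE_EDGE)) {
  master.deleteInternal(rpcContext, inodePath, deleteContext);
}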
Use of alluxio.master.file.InodeSyncStream.SyncStatus.FAILED in project alluxio by Alluxio.
The class DefaultFileSystemMaster, method getFileInfoInternal.
/**
* @param inodePath the {@link LockedInodePath} to get the {@link FileInfo} for
* @param counter the counter to increment for this call; if null, the UFS-ops-saved counter for the resolved mount point is incremented instead
* @return the {@link FileInfo} for the given inode
*/
private FileInfo getFileInfoInternal(LockedInodePath inodePath, Counter counter) throws FileDoesNotExistException, UnavailableException {
Inode inode = inodePath.getInode();
AlluxioURI uri = inodePath.getUri();
FileInfo fileInfo = inode.generateClientFileInfo(uri.toString());
if (fileInfo.isFolder()) {
fileInfo.setLength(inode.asDirectory().getChildCount());
}
fileInfo.setInMemoryPercentage(getInMemoryPercentage(inode));
fileInfo.setInAlluxioPercentage(getInAlluxioPercentage(inode));
if (inode.isFile()) {
try {
fileInfo.setFileBlockInfos(getFileBlockInfoListInternal(inodePath));
} catch (InvalidPathException e) {
throw new FileDoesNotExistException(e.getMessage(), e);
}
}
// Rehydrate missing block-infos for persisted files.
if (fileInfo.isCompleted() && fileInfo.getBlockIds().size() > fileInfo.getFileBlockInfos().size() && inode.isPersisted()) {
List<Long> missingBlockIds = fileInfo.getBlockIds().stream().filter((bId) -> fileInfo.getFileBlockInfo(bId) == null).collect(Collectors.toList());
LOG.warn("BlockInfo missing for file: {}. BlockIdsWithMissingInfos: {}", inodePath.getUri(), missingBlockIds.stream().map(Object::toString).collect(Collectors.joining(",")));
// Remove old block metadata from block-master before re-committing.
mBlockMaster.removeBlocks(fileInfo.getBlockIds(), true);
// Commit all the file blocks (without locations) so the metadata for the block exists.
commitBlockInfosForFile(fileInfo.getBlockIds(), fileInfo.getLength(), fileInfo.getBlockSizeBytes());
// Reset file-block-info list with the new list.
try {
fileInfo.setFileBlockInfos(getFileBlockInfoListInternal(inodePath));
} catch (InvalidPathException e) {
throw new FileDoesNotExistException(String.format("Hydration failed for file: %s", inodePath.getUri()), e);
}
}
fileInfo.setXAttr(inode.getXAttr());
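// Resolve the Alluxio path against the mount table so the FileInfo also carries the UFS path and mount ID.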
MountTable.Resolution resolution;
try {
resolution = mMountTable.resolve(uri);
} catch (InvalidPathException e) {
throw new FileDoesNotExistException(e.getMessage(), e);
}
AlluxioURI resolvedUri = resolution.getUri();
fileInfo.setUfsPath(resolvedUri.toString());
fileInfo.setMountId(resolution.getMountId());
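// A null counter means no method-specific counter was supplied; credit the call as a saved UFS getFileInfo operation for this mount point.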
if (counter == null) {
Metrics.getUfsOpsSavedCounter(resolution.getUfsMountPointUri(), Metrics.UFSOps.GET_FILE_INFO).inc();
} else {
counter.inc();
}
Metrics.FILE_INFOS_GOT.inc();
return fileInfo;
}
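A sketch of the counter convention at two hypothetical call sites (GET_FILE_INFO_OPS is an illustrative counter name, not necessarily one defined by DefaultFileSystemMaster.Metrics):
// An RPC-driven lookup passes its own operation counter, which the method increments.
FileInfo info = getFileInfoInternal(inodePath, Metrics.GET_FILE_INFO_OPS);
// Passing null credits the call as a saved UFS getFileInfo operation for the resolved mount point.
FileInfo internalInfo = getFileInfoInternal(inodePath, null);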