use of alluxio.exception.InvalidPathException in project alluxio by Alluxio.
the class InodeTree method createPath.
/**
* Creates a file or directory at path.
*
* This method expects the last edge leading to the target inode to be write-locked. If the
* last existing inode in the path is /a/b/c and we want to create /a/b/c/d/e, the c->d edge must
* be write-locked.
*
* On success, createPath attempts to push the write lock forward as far as possible. For the
* above example, createPath would take a write lock on d->e, and downgrade the c->d lock from a
* write lock to a read lock. This may not be possible if inodePath is a composite path which
* doesn't own the write lock. In that case no downgrade will occur.
*
* @param rpcContext the rpc context
* @param inodePath the path
* @param context method context
* @return a list of created inodes
* @throws FileAlreadyExistsException if the path already exists, for example when a file already
* exists where a directory is to be created
* @throws BlockInfoException when blockSizeBytes is invalid
* @throws InvalidPathException when the path is invalid, for example, (1) when a necessary
* parent directory does not exist and recursive is false, or (2) when one of the necessary
* parent directories is actually a file
* @throws FileDoesNotExistException if the parent of the path does not exist and the recursive
* option is false
*/
public List<Inode> createPath(RpcContext rpcContext, LockedInodePath inodePath, CreatePathContext<?, ?> context) throws FileAlreadyExistsException, BlockInfoException, InvalidPathException, IOException, FileDoesNotExistException {
Preconditions.checkState(inodePath.getLockPattern() == LockPattern.WRITE_EDGE);
// TODO(gpang): consider splitting this into createFilePath and createDirectoryPath, with a
// helper method for the shared logic.
AlluxioURI path = inodePath.getUri();
if (path.isRoot()) {
String errorMessage = "Not allowed to create existing root path: " + path;
LOG.error(errorMessage);
throw new FileAlreadyExistsException(errorMessage);
}
if (inodePath.fullPathExists()) {
if (context instanceof CreateDirectoryContext && ((CreateDirectoryContext) context).getOptions().getAllowExists()) {
return Collections.emptyList();
} else {
String pathType = "file";
if (context instanceof CreateDirectoryContext) {
pathType = "directory";
}
String errorMessage = String.format("Not allowed to create %s because path already exists: %s", pathType, path);
throw new FileAlreadyExistsException(errorMessage);
}
}
if (context instanceof CreateFileContext) {
CreateFileContext fileContext = (CreateFileContext) context;
if (fileContext.getOptions().getBlockSizeBytes() < 1) {
throw new BlockInfoException("Invalid block size " + fileContext.getOptions().getBlockSizeBytes());
}
}
LOG.debug("createPath {}", path);
String[] pathComponents = inodePath.mPathComponents;
String name = path.getName();
// pathIndex is the index into pathComponents where we start filling in the path from the inode.
int pathIndex = inodePath.getExistingInodeCount();
if (pathIndex < pathComponents.length - 1) {
// The immediate parent does not exist. If the create is not recursive, fail here; otherwise the remaining path components are created below.
if (!context.isRecursive()) {
throw new FileDoesNotExistException(String.format("File %s creation failed. Component %d(%s) does not exist", path, pathIndex, pathComponents[pathIndex]));
}
}
// The last existing ancestor inode (the direct parent, or a more distant ancestor) of the target path.
Inode ancestorInode = inodePath.getAncestorInode();
if (!ancestorInode.isDirectory()) {
throw new InvalidPathException("Could not traverse to parent directory of path " + path + ". Component " + pathComponents[pathIndex - 1] + " is not a directory.");
}
InodeDirectoryView currentInodeDirectory = ancestorInode.asDirectory();
List<Inode> createdInodes = new ArrayList<>();
if (context.isPersisted()) {
// Synchronously persist directories. These inodes are already READ locked.
for (Inode inode : inodePath.getInodeList()) {
if (!inode.isPersisted()) {
// This cast is safe because we've already verified that the file inode doesn't exist.
syncPersistExistingDirectory(rpcContext, inode.asDirectory());
}
}
}
if ((pathIndex < (pathComponents.length - 1) || !mInodeStore.getChild(currentInodeDirectory, name).isPresent()) && context.getOperationTimeMs() > currentInodeDirectory.getLastModificationTimeMs()) {
// (1) There are components in parent paths that need to be created. Or
// (2) The last component of the path needs to be created.
// In these two cases, the last traversed Inode will be modified if the new timestamp is after
// the existing last modified time.
long currentId = currentInodeDirectory.getId();
try (LockResource lr = mInodeLockManager.lockUpdate(currentId)) {
long updatedLastModified = mInodeStore.get(currentId).get().getLastModificationTimeMs();
if (updatedLastModified < context.getOperationTimeMs()) {
UpdateInodeEntry.Builder updateInodeEntry = UpdateInodeEntry.newBuilder().setId(currentId).setLastModificationTimeMs(context.getOperationTimeMs()).setLastAccessTimeMs(context.getOperationTimeMs());
if (context.getXAttr() != null) {
updateInodeEntry.putAllXAttr(CommonUtils.convertToByteString(context.getXAttr()));
}
mState.applyAndJournal(rpcContext, updateInodeEntry.build());
}
}
}
// Fill in the ancestor directories that were missing.
// NOTE, we set the mode of missing ancestor directories to be the default value, rather
// than inheriting the option of the final file to create, because it may not have
// "execute" permission.
CreateDirectoryContext missingDirContext = CreateDirectoryContext.defaults();
missingDirContext.getOptions().setCommonOptions(FileSystemMasterCommonPOptions.newBuilder().setTtl(context.getTtl()).setTtlAction(context.getTtlAction()));
missingDirContext.setWriteType(context.getWriteType());
missingDirContext.setOperationTimeMs(context.getOperationTimeMs());
missingDirContext.setMountPoint(false);
missingDirContext.setOwner(context.getOwner());
missingDirContext.setGroup(context.getGroup());
missingDirContext.setXAttr(context.getXAttr());
StringBuilder pathBuilder = new StringBuilder().append(String.join(AlluxioURI.SEPARATOR, Arrays.asList(pathComponents).subList(0, pathIndex)));
for (int k = pathIndex; k < (pathComponents.length - 1); k++) {
MutableInodeDirectory newDir = MutableInodeDirectory.create(mDirectoryIdGenerator.getNewDirectoryId(rpcContext.getJournalContext()), currentInodeDirectory.getId(), pathComponents[k], missingDirContext);
if (currentInodeDirectory.isPinned() && !newDir.isPinned()) {
newDir.setPinned(true);
newDir.setMediumTypes(new HashSet<>(currentInodeDirectory.getMediumTypes()));
}
inheritOwnerAndGroupIfEmpty(newDir, currentInodeDirectory);
// if the parent has default ACL, copy that default ACL as the new directory's default
// and access acl, ANDed with the umask
// if it is part of a metadata load operation, we ignore the umask and simply inherit
// the default ACL as the directory's new default and access ACL
short mode = context.isMetadataLoad() ? Mode.createFullAccess().toShort() : newDir.getMode();
DefaultAccessControlList dAcl = currentInodeDirectory.getDefaultACL();
if (!dAcl.isEmpty()) {
Pair<AccessControlList, DefaultAccessControlList> pair = dAcl.generateChildDirACL(mode);
newDir.setInternalAcl(pair.getFirst());
newDir.setDefaultACL(pair.getSecond());
}
String newDirPath = k == 0 ? ROOT_PATH : pathBuilder.append(AlluxioURI.SEPARATOR).append(pathComponents[k]).toString();
mState.applyAndJournal(rpcContext, newDir, newDirPath);
inodePath.addNextInode(Inode.wrap(newDir));
// Persist the directory only after it exists in the inode tree, which prevents concurrent creates from trying to persist the same directory name.
if (context.isPersisted()) {
syncPersistExistingDirectory(rpcContext, newDir);
}
createdInodes.add(Inode.wrap(newDir));
currentInodeDirectory = newDir;
}
// Create the final path component.
MutableInode<?> newInode;
// create the new inode, with a write lock
if (context instanceof CreateDirectoryContext) {
CreateDirectoryContext directoryContext = (CreateDirectoryContext) context;
MutableInodeDirectory newDir = MutableInodeDirectory.create(mDirectoryIdGenerator.getNewDirectoryId(rpcContext.getJournalContext()), currentInodeDirectory.getId(), name, directoryContext);
// if the parent has default ACL, take the default ACL ANDed with the umask as the new
// directory's default and access acl
// When it is a metadata load operation, do not take the umask into account
short mode = context.isMetadataLoad() ? Mode.createFullAccess().toShort() : newDir.getMode();
DefaultAccessControlList dAcl = currentInodeDirectory.getDefaultACL();
if (!dAcl.isEmpty()) {
Pair<AccessControlList, DefaultAccessControlList> pair = dAcl.generateChildDirACL(mode);
newDir.setInternalAcl(pair.getFirst());
newDir.setDefaultACL(pair.getSecond());
}
if (directoryContext.isPersisted()) {
// Do not journal the persist entry, since a creation entry will be journaled instead.
if (context.isMetadataLoad()) {
// if we are creating the file as a result of loading metadata, the newDir is already
// persisted, and we got the permissions info from the ufs.
newDir.setOwner(context.getOwner().intern()).setGroup(context.getGroup().intern()).setMode(context.getMode().toShort());
Long operationTimeMs = context.getOperationTimeMs();
if (operationTimeMs != null) {
newDir.setLastModificationTimeMs(operationTimeMs, true);
newDir.setLastAccessTimeMs(operationTimeMs, true);
}
newDir.setPersistenceState(PersistenceState.PERSISTED);
} else {
syncPersistNewDirectory(newDir);
}
}
// Do NOT call setOwner/Group after inheriting from parent if empty
inheritOwnerAndGroupIfEmpty(newDir, currentInodeDirectory);
newInode = newDir;
} else if (context instanceof CreateFileContext) {
CreateFileContext fileContext = (CreateFileContext) context;
MutableInodeFile newFile = MutableInodeFile.create(mContainerIdGenerator.getNewContainerId(), currentInodeDirectory.getId(), name, System.currentTimeMillis(), fileContext);
// if the parent has a default ACL, copy that default ACL ANDed with the umask as the new
// file's access ACL.
// If it is a metadata load operation, do not consider the umask.
DefaultAccessControlList dAcl = currentInodeDirectory.getDefaultACL();
short mode = context.isMetadataLoad() ? Mode.createFullAccess().toShort() : newFile.getMode();
if (!dAcl.isEmpty()) {
AccessControlList acl = dAcl.generateChildFileACL(mode);
newFile.setInternalAcl(acl);
}
if (fileContext.isCacheable()) {
newFile.setCacheable(true);
}
if (fileContext.getWriteType() == WriteType.ASYNC_THROUGH) {
newFile.setPersistenceState(PersistenceState.TO_BE_PERSISTED);
}
// Do NOT call setOwner/Group after inheriting from parent if empty
inheritOwnerAndGroupIfEmpty(newFile, currentInodeDirectory);
newInode = newFile;
} else {
throw new IllegalStateException(String.format("Unrecognized create options: %s", context));
}
if (currentInodeDirectory.isPinned() && !newInode.isPinned()) {
newInode.setPinned(true);
newInode.setMediumTypes(new HashSet<>(currentInodeDirectory.getMediumTypes()));
}
mState.applyAndJournal(rpcContext, newInode, inodePath.getUri().getPath());
Inode inode = Inode.wrap(newInode);
inodePath.addNextInode(inode);
createdInodes.add(inode);
LOG.debug("createFile: File Created: {} parent: {}", newInode, currentInodeDirectory);
return createdInodes;
}
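For context, a minimal caller sketch (not taken from the Alluxio source) showing how createPath might be invoked. It assumes an InodeTree reference mInodeTree, an RpcContext rpcContext already in scope, and a lockInodePath(AlluxioURI, LockPattern) method for acquiring the WRITE_EDGE lock that createPath requires.
AlluxioURI uri = new AlluxioURI("/a/b/c/d/e");
CreateDirectoryContext ctx = CreateDirectoryContext.defaults();
ctx.getOptions().setRecursive(true); // also create the missing ancestors such as /a/b/c/d
try (LockedInodePath inodePath = mInodeTree.lockInodePath(uri, LockPattern.WRITE_EDGE)) {
// assumed locking API; createPath expects the last existing edge to be write-locked
if (!inodePath.fullPathExists()) {
List<Inode> created = mInodeTree.createPath(rpcContext, inodePath, ctx);
LOG.debug("created {} inodes for {}", created.size(), uri);
}
}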
use of alluxio.exception.InvalidPathException in project alluxio by Alluxio.
the class LazyUfsBlockLocationCache method get.
@Override
@Nullable
public List<String> get(long blockId, AlluxioURI fileUri, long offset) {
List<String> locations = mCache.getIfPresent(blockId);
if (locations != null) {
return locations;
}
try {
MountTable.Resolution resolution = mMountTable.resolve(fileUri);
String ufsUri = resolution.getUri().toString();
try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
UnderFileSystem ufs = ufsResource.get();
locations = ufs.getFileLocations(ufsUri, FileLocationOptions.defaults().setOffset(offset));
}
if (locations != null) {
mCache.put(blockId, locations);
return locations;
}
} catch (InvalidPathException | IOException e) {
LOG.warn("Failed to get locations for block {} in file {} with offset {}: {}", blockId, fileUri, offset, e);
}
return null;
}
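A hedged usage sketch (mMountTable and blockId are assumed to be in scope, and the constructor is assumed to take the MountTable). The first lookup resolves the file through the mount table and queries the UFS; later lookups for the same block id are served from the in-memory cache.
UfsBlockLocationCache locationCache = new LazyUfsBlockLocationCache(mMountTable);
List<String> locations = locationCache.get(blockId, new AlluxioURI("/data/part-0"), 0);
if (locations == null) {
// The lookup failed (invalid path or UFS error); treat the block as having no known UFS locations.
locations = Collections.emptyList();
}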
use of alluxio.exception.InvalidPathException in project alluxio by Alluxio.
the class MountTable method delete.
/**
* Unmounts the given Alluxio path. The path should match an existing mount point.
*
* @param journalContext journal context
* @param uri an Alluxio path URI
* @param checkNestedMount whether to check for nested mount points before deleting
* @return whether the operation succeeded
*/
public boolean delete(Supplier<JournalContext> journalContext, AlluxioURI uri, boolean checkNestedMount) {
String path = uri.getPath();
LOG.info("Unmounting {}", path);
if (path.equals(ROOT)) {
LOG.warn("Cannot unmount the root mount point.");
return false;
}
try (LockResource r = new LockResource(mWriteLock)) {
if (mState.getMountTable().containsKey(path)) {
// check if the path contains another nested mount point
if (checkNestedMount) {
for (String mountPath : mState.getMountTable().keySet()) {
try {
if (PathUtils.hasPrefix(mountPath, path) && (!path.equals(mountPath))) {
LOG.warn("The path to unmount {} contains another nested mountpoint {}", path, mountPath);
return false;
}
} catch (InvalidPathException e) {
LOG.warn("Invalid path {} encountered when checking for nested mount point", path);
}
}
}
mUfsManager.removeMount(mState.getMountTable().get(path).getMountId());
mState.applyAndJournal(journalContext, DeleteMountPointEntry.newBuilder().setAlluxioPath(path).build());
return true;
}
LOG.warn("Mount point {} does not exist.", path);
return false;
}
}
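A brief usage sketch, with journalContext assumed to be in scope: unmount /mnt/data, refusing to proceed if another mount point is nested underneath it. Note that delete returns false instead of throwing when the unmount is not possible.
boolean unmounted = mMountTable.delete(() -> journalContext, new AlluxioURI("/mnt/data"), true);
if (!unmounted) {
LOG.warn("Could not unmount /mnt/data (root, missing mount point, or nested mount)");
}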
use of alluxio.exception.InvalidPathException in project alluxio by Alluxio.
the class MountTable method add.
/**
* Mounts the given UFS path at the given Alluxio path. The Alluxio path should not be nested
* under an existing mount point.
*
* @param journalContext the journal context
* @param alluxioUri an Alluxio path URI
* @param ufsUri a UFS path URI
* @param mountId the mount id
* @param options the mount options
* @throws FileAlreadyExistsException if the mount point already exists
* @throws InvalidPathException if an invalid path is encountered
*/
public void add(Supplier<JournalContext> journalContext, AlluxioURI alluxioUri, AlluxioURI ufsUri, long mountId, MountPOptions options) throws FileAlreadyExistsException, InvalidPathException {
String alluxioPath = alluxioUri.getPath().isEmpty() ? "/" : alluxioUri.getPath();
LOG.info("Mounting {} at {}", ufsUri, alluxioPath);
try (LockResource r = new LockResource(mWriteLock)) {
if (mState.getMountTable().containsKey(alluxioPath)) {
throw new FileAlreadyExistsException(ExceptionMessage.MOUNT_POINT_ALREADY_EXISTS.getMessage(alluxioPath));
}
// Make sure the UFS path being mounted is not a prefix or suffix of any existing mounted UFS path.
for (Map.Entry<String, MountInfo> entry : mState.getMountTable().entrySet()) {
AlluxioURI mountedUfsUri = entry.getValue().getUfsUri();
if ((ufsUri.getScheme() == null || ufsUri.getScheme().equals(mountedUfsUri.getScheme())) && (ufsUri.getAuthority().toString().equals(mountedUfsUri.getAuthority().toString()))) {
String ufsPath = ufsUri.getPath().isEmpty() ? "/" : ufsUri.getPath();
String mountedUfsPath = mountedUfsUri.getPath().isEmpty() ? "/" : mountedUfsUri.getPath();
if (PathUtils.hasPrefix(ufsPath, mountedUfsPath)) {
throw new InvalidPathException(ExceptionMessage.MOUNT_POINT_PREFIX_OF_ANOTHER.getMessage(mountedUfsUri.toString(), ufsUri.toString()));
}
if (PathUtils.hasPrefix(mountedUfsPath, ufsPath)) {
throw new InvalidPathException(ExceptionMessage.MOUNT_POINT_PREFIX_OF_ANOTHER.getMessage(ufsUri.toString(), mountedUfsUri.toString()));
}
}
}
Map<String, String> properties = options.getPropertiesMap();
mState.applyAndJournal(journalContext, AddMountPointEntry.newBuilder().addAllProperties(properties.entrySet().stream().map(entry -> StringPairEntry.newBuilder().setKey(entry.getKey()).setValue(entry.getValue()).build()).collect(Collectors.toList())).setAlluxioPath(alluxioPath).setMountId(mountId).setReadOnly(options.getReadOnly()).setShared(options.getShared()).setUfsPath(ufsUri.toString()).build());
}
}
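A brief usage sketch (journalContext is assumed to be in scope, and IdUtils.createMountId() is assumed as the source of the mount id): mount s3://bucket/data at /mnt/data. add() throws FileAlreadyExistsException if /mnt/data is already a mount point, and InvalidPathException if the UFS path is a prefix or suffix of an already-mounted UFS path.
mMountTable.add(() -> journalContext,
new AlluxioURI("/mnt/data"),
new AlluxioURI("s3://bucket/data"),
IdUtils.createMountId(), // assumed helper for generating a mount id
MountPOptions.getDefaultInstance());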
use of alluxio.exception.InvalidPathException in project alluxio by Alluxio.
the class ActiveSyncManager method start.
/**
* Start the polling threads.
*/
public void start() throws IOException {
mStarted = true;
// Initialize UFS states
for (AlluxioURI syncPoint : mSyncPathList) {
MountTable.Resolution resolution;
try {
resolution = mMountTable.resolve(syncPoint);
} catch (InvalidPathException e) {
LOG.info("Invalid Path encountered during start up of ActiveSyncManager, " + "path {}, exception {}", syncPoint, e);
continue;
}
try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
if (!ufsResource.get().supportsActiveSync()) {
throw new UnsupportedOperationException("Active Sync is not supported on this UFS type: " + ufsResource.get().getUnderFSType());
}
ufsResource.get().startSync(resolution.getUri());
}
}
// Launch a polling thread for each mount with sync points, restarting from the recorded transaction id; if no valid txid exists, an initial full sync is triggered below.
for (Map.Entry<Long, List<AlluxioURI>> entry : mFilterMap.entrySet()) {
long mountId = entry.getKey();
long txId = mStartingTxIdMap.getOrDefault(mountId, SyncInfo.INVALID_TXID);
if (!entry.getValue().isEmpty()) {
launchPollingThread(mountId, txId);
}
try {
if ((txId == SyncInfo.INVALID_TXID) && ServerConfiguration.getBoolean(PropertyKey.MASTER_UFS_ACTIVE_SYNC_INITIAL_SYNC_ENABLED)) {
mExecutorService.submit(() -> entry.getValue().parallelStream().forEach(syncPoint -> {
MountTable.Resolution resolution;
try {
resolution = mMountTable.resolve(syncPoint);
} catch (InvalidPathException e) {
LOG.info("Invalid Path encountered during start up of ActiveSyncManager, " + "path {}, exception {}", syncPoint, e);
return;
}
startInitialFullSync(syncPoint, resolution);
}));
}
} catch (Exception e) {
LOG.warn("exception encountered during initial sync: {}", e.toString());
}
}
}
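The resolve-and-skip pattern above appears twice in start(); a hypothetical helper (not in the Alluxio source) that factors it out might look like the following, returning null for sync points whose paths are no longer valid.
@Nullable
private MountTable.Resolution resolveOrSkip(AlluxioURI syncPoint) {
try {
return mMountTable.resolve(syncPoint);
} catch (InvalidPathException e) {
// Skip, rather than fail startup for, a sync point that no longer resolves.
LOG.info("Invalid path {} encountered during ActiveSyncManager startup, skipping", syncPoint, e);
return null;
}
}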