
Example 31 with PermissionStatus

use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.

the class NativeAzureFileSystem method mkdirs.

public boolean mkdirs(Path f, FsPermission permission, boolean noUmask) throws IOException {
    LOG.debug("Creating directory: {}", f.toString());
    if (containsColon(f)) {
        throw new IOException("Cannot create directory " + f + " through WASB that has colons in the name");
    }
    Path absolutePath = makeAbsolute(f);
    performAuthCheck(absolutePath.toString(), WasbAuthorizationOperations.EXECUTE.toString(), "mkdirs");
    PermissionStatus permissionStatus = null;
    if (noUmask) {
        // ensure owner still has wx permissions at the minimum
        permissionStatus = createPermissionStatus(applyUMask(
            FsPermission.createImmutable((short) (permission.toShort() | USER_WX_PERMISION)),
            UMaskApplyMode.NewDirectoryNoUmask));
    } else {
        permissionStatus = createPermissionStatus(applyUMask(permission, UMaskApplyMode.NewDirectory));
    }
    ArrayList<String> keysToCreateAsFolder = new ArrayList<String>();
    ArrayList<String> keysToUpdateAsFolder = new ArrayList<String>();
    boolean childCreated = false;
    // Check that there is no file in the parent chain of the given path.
    for (Path current = absolutePath, parent = current.getParent();
            parent != null; // Stop when you get to the root
            current = parent, parent = current.getParent()) {
        String currentKey = pathToKey(current);
        FileMetadata currentMetadata = store.retrieveMetadata(currentKey);
        if (currentMetadata != null && !currentMetadata.isDir()) {
            throw new FileAlreadyExistsException("Cannot create directory " + f + " because " + current + " is an existing file.");
        } else if (currentMetadata == null) {
            keysToCreateAsFolder.add(currentKey);
            childCreated = true;
        } else {
            // The directory already exists. Its last modified time needs to be
            // updated if there is a child directory created under it.
            if (childCreated) {
                keysToUpdateAsFolder.add(currentKey);
            }
            childCreated = false;
        }
    }
    for (String currentKey : keysToCreateAsFolder) {
        store.storeEmptyFolder(currentKey, permissionStatus);
    }
    instrumentation.directoryCreated();
    // Success; any failure above would have thrown an exception.
    return true;
}
Also used : Path(org.apache.hadoop.fs.Path) FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) ArrayList(java.util.ArrayList) IOException(java.io.IOException) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus)
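
The applyUMask and createPermissionStatus helpers called above are private to NativeAzureFileSystem. As a rough, hedged sketch of the idea using only public Hadoop APIs: the requested permission is masked with the configured umask (fs.permissions.umask-mode) and paired with the current user's name and a group to form the PermissionStatus stored on the new folder. The helper name buildDirectoryPermissionStatus and the choice of group below are assumptions for illustration, not the exact WASB implementation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical stand-in for createPermissionStatus(applyUMask(permission, ...)).
static PermissionStatus buildDirectoryPermissionStatus(Configuration conf, FsPermission requested) throws IOException {
    // Mask the requested bits with the configured umask (default 022).
    FsPermission umask = FsPermission.getUMask(conf);
    FsPermission effective = requested.applyUMask(umask);
    // Attach the current user; using the primary group here is an assumption.
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    String user = ugi.getShortUserName();
    String[] groups = ugi.getGroupNames();
    String group = groups.length > 0 ? groups[0] : user;
    return new PermissionStatus(user, group, effective);
}

The same PermissionStatus could then be handed to a store call such as storeEmptyFolder, mirroring how mkdirs above applies one status to every folder it creates along the path.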

Example 32 with PermissionStatus

use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.

the class NativeAzureFileSystem method create.

/**
   * Create an Azure blob and return an output stream to use
   * to write data to it.
   *
   * @param f the file to create
   * @param permission permission to apply to the new file
   * @param overwrite whether an existing file may be overwritten
   * @param createParent whether missing parent directories should be created
   * @param bufferSize size of the buffer to use for writing
   * @param replication replication factor requested by the caller
   * @param blockSize block size requested by the caller
   * @param progress callback for reporting progress
   * @param parentFolderLease Lease on parent folder (or null if
   * no lease).
   * @return an output stream for writing data to the new blob
   * @throws IOException if the blob cannot be created
   */
private FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
        boolean createParent, int bufferSize, short replication, long blockSize,
        Progressable progress, SelfRenewingLease parentFolderLease)
        throws FileAlreadyExistsException, IOException {
    LOG.debug("Creating file: {}", f.toString());
    if (containsColon(f)) {
        throw new IOException("Cannot create file " + f + " through WASB that has colons in the name");
    }
    Path absolutePath = makeAbsolute(f);
    performAuthCheck(absolutePath.toString(), WasbAuthorizationOperations.WRITE.toString(), "create");
    String key = pathToKey(absolutePath);
    FileMetadata existingMetadata = store.retrieveMetadata(key);
    if (existingMetadata != null) {
        if (existingMetadata.isDir()) {
            throw new FileAlreadyExistsException("Cannot create file " + f + "; already exists as a directory.");
        }
        if (!overwrite) {
            throw new FileAlreadyExistsException("File already exists:" + f);
        }
    }
    Path parentFolder = absolutePath.getParent();
    if (parentFolder != null && parentFolder.getParent() != null) {
        // skip root
        // Update the parent folder last modified time if the parent folder
        // already exists.
        String parentKey = pathToKey(parentFolder);
        FileMetadata parentMetadata = store.retrieveMetadata(parentKey);
        if (parentMetadata != null && parentMetadata.isDir() && parentMetadata.getBlobMaterialization() == BlobMaterialization.Explicit) {
            if (parentFolderLease != null) {
                store.updateFolderLastModifiedTime(parentKey, parentFolderLease);
            } else {
                updateParentFolderLastModifiedTime(key);
            }
        } else {
            // Make sure that the parent folder exists.
            // Create it using inherited permissions from the first existing directory going up the path
            Path firstExisting = parentFolder.getParent();
            FileMetadata metadata = store.retrieveMetadata(pathToKey(firstExisting));
            while (metadata == null) {
                // Guaranteed to terminate properly because we will eventually hit root, which will return non-null metadata
                firstExisting = firstExisting.getParent();
                metadata = store.retrieveMetadata(pathToKey(firstExisting));
            }
            mkdirs(parentFolder, metadata.getPermissionStatus().getPermission(), true);
        }
    }
    // Mask the permission first (with the default permission mask as well).
    FsPermission masked = applyUMask(permission, UMaskApplyMode.NewFile);
    PermissionStatus permissionStatus = createPermissionStatus(masked);
    OutputStream bufOutStream;
    if (store.isPageBlobKey(key)) {
        // Store page blobs directly in-place without renames.
        bufOutStream = store.storefile(key, permissionStatus);
    } else {
        // This is a block blob, so open the output blob stream based on the
        // encoded key.
        //
        String keyEncoded = encodeKey(key);
        // First create a blob at the real key, pointing back to the temporary file
        // This accomplishes a few things:
        // 1. Makes sure we can create a file there.
        // 2. Makes it visible to other concurrent threads/processes/nodes what
        //    we're doing.
        // 3. Makes it easier to restore/cleanup data in the event of us crashing.
        store.storeEmptyLinkFile(key, keyEncoded, permissionStatus);
        // The key is encoded to point to a common container at the storage server.
        // This reduces the number of splits on the server side when load balancing.
        // Ingress to Azure storage can take advantage of earlier splits. We remove
        // the root path to the key and prefix a random GUID to the tail (or leaf
        // filename) of the key. Keys are thus broadly and randomly distributed over
        // a single container to ease load balancing on the storage server. When the
        // blob is committed it is renamed to its earlier key. Uncommitted blocks
        // are not cleaned up and we leave it to Azure storage to garbage collect
        // these blocks.
        bufOutStream = new NativeAzureFsOutputStream(store.storefile(keyEncoded, permissionStatus), key, keyEncoded);
    }
    // Construct the data output stream from the buffered output stream.
    FSDataOutputStream fsOut = new FSDataOutputStream(bufOutStream, statistics);
    // Increment the counter
    instrumentation.fileCreated();
    // Return data output stream to caller.
    return fsOut;
}
Also used : Path(org.apache.hadoop.fs.Path) FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) DataOutputStream(java.io.DataOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) OutputStream(java.io.OutputStream) IOException(java.io.IOException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus)
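
The long comment above explains why block blob writes first go to a temporary, GUID-prefixed key and are only renamed to the real key on commit. The encodeKey method itself is private, so the sketch below only illustrates that idea: keep the leaf file name, prefix a random GUID, and place the result under a temporary prefix. The method name encodeKeySketch and the temporary prefix are placeholders, not the actual layout WASB uses.

import java.util.UUID;

// Illustrative only: the real NativeAzureFileSystem encodeKey() is private and
// its temporary-folder layout may differ from this placeholder.
static String encodeKeySketch(String key) {
    // Keep just the leaf name and prefix a random GUID so in-progress uploads
    // are spread broadly over the key space of a single container.
    String leaf = key.substring(key.lastIndexOf('/') + 1);
    return "azuretmpfolder/" + UUID.randomUUID().toString() + "-" + leaf;
}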

Example 33 with PermissionStatus

use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.

the class NativeAzureFileSystem method setOwner.

@Override
public void setOwner(Path p, String username, String groupname) throws IOException {
    Path absolutePath = makeAbsolute(p);
    performAuthCheck(absolutePath.toString(), WasbAuthorizationOperations.EXECUTE.toString(), "setOwner");
    String key = pathToKey(absolutePath);
    FileMetadata metadata = null;
    try {
        metadata = store.retrieveMetadata(key);
    } catch (IOException ex) {
        Throwable innerException = NativeAzureFileSystemHelper.checkForAzureStorageException(ex);
        if (innerException instanceof StorageException && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) innerException)) {
            throw new FileNotFoundException(String.format("File %s doesn't exist.", p));
        }
        throw ex;
    }
    if (metadata == null) {
        throw new FileNotFoundException("File doesn't exist: " + p);
    }
    PermissionStatus newPermissionStatus = new PermissionStatus(
            username == null ? metadata.getPermissionStatus().getUserName() : username,
            groupname == null ? metadata.getPermissionStatus().getGroupName() : groupname,
            metadata.getPermissionStatus().getPermission());
    if (metadata.getBlobMaterialization() == BlobMaterialization.Implicit) {
        // It's an implicit folder, need to materialize it.
        store.storeEmptyFolder(key, newPermissionStatus);
    } else {
        store.changePermissionStatus(key, newPermissionStatus);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) StorageException(com.microsoft.azure.storage.StorageException) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus)
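
The PermissionStatus built above keeps the existing user or group whenever the caller passes null for username or groupname, and it always preserves the file's permission bits. A minimal sketch of that fallback pattern (the helper name withNewOwner is hypothetical):

import org.apache.hadoop.fs.permission.PermissionStatus;

// Hypothetical helper mirroring the null-fallback logic in setOwner above.
static PermissionStatus withNewOwner(PermissionStatus current, String username, String groupname) {
    return new PermissionStatus(
        username != null ? username : current.getUserName(),
        groupname != null ? groupname : current.getGroupName(),
        current.getPermission());
}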

Aggregations

PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus): 33
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 11
Configuration (org.apache.hadoop.conf.Configuration): 9
IOException (java.io.IOException): 7
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
Test (org.junit.Test): 6
Path (org.apache.hadoop.fs.Path): 5
Before (org.junit.Before): 5
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 4
Block (org.apache.hadoop.hdfs.protocol.Block): 3
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 3
StorageException (com.microsoft.azure.storage.StorageException): 2
DataOutputStream (java.io.DataOutputStream): 2
File (java.io.File): 2
FileNotFoundException (java.io.FileNotFoundException): 2
ArrayList (java.util.ArrayList): 2
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 2
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 2
Mockito.doAnswer (org.mockito.Mockito.doAnswer): 2