Search in sources :

Example 1 with ScopedAclEntries

use of org.apache.hadoop.fs.permission.ScopedAclEntries in project hadoop by apache.

the class AclStorage method updateINodeAcl.

/**
 * Replaces an inode's ACL with a new full logical ACL.  The entries are
 * persisted into the inode's {@link FsPermission} bits and, for extended
 * ACLs, an {@link AclFeature} attached to the inode.
 *
 * @param inode INode to update
 * @param newAcl List<AclEntry> containing new ACL entries
 * @param snapshotId int latest snapshot ID of inode
 * @throws AclException if the ACL is invalid for the given inode
 * @throws QuotaExceededException if quota limit is exceeded
 */
public static void updateINodeAcl(INode inode, List<AclEntry> newAcl, int snapshotId) throws AclException, QuotaExceededException {
    assert newAcl.size() >= 3;
    final FsPermission currentPerm = inode.getFsPermission();
    final FsPermission updatedPerm;
    if (AclUtil.isMinimalAcl(newAcl)) {
        // Minimal ACL: drop any previously attached ACL feature and fold the
        // three base entries back into the permission bits.
        if (inode.getAclFeature() != null) {
            inode.removeAclFeature(snapshotId);
        }
        updatedPerm = createFsPermissionForMinimalAcl(newAcl, currentPerm);
    } else {
        // Extended ACL: partition the entries by scope.
        ScopedAclEntries scoped = new ScopedAclEntries(newAcl);
        List<AclEntry> access = scoped.getAccessEntries();
        List<AclEntry> defaults = scoped.getDefaultEntries();
        // Default entries are meaningful only on directories.
        if (!defaults.isEmpty() && !inode.isDirectory()) {
            throw new AclException("Invalid ACL: only directories may have a default ACL.");
        }
        // Replace any existing feature with one built from the new entries.
        if (inode.getAclFeature() != null) {
            inode.removeAclFeature(snapshotId);
        }
        inode.addAclFeature(createAclFeature(access, defaults), snapshotId);
        updatedPerm = createFsPermissionForExtendedAcl(access, currentPerm);
    }
    inode.setPermission(updatedPerm, snapshotId);
}
Also used : ScopedAclEntries(org.apache.hadoop.fs.permission.ScopedAclEntries) AclEntry(org.apache.hadoop.fs.permission.AclEntry) FsPermission(org.apache.hadoop.fs.permission.FsPermission) AclException(org.apache.hadoop.hdfs.protocol.AclException)

Example 2 with ScopedAclEntries

use of org.apache.hadoop.fs.permission.ScopedAclEntries in project hadoop by apache.

the class AclStorage method readINodeLogicalAcl.

/**
 * Reads the full logical ACL of an inode, combining the permission bits in
 * its {@link FsPermission} with any entries stored in its {@link AclFeature}.
 * Every inode logically has an ACL even if none was set explicitly; when the
 * inode has no extended ACL, the result is the minimal ACL consisting of
 * exactly 3 entries corresponding to the owner, group and other permissions.
 * Only the inode's current state is read; querying by snapshot ID is not
 * supported, because this method backs the ACL modification APIs, which
 * always apply a delta on top of current state.
 *
 * @param inode INode to read
 * @return List<AclEntry> containing all logical inode ACL entries
 */
public static List<AclEntry> readINodeLogicalAcl(INode inode) {
    final FsPermission perm = inode.getFsPermission();
    final AclFeature feature = inode.getAclFeature();
    if (feature == null) {
        return AclUtil.getMinimalAcl(perm);
    }
    // Partition the feature's stored entries by scope.
    List<AclEntry> stored = getEntriesFromAclFeature(feature);
    ScopedAclEntries scoped = new ScopedAclEntries(stored);
    List<AclEntry> access = scoped.getAccessEntries();
    List<AclEntry> defaults = scoped.getDefaultEntries();
    // Capacity: the explicit entries plus the 3 implicit entries (owner,
    // group and other) derived from the permission bits.
    final List<AclEntry> logicalAcl = Lists.newArrayListWithCapacity(stored.size() + 3);
    if (access.isEmpty()) {
        // A default-only ACL is possible.  In that case, fall back to the
        // minimal access ACL implied by the permission bits.
        logicalAcl.addAll(AclUtil.getMinimalAcl(perm));
    } else {
        // Owner entry implied from the user permission bits.
        logicalAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER).setPermission(perm.getUserAction()).build());
        // Named user and group entries taken from the feature.
        logicalAcl.addAll(access);
        // Mask entry implied from the group permission bits.
        logicalAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.MASK).setPermission(perm.getGroupAction()).build());
        // Other entry implied from the other permission bits.
        logicalAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.OTHER).setPermission(perm.getOtherAction()).build());
    }
    // Default entries follow the access entries; the order built above is
    // already correct, so no sort is needed.
    logicalAcl.addAll(defaults);
    return logicalAcl;
}
Also used : ScopedAclEntries(org.apache.hadoop.fs.permission.ScopedAclEntries) AclEntry(org.apache.hadoop.fs.permission.AclEntry) FsPermission(org.apache.hadoop.fs.permission.FsPermission)

Example 3 with ScopedAclEntries

use of org.apache.hadoop.fs.permission.ScopedAclEntries in project hadoop by apache.

the class AclTransformation method buildAndValidateAcl.

/**
 * Produces the final, validated list of ACL entries: trims and sorts the
 * builder, rejects duplicates and improperly named entries, and verifies the
 * required base entries exist in each scope.
 *
 * @param aclBuilder ArrayList<AclEntry> containing entries to build
 * @return List<AclEntry> unmodifiable, sorted list of ACL entries
 * @throws AclException if validation fails
 */
private static List<AclEntry> buildAndValidateAcl(ArrayList<AclEntry> aclBuilder) throws AclException {
    aclBuilder.trimToSize();
    Collections.sort(aclBuilder, ACL_ENTRY_COMPARATOR);
    // One pass over the sorted entries: adjacent equal entries are
    // duplicates, and MASK/OTHER entries must never carry a name.
    AclEntry previous = null;
    for (AclEntry current : aclBuilder) {
        if (previous != null && ACL_ENTRY_COMPARATOR.compare(previous, current) == 0) {
            throw new AclException("Invalid ACL: multiple entries with same scope, type and name.");
        }
        if (current.getName() != null && (current.getType() == MASK || current.getType() == OTHER)) {
            throw new AclException("Invalid ACL: this entry type must not have a name: " + current + ".");
        }
        previous = current;
    }
    ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder);
    checkMaxEntries(scopedEntries);
    // The base user, group and other entries are required in the access
    // scope, and also in the default scope whenever any default entries
    // are present.
    for (AclEntryType type : EnumSet.of(USER, GROUP, OTHER)) {
        AclEntry accessEntryKey = new AclEntry.Builder().setScope(ACCESS).setType(type).build();
        if (Collections.binarySearch(scopedEntries.getAccessEntries(), accessEntryKey, ACL_ENTRY_COMPARATOR) < 0) {
            throw new AclException("Invalid ACL: the user, group and other entries are required.");
        }
        if (!scopedEntries.getDefaultEntries().isEmpty()) {
            AclEntry defaultEntryKey = new AclEntry.Builder().setScope(DEFAULT).setType(type).build();
            if (Collections.binarySearch(scopedEntries.getDefaultEntries(), defaultEntryKey, ACL_ENTRY_COMPARATOR) < 0) {
                throw new AclException("Invalid default ACL: the user, group and other entries are required.");
            }
        }
    }
    return Collections.unmodifiableList(aclBuilder);
}
Also used : ScopedAclEntries(org.apache.hadoop.fs.permission.ScopedAclEntries) AclEntryType(org.apache.hadoop.fs.permission.AclEntryType) AclEntry(org.apache.hadoop.fs.permission.AclEntry) AclException(org.apache.hadoop.hdfs.protocol.AclException)

Example 4 with ScopedAclEntries

use of org.apache.hadoop.fs.permission.ScopedAclEntries in project hadoop by apache.

the class AclStorage method copyINodeDefaultAcl.

/**
 * If a default ACL is defined on a parent directory, then copies that default
 * ACL to a newly created child file or directory.
 *
 * @param child INode newly created child
 * @return boolean true if the parent's default ACL was copied to the child,
 *         false if the parent has no default ACL or the child is neither a
 *         file nor a directory
 */
public static boolean copyINodeDefaultAcl(INode child) {
    INodeDirectory parent = child.getParent();
    AclFeature parentAclFeature = parent.getAclFeature();
    if (parentAclFeature == null || !(child.isFile() || child.isDirectory())) {
        return false;
    }
    // Split parent's entries into access vs. default.  Reuse the feature
    // already fetched and null-checked above instead of looking it up again.
    List<AclEntry> featureEntries = getEntriesFromAclFeature(parentAclFeature);
    ScopedAclEntries scopedEntries = new ScopedAclEntries(featureEntries);
    List<AclEntry> parentDefaultEntries = scopedEntries.getDefaultEntries();
    // The parent may have an access ACL but no default ACL.  If so, exit.
    if (parentDefaultEntries.isEmpty()) {
        return false;
    }
    // Pre-allocate list size for access entries to copy from parent.
    List<AclEntry> accessEntries = Lists.newArrayListWithCapacity(parentDefaultEntries.size());
    FsPermission childPerm = child.getFsPermission();
    // Copy each default ACL entry from parent to new child's access ACL.
    boolean parentDefaultIsMinimal = AclUtil.isMinimalAcl(parentDefaultEntries);
    for (AclEntry entry : parentDefaultEntries) {
        AclEntryType type = entry.getType();
        String name = entry.getName();
        AclEntry.Builder builder = new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(type).setName(name);
        // The child's initial permission bits are treated as the mode parameter,
        // which can filter copied permission values for owner, mask and other.
        final FsAction permission;
        if (type == AclEntryType.USER && name == null) {
            // Unnamed user entry (the owner): filter by the user mode bits.
            permission = entry.getPermission().and(childPerm.getUserAction());
        } else if (type == AclEntryType.GROUP && parentDefaultIsMinimal) {
            // This only happens if the default ACL is a minimal ACL: exactly 3
            // entries corresponding to owner, group and other.  In this case,
            // filter the group permissions.
            permission = entry.getPermission().and(childPerm.getGroupAction());
        } else if (type == AclEntryType.MASK) {
            // Group bits from mode parameter filter permission of mask entry.
            permission = entry.getPermission().and(childPerm.getGroupAction());
        } else if (type == AclEntryType.OTHER) {
            permission = entry.getPermission().and(childPerm.getOtherAction());
        } else {
            // Named entries (and group entries alongside a mask) copy through
            // unfiltered; the mask governs their effective permissions.
            permission = entry.getPermission();
        }
        builder.setPermission(permission);
        accessEntries.add(builder.build());
    }
    // A new directory also receives a copy of the parent's default ACL.
    List<AclEntry> defaultEntries = child.isDirectory() ? parentDefaultEntries : Collections.<AclEntry>emptyList();
    final FsPermission newPerm;
    if (!AclUtil.isMinimalAcl(accessEntries) || !defaultEntries.isEmpty()) {
        // Save the new ACL to the child.
        child.addAclFeature(createAclFeature(accessEntries, defaultEntries));
        newPerm = createFsPermissionForExtendedAcl(accessEntries, childPerm);
    } else {
        // The child is receiving a minimal ACL.
        newPerm = createFsPermissionForMinimalAcl(accessEntries, childPerm);
    }
    child.setPermission(newPerm);
    return true;
}
Also used : FsAction(org.apache.hadoop.fs.permission.FsAction) ScopedAclEntries(org.apache.hadoop.fs.permission.ScopedAclEntries) AclEntryType(org.apache.hadoop.fs.permission.AclEntryType) AclEntry(org.apache.hadoop.fs.permission.AclEntry) FsPermission(org.apache.hadoop.fs.permission.FsPermission)

Example 5 with ScopedAclEntries

use of org.apache.hadoop.fs.permission.ScopedAclEntries in project hadoop by apache.

the class AclTransformation method copyDefaultsIfNeeded.

/**
 * Adds unspecified default entries by copying permissions from the
 * corresponding access entries.
 *
 * @param aclBuilder ArrayList<AclEntry> containing entries to build
 */
private static void copyDefaultsIfNeeded(List<AclEntry> aclBuilder) {
    Collections.sort(aclBuilder, ACL_ENTRY_COMPARATOR);
    ScopedAclEntries scoped = new ScopedAclEntries(aclBuilder);
    // Nothing to copy unless the ACL has a default scope at all.
    if (scoped.getDefaultEntries().isEmpty()) {
        return;
    }
    List<AclEntry> accessEntries = scoped.getAccessEntries();
    List<AclEntry> defaultEntries = scoped.getDefaultEntries();
    List<AclEntry> copiedEntries = Lists.newArrayListWithCapacity(3);
    for (AclEntryType type : EnumSet.of(USER, GROUP, OTHER)) {
        AclEntry defaultKey = new AclEntry.Builder().setScope(DEFAULT).setType(type).build();
        // Only synthesize a default entry when one of this type is missing.
        if (Collections.binarySearch(defaultEntries, defaultKey, ACL_ENTRY_COMPARATOR) < 0) {
            AclEntry accessKey = new AclEntry.Builder().setScope(ACCESS).setType(type).build();
            int accessIndex = Collections.binarySearch(accessEntries, accessKey, ACL_ENTRY_COMPARATOR);
            if (accessIndex >= 0) {
                // Copy the access entry's permission into a new default entry.
                copiedEntries.add(new AclEntry.Builder().setScope(DEFAULT).setType(type).setPermission(accessEntries.get(accessIndex).getPermission()).build());
            }
        }
    }
    // Append only after the scan completes, so the binary searches above
    // never run against a list that is being mutated.
    aclBuilder.addAll(copiedEntries);
}
Also used : ScopedAclEntries(org.apache.hadoop.fs.permission.ScopedAclEntries) AclEntryType(org.apache.hadoop.fs.permission.AclEntryType) AclEntry(org.apache.hadoop.fs.permission.AclEntry)

Aggregations

AclEntry (org.apache.hadoop.fs.permission.AclEntry)5 ScopedAclEntries (org.apache.hadoop.fs.permission.ScopedAclEntries)5 AclEntryType (org.apache.hadoop.fs.permission.AclEntryType)3 FsPermission (org.apache.hadoop.fs.permission.FsPermission)3 AclException (org.apache.hadoop.hdfs.protocol.AclException)2 FsAction (org.apache.hadoop.fs.permission.FsAction)1