Search in sources:

Example 31 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project.

From the class JsonUtilClient, the method toFsPermission:

/**
 * Converts an octal permission string to a {@link FsPermission}, optionally
 * wrapping it in a {@link FsPermissionExtension} when the ACL or encryption
 * bit is set.
 *
 * @param s octal permission string (e.g. "755"); parsed base-8
 * @param aclBit whether an ACL is present; {@code null} is treated as false
 * @param encBit whether the file is encrypted; {@code null} is treated as false
 * @return plain FsPermission, or an FsPermissionExtension if either bit is set
 */
static FsPermission toFsPermission(final String s, Boolean aclBit, Boolean encBit) {
    // Parse the octal string into the base permission bits.
    final FsPermission basePerm = new FsPermission(Short.parseShort(s, 8));
    // Boolean.TRUE.equals(...) maps a null box to false without unboxing.
    final boolean hasAcl = Boolean.TRUE.equals(aclBit);
    final boolean hasEnc = Boolean.TRUE.equals(encBit);
    if (!hasAcl && !hasEnc) {
        return basePerm;
    }
    return new FsPermissionExtension(basePerm, hasAcl, hasEnc);
}
Also used : FsPermissionExtension(org.apache.hadoop.hdfs.protocol.FsPermissionExtension) FsPermission(org.apache.hadoop.fs.permission.FsPermission)

Example 32 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project.

From the class PBHelperClient, the method convert:

/**
 * Converts a {@code CachePoolInfoProto} message into a {@link CachePoolInfo}.
 * The pool name is mandatory; every other field is copied only when the
 * protobuf reports it as present.
 *
 * @param proto the protobuf message; its pool name must be non-null
 * @return a CachePoolInfo populated from the set fields of {@code proto}
 */
public static CachePoolInfo convert(CachePoolInfoProto proto) {
    // Pool name is required; fail fast if the message lacks it.
    final String poolName = Preconditions.checkNotNull(proto.getPoolName());
    final CachePoolInfo info = new CachePoolInfo(poolName);
    // Each remaining field is optional in the protobuf schema.
    if (proto.hasOwnerName()) {
        info.setOwnerName(proto.getOwnerName());
    }
    if (proto.hasGroupName()) {
        info.setGroupName(proto.getGroupName());
    }
    if (proto.hasMode()) {
        // Mode is transported as an int; narrow it to the short FsPermission expects.
        info.setMode(new FsPermission((short) proto.getMode()));
    }
    if (proto.hasLimit()) {
        info.setLimit(proto.getLimit());
    }
    if (proto.hasDefaultReplication()) {
        // checkedCast throws rather than silently truncating an out-of-range value.
        info.setDefaultReplication(Shorts.checkedCast(proto.getDefaultReplication()));
    }
    if (proto.hasMaxRelativeExpiry()) {
        info.setMaxRelativeExpiryMs(proto.getMaxRelativeExpiry());
    }
    return info;
}
Also used : ByteString(com.google.protobuf.ByteString) FsPermission(org.apache.hadoop.fs.permission.FsPermission) CachePoolInfo(org.apache.hadoop.hdfs.protocol.CachePoolInfo)

Example 33 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project.

From the class FSDirAclOp, the method unprotectedRemoveAcl:

/**
 * Removes the ACL feature from the last inode in {@code iip}, restoring the
 * group permission bits from the ACL's access group entry when the feature
 * carries access-scoped entries (where the mask had overwritten them).
 * Caller must hold the FSDirectory write lock.
 *
 * @param fsd the directory tree (used only to assert the write lock)
 * @param iip path to the target inode
 * @throws IOException if the last inode cannot be resolved
 */
private static void unprotectedRemoveAcl(FSDirectory fsd, INodesInPath iip) throws IOException {
    assert fsd.hasWriteLock();
    final INode inode = FSDirectory.resolveLastINode(iip);
    final AclFeature aclFeature = inode.getAclFeature();
    if (aclFeature == null) {
        // No ACL to remove.
        return;
    }
    final int snapshotId = iip.getLatestSnapshotId();
    final List<AclEntry> featureEntries = AclStorage.getEntriesFromAclFeature(aclFeature);
    if (featureEntries.get(0).getScope() == AclEntryScope.ACCESS) {
        // Restore group permissions from the feature's entry to permission
        // bits, overwriting the mask, which is not part of a minimal ACL.
        final AclEntry groupKey = new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)
            .setType(AclEntryType.GROUP)
            .build();
        final int groupIdx = Collections.binarySearch(
            featureEntries, groupKey, AclTransformation.ACL_ENTRY_COMPARATOR);
        // A negative index means the group entry was missing — an invariant violation.
        Preconditions.checkPositionIndex(groupIdx, featureEntries.size(), "Invalid group entry index after binary-searching inode: " + inode.getFullPathName() + "(" + inode.getId() + ") " + "with featureEntries:" + featureEntries);
        final FsAction groupAction = featureEntries.get(groupIdx).getPermission();
        final FsPermission oldPerm = inode.getFsPermission();
        final FsPermission restoredPerm = new FsPermission(
            oldPerm.getUserAction(), groupAction, oldPerm.getOtherAction(), oldPerm.getStickyBit());
        inode.setPermission(restoredPerm, snapshotId);
    }
    inode.removeAclFeature(snapshotId);
}
Also used : FsAction(org.apache.hadoop.fs.permission.FsAction) AclEntry(org.apache.hadoop.fs.permission.AclEntry) FsPermission(org.apache.hadoop.fs.permission.FsPermission)

Example 34 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project.

From the class FSDirMkdirOp, the method addImplicitUwx:

/**
 * Builds the permission status for an implicitly created ancestor directory:
 * the parent's permission bits with user write+execute added, combined with
 * the owner/group of {@code perm}.
 *
 * @param parentPerm permission status whose bits seed the ancestor's mode
 * @param perm permission status supplying the owner and group names
 * @return a new PermissionStatus for the ancestor directory
 */
private static PermissionStatus addImplicitUwx(PermissionStatus parentPerm, PermissionStatus perm) {
    final FsPermission parentBits = parentPerm.getPermission();
    // Grant the creator write+execute on the ancestor so the mkdir can proceed.
    final FsAction userBits = parentBits.getUserAction().or(FsAction.WRITE_EXECUTE);
    final FsPermission ancestorPerm = new FsPermission(
        userBits, parentBits.getGroupAction(), parentBits.getOtherAction());
    return new PermissionStatus(perm.getUserName(), perm.getGroupName(), ancestorPerm);
}
Also used : FsPermission(org.apache.hadoop.fs.permission.FsPermission) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus)

Example 35 with FsPermission

Use of org.apache.hadoop.fs.permission.FsPermission in the Apache Hadoop project.

From the class AclStorage, the method updateINodeAcl:

/**
   * Updates an inode with a new ACL.  This method takes a full logical ACL and
   * stores the entries to the inode's {@link FsPermission} and
   * {@link AclFeature}.
   *
   * @param inode INode to update
   * @param newAcl List<AclEntry> containing new ACL entries
   * @param snapshotId int latest snapshot ID of inode
   * @throws AclException if the ACL is invalid for the given inode
   * @throws QuotaExceededException if quota limit is exceeded
   */
public static void updateINodeAcl(INode inode, List<AclEntry> newAcl, int snapshotId) throws AclException, QuotaExceededException {
    // A logical ACL always carries at least the three minimal entries.
    assert newAcl.size() >= 3;
    final FsPermission currentPerm = inode.getFsPermission();
    final FsPermission updatedPerm;
    if (AclUtil.isMinimalAcl(newAcl)) {
        // Minimal ACL: drop any existing ACL feature and fold the entries
        // back into plain permission bits.
        if (inode.getAclFeature() != null) {
            inode.removeAclFeature(snapshotId);
        }
        updatedPerm = createFsPermissionForMinimalAcl(newAcl, currentPerm);
    } else {
        // Extended ACL: split entries into access vs. default scope.
        final ScopedAclEntries scoped = new ScopedAclEntries(newAcl);
        final List<AclEntry> accessEntries = scoped.getAccessEntries();
        final List<AclEntry> defaultEntries = scoped.getDefaultEntries();
        // Only directories may carry a default ACL.
        if (!defaultEntries.isEmpty() && !inode.isDirectory()) {
            throw new AclException("Invalid ACL: only directories may have a default ACL.");
        }
        // Replace any previous feature with one built from the new entries.
        if (inode.getAclFeature() != null) {
            inode.removeAclFeature(snapshotId);
        }
        inode.addAclFeature(createAclFeature(accessEntries, defaultEntries), snapshotId);
        updatedPerm = createFsPermissionForExtendedAcl(accessEntries, currentPerm);
    }
    inode.setPermission(updatedPerm, snapshotId);
}
Also used : ScopedAclEntries(org.apache.hadoop.fs.permission.ScopedAclEntries) AclEntry(org.apache.hadoop.fs.permission.AclEntry) FsPermission(org.apache.hadoop.fs.permission.FsPermission) AclException(org.apache.hadoop.hdfs.protocol.AclException)

Aggregations

FsPermission (org.apache.hadoop.fs.permission.FsPermission)427 Path (org.apache.hadoop.fs.Path)267 Test (org.junit.Test)180 IOException (java.io.IOException)120 FileSystem (org.apache.hadoop.fs.FileSystem)93 Configuration (org.apache.hadoop.conf.Configuration)89 FileStatus (org.apache.hadoop.fs.FileStatus)87 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)52 AccessControlException (org.apache.hadoop.security.AccessControlException)43 UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)36 FileNotFoundException (java.io.FileNotFoundException)33 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)29 File (java.io.File)26 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)26 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)26 AclEntry (org.apache.hadoop.fs.permission.AclEntry)25 ArrayList (java.util.ArrayList)22 HashMap (java.util.HashMap)19 YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)16 URI (java.net.URI)15