use of org.apache.hadoop.hdfs.protocol.AclException in project alluxio by Alluxio.
the class SupportedHdfsAclProvider method getAcl.
@Override
public Pair<AccessControlList, DefaultAccessControlList> getAcl(FileSystem hdfs, String path)
    throws IOException {
  AclStatus hdfsAcl;
  Path filePath = new Path(path);
  boolean isDir = hdfs.isDirectory(filePath);
  try {
    hdfsAcl = hdfs.getAclStatus(filePath);
  } catch (AclException e) {
    // When dfs.namenode.acls.enabled is false, getAclStatus throws AclException.
    return new Pair<>(null, null);
  }
  AccessControlList acl = new AccessControlList();
  DefaultAccessControlList defaultAcl = new DefaultAccessControlList();
  acl.setOwningUser(hdfsAcl.getOwner().intern());
  acl.setOwningGroup(hdfsAcl.getGroup().intern());
  defaultAcl.setOwningUser(hdfsAcl.getOwner());
  defaultAcl.setOwningGroup(hdfsAcl.getGroup());
  for (AclEntry entry : hdfsAcl.getEntries()) {
    alluxio.security.authorization.AclEntry.Builder builder =
        new alluxio.security.authorization.AclEntry.Builder();
    builder.setType(getAclEntryType(entry));
    builder.setSubject(entry.getName() == null ? "" : entry.getName());
    FsAction permission = entry.getPermission();
    // A permission may imply several actions, so each one is checked independently.
    if (permission.implies(FsAction.READ)) {
      builder.addAction(AclAction.READ);
    }
    if (permission.implies(FsAction.WRITE)) {
      builder.addAction(AclAction.WRITE);
    }
    if (permission.implies(FsAction.EXECUTE)) {
      builder.addAction(AclAction.EXECUTE);
    }
    if (entry.getScope().equals(AclEntryScope.ACCESS)) {
      acl.setEntry(builder.build());
    } else {
      // default ACL, must be a directory
      defaultAcl.setEntry(builder.build());
    }
  }
  if (isDir) {
    return new Pair<>(acl, defaultAcl);
  } else {
    // a null defaultACL indicates this is a file
    return new Pair<>(acl, null);
  }
}
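A minimal caller sketch (not Alluxio source; the provider instance, the hdfs FileSystem handle and the path are assumed) showing how the three return shapes above can be told apart:

Pair<AccessControlList, DefaultAccessControlList> result = provider.getAcl(hdfs, "/user/data");
if (result.getFirst() == null) {
  // ACLs are disabled on the NameNode (dfs.namenode.acls.enabled = false).
} else if (result.getSecond() == null) {
  // The path is a file: it carries an access ACL but never a default ACL.
} else {
  // The path is a directory: both the access ACL and the default ACL are returned.
}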
use of org.apache.hadoop.hdfs.protocol.AclException in project hadoop by apache.
the class AclTransformation method calculateMasks.
/**
* Calculates mask entries required for the ACL. Mask calculation is performed
* separately for each scope: access and default. This method is responsible
* for handling the following cases of mask calculation:
* 1. Throws an exception if the caller attempts to remove the mask entry of an
* existing ACL that requires it. If the ACL has any named entries, then a
* mask entry is required.
* 2. If the caller supplied a mask in the ACL spec, use it.
* 3. If the caller did not supply a mask, but there are ACL entry changes in
* this scope, then automatically calculate a new mask. The permissions of
* the new mask are the union of the permissions on the group entry and all
* named entries.
*
* @param aclBuilder ArrayList<AclEntry> containing entries to build
* @param providedMask EnumMap<AclEntryScope, AclEntry> mapping each scope to
* the mask entry that was provided for that scope (if provided)
* @param maskDirty EnumSet<AclEntryScope> which contains a scope if the mask
* entry is dirty (added or deleted) in that scope
* @param scopeDirty EnumSet<AclEntryScope> which contains a scope if any entry
* is dirty (added or deleted) in that scope
* @throws AclException if validation fails
*/
private static void calculateMasks(List<AclEntry> aclBuilder,
    EnumMap<AclEntryScope, AclEntry> providedMask, EnumSet<AclEntryScope> maskDirty,
    EnumSet<AclEntryScope> scopeDirty) throws AclException {
  EnumSet<AclEntryScope> scopeFound = EnumSet.noneOf(AclEntryScope.class);
  EnumMap<AclEntryScope, FsAction> unionPerms = Maps.newEnumMap(AclEntryScope.class);
  EnumSet<AclEntryScope> maskNeeded = EnumSet.noneOf(AclEntryScope.class);
  // Determine which scopes are present, which scopes need a mask, and the
  // union of group class permissions in each scope.
  for (AclEntry entry : aclBuilder) {
    scopeFound.add(entry.getScope());
    if (entry.getType() == GROUP || entry.getName() != null) {
      FsAction scopeUnionPerms = Objects.firstNonNull(unionPerms.get(entry.getScope()), FsAction.NONE);
      unionPerms.put(entry.getScope(), scopeUnionPerms.or(entry.getPermission()));
    }
    if (entry.getName() != null) {
      maskNeeded.add(entry.getScope());
    }
  }
  // Add mask entry if needed in each scope.
  for (AclEntryScope scope : scopeFound) {
    if (!providedMask.containsKey(scope) && maskNeeded.contains(scope) && maskDirty.contains(scope)) {
      // Caller explicitly removed mask entry, but it's required.
      throw new AclException("Invalid ACL: mask is required and cannot be deleted.");
    } else if (providedMask.containsKey(scope) && (!scopeDirty.contains(scope) || maskDirty.contains(scope))) {
      // Caller explicitly provided new mask, or we are preserving the existing
      // mask in an unchanged scope.
      aclBuilder.add(providedMask.get(scope));
    } else if (maskNeeded.contains(scope) || providedMask.containsKey(scope)) {
      // Otherwise, if there are maskable entries present, or the ACL
      // previously had a mask, then recalculate a mask automatically.
      aclBuilder.add(new AclEntry.Builder().setScope(scope).setType(MASK)
          .setPermission(unionPerms.get(scope)).build());
    }
  }
}
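A standalone sketch (not part of AclTransformation; the class name and sample entries are illustrative) of case 3 above: the recalculated mask is the union of the group entry and all named entries within a scope.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

class MaskUnionExample {
  static FsAction calculateAccessMask() {
    List<AclEntry> entries = Arrays.asList(
        new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER)
            .setPermission(FsAction.ALL).build(),                    // user::rwx (owner, ignored)
        new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER)
            .setName("bruce").setPermission(FsAction.READ).build(),  // user:bruce:r--
        new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.GROUP)
            .setPermission(FsAction.READ_WRITE).build(),             // group::rw-
        new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.OTHER)
            .setPermission(FsAction.NONE).build());                  // other::--- (ignored)
    FsAction mask = FsAction.NONE;
    for (AclEntry e : entries) {
      // Only the group entry and named entries contribute to the mask union.
      if (e.getType() == AclEntryType.GROUP || e.getName() != null) {
        mask = mask.or(e.getPermission());
      }
    }
    return mask; // READ_WRITE: group rw- unioned with user:bruce r--.
  }
}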
use of org.apache.hadoop.hdfs.protocol.AclException in project pravega by pravega.
the class ConcatOperation method run.
@Override
public void run() throws IOException, BadOffsetException, StreamSegmentSealedException, StorageNotPrimaryException {
    HDFSSegmentHandle target = getTarget();
    long traceId = LoggerHelpers.traceEnter(log, "concat", target, this.offset, this.sourceSegmentName);
    // Check for target offset and whether it is sealed.
    FileDescriptor lastFile = this.target.getLastFile();
    validate(lastFile);
    // Get all files for source handle (ignore handle contents and refresh from file system). Verify it is sealed.
    val sourceFiles = findAll(this.sourceSegmentName, true);
    Preconditions.checkState(isSealed(sourceFiles.get(sourceFiles.size() - 1)),
            "Cannot concat segment '%s' into '%s' because it is not sealed.",
            this.sourceSegmentName, this.target.getSegmentName());
    if (sourceFiles.get(sourceFiles.size() - 1).getLastOffset() == 0) {
        // Quick bail-out: source segment is empty, simply delete it.
        log.debug("Source Segment '{}' is empty. No concat will be performed. Source Segment will be deleted.", this.sourceSegmentName);
        val readHandle = HDFSSegmentHandle.read(this.sourceSegmentName, sourceFiles);
        new DeleteOperation(readHandle, this.context).run();
        LoggerHelpers.traceLeave(log, "concat", traceId, this.target, this.offset, this.sourceSegmentName);
        return;
    }
    try {
        // Concat source files into target and update the handle.
        FileDescriptor newLastFile = combine(lastFile, sourceFiles, false);
        this.target.replaceLastFile(newLastFile);
    } catch (FileNotFoundException | AclException ex) {
        checkForFenceOut(this.target.getSegmentName(), this.target.getFiles().size(), lastFile);
        // If we were not fenced out, then this is a legitimate exception - rethrow it.
        throw ex;
    }
    LoggerHelpers.traceLeave(log, "concat", traceId, target, this.offset, this.sourceSegmentName);
}
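Both Pravega operations in this listing share the same catch-and-check pattern around their HDFS mutation; the sketch below isolates it, with appendOrConcat, segmentName, fileCount and lastFile as placeholders rather than actual Pravega APIs:

try {
    appendOrConcat(); // placeholder for the HDFS append/concat call
} catch (FileNotFoundException | AclException ex) {
    // A missing file or a permission/ACL failure may mean another instance has taken
    // ownership of the segment and fenced this one out; checkForFenceOut verifies that,
    // presumably surfacing StorageNotPrimaryException (declared by both run() methods).
    checkForFenceOut(segmentName, fileCount, lastFile);
    // If we were not fenced out, the exception is legitimate, so rethrow it.
    throw ex;
}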
use of org.apache.hadoop.hdfs.protocol.AclException in project pravega by pravega.
the class WriteOperation method run.
@Override
public void run() throws BadOffsetException, IOException, StorageNotPrimaryException {
    HDFSSegmentHandle handle = getTarget();
    long traceId = LoggerHelpers.traceEnter(log, "write", handle, this.offset, this.length);
    FileDescriptor lastFile = handle.getLastFile();
    Timer timer = new Timer();
    try (FSDataOutputStream stream = this.context.fileSystem.append(lastFile.getPath())) {
        if (this.offset != lastFile.getLastOffset()) {
            // Validate the offset only after the file has been opened, so that a missing file
            // surfaces as FileNotFoundException before we throw BadOffsetException.
            throw new BadOffsetException(handle.getSegmentName(), lastFile.getLastOffset(), this.offset);
        } else if (stream.getPos() != lastFile.getLength()) {
            // Looks like the filesystem changed from underneath us. This could be our bug, but it could be something else.
            // Update our knowledge of the filesystem and throw a BadOffsetException - this should cause upstream code
            // to try to reconcile; if it can't then the upstream code should shut down or take other appropriate measures.
            log.warn("File change detected for '{}'. Expected length = {}, actual length = {}.", lastFile, lastFile.getLength(), stream.getPos());
            lastFile.setLength(stream.getPos());
            throw new BadOffsetException(handle.getSegmentName(), lastFile.getLastOffset(), this.offset);
        }
        if (this.length == 0) {
            // Note: IOUtils.copyBytes with length == 0 will enter an infinite loop, hence the need for this check.
            return;
        }
        // We need to be very careful with IOUtils.copyBytes. There are many overloads with very similar signatures.
        // There is a difference between (InputStream, OutputStream, int, boolean) and (InputStream, OutputStream, long, boolean),
        // in that the one with "int" uses the third arg as a buffer size, and the one with "long" uses it as the number
        // of bytes to copy.
        IOUtils.copyBytes(this.data, stream, (long) this.length, false);
        stream.flush();
        lastFile.increaseLength(this.length);
    } catch (FileNotFoundException | AclException ex) {
        checkForFenceOut(handle.getSegmentName(), handle.getFiles().size(), handle.getLastFile());
        // If we were not fenced out, then this is a legitimate exception - rethrow it.
        throw ex;
    }
    HDFSMetrics.WRITE_LATENCY.reportSuccessEvent(timer.getElapsed());
    HDFSMetrics.WRITE_BYTES.add(this.length);
    LoggerHelpers.traceLeave(log, "write", traceId, handle, offset, length);
}
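A small standalone sketch (class name and data are illustrative, not Pravega code) of the IOUtils.copyBytes overload distinction described in the comment above:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.IOUtils;

class CopyBytesOverloads {
    static void copyExactly(byte[] payload) throws IOException {
        ByteArrayInputStream in = new ByteArrayInputStream(payload);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // (InputStream, OutputStream, long, boolean): the long argument is the number of
        // bytes to copy, which is why WriteOperation casts this.length to long above.
        IOUtils.copyBytes(in, out, (long) payload.length, false);
        // By contrast, (InputStream, OutputStream, int, boolean) treats the int argument
        // as a buffer size and copies until EOF:
        // IOUtils.copyBytes(in, out, 4096, false);
    }
}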