use of org.apache.hadoop.hdfs.protocol.AclException in project hadoop by apache.
the class AclStorage method updateINodeAcl.
/**
* Updates an inode with a new ACL. This method takes a full logical ACL and
* stores the entries to the inode's {@link FsPermission} and
* {@link AclFeature}.
*
* @param inode INode to update
* @param newAcl List<AclEntry> containing new ACL entries
* @param snapshotId int latest snapshot ID of inode
* @throws AclException if the ACL is invalid for the given inode
* @throws QuotaExceededException if quota limit is exceeded
*/
public static void updateINodeAcl(INode inode, List<AclEntry> newAcl, int snapshotId)
    throws AclException, QuotaExceededException {
  assert newAcl.size() >= 3;
  FsPermission perm = inode.getFsPermission();
  final FsPermission newPerm;
  if (!AclUtil.isMinimalAcl(newAcl)) {
    // This is an extended ACL. Split entries into access vs. default.
    ScopedAclEntries scoped = new ScopedAclEntries(newAcl);
    List<AclEntry> accessEntries = scoped.getAccessEntries();
    List<AclEntry> defaultEntries = scoped.getDefaultEntries();
    // Only directories may have a default ACL.
    if (!defaultEntries.isEmpty() && !inode.isDirectory()) {
      throw new AclException("Invalid ACL: only directories may have a default ACL.");
    }
    // Attach entries to the feature, replacing any existing feature first.
    if (inode.getAclFeature() != null) {
      inode.removeAclFeature(snapshotId);
    }
    inode.addAclFeature(createAclFeature(accessEntries, defaultEntries), snapshotId);
    newPerm = createFsPermissionForExtendedAcl(accessEntries, perm);
  } else {
    // This is a minimal ACL. Remove the ACL feature if the inode previously had one.
    if (inode.getAclFeature() != null) {
      inode.removeAclFeature(snapshotId);
    }
    newPerm = createFsPermissionForMinimalAcl(newAcl, perm);
  }
  inode.setPermission(newPerm, snapshotId);
}
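To make the minimal-vs-extended distinction concrete, here is a small, self-contained sketch using the public org.apache.hadoop.fs.permission builder API. The user name and permissions are invented for illustration; a three-entry list maps directly onto FsPermission, while anything longer takes the AclFeature branch above.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class AclShapeExample {
    public static void main(String[] args) {
        // The three base entries: a "minimal" ACL that maps directly onto FsPermission.
        List<AclEntry> minimalAcl = Arrays.asList(
            entry(AclEntryType.USER, null, FsAction.ALL),
            entry(AclEntryType.GROUP, null, FsAction.READ_EXECUTE),
            entry(AclEntryType.OTHER, null, FsAction.NONE));

        // A named-user entry makes the ACL "extended"; updateINodeAcl would then
        // store the full list in an AclFeature and fold the remaining bits into
        // the inode's FsPermission.
        List<AclEntry> extendedAcl = Arrays.asList(
            entry(AclEntryType.USER, null, FsAction.ALL),
            entry(AclEntryType.USER, "bruce", FsAction.READ_WRITE), // named entry (illustrative)
            entry(AclEntryType.GROUP, null, FsAction.READ_EXECUTE),
            entry(AclEntryType.MASK, null, FsAction.READ_WRITE),
            entry(AclEntryType.OTHER, null, FsAction.NONE));

        System.out.println("minimal size = " + minimalAcl.size());   // 3
        System.out.println("extended size = " + extendedAcl.size()); // 5
    }

    private static AclEntry entry(AclEntryType type, String name, FsAction perm) {
        AclEntry.Builder b = new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)
            .setType(type)
            .setPermission(perm);
        if (name != null) {
            b.setName(name);
        }
        return b.build();
    }
}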
use of org.apache.hadoop.hdfs.protocol.AclException in project hadoop by apache.
the class AclTransformation method buildAndValidateAcl.
/**
* Builds the final list of ACL entries to return by trimming, sorting and
* validating the ACL entries that have been added.
*
* @param aclBuilder ArrayList<AclEntry> containing entries to build
* @return List<AclEntry> unmodifiable, sorted list of ACL entries
* @throws AclException if validation fails
*/
private static List<AclEntry> buildAndValidateAcl(ArrayList<AclEntry> aclBuilder)
    throws AclException {
  aclBuilder.trimToSize();
  Collections.sort(aclBuilder, ACL_ENTRY_COMPARATOR);
  // Full iteration to check for duplicates and invalid named entries.
  AclEntry prevEntry = null;
  for (AclEntry entry : aclBuilder) {
    if (prevEntry != null && ACL_ENTRY_COMPARATOR.compare(prevEntry, entry) == 0) {
      throw new AclException("Invalid ACL: multiple entries with same scope, type and name.");
    }
    if (entry.getName() != null && (entry.getType() == MASK || entry.getType() == OTHER)) {
      throw new AclException("Invalid ACL: this entry type must not have a name: " + entry + ".");
    }
    prevEntry = entry;
  }
  ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder);
  checkMaxEntries(scopedEntries);
  // Verify that the required base entries (user, group, other) are present in the
  // access ACL and, if a default ACL exists, in the default ACL as well.
  for (AclEntryType type : EnumSet.of(USER, GROUP, OTHER)) {
    AclEntry accessEntryKey = new AclEntry.Builder().setScope(ACCESS).setType(type).build();
    if (Collections.binarySearch(scopedEntries.getAccessEntries(), accessEntryKey,
        ACL_ENTRY_COMPARATOR) < 0) {
      throw new AclException("Invalid ACL: the user, group and other entries are required.");
    }
    if (!scopedEntries.getDefaultEntries().isEmpty()) {
      AclEntry defaultEntryKey = new AclEntry.Builder().setScope(DEFAULT).setType(type).build();
      if (Collections.binarySearch(scopedEntries.getDefaultEntries(), defaultEntryKey,
          ACL_ENTRY_COMPARATOR) < 0) {
        throw new AclException("Invalid default ACL: the user, group and other entries are required.");
      }
    }
  }
  return Collections.unmodifiableList(aclBuilder);
}
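The method leans on two properties of the sorted list: duplicates become adjacent, and membership can be probed with binarySearch using the same comparator (the real probe key carries only scope and type, since ACL_ENTRY_COMPARATOR ignores permissions). A minimal, self-contained sketch of that sort-then-scan pattern, with plain strings and a natural-order comparator standing in for AclEntry and ACL_ENTRY_COMPARATOR:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class SortedValidationExample {
    public static void main(String[] args) {
        // Stand-in for ACL_ENTRY_COMPARATOR: any total order works for the pattern.
        Comparator<String> cmp = Comparator.naturalOrder();
        List<String> entries = new ArrayList<>(
                Arrays.asList("group::r--", "other::---", "user::rw-"));

        // 1. Sort so that equal-keyed entries become adjacent...
        Collections.sort(entries, cmp);

        // 2. ...then one linear pass detects duplicates by comparing neighbors.
        String prev = null;
        for (String e : entries) {
            if (prev != null && cmp.compare(prev, e) == 0) {
                throw new IllegalArgumentException("duplicate entry: " + e);
            }
            prev = e;
        }

        // 3. Required-entry checks reuse the same comparator with binarySearch on
        // a probe key; a negative return value means the key is absent.
        for (String required : Arrays.asList("user::rw-", "group::r--", "other::---")) {
            if (Collections.binarySearch(entries, required, cmp) < 0) {
                throw new IllegalArgumentException("missing required entry: " + required);
            }
        }
        System.out.println("ACL is valid: " + entries);
    }
}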
use of org.apache.hadoop.hdfs.protocol.AclException in project pravega by pravega.
the class SealOperation method run.
@Override
public void run() throws IOException, StorageNotPrimaryException {
    HDFSSegmentHandle handle = getTarget();
    long traceId = LoggerHelpers.traceEnter(log, "seal", handle);
    val lastHandleFile = handle.getLastFile();
    try {
        if (!lastHandleFile.isReadOnly()) {
            if (!makeReadOnly(lastHandleFile)) {
                // The file's read-only status changed externally. Figure out if we have been fenced out.
                checkForFenceOut(handle.getSegmentName(), -1, handle.getLastFile());
                // We are ok, just update the FileDescriptor internally.
                lastHandleFile.markReadOnly();
            }
        }
        // Set the Sealed attribute on the last file and update the handle.
        makeSealed(lastHandleFile);
    } catch (FileNotFoundException | AclException ex) {
        checkForFenceOut(handle.getSegmentName(), handle.getFiles().size(), handle.getLastFile());
        // If we were not fenced out, then this is a legitimate exception - rethrow it.
        throw ex;
    }
    if (lastHandleFile.getLength() == 0) {
        // The last file was actually empty, so if we have more than one file, mark the
        // second-to-last as sealed and remove the last one.
        val handleFiles = handle.getFiles();
        if (handleFiles.size() > 1) {
            makeSealed(handleFiles.get(handleFiles.size() - 2));
            deleteFile(lastHandleFile);
            handle.removeLastFile();
        }
    }
    LoggerHelpers.traceLeave(log, "seal", traceId, handle);
}
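makeReadOnly and makeSealed are helpers inherited from Pravega's operation base class and are not shown here. As a rough sketch only: if makeReadOnly ultimately flips HDFS permissions, it could look like the following, using the stock org.apache.hadoop.fs API (the real helper operates on Pravega's FileDescriptor and its exact behavior may differ):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class ReadOnlyHelperSketch {
    private static final FsPermission READ_ONLY =
        new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ); // r--r--r--

    /**
     * Marks the given file read-only. Returns false if it was already read-only,
     * mirroring how SealOperation treats an externally-changed status.
     */
    static boolean makeReadOnly(FileSystem fs, Path file) throws IOException {
        if (fs.getFileStatus(file).getPermission().getUserAction().implies(FsAction.WRITE)) {
            fs.setPermission(file, READ_ONLY);
            return true;
        }
        return false;
    }
}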
use of org.apache.hadoop.hdfs.protocol.AclException in project pravega by pravega.
the class UnsealOperation method run.
@Override
public void run() throws IOException, StorageNotPrimaryException {
    HDFSSegmentHandle handle = getTarget();
    long traceId = LoggerHelpers.traceEnter(log, "unseal", handle);
    val lastHandleFile = handle.getLastFile();
    try {
        if (lastHandleFile.isReadOnly()) {
            makeReadWrite(lastHandleFile);
        }
        // Clear the Sealed attribute from the last file, if it was set.
        if (isSealed(lastHandleFile)) {
            makeUnsealed(lastHandleFile);
        }
    } catch (FileNotFoundException | AclException ex) {
        checkForFenceOut(handle.getSegmentName(), handle.getFiles().size(), handle.getLastFile());
        // If we were not fenced out, then this is a legitimate exception - rethrow it.
        throw ex;
    }
    LoggerHelpers.traceLeave(log, "unseal", traceId, handle);
}
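Seal and unseal share the same failure-handling idiom: on FileNotFoundException or AclException, first check whether another instance has fenced this one out, and rethrow only if primacy still holds. A self-contained schematic of that guard, with the exception type stubbed locally and the helper names modeled loosely on the source:

import java.io.FileNotFoundException;
import java.io.IOException;

public class FenceOutGuardSketch {
    /** Local stub for Pravega's StorageNotPrimaryException. */
    static class StorageNotPrimaryException extends IOException {
        StorageNotPrimaryException(String msg) { super(msg); }
    }

    private final boolean fencedOut;

    FenceOutGuardSketch(boolean fencedOut) { this.fencedOut = fencedOut; }

    void run() throws IOException {
        try {
            mutateLastFile();
        } catch (FileNotFoundException ex) {
            // A concurrent owner may have renamed or re-permissioned our files.
            // If so, report loss of primacy; otherwise the error is legitimate.
            checkForFenceOut();
            throw ex;
        }
    }

    private void mutateLastFile() throws FileNotFoundException {
        throw new FileNotFoundException("segment_0_epoch1"); // simulated failure
    }

    private void checkForFenceOut() throws StorageNotPrimaryException {
        if (fencedOut) {
            throw new StorageNotPrimaryException("fenced out by a higher epoch");
        }
    }

    public static void main(String[] args) {
        try {
            new FenceOutGuardSketch(true).run();
        } catch (IOException ex) {
            System.out.println(ex.getClass().getSimpleName() + ": " + ex.getMessage());
        }
    }
}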
use of org.apache.hadoop.hdfs.protocol.AclException in project pravega by pravega.
the class ConcatOperationTests method testInvalidInput.
/**
* Tests various combinations of bad input to the Concat command.
*/
@Test
public void testInvalidInput() throws Exception {
    @Cleanup val fs = new MockFileSystem();
    val context = newContext(1, fs);
    val targetHandle = createNonEmptySegment(TARGET_SEGMENT, context, null);
    val sourceHandle = createNonEmptySegment(SOURCE_SEGMENT, context, null);

    // Same source and target.
    AssertExtensions.assertThrows("ConcatOperation worked with same source and target.",
            () -> new ConcatOperation(targetHandle, WRITE_LENGTH, TARGET_SEGMENT, context).run(),
            ex -> ex instanceof IllegalArgumentException);

    // Verify source is not sealed.
    AssertExtensions.assertThrows("ConcatOperation worked on non-sealed source segment.",
            new ConcatOperation(targetHandle, WRITE_LENGTH, SOURCE_SEGMENT, context)::run,
            ex -> ex instanceof IllegalStateException);

    // Seal it.
    new SealOperation(sourceHandle, context).run();

    // Verify target offset.
    AssertExtensions.assertThrows("ConcatOperation worked with bad offset.",
            new ConcatOperation(targetHandle, WRITE_LENGTH - 1, SOURCE_SEGMENT, context)::run,
            ex -> ex instanceof BadOffsetException);

    // Verify target is sealed.
    new SealOperation(targetHandle, context).run();
    AssertExtensions.assertThrows("ConcatOperation worked with sealed target.",
            new ConcatOperation(targetHandle, WRITE_LENGTH, SOURCE_SEGMENT, context)::run,
            ex -> ex instanceof AclException);
}
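AssertExtensions.assertThrows is Pravega's test helper; it takes a message, an action, and a predicate over the thrown exception. With stock JUnit 4.13+ the same shape can be expressed with Assert.assertThrows, which instead takes the expected type and returns the caught exception. A self-contained example of that variant (the checkOffset method is invented for illustration):

import static org.junit.Assert.assertThrows;
import org.junit.Test;

public class AssertThrowsExample {
    @Test
    public void rejectsNegativeOffset() {
        // assertThrows returns the caught exception for further inspection,
        // replacing the "ex -> ex instanceof X" predicate with the expected type.
        IllegalArgumentException ex = assertThrows(IllegalArgumentException.class,
                () -> checkOffset(-1));
        System.out.println("caught: " + ex.getMessage());
    }

    private static void checkOffset(long offset) {
        if (offset < 0) {
            throw new IllegalArgumentException("offset must be non-negative: " + offset);
        }
    }
}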