Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method modifyAclEntries:
void modifyAclEntries(final String src, List<AclEntry> aclSpec)
    throws IOException {
  final String operationName = "modifyAclEntries";
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot modify ACL entries on " + src);
    auditStat = FSDirAclOp.modifyAclEntries(dir, src, aclSpec);
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  } finally {
    writeUnlock(operationName);
  }
  getEditLog().logSync();
  logAuditEvent(true, operationName, src, null, auditStat);
}
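The catch block above only audits the failure and rethrows, so the AccessControlException travels back to the caller over RPC. Below is a minimal client-side sketch of reaching this code path through the public FileSystem API; the NameNode URI, the path /data/reports, and the user alice are illustrative assumptions, not part of the Hadoop source:

import java.net.URI;
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class ModifyAclExample {
  public static void main(String[] args) throws Exception {
    // Assumed cluster address; replace with your NameNode URI.
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"),
        new Configuration());
    AclEntry entry = new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.USER)
        .setName("alice")                     // hypothetical user
        .setPermission(FsAction.READ_WRITE)
        .build();
    try {
      // Ends up in FSNamesystem#modifyAclEntries on the NameNode.
      fs.modifyAclEntries(new Path("/data/reports"), // hypothetical path
          Collections.singletonList(entry));
    } catch (AccessControlException e) {
      // The NameNode has already written a failed audit event;
      // the client sees the original exception.
      System.err.println("Permission denied: " + e.getMessage());
    }
  }
}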
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method setOwner:
/**
 * Set owner for an existing file.
 * @throws IOException
 */
void setOwner(String src, String username, String group)
    throws IOException {
  final String operationName = "setOwner";
  HdfsFileStatus auditStat;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set owner for " + src);
    auditStat = FSDirAttrOp.setOwner(dir, src, username, group);
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  } finally {
    writeUnlock(operationName);
  }
  getEditLog().logSync();
  logAuditEvent(true, operationName, src, null, auditStat);
}
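Changing ownership is restricted in HDFS (by default only the superuser may reassign an owner), which makes setOwner a frequent source of this exception. A hedged client sketch; the URI, path, user, and group below are assumptions:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class SetOwnerExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(
        URI.create("hdfs://namenode:8020"), // assumed URI
        new Configuration());
    try {
      // Routed to FSNamesystem#setOwner; requires superuser privileges.
      fs.setOwner(new Path("/data/reports"), // hypothetical path
          "alice", "analysts");              // hypothetical principals
    } catch (AccessControlException e) {
      // Recorded as a failed "setOwner" audit event on the NameNode.
      System.err.println("Not permitted to change ownership: "
          + e.getMessage());
    }
  }
}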
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method getContentSummary:
/**
 * Get the content summary for a specific file/dir.
 *
 * @param src The string representation of the path to the file
 *
 * @throws AccessControlException if access is denied
 * @throws UnresolvedLinkException if a symlink is encountered.
 * @throws FileNotFoundException if no file exists
 * @throws StandbyException
 * @throws IOException for issues with writing to the audit log
 *
 * @return object containing information regarding the file
 *         or null if file not found
 */
ContentSummary getContentSummary(final String src) throws IOException {
  checkOperation(OperationCategory.READ);
  final String operationName = "contentSummary";
  readLock();
  boolean success = true;
  ContentSummary cs;
  try {
    checkOperation(OperationCategory.READ);
    cs = FSDirStatAndListingOp.getContentSummary(dir, src);
  } catch (AccessControlException ace) {
    success = false;
    logAuditEvent(success, operationName, src);
    throw ace;
  } finally {
    readUnlock(operationName);
  }
  logAuditEvent(success, operationName, src);
  return cs;
}
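Unlike the write operations above, getContentSummary runs under the read lock and carries the same success flag through both the failure and success audit paths. A client-side sketch, with an assumed NameNode URI and a hypothetical path:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class ContentSummaryExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(
        URI.create("hdfs://namenode:8020"), // assumed URI
        new Configuration());
    try {
      // Reaches FSNamesystem#getContentSummary on the NameNode.
      ContentSummary cs = fs.getContentSummary(new Path("/data")); // hypothetical path
      System.out.println("files=" + cs.getFileCount()
          + " dirs=" + cs.getDirectoryCount()
          + " bytes=" + cs.getLength());
    } catch (AccessControlException e) {
      // Summarizing a subtree requires permission to traverse it.
      System.err.println("Access denied: " + e.getMessage());
    }
  }
}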
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method getBlockLocations:
/**
 * Get block locations within the specified range.
 * @see ClientProtocol#getBlockLocations(String, long, long)
 */
LocatedBlocks getBlockLocations(String clientMachine, String srcArg,
    long offset, long length) throws IOException {
  final String operationName = "open";
  checkOperation(OperationCategory.READ);
  GetBlockLocationsResult res = null;
  FSPermissionChecker pc = getPermissionChecker();
  readLock();
  try {
    checkOperation(OperationCategory.READ);
    res = FSDirStatAndListingOp.getBlockLocations(
        dir, pc, srcArg, offset, length, true);
    if (isInSafeMode()) {
      for (LocatedBlock b : res.blocks.getLocatedBlocks()) {
        // if safemode & no block locations yet then throw safemodeException
        if ((b.getLocations() == null) || (b.getLocations().length == 0)) {
          SafeModeException se = newSafemodeException(
              "Zero blocklocations for " + srcArg);
          if (haEnabled && haContext != null &&
              haContext.getState().getServiceState() == HAServiceState.ACTIVE) {
            throw new RetriableException(se);
          } else {
            throw se;
          }
        }
      }
    }
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, srcArg);
    throw e;
  } finally {
    readUnlock(operationName);
  }

  logAuditEvent(true, operationName, srcArg);

  if (!isInSafeMode() && res.updateAccessTime()) {
    String src = srcArg;
    writeLock();
    final long now = now();
    try {
      checkOperation(OperationCategory.WRITE);
      /**
       * Resolve the path again and update the atime only when the file
       * exists.
       *
       * XXX: Races can still occur even after resolving the path again.
       * For example:
       *
       * <ul>
       * <li>Get the block location for "/a/b"</li>
       * <li>Rename "/a/b" to "/c/b"</li>
       * <li>The second resolution still points to "/a/b", which is
       * wrong.</li>
       * </ul>
       *
       * The behavior is incorrect but consistent with the one before
       * HDFS-7463. A better fix is to change the edit log of SetTime to
       * use inode id instead of a path.
       */
      final INodesInPath iip = dir.resolvePath(pc, srcArg, DirOp.READ);
      src = iip.getPath();
      INode inode = iip.getLastINode();
      boolean updateAccessTime = inode != null &&
          now > inode.getAccessTime() + dir.getAccessTimePrecision();
      if (!isInSafeMode() && updateAccessTime) {
        boolean changed = FSDirAttrOp.setTimes(dir, iip, -1, now, false);
        if (changed) {
          getEditLog().logTimes(src, -1, now);
        }
      }
    } catch (Throwable e) {
      LOG.warn("Failed to update the access time of " + src, e);
    } finally {
      writeUnlock(operationName);
    }
  }

  LocatedBlocks blocks = res.blocks;
  sortLocatedBlocks(clientMachine, blocks);
  return blocks;
}
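Clients hit this method whenever they open a file or ask for its block placement, and the operation is audited under the name "open". A sketch using getFileBlockLocations; the NameNode URI and file path are assumptions:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class BlockLocationsExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(
        URI.create("hdfs://namenode:8020"), // assumed URI
        new Configuration());
    Path file = new Path("/data/reports/part-00000"); // hypothetical file
    try {
      // Each call reaches FSNamesystem#getBlockLocations on the NameNode.
      long len = fs.getFileStatus(file).getLen();
      for (BlockLocation loc : fs.getFileBlockLocations(file, 0, len)) {
        System.out.println(loc); // offset, length and datanode hosts
      }
    } catch (AccessControlException e) {
      // Reading block locations requires READ permission on the file.
      System.err.println("Cannot read block locations: " + e.getMessage());
    }
  }
}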
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method unsetStoragePolicy:
/**
 * Unset the storage policy set for a given file or directory.
 *
 * @param src file/directory path
 */
void unsetStoragePolicy(String src) throws IOException {
  final String operationName = "unsetStoragePolicy";
  HdfsFileStatus auditStat;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot unset storage policy for " + src);
    auditStat = FSDirAttrOp.unsetStoragePolicy(dir, blockManager, src);
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  } finally {
    writeUnlock(operationName);
  }
  getEditLog().logSync();
  logAuditEvent(true, operationName, src, null, auditStat);
}
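As with the other write operations, a failed permission check yields a failed "unsetStoragePolicy" audit event before the exception is rethrown to the client. A client-side sketch using FileSystem.unsetStoragePolicy (available in recent Hadoop releases); the URI and path are assumptions:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class UnsetStoragePolicyExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(
        URI.create("hdfs://namenode:8020"), // assumed URI
        new Configuration());
    try {
      // Routed to FSNamesystem#unsetStoragePolicy; the path falls back
      // to inheriting its parent's storage policy.
      fs.unsetStoragePolicy(new Path("/data/cold")); // hypothetical path
    } catch (AccessControlException e) {
      // Logged as a failed audit event on the NameNode, then rethrown.
      System.err.println("Access denied: " + e.getMessage());
    }
  }
}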