Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method mkdirs.
/**
 * Create all the necessary directories.
 */
boolean mkdirs(String src, PermissionStatus permissions,
    boolean createParent) throws IOException {
  final String operationName = "mkdirs";
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot create directory " + src);
    auditStat = FSDirMkdirOp.mkdirs(this, src, permissions, createParent);
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  } finally {
    writeUnlock(operationName);
  }
  getEditLog().logSync();
  logAuditEvent(true, operationName, src, null, auditStat);
  return true;
}
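For context, here is a minimal client-side sketch of how this server-side path gets exercised: a FileSystem.mkdirs() call against an HDFS URI reaches FSNamesystem.mkdirs() on the NameNode, and an AccessControlException thrown there (already audit-logged as a failed "mkdirs" event) propagates back to the caller. The path and permission bits below are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;

public class MkdirsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      Path dir = new Path("/user/example/new-dir"); // hypothetical path
      try {
        // For HDFS, this RPC is served by FSNamesystem.mkdirs() above.
        fs.mkdirs(dir, new FsPermission((short) 0755));
      } catch (AccessControlException e) {
        // The NameNode has already recorded the failed audit event;
        // the client only needs to handle the denial.
        System.err.println("Permission denied: " + e.getMessage());
      }
    }
  }
}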
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method removeDefaultAcl.
void removeDefaultAcl(final String src) throws IOException {
  final String operationName = "removeDefaultAcl";
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
    auditStat = FSDirAclOp.removeDefaultAcl(dir, src);
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  } finally {
    writeUnlock(operationName);
  }
  getEditLog().logSync();
  logAuditEvent(true, operationName, src, null, auditStat);
}
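The public counterpart of this method is FileSystem.removeDefaultAcl(Path), which strips only the default ACL entries from a directory while leaving the access entries intact. A minimal sketch, assuming a running HDFS cluster and a hypothetical path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class RemoveDefaultAclExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      Path dir = new Path("/user/example/acl-dir"); // hypothetical path
      try {
        // Removes only the default ACL entries; access entries remain.
        fs.removeDefaultAcl(dir);
      } catch (AccessControlException e) {
        // Mirrors the server side: the failure is audit-logged, then rethrown.
        System.err.println("Not permitted to change ACLs: " + e.getMessage());
      }
    }
  }
}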
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method truncate.
/**
 * Truncate a file to a lower length.
 * Truncation cannot be reverted or recovered from, as it causes data loss.
 * Truncation at a block boundary is atomic; otherwise it requires
 * block recovery to truncate the last block of the file.
 *
 * @return true if the client does not need to wait for block recovery,
 *         false if the client needs to wait for block recovery.
 */
boolean truncate(String src, long newLength, String clientName,
    String clientMachine, long mtime)
    throws IOException, UnresolvedLinkException {
  final String operationName = "truncate";
  requireEffectiveLayoutVersionForFeature(Feature.TRUNCATE);
  final FSDirTruncateOp.TruncateResult r;
  try {
    NameNode.stateChangeLog.debug(
        "DIR* NameSystem.truncate: src={} newLength={}", src, newLength);
    if (newLength < 0) {
      throw new HadoopIllegalArgumentException(
          "Cannot truncate to a negative file size: " + newLength + ".");
    }
    final FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);
    writeLock();
    BlocksMapUpdateInfo toRemoveBlocks = new BlocksMapUpdateInfo();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot truncate for " + src);
      r = FSDirTruncateOp.truncate(this, src, newLength, clientName,
          clientMachine, mtime, toRemoveBlocks, pc);
    } finally {
      writeUnlock(operationName);
    }
    getEditLog().logSync();
    if (!toRemoveBlocks.getToDeleteList().isEmpty()) {
      removeBlocks(toRemoveBlocks);
      toRemoveBlocks.clear();
    }
    logAuditEvent(true, operationName, src, null, r.getFileStatus());
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  }
  return r.getResult();
}
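The boolean return value matters to callers: FileSystem.truncate(), the client-facing entry point for this path, returns true when the new length falls on a block boundary and false when the last block still needs recovery. A minimal sketch, with a hypothetical path and length:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      Path file = new Path("/user/example/data.log"); // hypothetical path
      boolean done = fs.truncate(file, 1024L); // hypothetical new length
      if (!done) {
        // The last block requires recovery; the file cannot be reopened
        // for writing until recovery completes.
        System.out.println("Waiting for block recovery on " + file);
      }
    }
  }
}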
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method setTimes.
/**
 * Stores the modification and access time for this inode.
 * The access time is precise up to an hour. The transaction, if needed, is
 * written to the edits log but is not flushed.
 */
void setTimes(String src, long mtime, long atime) throws IOException {
  final String operationName = "setTimes";
  HdfsFileStatus auditStat;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set times " + src);
    auditStat = FSDirAttrOp.setTimes(dir, src, mtime, atime);
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  } finally {
    writeUnlock(operationName);
  }
  getEditLog().logSync();
  logAuditEvent(true, operationName, src, null, auditStat);
}
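On the client side this corresponds to FileSystem.setTimes(Path, long, long), where a value of -1 leaves the corresponding timestamp unchanged. A minimal sketch with a hypothetical path, updating only the modification time:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetTimesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      Path file = new Path("/user/example/data.log"); // hypothetical path
      long now = System.currentTimeMillis();
      // Pass -1 to leave a timestamp unchanged; here atime is untouched.
      fs.setTimes(file, now, -1);
    }
  }
}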
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method createEncryptionZone.
/**
 * Create an encryption zone on directory src using the specified key.
 *
 * @param src     the path of a directory which will be the root of the
 *                encryption zone. The directory must be empty.
 * @param keyName name of a key which must be present in the configured
 *                KeyProvider.
 * @throws AccessControlException  if the caller is not the superuser.
 * @throws UnresolvedLinkException if the path can't be resolved.
 * @throws SafeModeException       if the Namenode is in safe mode.
 */
void createEncryptionZone(final String src, final String keyName,
    boolean logRetryCache)
    throws IOException, UnresolvedLinkException, SafeModeException,
    AccessControlException {
  final String operationName = "createEncryptionZone";
  try {
    Metadata metadata =
        FSDirEncryptionZoneOp.ensureKeyIsInitialized(dir, keyName, src);
    checkSuperuserPrivilege();
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);
    final HdfsFileStatus resultingStat;
    writeLock();
    try {
      checkSuperuserPrivilege();
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot create encryption zone on " + src);
      resultingStat = FSDirEncryptionZoneOp.createEncryptionZone(
          dir, src, pc, metadata.getCipher(), keyName, logRetryCache);
    } finally {
      writeUnlock(operationName);
    }
    getEditLog().logSync();
    logAuditEvent(true, operationName, src, null, resultingStat);
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  }
}
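Encryption zones are created through the admin API rather than plain FileSystem. A minimal sketch using HdfsAdmin.createEncryptionZone(Path, String); the URI, path, and key name are hypothetical, the key must already exist in the configured KeyProvider, and the caller must be the HDFS superuser or the checkSuperuserPrivilege() calls above reject the request with AccessControlException:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class CreateEncryptionZoneExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HdfsAdmin admin =
        new HdfsAdmin(new URI("hdfs://namenode:8020"), conf); // hypothetical URI
    // The target directory must exist and be empty before the call.
    Path zone = new Path("/secure/zone"); // hypothetical path
    admin.createEncryptionZone(zone, "myKey"); // "myKey" must be in the KeyProvider
  }
}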