Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class TestHDFSTrash, method testDeleteTrash.
@Test
public void testDeleteTrash() throws Exception {
  Configuration testConf = new Configuration(conf);
  testConf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "10");
  Path user1Tmp = new Path(TEST_ROOT, "test-del-u1");
  Path user2Tmp = new Path(TEST_ROOT, "test-del-u2");
  // login as user1, move something to trash
  // verify user1 can remove its own trash dir
  fs = DFSTestUtil.login(fs, testConf, user1);
  fs.mkdirs(user1Tmp);
  Trash u1Trash = getPerUserTrash(user1, fs, testConf);
  Path u1t = u1Trash.getCurrentTrashDir(user1Tmp);
  assertTrue(String.format("Failed to move %s to trash", user1Tmp),
      u1Trash.moveToTrash(user1Tmp));
  assertTrue(String.format("%s should be allowed to remove its own trash directory %s",
      user1.getUserName(), u1t), fs.delete(u1t, true));
  assertFalse(fs.exists(u1t));
  // login as user2, move something to trash
  fs = DFSTestUtil.login(fs, testConf, user2);
  fs.mkdirs(user2Tmp);
  Trash u2Trash = getPerUserTrash(user2, fs, testConf);
  u2Trash.moveToTrash(user2Tmp);
  Path u2t = u2Trash.getCurrentTrashDir(user2Tmp);
  try {
    // user1 should not be able to remove user2's trash dir
    fs = DFSTestUtil.login(fs, testConf, user1);
    fs.delete(u2t, true);
    fail(String.format("%s should not be able to remove %s trash directory",
        USER1_NAME, USER2_NAME));
  } catch (AccessControlException e) {
    assertTrue(e instanceof AccessControlException);
    assertTrue("Permission denied messages must carry the username",
        e.getMessage().contains(USER1_NAME));
  }
}
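For context, the block below is a minimal, self-contained sketch of the client-side Trash API that this test exercises. The path and the trash-interval value are illustrative assumptions, not taken from the test above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class TrashUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Enable trash; the interval (in minutes) is an illustrative value.
    conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "10");
    FileSystem fs = FileSystem.get(conf);

    Path victim = new Path("/tmp/to-be-deleted"); // hypothetical path
    fs.mkdirs(victim);

    Trash trash = new Trash(fs, conf);
    // moveToTrash returns true when the path was moved into the caller's trash.
    if (trash.moveToTrash(victim)) {
      System.out.println("Moved " + victim + " into " + trash.getCurrentTrashDir());
    }
  }
}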
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class TestDelegationToken, method testDelegationTokenSecretManager.
@Test
public void testDelegationTokenSecretManager() throws Exception {
  Token<DelegationTokenIdentifier> token =
      generateDelegationToken("SomeUser", "JobTracker");
  // Fake renewer should not be able to renew
  try {
    dtSecretManager.renewToken(token, "FakeRenewer");
    Assert.fail("should have failed");
  } catch (AccessControlException ace) {
    // PASS
  }
  dtSecretManager.renewToken(token, "JobTracker");
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  byte[] tokenId = token.getIdentifier();
  identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
  Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
  LOG.info("Sleep to expire the token");
  Thread.sleep(6000);
  // Token should be expired
  try {
    dtSecretManager.retrievePassword(identifier);
    // Should not come here
    Assert.fail("Token should have expired");
  } catch (InvalidToken e) {
    // Success
  }
  dtSecretManager.renewToken(token, "JobTracker");
  LOG.info("Sleep beyond the max lifetime");
  Thread.sleep(5000);
  try {
    dtSecretManager.renewToken(token, "JobTracker");
    Assert.fail("should have been expired");
  } catch (InvalidToken it) {
    // PASS
  }
}
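As a rough sketch of how application code hits the same AccessControlException outside this unit test, the example below obtains an HDFS delegation token and attempts to renew it. The renewer name "yarn" and the default configuration are assumptions, not part of the test above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.token.Token;

public class DelegationTokenSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Ask the NameNode for a delegation token that "yarn" may renew.
    Token<?> token = fs.getDelegationToken("yarn");
    if (token == null) {
      // On an insecure cluster no token is issued.
      System.out.println("Security is off; no token issued.");
      return;
    }
    try {
      // Renewal fails with AccessControlException unless the caller
      // is the designated renewer.
      long nextExpiration = token.renew(conf);
      System.out.println("Token valid until " + nextExpiration);
    } catch (AccessControlException ace) {
      System.err.println("Not allowed to renew: " + ace.getMessage());
    }
  }
}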
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method setErasureCodingPolicy.
/**
 * Set an erasure coding policy on the given path.
 * @param srcArg The path of the target directory.
 * @param ecPolicyName The erasure coding policy to set on the target
 *                     directory.
 * @throws AccessControlException if the caller is not the superuser.
 * @throws UnresolvedLinkException if the path can't be resolved.
 * @throws SafeModeException if the Namenode is in safe mode.
 */
void setErasureCodingPolicy(final String srcArg, final String ecPolicyName,
    final boolean logRetryCache) throws IOException, UnresolvedLinkException,
    SafeModeException, AccessControlException {
  final String operationName = "setErasureCodingPolicy";
  checkOperation(OperationCategory.WRITE);
  HdfsFileStatus resultingStat = null;
  final FSPermissionChecker pc = getPermissionChecker();
  boolean success = false;
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set erasure coding policy on " + srcArg);
    resultingStat = FSDirErasureCodingOp.setErasureCodingPolicy(this, srcArg,
        ecPolicyName, pc, logRetryCache);
    success = true;
  } catch (AccessControlException ace) {
    logAuditEvent(success, operationName, srcArg, null, resultingStat);
    throw ace;
  } finally {
    writeUnlock(operationName);
    if (success) {
      getEditLog().logSync();
    }
  }
  logAuditEvent(success, operationName, srcArg, null, resultingStat);
}
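On the client side this NameNode operation is typically reached through DistributedFileSystem#setErasureCodingPolicy. Below is a minimal sketch; the directory path, the assumption that fs.defaultFS points at an HDFS cluster, and the built-in policy name "RS-6-3-1024k" are illustrative, not taken from the code above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.security.AccessControlException;

public class SetEcPolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    Path dir = new Path("/data/ec-dir"); // hypothetical directory
    dfs.mkdirs(dir);
    try {
      // Policy name is assumed here; the available policies can be listed
      // with dfs.getAllErasureCodingPolicies().
      dfs.setErasureCodingPolicy(dir, "RS-6-3-1024k");
    } catch (AccessControlException ace) {
      // Raised when the caller lacks permission to set the policy.
      System.err.println("Permission denied: " + ace.getMessage());
    }
  }
}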
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method removeAcl.
void removeAcl(final String src) throws IOException {
  final String operationName = "removeAcl";
  HdfsFileStatus auditStat = null;
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot remove ACL on " + src);
    auditStat = FSDirAclOp.removeAcl(dir, src);
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  } finally {
    writeUnlock(operationName);
  }
  getEditLog().logSync();
  logAuditEvent(true, operationName, src, null, auditStat);
}
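For reference, the matching client call is FileSystem#removeAcl. A minimal sketch follows; the path is illustrative. A caller without sufficient permission sees the AccessControlException that the NameNode method above audits and re-throws.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class RemoveAclSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/data/protected-dir"); // hypothetical path
    try {
      // Removes all ACL entries except the base permission bits.
      fs.removeAcl(path);
    } catch (AccessControlException ace) {
      System.err.println("Permission denied: " + ace.getMessage());
    }
  }
}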
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class FSNamesystem, method delete.
/**
 * Remove the indicated file from namespace.
 *
 * @see ClientProtocol#delete(String, boolean) for detailed description and
 * description of exceptions
 */
boolean delete(String src, boolean recursive, boolean logRetryCache)
    throws IOException {
  final String operationName = "delete";
  BlocksMapUpdateInfo toRemovedBlocks = null;
  writeLock();
  boolean ret = false;
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot delete " + src);
    toRemovedBlocks = FSDirDeleteOp.delete(this, src, recursive, logRetryCache);
    ret = toRemovedBlocks != null;
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, src);
    throw e;
  } finally {
    writeUnlock(operationName);
  }
  getEditLog().logSync();
  if (toRemovedBlocks != null) {
    // Incremental deletion of blocks
    removeBlocks(toRemovedBlocks);
  }
  logAuditEvent(true, operationName, src);
  return ret;
}