Use of org.apache.hadoop.security.AccessControlException in the apache/hadoop project: class TestStickyBit, method testStickyBitRecursiveDeleteDir.
/**
 * Verifies sticky-bit semantics for recursive deletes: a non-owner must be
 * denied when recursively deleting a sticky-bit directory, while the owner
 * of the protected contents can delete it recursively.
 */
@Test
public void testStickyBitRecursiveDeleteDir() throws Exception {
  Path base = new Path("/" + GenericTestUtils.getMethodName());
  Path stickyTmp = new Path(base, "tmp");
  Path protectedDir = new Path(stickyTmp, "dir");
  Path childFile = new Path(protectedDir, "file");

  // Set up a world-writable tmp directory carrying the sticky bit.
  hdfs.mkdirs(stickyTmp);
  hdfs.setPermission(base, new FsPermission((short) 0777));
  hdfs.setPermission(stickyTmp, new FsPermission((short) 01777));

  // user1 creates a subdirectory (and a file in it) under the sticky dir.
  hdfsAsUser1.mkdirs(protectedDir);
  hdfsAsUser1.setPermission(protectedDir, new FsPermission((short) 0777));
  writeFile(hdfsAsUser1, childFile);
  hdfs.setPermission(childFile, new FsPermission((short) 0666));

  // user2 does not own the entries, so the recursive delete must be denied.
  try {
    hdfsAsUser2.delete(stickyTmp, true);
    fail("Non-owner can not delete a directory protected by sticky bit recursively");
  } catch (AccessControlException e) {
    GenericTestUtils.assertExceptionContains(FSExceptionMessages.PERMISSION_DENIED_BY_STICKY_BIT, e);
  }

  // The owner of the protected entries may delete them recursively.
  hdfsAsUser1.delete(stickyTmp, true);
}
Use of org.apache.hadoop.security.AccessControlException in the apache/hadoop project: class TestAuditLogs, method testAuditDenied.
/** Checks that a denied open() leaves the expected denial entry in the audit log. */
@Test
public void testAuditDenied() throws Exception {
  final Path target = new Path(fnames[0]);
  FileSystem userFs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

  // Restrict the file to root so the unprivileged user is refused access.
  fs.setPermission(target, new FsPermission((short) 0600));
  fs.setOwner(target, "root", null);
  setupAuditLogs();

  try {
    userFs.open(target);
    fail("open must not succeed");
  } catch (AccessControlException expected) {
    System.out.println("got access denied, as expected.");
  }

  // The audit log must record the denied operation (allowed=false).
  verifyAuditLogs(false);
}
Use of org.apache.hadoop.security.AccessControlException in the apache/hadoop project: class TestAuditLogs, method testAuditWebHdfsDenied.
/** test that denied access via webhdfs puts proper entry in audit log */
@Test
public void testAuditWebHdfsDenied() throws Exception {
  final Path file = new Path(fnames[0]);

  // Restrict the file to root so the unprivileged webhdfs user is refused.
  fs.setPermission(file, new FsPermission((short) 0600));
  fs.setOwner(file, "root", null);
  setupAuditLogs();

  try {
    WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsConstants.WEBHDFS_SCHEME);
    // try-with-resources: if open() unexpectedly succeeds, the stream is
    // closed rather than leaked when fail() aborts the test.
    try (InputStream istream = webfs.open(file)) {
      int val = istream.read();
      fail("open+read must not succeed, got " + val);
    }
  } catch (AccessControlException e) { // renamed from 'E' to follow lowerCamelCase
    System.out.println("got access denied, as expected.");
  }

  // Expect two denied audit entries (open + read) for the webhdfs access.
  verifyAuditLogsRepeat(false, 2);
}
Use of org.apache.hadoop.security.AccessControlException in the apache/hadoop project: class TestAuditLoggerWithCommands, method testSetQuota.
/**
 * Verifies that a denied setQuota call is audit-logged with allowed=false,
 * and that a retry on a closed FileSystem adds no further audit entries.
 */
@Test
public void testSetQuota() throws Exception {
  Path path = new Path("/testdir/testdir1");
  fs.mkdirs(path);
  fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
  try {
    // 10L (uppercase suffix): lowercase 'l' is easily misread as the digit 1.
    ((DistributedFileSystem) fileSys).setQuota(path, 10L, 10L);
    fail("The operation should have failed with AccessControlException");
  } catch (AccessControlException ace) {
    // expected: user1 lacks the privilege to set quotas
  }
  String acePattern = ".*allowed=false.*ugi=theDoctor.*cmd=setQuota.*";
  int length = verifyAuditLogs(acePattern);
  fileSys.close();
  try {
    ((DistributedFileSystem) fileSys).setQuota(path, 10L, 10L);
    fail("The operation should have failed with IOException");
  } catch (IOException ace) {
    // expected: the FileSystem has been closed
  }
  // A call on a closed FileSystem must not append to the audit log.
  assertTrue("Unexpected log from setQuota", length == auditlog.getOutput().split("\n").length);
}
Use of org.apache.hadoop.security.AccessControlException in the apache/hadoop project: class TestAuditLoggerWithCommands, method testRemoveCachePool.
/**
 * Verifies that a denied removeCachePool call is audit-logged with
 * allowed=false, and that a retry on a closed FileSystem adds no further
 * audit entries.
 */
@Test
public void testRemoveCachePool() throws Exception {
  removeExistingCachePools(null);
  CachePoolInfo cacheInfo = new CachePoolInfo("pool1").setMode(new FsPermission((short) 0));
  ((DistributedFileSystem) fs).addCachePool(cacheInfo);
  fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
  try {
    ((DistributedFileSystem) fileSys).removeCachePool("pool1");
    fail("The operation should have failed with AccessControlException");
  } catch (AccessControlException ace) {
    // expected: user1 lacks the privilege to remove cache pools
  }
  String aceRemoveCachePoolPattern = ".*allowed=false.*ugi=theDoctor.*cmd=removeCachePool.*";
  int length = verifyAuditLogs(aceRemoveCachePoolPattern);
  assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
  // close() is hoisted out of the try so an IOException from close() cannot
  // be swallowed by the catch and mask a removeCachePool that never ran
  // (matches the pattern used by testSetQuota).
  fileSys.close();
  try {
    ((DistributedFileSystem) fileSys).removeCachePool("pool1");
    fail("The operation should have failed with IOException");
  } catch (IOException e) {
    // expected: the FileSystem has been closed
  }
  // A call on a closed FileSystem must not append to the audit log.
  assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
}
Aggregations