Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class TestSafeMode, method testOperationsWhileInSafeMode.
/**
 * Run various fs operations while the NN is in safe mode,
 * assert that they are either allowed or fail as expected.
 */
@Test
public void testOperationsWhileInSafeMode() throws IOException, InterruptedException {
  final Path file1 = new Path("/file1");
  assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
  DFSTestUtil.createFile(fs, file1, 1024, (short) 1, 0);
  assertTrue("Could not enter SM", dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
  runFsFun("Set quota while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      ((DistributedFileSystem) fs).setQuota(file1, 1, 1);
    }
  });
  runFsFun("Set perm while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.setPermission(file1, FsPermission.getDefault());
    }
  });
  runFsFun("Set owner while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.setOwner(file1, "user", "group");
    }
  });
  runFsFun("Set repl while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.setReplication(file1, (short) 1);
    }
  });
  runFsFun("Append file while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      DFSTestUtil.appendFile(fs, file1, "new bytes");
    }
  });
  runFsFun("Truncate file while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.truncate(file1, 0);
    }
  });
  runFsFun("Delete file while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.delete(file1, false);
    }
  });
  runFsFun("Rename file while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.rename(file1, new Path("file2"));
    }
  });
  runFsFun("Set time while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.setTimes(file1, 0, 0);
    }
  });
  runFsFun("modifyAclEntries while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.modifyAclEntries(file1, Lists.<AclEntry>newArrayList());
    }
  });
  runFsFun("removeAclEntries while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.removeAclEntries(file1, Lists.<AclEntry>newArrayList());
    }
  });
  runFsFun("removeDefaultAcl while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.removeDefaultAcl(file1);
    }
  });
  runFsFun("removeAcl while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.removeAcl(file1);
    }
  });
  runFsFun("setAcl while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.setAcl(file1, Lists.<AclEntry>newArrayList());
    }
  });
  runFsFun("setXAttr while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.setXAttr(file1, "user.a1", null);
    }
  });
  runFsFun("removeXAttr while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.removeXAttr(file1, "user.a1");
    }
  });
  // Reads are allowed in safe mode.
  try {
    DFSTestUtil.readFile(fs, file1);
  } catch (IOException ioe) {
    fail("Read file failed while in SM");
  }
  try {
    fs.getAclStatus(file1);
  } catch (IOException ioe) {
    fail("getAclStatus failed while in SM");
  }
  // Test access
  UserGroupInformation ugiX = UserGroupInformation.createRemoteUser("userX");
  FileSystem myfs = ugiX.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws IOException {
      return FileSystem.get(conf);
    }
  });
  myfs.access(file1, FsAction.READ);
  try {
    myfs.access(file1, FsAction.WRITE);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    // expected
  }
  assertFalse("Could not leave SM", dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}
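For reference, FSRun and runFsFun are helpers defined elsewhere in TestSafeMode and not shown above. A minimal sketch of their likely shape, assuming the test's existing imports plus org.apache.hadoop.ipc.RemoteException and org.apache.hadoop.test.GenericTestUtils; the exception-message check is an approximation, not the verbatim source:

// Sketch: each runFsFun call asserts the operation is rejected with a
// safe-mode error, while plain reads above are asserted to succeed.
interface FSRun {
  void run(FileSystem fs) throws IOException;
}

/** Assert that the given operation fails because the NN is in safe mode. */
private void runFsFun(String msg, FSRun f) {
  try {
    f.run(fs);
    fail(msg);
  } catch (RemoteException re) {
    // Over RPC, the safe-mode error arrives wrapped in a RemoteException.
    GenericTestUtils.assertExceptionContains("safe mode", re);
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("safe mode", ioe);
  }
}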
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class TestReservedRawPaths, method testAdminAccessOnly.
@Test(timeout = 120000)
public void testAdminAccessOnly() throws Exception {
  final Path zone = new Path("zone");
  final Path slashZone = new Path("/", zone);
  fs.mkdirs(slashZone);
  dfsAdmin.createEncryptionZone(slashZone, TEST_KEY, NO_TRASH);
  final Path base = new Path("base");
  final Path reservedRaw = new Path("/.reserved/raw");
  final int len = 8192;
  /* Test failure of create file in reserved/raw as a non-admin */
  final UserGroupInformation user = UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final DistributedFileSystem fs = cluster.getFileSystem();
      try {
        final Path ezRawEncFile = new Path(new Path(reservedRaw, zone), base);
        DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
        fail("access to /.reserved/raw is a superuser-only operation");
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
  /* Test failure of getFileStatus in reserved/raw as a non-admin */
  final Path ezRawEncFile = new Path(new Path(reservedRaw, zone), base);
  DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final DistributedFileSystem fs = cluster.getFileSystem();
      try {
        fs.getFileStatus(ezRawEncFile);
        fail("access to /.reserved/raw is a superuser-only operation");
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
  /* Test failure of listStatus in reserved/raw as a non-admin */
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final DistributedFileSystem fs = cluster.getFileSystem();
      try {
        fs.listStatus(ezRawEncFile);
        fail("access to /.reserved/raw is a superuser-only operation");
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
  fs.setPermission(new Path("/"), new FsPermission((short) 0777));
  /* Test failure of mkdir in reserved/raw as a non-admin */
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final DistributedFileSystem fs = cluster.getFileSystem();
      final Path d1 = new Path(reservedRaw, "dir1");
      try {
        fs.mkdirs(d1);
        fail("access to /.reserved/raw is a superuser-only operation");
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
}
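For contrast, the same raw path is expected to resolve normally for the HDFS superuser (the default user of the test cluster). A minimal sketch reusing the test's ezRawEncFile and len; this check is not part of the original test:

// As the superuser, /.reserved/raw resolves without an AccessControlException.
FileStatus stat = cluster.getFileSystem().getFileStatus(ezRawEncFile);
assertEquals(len, stat.getLen());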
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class TestWebHDFS, method testDTInInsecureCluster.
@Test
public void testDTInInsecureCluster() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
    webHdfs.getDelegationToken(null);
    fail("Expected an AccessControlException, but none was thrown.");
  } catch (AccessControlException ace) {
    Assert.assertTrue(ace.getMessage().startsWith(WebHdfsFileSystem.CANT_FALLBACK_TO_INSECURE_MSG));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
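The counterpart case is a client explicitly allowed to fall back to simple authentication: there getDelegationToken(null) is expected to return null rather than throw. A hedged sketch, assuming the standard ipc.client.fallback-to-simple-auth-allowed key (via org.apache.hadoop.fs.CommonConfigurationKeys); this mirrors, but is not copied from, the companion fallback test:

final Configuration conf = WebHdfsTestUtil.createConf();
// Allow the insecure client to proceed instead of refusing with an ACE.
conf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
try {
  final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
  // With fallback allowed, no token exists and no exception is thrown.
  Assert.assertNull(webHdfs.getDelegationToken(null));
} finally {
  cluster.shutdown();
}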
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class TestFSMainOperationsWebHdfs, method testMkdirsFailsForSubdirectoryOfExistingFile.
@Override
@Test
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
  Path testDir = getTestRootPath(fSys, "test/hadoop");
  Assert.assertFalse(exists(fSys, testDir));
  fSys.mkdirs(testDir);
  Assert.assertTrue(exists(fSys, testDir));
  createFile(getTestRootPath(fSys, "test/hadoop/file"));
  Path testSubDir = getTestRootPath(fSys, "test/hadoop/file/subdir");
  try {
    fSys.mkdirs(testSubDir);
    Assert.fail("Should throw IOException.");
  } catch (IOException e) {
    // expected
  }
  try {
    Assert.assertFalse(exists(fSys, testSubDir));
  } catch (AccessControlException e) {
    // also okay for HDFS.
  }
  Path testDeepSubDir = getTestRootPath(fSys, "test/hadoop/file/deep/sub/dir");
  try {
    fSys.mkdirs(testDeepSubDir);
    Assert.fail("Should throw IOException.");
  } catch (IOException e) {
    // expected
  }
  try {
    Assert.assertFalse(exists(fSys, testDeepSubDir));
  } catch (AccessControlException e) {
    // also okay for HDFS.
  }
}
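The try/fail/catch pattern above can be written more compactly with Hadoop's LambdaTestUtils.intercept from org.apache.hadoop.test. A sketch, assuming a Java 8 toolchain and a Hadoop version that ships LambdaTestUtils; the original test predates this style:

// Equivalent assertion: mkdirs under an existing file must throw IOException.
LambdaTestUtils.intercept(IOException.class, () -> fSys.mkdirs(testSubDir));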
Use of org.apache.hadoop.security.AccessControlException in project hadoop by apache.
The class TestWebHdfsFileSystemContract, method testMkdirsFailsForSubdirectoryOfExistingFile.
/**
 * HDFS throws AccessControlException
 * when exists(..) is called on a path such as /foo/bar/file
 * where /foo/bar is actually a file in HDFS.
 */
@Override
public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
  Path testDir = path("/test/hadoop");
  assertFalse(fs.exists(testDir));
  assertTrue(fs.mkdirs(testDir));
  assertTrue(fs.exists(testDir));
  createFile(path("/test/hadoop/file"));
  Path testSubDir = path("/test/hadoop/file/subdir");
  try {
    fs.mkdirs(testSubDir);
    fail("Should throw IOException.");
  } catch (IOException e) {
    // expected
  }
  try {
    assertFalse(fs.exists(testSubDir));
  } catch (AccessControlException e) {
    // also okay for HDFS.
  }
  Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir");
  try {
    fs.mkdirs(testDeepSubDir);
    fail("Should throw IOException.");
  } catch (IOException e) {
    // expected
  }
  try {
    assertFalse(fs.exists(testDeepSubDir));
  } catch (AccessControlException e) {
    // also okay for HDFS.
  }
}