Use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.
The class TestAclsEndToEnd, method testGoodWithKeyAcls.
/**
 * Test the full life cycle of a key using a config with key ACLs.
 * The configuration used is the correct configuration to pass the full ACL
 * test in {@link #doFullAclTest()}.
 *
 * @throws Exception thrown on test failure
 */
@Test
public void testGoodWithKeyAcls() throws Exception {
  UserGroupInformation hdfsUgi = UserGroupInformation.createProxyUserForTesting("hdfs",
      realUgi, new String[] { "supergroup" });
  UserGroupInformation keyadminUgi = UserGroupInformation.createProxyUserForTesting("keyadmin",
      realUgi, new String[] { "keyadmin" });
  UserGroupInformation userUgi = UserGroupInformation.createProxyUserForTesting("user",
      realUgi, new String[] { "staff" });
  Configuration conf = getBaseConf(hdfsUgi, keyadminUgi);
  setBlacklistAcls(conf, hdfsUgi);
  setKeyAcls(conf, KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".",
      hdfsUgi, keyadminUgi, userUgi);
  doFullAclTest(conf, hdfsUgi, keyadminUgi, userUgi);
}
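For context, the ACL prefix built above (KeyAuthorizationKeyProvider.KEY_ACL + KEY1 + ".") expands to per-key KMS properties of the form key.acl.<keyname>.<OPERATION>. The helper setKeyAcls() is not shown in this excerpt; the following is only a minimal sketch of what such a helper might set, with the operation-to-user mapping assumed for illustration rather than taken from the real test.

// Hypothetical sketch of a helper like setKeyAcls(); the property-name pattern
// (prefix + operation) follows the KMS per-key ACL convention, but which UGI is
// granted which operation here is an assumption for illustration only.
private static void setKeyAcls(Configuration conf, String prefix,
    UserGroupInformation hdfs, UserGroupInformation keyadmin,
    UserGroupInformation user) {
  conf.set(prefix + "MANAGEMENT", keyadmin.getUserName());  // create/delete/rollover the key
  conf.set(prefix + "GENERATE_EEK", hdfs.getUserName());    // generate encrypted keys
  conf.set(prefix + "DECRYPT_EEK", user.getUserName());     // decrypt encrypted keys
  conf.set(prefix + "READ", user.getUserName());            // read key metadata
}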
Use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.
The class TestAclsEndToEnd, method testGoodWithWhitelistWithoutBlacklist.
/**
 * Test the full life cycle of a key using a config with whitelist key ACLs
 * and without blacklist ACLs. The configuration used is the correct
 * configuration to pass the full ACL test in {@link #doFullAclTest()}.
 *
 * @throws Exception thrown on test failure
 */
@Test
public void testGoodWithWhitelistWithoutBlacklist() throws Exception {
  UserGroupInformation hdfsUgi = UserGroupInformation.createProxyUserForTesting("hdfs",
      realUgi, new String[] { "supergroup" });
  UserGroupInformation keyadminUgi = UserGroupInformation.createProxyUserForTesting("keyadmin",
      realUgi, new String[] { "keyadmin" });
  UserGroupInformation userUgi = UserGroupInformation.createProxyUserForTesting("user",
      realUgi, new String[] { "staff" });
  Configuration conf = getBaseConf(hdfsUgi, keyadminUgi);
  setKeyAcls(conf, KMSConfiguration.WHITELIST_KEY_ACL_PREFIX,
      hdfsUgi, keyadminUgi, userUgi);
  doFullAclTest(conf, hdfsUgi, keyadminUgi, userUgi);
}
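The difference from the previous test is the prefix passed to setKeyAcls(): whitelist key ACLs are not tied to a single key name, so the prefix is used directly with no key-name segment. A hedged illustration of the resulting properties (values assumed, not taken from the test):

// Whitelist key ACLs apply to all keys; per-key ACLs (key.acl.<keyname>.*) apply
// to one key. The grants below are illustrative assumptions only.
conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT", "keyadmin");
conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "GENERATE_EEK", "hdfs");
conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "DECRYPT_EEK", "user");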
Use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.
The class TestSafeMode, method testOperationsWhileInSafeMode.
/**
 * Run various fs operations while the NN is in safe mode,
 * assert that they are either allowed or fail as expected.
 */
@Test
public void testOperationsWhileInSafeMode() throws IOException, InterruptedException {
  final Path file1 = new Path("/file1");
  assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
  DFSTestUtil.createFile(fs, file1, 1024, (short) 1, 0);
  assertTrue("Could not enter SM", dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
  runFsFun("Set quota while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      ((DistributedFileSystem) fs).setQuota(file1, 1, 1);
    }
  });
  runFsFun("Set perm while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.setPermission(file1, FsPermission.getDefault());
    }
  });
  runFsFun("Set owner while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.setOwner(file1, "user", "group");
    }
  });
  runFsFun("Set repl while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.setReplication(file1, (short) 1);
    }
  });
  runFsFun("Append file while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      DFSTestUtil.appendFile(fs, file1, "new bytes");
    }
  });
  runFsFun("Truncate file while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.truncate(file1, 0);
    }
  });
  runFsFun("Delete file while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.delete(file1, false);
    }
  });
  runFsFun("Rename file while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.rename(file1, new Path("file2"));
    }
  });
  runFsFun("Set time while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.setTimes(file1, 0, 0);
    }
  });
  runFsFun("modifyAclEntries while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.modifyAclEntries(file1, Lists.<AclEntry>newArrayList());
    }
  });
  runFsFun("removeAclEntries while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.removeAclEntries(file1, Lists.<AclEntry>newArrayList());
    }
  });
  runFsFun("removeDefaultAcl while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.removeDefaultAcl(file1);
    }
  });
  runFsFun("removeAcl while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.removeAcl(file1);
    }
  });
  runFsFun("setAcl while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.setAcl(file1, Lists.<AclEntry>newArrayList());
    }
  });
  runFsFun("setXAttr while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.setXAttr(file1, "user.a1", null);
    }
  });
  runFsFun("removeXAttr while in SM", new FSRun() {
    @Override
    public void run(FileSystem fs) throws IOException {
      fs.removeXAttr(file1, "user.a1");
    }
  });
  try {
    DFSTestUtil.readFile(fs, file1);
  } catch (IOException ioe) {
    fail("Read file failed while in SM");
  }
  try {
    fs.getAclStatus(file1);
  } catch (IOException ioe) {
    fail("getAclStatus failed while in SM");
  }
  // Test access
  UserGroupInformation ugiX = UserGroupInformation.createRemoteUser("userX");
  FileSystem myfs = ugiX.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws IOException {
      return FileSystem.get(conf);
    }
  });
  myfs.access(file1, FsAction.READ);
  try {
    myfs.access(file1, FsAction.WRITE);
    fail("The access call should have failed.");
  } catch (AccessControlException e) {
    // expected
  }
  assertFalse("Could not leave SM", dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}
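The test leans on a helper, runFsFun(), and a small callback interface, FSRun, neither of which is shown in this excerpt. The expected contract is that each mutating operation is attempted and must fail because the NameNode is in safe mode. A minimal sketch of that contract, with the exact exception handling assumed rather than copied from TestSafeMode:

// Sketch only: the real helper in TestSafeMode may distinguish SafeModeException,
// RemoteException, and plain IOException differently than shown here.
interface FSRun {
  void run(FileSystem fs) throws IOException;
}

void runFsFun(String msg, FSRun f) {
  try {
    f.run(fs);
    fail(msg + ": operation should have been rejected in safe mode");
  } catch (IOException ioe) {
    // Expected path: writes are refused while the NameNode is in safe mode.
    GenericTestUtils.assertExceptionContains("safe mode", ioe);
  }
}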
Use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.
The class TestSecureEncryptionZoneWithKMS, method testSecureEncryptionZoneWithKMS.
@Test
public void testSecureEncryptionZoneWithKMS() throws IOException, InterruptedException {
  final Path zonePath = new Path(TEST_PATH, "TestEZ1");
  fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), true);
  fsWrapper.setOwner(zonePath, OOZIE_PROXIED_USER_NAME, "supergroup");
  dfsAdmin.createEncryptionZone(zonePath, testKey, NO_TRASH);
  UserGroupInformation oozieUgi =
      UserGroupInformation.loginUserFromKeytabAndReturnUGI(ooziePrincipal, keytab);
  UserGroupInformation proxyUserUgi =
      UserGroupInformation.createProxyUser(OOZIE_PROXIED_USER_NAME, oozieUgi);
  proxyUserUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws IOException {
      // Get a client handler within the proxy user context for createFile
      try (DistributedFileSystem dfs = cluster.getFileSystem()) {
        for (int i = 0; i < 3; i++) {
          Path filePath = new Path(zonePath, "testData." + i + ".dat");
          DFSTestUtil.createFile(dfs, filePath, 1024, (short) 3, 1L);
        }
        return null;
      } catch (IOException e) {
        throw new IOException(e);
      }
    }
  });
}
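For createProxyUser() followed by doAs() to succeed against a secure cluster, the NameNode (and KMS) must authorize the real user to impersonate the proxied user via the hadoop.proxyuser.* properties. The test's actual setup lives elsewhere in the class; the snippet below is a hedged sketch that assumes the oozie principal's short name is "oozie" and uses illustrative values.

// Minimal impersonation configuration sketch (values assumed, not from the test):
conf.set("hadoop.proxyuser.oozie.hosts", "*");                    // hosts allowed to impersonate
conf.set("hadoop.proxyuser.oozie.users", OOZIE_PROXIED_USER_NAME); // users that may be impersonated
// Alternatively, authorization can be granted by group membership:
// conf.set("hadoop.proxyuser.oozie.groups", "supergroup");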
Use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.
The class TestReservedRawPaths, method testAdminAccessOnly.
@Test(timeout = 120000)
public void testAdminAccessOnly() throws Exception {
  final Path zone = new Path("zone");
  final Path slashZone = new Path("/", zone);
  fs.mkdirs(slashZone);
  dfsAdmin.createEncryptionZone(slashZone, TEST_KEY, NO_TRASH);
  final Path base = new Path("base");
  final Path reservedRaw = new Path("/.reserved/raw");
  final int len = 8192;
  /* Test failure of create file in reserved/raw as non admin */
  final UserGroupInformation user = UserGroupInformation.createUserForTesting("user",
      new String[] { "mygroup" });
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final DistributedFileSystem fs = cluster.getFileSystem();
      try {
        final Path ezRawEncFile = new Path(new Path(reservedRaw, zone), base);
        DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
        fail("access to /.reserved/raw is superuser-only operation");
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
  /* Test failure of getFileStatus in reserved/raw as non admin */
  final Path ezRawEncFile = new Path(new Path(reservedRaw, zone), base);
  DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final DistributedFileSystem fs = cluster.getFileSystem();
      try {
        fs.getFileStatus(ezRawEncFile);
        fail("access to /.reserved/raw is superuser-only operation");
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
  /* Test failure of listStatus in reserved/raw as non admin */
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final DistributedFileSystem fs = cluster.getFileSystem();
      try {
        fs.listStatus(ezRawEncFile);
        fail("access to /.reserved/raw is superuser-only operation");
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
  fs.setPermission(new Path("/"), new FsPermission((short) 0777));
  /* Test failure of mkdir in reserved/raw as non admin */
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final DistributedFileSystem fs = cluster.getFileSystem();
      final Path d1 = new Path(reservedRaw, "dir1");
      try {
        fs.mkdirs(d1);
        fail("access to /.reserved/raw is superuser-only operation");
      } catch (AccessControlException e) {
        assertExceptionContains("Superuser privilege is required", e);
      }
      return null;
    }
  });
}
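All four doAs() blocks exercise the negative case: a non-superuser is rejected on /.reserved/raw paths. A complementary positive case, not part of the excerpt and shown only as a hedged sketch, is that the same raw path resolves for the HDFS superuser and returns the still-encrypted bytes of the file inside the encryption zone.

// Hypothetical positive check run as the superuser (the outer fs in this test):
final Path rawFile = new Path("/.reserved/raw/zone/base");
FileStatus status = fs.getFileStatus(rawFile);            // succeeds for the superuser
String rawContents = DFSTestUtil.readFile(fs, rawFile);   // ciphertext, not plaintext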