use of org.apache.hadoop.security.UserGroupInformation in project hbase by apache.
the class AuthUtil method getAuthChore.
/**
* Checks if security is enabled and, if so, launches a chore for refreshing the Kerberos ticket.
* @param conf the hbase service configuration
* @return a ScheduledChore for renewals, if needed, and null otherwise.
*/
public static ScheduledChore getAuthChore(Configuration conf) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(conf);
  // login the principal (if using secure Hadoop)
  boolean securityEnabled =
      userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled();
  if (!securityEnabled) {
    return null;
  }
  String host = null;
  try {
    host = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
        conf.get("hbase.client.dns.interface", "default"),
        conf.get("hbase.client.dns.nameserver", "default")));
    userProvider.login("hbase.client.keytab.file", "hbase.client.kerberos.principal", host);
  } catch (UnknownHostException e) {
    LOG.error("Error resolving host name: " + e.getMessage(), e);
    throw e;
  } catch (IOException e) {
    LOG.error("Error while trying to perform the initial login: " + e.getMessage(), e);
    throw e;
  }
  final UserGroupInformation ugi = userProvider.getCurrent().getUGI();
  Stoppable stoppable = new Stoppable() {
    private volatile boolean isStopped = false;

    @Override
    public void stop(String why) {
      isStopped = true;
    }

    @Override
    public boolean isStopped() {
      return isStopped;
    }
  };
  // In debug mode this interval avoids getting spammed by getTGT(). It can be
  // increased, keeping in mind that the default refresh window is 0.8 of the
  // ticket lifetime: e.g. a 5 min TGT * 0.8 = 4 min refresh, leaving a 1 min
  // window, so the check interval should be well under 1 min; 30 sec is safe.
  final int CHECK_TGT_INTERVAL = 30 * 1000;
  ScheduledChore refreshCredentials =
      new ScheduledChore("RefreshCredentials", stoppable, CHECK_TGT_INTERVAL) {
        @Override
        protected void chore() {
          try {
            ugi.checkTGTAndReloginFromKeytab();
          } catch (IOException e) {
            LOG.error("Got exception while trying to refresh credentials: " + e.getMessage(), e);
          }
        }
      };
  return refreshCredentials;
}
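The returned chore does nothing until the caller hands it to a chore runner. A minimal usage sketch, assuming the standard HBase ChoreService (the service name string and surrounding wiring are illustrative, not part of AuthUtil):

// Hypothetical wiring in a long-lived secure HBase client process.
Configuration conf = HBaseConfiguration.create();
ScheduledChore authChore = AuthUtil.getAuthChore(conf);
if (authChore != null) {
  // ChoreService's thread pool invokes the chore every CHECK_TGT_INTERVAL ms
  // until the service is shut down or the chore's Stoppable is stopped.
  ChoreService choreService = new ChoreService("AUTH_CHORE_SERVICE");
  choreService.scheduleChore(authChore);
}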
use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.
the class TestTokenClientRMService method testTokenCancellationByWrongUser.
@Test
public void testTokenCancellationByWrongUser() {
  // two sets to test -
  // 1. try to cancel tokens of short and kerberos users as a kerberos UGI
  // 2. try to cancel tokens of short and kerberos users as a simple auth UGI
  RMContext rmContext = mock(RMContext.class);
  final ClientRMService rmService =
      new ClientRMService(rmContext, null, null, null, null, dtsm);
  UserGroupInformation[] kerbTestOwners = { owner, other, tester, ownerKerb, otherKerb };
  UserGroupInformation[] kerbTestRenewers = { owner, other, ownerKerb, otherKerb };
  for (final UserGroupInformation tokOwner : kerbTestOwners) {
    for (final UserGroupInformation tokRenewer : kerbTestRenewers) {
      try {
        testerKerb.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            try {
              checkTokenCancellation(rmService, tokOwner, tokRenewer);
              Assert.fail("We should not reach here; token owner = "
                  + tokOwner.getUserName() + ", renewer = " + tokRenewer.getUserName());
              return null;
            } catch (YarnException e) {
              Assert.assertTrue(e.getMessage().contains(
                  testerKerb.getUserName() + " is not authorized to cancel the token"));
              return null;
            }
          }
        });
      } catch (Exception e) {
        Assert.fail("Unexpected exception; " + e.getMessage());
      }
    }
  }
  UserGroupInformation[] simpleTestOwners = { owner, other, ownerKerb, otherKerb, testerKerb };
  UserGroupInformation[] simpleTestRenewers = { owner, other, ownerKerb, otherKerb };
  for (final UserGroupInformation tokOwner : simpleTestOwners) {
    for (final UserGroupInformation tokRenewer : simpleTestRenewers) {
      try {
        tester.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            try {
              checkTokenCancellation(tokOwner, tokRenewer);
              Assert.fail("We should not reach here; token owner = "
                  + tokOwner.getUserName() + ", renewer = " + tokRenewer.getUserName());
              return null;
            } catch (YarnException ex) {
              Assert.assertTrue(ex.getMessage().contains(
                  tester.getUserName() + " is not authorized to cancel the token"));
              return null;
            }
          }
        });
      } catch (Exception e) {
        Assert.fail("Unexpected exception; " + e.getMessage());
      }
    }
  }
}
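The owner/renewer fields used above are remote-user UGIs that differ only in their authentication method; their actual initialization lives elsewhere in the test class. A hypothetical sketch of how such fixtures are typically built, mirroring the field names used in the test:

import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical fixture setup; the real code initializes these in the test's setup.
UserGroupInformation owner = UserGroupInformation.createRemoteUser("owner", AuthMethod.SIMPLE);
UserGroupInformation tester = UserGroupInformation.createRemoteUser("tester", AuthMethod.SIMPLE);
UserGroupInformation ownerKerb = UserGroupInformation.createRemoteUser("owner", AuthMethod.KERBEROS);
UserGroupInformation testerKerb = UserGroupInformation.createRemoteUser("tester", AuthMethod.KERBEROS);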
use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.
the class TestWorkPreservingRMRestart method testRMRestartWithRemovedQueue.
// 1. submit an app to default queue and let it finish
// 2. restart rm with no default queue
// 3. getApplicationReport call should succeed (with no NPE)
@Test(timeout = 30000)
public void testRMRestartWithRemovedQueue() throws Exception {
  conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  conf.set(YarnConfiguration.YARN_ADMIN_ACL, "");
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  rm1 = new MockRM(conf, memStore);
  rm1.start();
  MockNM nm1 = new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
  nm1.registerNode();
  final RMApp app1 = rm1.submitApp(1024, "app1", USER_1, null);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  MockRM.finishAMAndVerifyAppState(app1, rm1, nm1, am1);
  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(conf);
  csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] { QUEUE_DOESNT_EXIST });
  final String noQueue = CapacitySchedulerConfiguration.ROOT + "." + QUEUE_DOESNT_EXIST;
  csConf.setCapacity(noQueue, 100);
  rm2 = new MockRM(csConf, memStore);
  rm2.start();
  UserGroupInformation user2 = UserGroupInformation.createRemoteUser("user2");
  ApplicationReport report =
      user2.doAs(new PrivilegedExceptionAction<ApplicationReport>() {
        @Override
        public ApplicationReport run() throws Exception {
          return rm2.getApplicationReport(app1.getApplicationId());
        }
      });
  Assert.assertNotNull(report);
}
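MockRM exposes getApplicationReport directly for tests; against a real cluster the same doAs-wrapped lookup would go through the public YarnClient API. A hedged sketch of that equivalent (appId is assumed to be known; it is not derived from this test):

// Sketch only: the production-side equivalent of the MockRM call above.
UserGroupInformation user2 = UserGroupInformation.createRemoteUser("user2");
ApplicationReport report = user2.doAs(new PrivilegedExceptionAction<ApplicationReport>() {
  @Override
  public ApplicationReport run() throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    try {
      return client.getApplicationReport(appId); // appId assumed available
    } finally {
      client.stop();
    }
  }
});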
use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.
the class TestFSRMStateStore method testHDFSRMStateStore.
@Test(timeout = 60000)
public void testHDFSRMStateStore() throws Exception {
  final HdfsConfiguration conf = new HdfsConfiguration();
  UserGroupInformation yarnAdmin =
      UserGroupInformation.createUserForTesting("yarn", new String[] { "admin" });
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.getFileSystem().mkdir(new Path("/yarn"), FsPermission.valueOf("-rwxrwxrwx"));
  cluster.getFileSystem().setOwner(new Path("/yarn"), "yarn", "admin");
  final UserGroupInformation hdfsAdmin = UserGroupInformation.getCurrentUser();
  final StoreStateVerifier verifier = new StoreStateVerifier() {
    @Override
    void afterStoreApp(final RMStateStore store, final ApplicationId appId) {
      try {
        // Wait for things to settle
        Thread.sleep(5000);
        hdfsAdmin.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            verifyFilesUnreadablebyHDFS(cluster,
                ((FileSystemRMStateStore) store).getAppDir(appId));
            return null;
          }
        });
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }

    @Override
    void afterStoreAppAttempt(final RMStateStore store, final ApplicationAttemptId appAttId) {
      try {
        // Wait for things to settle
        Thread.sleep(5000);
        hdfsAdmin.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            verifyFilesUnreadablebyHDFS(cluster,
                ((FileSystemRMStateStore) store).getAppAttemptDir(appAttId));
            return null;
          }
        });
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  };
  try {
    yarnAdmin.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        fsTester = new TestFSRMStateStoreTester(cluster, true);
        testRMAppStateStore(fsTester, verifier);
        return null;
      }
    });
  } finally {
    cluster.shutdown();
  }
}
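verifyFilesUnreadablebyHDFS itself is not shown here. The intent is that, with the tester's second constructor argument set to true, the state store's files end up unreadable even by the HDFS superuser (hdfsAdmin above), so opening them must fail. A plausible sketch of such a check, under those assumptions:

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

// Hedged sketch of the kind of assertion verifyFilesUnreadablebyHDFS makes for
// each file under the app/attempt directory; the real helper may differ.
private void assertUnreadableBySuperuser(FileSystem fs, Path file) throws IOException {
  try (FSDataInputStream in = fs.open(file)) {
    Assert.fail("Superuser unexpectedly able to read " + file);
  } catch (AccessControlException e) {
    // expected: the store's files are protected even from the superuser
  }
}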
use of org.apache.hadoop.security.UserGroupInformation in project hadoop by apache.
the class TestLeafQueue method testInheritedQueueAcls.
@Test
public void testInheritedQueueAcls() throws IOException {
  UserGroupInformation user = UserGroupInformation.getCurrentUser();
  LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
  LeafQueue b = stubLeafQueue((LeafQueue) queues.get(B));
  ParentQueue c = (ParentQueue) queues.get(C);
  LeafQueue c1 = stubLeafQueue((LeafQueue) queues.get(C1));
  assertFalse(root.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertTrue(a.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertTrue(b.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertFalse(c.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertFalse(c1.hasAccess(QueueACL.SUBMIT_APPLICATIONS, user));
  assertTrue(hasQueueACL(a.getQueueUserAclInfo(user), QueueACL.SUBMIT_APPLICATIONS));
  assertTrue(hasQueueACL(b.getQueueUserAclInfo(user), QueueACL.SUBMIT_APPLICATIONS));
  assertFalse(hasQueueACL(c.getQueueUserAclInfo(user), QueueACL.SUBMIT_APPLICATIONS));
  assertFalse(hasQueueACL(c1.getQueueUserAclInfo(user), QueueACL.SUBMIT_APPLICATIONS));
}
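hasQueueACL is a small private helper of the test class; a plausible reconstruction (the actual implementation may differ) simply scans the QueueUserACLInfo records returned for the user:

// Hypothetical reconstruction of the test's hasQueueACL helper: true if any
// per-queue record grants the given ACL to the user.
private boolean hasQueueACL(List<QueueUserACLInfo> aclInfos, QueueACL acl) {
  for (QueueUserACLInfo aclInfo : aclInfos) {
    if (aclInfo.getUserAcls().contains(acl)) {
      return true;
    }
  }
  return false;
}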