Use of org.apache.hadoop.security.Credentials in project hadoop by apache.
Class TestDelegationTokenRenewer, method testAppSubmissionWithoutDelegationToken.
// YARN will get the token for the app submitted without the delegation token.
@Test
public void testAppSubmissionWithoutDelegationToken() throws Exception {
    conf.setBoolean(YarnConfiguration.RM_PROXY_USER_PRIVILEGES_ENABLED, true);
    // create token2
    Text userText2 = new Text("user2");
    DelegationTokenIdentifier dtId2 =
        new DelegationTokenIdentifier(new Text("user2"), new Text("renewer2"), userText2);
    final Token<DelegationTokenIdentifier> token2 = new Token<DelegationTokenIdentifier>(
        dtId2.getBytes(), "password2".getBytes(), dtId2.getKind(), new Text("service2"));
    final MockRM rm = new TestSecurityMockRM(conf, null) {

        @Override
        protected DelegationTokenRenewer createDelegationTokenRenewer() {
            return new DelegationTokenRenewer() {

                @Override
                protected Token<?>[] obtainSystemTokensForUser(String user,
                        final Credentials credentials) throws IOException {
                    credentials.addToken(token2.getService(), token2);
                    return new Token<?>[] { token2 };
                }
            };
        }
    };
    rm.start();
    // submit an app without a delegation token
    RMApp app = rm.submitApp(200);
    // wait for the newly retrieved HDFS token
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        public Boolean get() {
            return rm.getRMContext().getDelegationTokenRenewer()
                .getDelegationTokens().contains(token2);
        }
    }, 1000, 20000);
    // check that the NM can retrieve the token
    final MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService());
    nm1.registerNode();
    NodeHeartbeatResponse response = nm1.nodeHeartbeat(true);
    ByteBuffer tokenBuffer = response.getSystemCredentialsForApps().get(app.getApplicationId());
    Assert.assertNotNull(tokenBuffer);
    Credentials appCredentials = new Credentials();
    DataInputByteBuffer buf = new DataInputByteBuffer();
    tokenBuffer.rewind();
    buf.reset(tokenBuffer);
    appCredentials.readTokenStorageStream(buf);
    Assert.assertTrue(appCredentials.getAllTokens().contains(token2));
}
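
The end of this test is the consumer side of YARN's system-credentials mechanism: the NM receives a ByteBuffer and rebuilds a Credentials object from it with DataInputByteBuffer and readTokenStorageStream. As a point of reference, here is a minimal, self-contained sketch of that same write/read round trip using only the Credentials API; the class name, token bytes, kind, and alias below are invented for illustration, and none of the RM/NM plumbing is involved.

import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class CredentialsRoundTrip {
    public static void main(String[] args) throws Exception {
        // Build a Credentials object holding one token under the alias "service2".
        Credentials creds = new Credentials();
        Token<TokenIdentifier> token = new Token<TokenIdentifier>(
            "identifier".getBytes(), "password".getBytes(),
            new Text("SOME_TOKEN_KIND"), new Text("service2"));
        creds.addToken(token.getService(), token);

        // Serialize to the token-storage format (what ends up in the ByteBuffer).
        DataOutputBuffer dob = new DataOutputBuffer();
        creds.writeTokenStorageToStream(dob);
        ByteBuffer buffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

        // Deserialize on the receiving side, exactly as the test does.
        DataInputByteBuffer dib = new DataInputByteBuffer();
        buffer.rewind();
        dib.reset(buffer);
        Credentials copy = new Credentials();
        copy.readTokenStorageStream(dib);
        System.out.println("tokens restored: " + copy.getAllTokens().size());
    }
}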
Use of org.apache.hadoop.security.Credentials in project hadoop by apache.
Class TestDelegationTokenRenewer, method testDTRenewal.
/**
 * Basic idea of the test:
 * 1. Create tokens.
 * 2. Mark one of them to be renewed in 2 seconds (instead of 24 hours).
 * 3. Register them for renewal.
 * 4. Sleep for 3 seconds.
 * 5. Count the number of renewals (should be the 3 initial ones + one extra).
 * 6. Register another token set to renew in 2 seconds.
 * 7. Cancel it immediately.
 * 8. Sleep and check that the 2-second renewal didn't happen
 *    (5 renewals in total).
 * 9. Check cancellation.
 * @throws IOException
 * @throws URISyntaxException
 */
@Test(timeout = 60000)
public void testDTRenewal() throws Exception {
    MyFS dfs = (MyFS) FileSystem.get(conf);
    LOG.info("dfs=" + (Object) dfs.hashCode() + ";conf=" + conf.hashCode());
    // Test 1 - add three tokens; make sure exactly one gets renewed
    // get the delegation tokens
    MyToken token1, token2, token3;
    token1 = dfs.getDelegationToken("user1");
    token2 = dfs.getDelegationToken("user2");
    token3 = dfs.getDelegationToken("user3");
    // cause this one to be set for renewal in 2 secs
    Renewer.tokenToRenewIn2Sec = token1;
    LOG.info("token=" + token1 + " should be renewed in 2 secs");
    // three distinct NameNodes
    String nn1 = DelegationTokenRenewer.SCHEME + "://host1:0";
    String nn2 = DelegationTokenRenewer.SCHEME + "://host2:0";
    String nn3 = DelegationTokenRenewer.SCHEME + "://host3:0";
    Credentials ts = new Credentials();
    // add the tokens to the credentials, keyed by NameNode URI
    ts.addToken(new Text(nn1), token1);
    ts.addToken(new Text(nn2), token2);
    ts.addToken(new Text(nn3), token3);
    // register the tokens for renewal
    ApplicationId applicationId_0 = BuilderUtils.newApplicationId(0, 0);
    delegationTokenRenewer.addApplicationAsync(applicationId_0, ts, true, "user", new Configuration());
    waitForEventsToGetProcessed(delegationTokenRenewer);
    // first 3 initial renewals + 1 real
    int numberOfExpectedRenewals = 3 + 1;
    int attempts = 10;
    while (attempts-- > 0) {
        try {
            // sleep 3 seconds, so it has time to renew
            Thread.sleep(3 * 1000);
        } catch (InterruptedException e) {
        }
        // since we cannot guarantee timely execution, give it a few chances
        if (Renewer.counter == numberOfExpectedRenewals)
            break;
    }
    LOG.info("dfs=" + dfs.hashCode() + ";Counter = " + Renewer.counter + ";t=" + Renewer.lastRenewed);
    assertEquals("renew wasn't called as many times as expected(4):", numberOfExpectedRenewals, Renewer.counter);
    assertEquals("most recently renewed token mismatch", Renewer.lastRenewed, token1);
    // Test 2.
    // Add another token (that expires in 2 secs). Then remove it before
    // time is up.
    // Wait for 3 secs and make sure no renewals were called.
    ts = new Credentials();
    MyToken token4 = dfs.getDelegationToken("user4");
    // cause this one to be set for renewal in 2 secs
    Renewer.tokenToRenewIn2Sec = token4;
    LOG.info("token=" + token4 + " should be renewed in 2 secs");
    String nn4 = DelegationTokenRenewer.SCHEME + "://host4:0";
    ts.addToken(new Text(nn4), token4);
    ApplicationId applicationId_1 = BuilderUtils.newApplicationId(0, 1);
    delegationTokenRenewer.addApplicationAsync(applicationId_1, ts, true, "user", new Configuration());
    waitForEventsToGetProcessed(delegationTokenRenewer);
    delegationTokenRenewer.applicationFinished(applicationId_1);
    waitForEventsToGetProcessed(delegationTokenRenewer);
    // number of renewals so far
    numberOfExpectedRenewals = Renewer.counter;
    try {
        // sleep 6 seconds, so it has time to renew
        Thread.sleep(6 * 1000);
    } catch (InterruptedException e) {
    }
    LOG.info("Counter = " + Renewer.counter + ";t=" + Renewer.lastRenewed);
    // the counter and the token should still be the old ones
    assertEquals("renew wasn't called as many times as expected", numberOfExpectedRenewals, Renewer.counter);
    // also, renewing the cancelled token should fail
    try {
        token4.renew(conf);
        fail("Renewal of cancelled token should have failed");
    } catch (InvalidToken ite) {
        // expected
    }
}
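
The renew and cancel operations the test drives through MyToken are the standard lifecycle methods on org.apache.hadoop.security.token.Token, which is also what DelegationTokenRenewer invokes on the RM's behalf. Below is a short sketch, assuming a Kerberos-secured HDFS client configuration, of collecting delegation tokens into a Credentials object and calling renew() and cancel() directly; the renewer name "yarn-renewer" is arbitrary and chosen only for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class RenewCancelSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Ask the FileSystem for delegation tokens; they are added to the
        // Credentials object and also returned for convenience.
        Credentials creds = new Credentials();
        Token<?>[] tokens = fs.addDelegationTokens("yarn-renewer", creds);

        for (Token<?> t : tokens) {
            // A token can be renewed repeatedly until its max lifetime...
            long nextExpiration = t.renew(conf);
            System.out.println(t.getService() + " renewed until " + nextExpiration);
            // ...and cancelled; renewing a cancelled token throws InvalidToken,
            // which is what the test asserts for token4 above.
            t.cancel(conf);
        }
    }
}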
Use of org.apache.hadoop.security.Credentials in project hadoop by apache.
Class TestDelegationTokenRenewer, method testRenewTokenUsingTokenConfProvidedByApp.
// Test that DelegationTokenRenewer uses the tokenConf provided by the application
// for token renewal.
@Test
public void testRenewTokenUsingTokenConfProvidedByApp() throws Exception {
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    MockRM rm = new TestSecurityMockRM(conf, null);
    rm.start();
    final MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService());
    nm1.registerNode();
    // create a token
    Text userText1 = new Text("user1");
    DelegationTokenIdentifier dtId1 =
        new DelegationTokenIdentifier(userText1, new Text("renewer1"), userText1);
    final Token<DelegationTokenIdentifier> token1 = new Token<DelegationTokenIdentifier>(
        dtId1.getBytes(), "password1".getBytes(), dtId1.getKind(), new Text("service1"));
    Credentials credentials = new Credentials();
    credentials.addToken(userText1, token1);
    // create the token conf for renewal
    Configuration appConf = new Configuration(false);
    appConf.set("dfs.nameservices", "mycluster1,mycluster2");
    appConf.set("dfs.namenode.rpc-address.mycluster2.nn1", "123.0.0.1");
    appConf.set("dfs.namenode.rpc-address.mycluster2.nn2", "123.0.0.2");
    appConf.set("dfs.ha.namenodes.mycluster2", "nn1,nn2");
    appConf.set("dfs.client.failover.proxy.provider.mycluster2", "provider");
    DataOutputBuffer dob = new DataOutputBuffer();
    appConf.write(dob);
    ByteBuffer tokenConf = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    final int confSize = appConf.size();
    // submit the app
    RMApp app = rm.submitApp(credentials, tokenConf);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        public Boolean get() {
            DelegationTokenToRenew toRenew =
                rm.getRMContext().getDelegationTokenRenewer().getAllTokens().get(token1);
            // the renewer should hold the app-provided conf, including the specific config we added
            return toRenew != null && toRenew.conf != null && toRenew.conf.size() == confSize
                && toRenew.conf.get("dfs.namenode.rpc-address.mycluster2.nn1").equals("123.0.0.1");
        }
    }, 200, 10000);
}
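
The ByteBuffer handed to submitApp here is simply a Writable-serialized Configuration, which the RM rebuilds on its side before renewing the token. A minimal sketch of that Configuration round trip is shown below; the keys and values are placeholders, and the RM internals (DelegationTokenToRenew and friends) are not involved.

import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class TokenConfRoundTrip {
    public static void main(String[] args) throws Exception {
        // App side: new Configuration(false) skips the default resources, so only
        // the renewal-relevant keys are carried across.
        Configuration appConf = new Configuration(false);
        appConf.set("dfs.nameservices", "mycluster2");
        appConf.set("dfs.namenode.rpc-address.mycluster2.nn1", "123.0.0.1");

        DataOutputBuffer dob = new DataOutputBuffer();
        appConf.write(dob);
        ByteBuffer tokenConf = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

        // RM side: rebuild the Configuration from the submitted bytes.
        DataInputByteBuffer dib = new DataInputByteBuffer();
        tokenConf.rewind();
        dib.reset(tokenConf);
        Configuration renewerConf = new Configuration(false);
        renewerConf.readFields(dib);
        System.out.println(renewerConf.get("dfs.namenode.rpc-address.mycluster2.nn1"));
    }
}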
Use of org.apache.hadoop.security.Credentials in project hadoop by apache.
Class TestDelegationTokenRenewer, method testAppTokenWithNonRenewer.
// Testcase for YARN-3021: the RM should skip renewing a token if the renewer string
// is empty.
@Test(timeout = 60000)
public void testAppTokenWithNonRenewer() throws Exception {
    MyFS dfs = (MyFS) FileSystem.get(conf);
    LOG.info("dfs=" + (Object) dfs.hashCode() + ";conf=" + conf.hashCode());
    // Test would fail if using non-empty renewer string here
    MyToken token = dfs.getDelegationToken("");
    token.cancelToken();
    Credentials ts = new Credentials();
    ts.addToken(token.getKind(), token);
    // register the tokens for renewal
    ApplicationId appId = BuilderUtils.newApplicationId(0, 0);
    delegationTokenRenewer.addApplicationSync(appId, ts, true, "user");
}
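
The renewer string lives inside the token's identifier, so deciding whether renewal should be skipped means decoding the identifier and inspecting its renewer field. The helper below is a rough, illustrative sketch of that check (the method name hasEmptyRenewer is made up here, and DelegationTokenRenewer's actual logic covers additional cases); it uses only the public Token and delegation-token identifier APIs.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;

public class SkipEmptyRenewer {
    // Return true if the token carries a delegation-token identifier whose renewer
    // is empty, i.e. a token that should not be submitted for renewal (the YARN-3021 case).
    static boolean hasEmptyRenewer(Token<? extends TokenIdentifier> token) throws Exception {
        TokenIdentifier id = token.decodeIdentifier();
        if (id instanceof AbstractDelegationTokenIdentifier) {
            Text renewer = ((AbstractDelegationTokenIdentifier) id).getRenewer();
            return renewer == null || renewer.toString().isEmpty();
        }
        // Not a delegation-token identifier (or it could not be decoded): do not skip.
        return false;
    }
}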
Use of org.apache.hadoop.security.Credentials in project hadoop by apache.
Class TestSchedulerUtils, method testValidateResourceBlacklistRequest.
@Test
public void testValidateResourceBlacklistRequest() throws Exception {
    MyContainerManager containerManager = new MyContainerManager();
    final MockRMWithAMS rm = new MockRMWithAMS(new YarnConfiguration(), containerManager);
    rm.start();
    MockNM nm1 = rm.registerNode("localhost:1234", 5120);
    Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>(2);
    acls.put(ApplicationAccessType.VIEW_APP, "*");
    RMApp app = rm.submitApp(1024, "appname", "appuser", acls);
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt = app.getCurrentAppAttempt();
    ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId();
    waitForLaunchedState(attempt);
    // Create a client to the RM.
    final Configuration conf = rm.getConfig();
    final YarnRPC rpc = YarnRPC.create(conf);
    UserGroupInformation currentUser =
        UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
    Credentials credentials = containerManager.getContainerCredentials();
    final InetSocketAddress rmBindAddress = rm.getApplicationMasterService().getBindAddress();
    Token<? extends TokenIdentifier> amRMToken =
        MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress, credentials.getAllTokens());
    currentUser.addToken(amRMToken);
    ApplicationMasterProtocol client =
        currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {

            @Override
            public ApplicationMasterProtocol run() {
                return (ApplicationMasterProtocol) rpc.getProxy(
                    ApplicationMasterProtocol.class, rmBindAddress, conf);
            }
        });
    RegisterApplicationMasterRequest request =
        Records.newRecord(RegisterApplicationMasterRequest.class);
    client.registerApplicationMaster(request);
    ResourceBlacklistRequest blacklistRequest =
        ResourceBlacklistRequest.newInstance(Collections.singletonList(ResourceRequest.ANY), null);
    AllocateRequest allocateRequest =
        AllocateRequest.newInstance(0, 0.0f, null, null, blacklistRequest);
    boolean error = false;
    try {
        client.allocate(allocateRequest);
    } catch (InvalidResourceBlacklistRequestException e) {
        error = true;
    }
    rm.stop();
    Assert.assertTrue("Did not catch InvalidResourceBlacklistRequestException", error);
}
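
The Credentials object here is only a carrier: the test pulls the AMRMToken out of the container credentials, attaches it to a remote-user UGI, and performs the protocol calls inside doAs so the token is presented on the RPC connection. Below is a small, generic sketch of that pattern, assuming nothing about YARN beyond the UGI and token APIs; the helper name runWithTokens is invented for illustration.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class UgiWithTokens {
    // Copy every token from a Credentials object onto a remote-user UGI, then run
    // the given action as that user so the tokens are used on outgoing RPCs.
    static <T> T runWithTokens(String user, Credentials creds,
            PrivilegedExceptionAction<T> action) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
        for (Token<? extends TokenIdentifier> token : creds.getAllTokens()) {
            ugi.addToken(token);
        }
        return ugi.doAs(action);
    }
}

In the test above the same steps appear inline: createRemoteUser(applicationAttemptId.toString()), addToken(amRMToken), and doAs(...) to obtain the ApplicationMasterProtocol proxy.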