Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
The class ImageLoaderCurrent, method processDelegationTokens.
/**
 * Process the delegation token related section in the fsimage.
 *
 * @param in DataInputStream to process
 * @param v Visitor to walk over records
 * @throws IOException if the stream cannot be read
 */
private void processDelegationTokens(DataInputStream in, ImageVisitor v)
    throws IOException {
  v.visit(ImageElement.CURRENT_DELEGATION_KEY_ID, in.readInt());
  int numDKeys = in.readInt();
  v.visitEnclosingElement(ImageElement.DELEGATION_KEYS,
      ImageElement.NUM_DELEGATION_KEYS, numDKeys);
  for (int i = 0; i < numDKeys; i++) {
    DelegationKey key = new DelegationKey();
    key.readFields(in);
    v.visit(ImageElement.DELEGATION_KEY, key.toString());
  }
  v.leaveEnclosingElement();
  v.visit(ImageElement.DELEGATION_TOKEN_SEQUENCE_NUMBER, in.readInt());
  int numDTokens = in.readInt();
  v.visitEnclosingElement(ImageElement.DELEGATION_TOKENS,
      ImageElement.NUM_DELEGATION_TOKENS, numDTokens);
  for (int i = 0; i < numDTokens; i++) {
    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    id.readFields(in);
    long expiryTime = in.readLong();
    v.visitEnclosingElement(ImageElement.DELEGATION_TOKEN_IDENTIFIER);
    v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_KIND, id.getKind().toString());
    v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_SEQNO, id.getSequenceNumber());
    v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_OWNER, id.getOwner().toString());
    v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_RENEWER, id.getRenewer().toString());
    v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_REALUSER, id.getRealUser().toString());
    v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE, id.getIssueDate());
    v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_MAX_DATE, id.getMaxDate());
    v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME, expiryTime);
    v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_MASTER_KEY_ID, id.getMasterKeyId());
    v.leaveEnclosingElement(); // DELEGATION_TOKEN_IDENTIFIER
  }
  v.leaveEnclosingElement(); // DELEGATION_TOKENS
}
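For reference, the readFields() call above relies on the standard Writable contract: an identifier written with write() can be re-read field by field from a DataInputStream, which mirrors how the image loader consumes the token section of the fsimage. Below is a minimal, self-contained sketch of that round trip; it is not from the Hadoop source, and the class name and field values are made up for illustration.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;

public class DelegationTokenIdentifierRoundTrip {
  public static void main(String[] args) throws Exception {
    // Hypothetical owner/renewer/realUser values, for illustration only.
    DelegationTokenIdentifier original = new DelegationTokenIdentifier(
        new Text("owner"), new Text("renewer"), new Text("realUser"));

    // Serialize via the Writable contract.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // Deserialize the same way ImageLoaderCurrent does: readFields() on a fresh instance.
    DelegationTokenIdentifier copy = new DelegationTokenIdentifier();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy.getKind() + " owner=" + copy.getOwner()
        + " renewer=" + copy.getRenewer() + " seq=" + copy.getSequenceNumber());
  }
}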
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
The class TestSecurityTokenEditLog, method testEditsForCancelOnTokenExpire.
@Test(timeout = 10000)
public void testEditsForCancelOnTokenExpire() throws IOException, InterruptedException {
  long renewInterval = 2000;
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  conf.setLong(DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, renewInterval);
  conf.setLong(DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, renewInterval * 2);
  Text renewer = new Text(UserGroupInformation.getCurrentUser().getUserName());
  FSImage fsImage = mock(FSImage.class);
  FSEditLog log = mock(FSEditLog.class);
  doReturn(log).when(fsImage).getEditLog();
  FSNamesystem fsn = new FSNamesystem(conf, fsImage);
  DelegationTokenSecretManager dtsm = fsn.getDelegationTokenSecretManager();
  try {
    dtsm.startThreads();
    // get two tokens
    Token<DelegationTokenIdentifier> token1 = fsn.getDelegationToken(renewer);
    Token<DelegationTokenIdentifier> token2 = fsn.getDelegationToken(renewer);
    DelegationTokenIdentifier ident1 = token1.decodeIdentifier();
    DelegationTokenIdentifier ident2 = token2.decodeIdentifier();
    // verify we got the tokens
    verify(log, times(1)).logGetDelegationToken(eq(ident1), anyLong());
    verify(log, times(1)).logGetDelegationToken(eq(ident2), anyLong());
    // This is a little tricky: DTSM doesn't let us set the scan interval, so we
    // periodically sleep, then stop/start its threads to force a scan
    // (this stop/start pattern is sketched as a helper after the test).
    // Halfway to expiration, renew only the second token.
    Thread.sleep(renewInterval / 2);
    fsn.renewDelegationToken(token2);
    verify(log, times(1)).logRenewDelegationToken(eq(ident2), anyLong());
    // force a scan and give it a little time to complete
    dtsm.stopThreads();
    dtsm.startThreads();
    Thread.sleep(250);
    // no token has expired yet
    verify(log, times(0)).logCancelDelegationToken(eq(ident1));
    verify(log, times(0)).logCancelDelegationToken(eq(ident2));
    // sleep past expiration of the first (non-renewed) token
    Thread.sleep(renewInterval / 2);
    dtsm.stopThreads();
    dtsm.startThreads();
    Thread.sleep(250);
    // the non-renewed token should have been implicitly cancelled
    verify(log, times(1)).logCancelDelegationToken(eq(ident1));
    verify(log, times(0)).logCancelDelegationToken(eq(ident2));
    // sleep past expiration of the second (renewed) token
    Thread.sleep(renewInterval / 2);
    dtsm.stopThreads();
    dtsm.startThreads();
    Thread.sleep(250);
    // both tokens should have been implicitly cancelled by now
    verify(log, times(1)).logCancelDelegationToken(eq(ident1));
    verify(log, times(1)).logCancelDelegationToken(eq(ident2));
  } finally {
    dtsm.stopThreads();
  }
}
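The stop/start sequence used three times above is the test's way of forcing the secret manager to scan for expired tokens, since DelegationTokenSecretManager does not expose its scan interval. A minimal sketch of that pattern, pulled out into a hypothetical helper (not part of the original test), might look like this:

// Hypothetical helper (not in TestSecurityTokenEditLog) capturing the
// stop/start pattern used above to force an expired-token scan.
private static void forceExpiredTokenScan(DelegationTokenSecretManager dtsm,
    long settleMillis) throws IOException, InterruptedException {
  dtsm.stopThreads();          // halt the background token remover
  dtsm.startThreads();         // restarting triggers a fresh pass over stored tokens
  Thread.sleep(settleMillis);  // give the scan time to log any cancellations
}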
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
The class TestDelegationTokenRenewer, method testRMRestartWithExpiredToken.
// 1. The token expires before the app completes.
// 2. The RM shuts down.
// 3. When the RM recovers the app, token renewal fails because the token has expired.
//    The RM should request a new token and send it to the NM for log aggregation.
@Test
public void testRMRestartWithExpiredToken() throws Exception {
  Configuration yarnConf = new YarnConfiguration();
  yarnConf.setBoolean(YarnConfiguration.RM_PROXY_USER_PRIVILEGES_ENABLED, true);
  yarnConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  yarnConf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
  yarnConf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  UserGroupInformation.setConfiguration(yarnConf);
  // create token1
  Text userText1 = new Text("user1");
  DelegationTokenIdentifier dtId1 =
      new DelegationTokenIdentifier(userText1, new Text("renewer1"), userText1);
  final Token<DelegationTokenIdentifier> originalToken = new Token<>(
      dtId1.getBytes(), "password1".getBytes(), dtId1.getKind(), new Text("service1"));
  Credentials credentials = new Credentials();
  credentials.addToken(userText1, originalToken);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(yarnConf);
  MockRM rm1 = new TestSecurityMockRM(yarnConf, memStore);
  rm1.start();
  RMApp app = rm1.submitApp(200, "name", "user",
      new HashMap<ApplicationAccessType, String>(), false, "default", 1, credentials);
  // create token2
  Text userText2 = new Text("user1");
  DelegationTokenIdentifier dtId2 =
      new DelegationTokenIdentifier(userText1, new Text("renewer2"), userText2);
  final Token<DelegationTokenIdentifier> updatedToken = new Token<DelegationTokenIdentifier>(
      dtId2.getBytes(), "password2".getBytes(), dtId2.getKind(), new Text("service2"));
  AtomicBoolean firstRenewInvoked = new AtomicBoolean(false);
  AtomicBoolean secondRenewInvoked = new AtomicBoolean(false);
  MockRM rm2 = new TestSecurityMockRM(yarnConf, memStore) {

    @Override
    protected DelegationTokenRenewer createDelegationTokenRenewer() {
      return new DelegationTokenRenewer() {

        @Override
        protected void renewToken(final DelegationTokenToRenew dttr) throws IOException {
          if (dttr.token.equals(updatedToken)) {
            secondRenewInvoked.set(true);
            super.renewToken(dttr);
          } else if (dttr.token.equals(originalToken)) {
            firstRenewInvoked.set(true);
            throw new InvalidToken("Failed to renew");
          } else {
            throw new IOException("Unexpected");
          }
        }

        @Override
        protected Token<?>[] obtainSystemTokensForUser(String user,
            final Credentials credentials) throws IOException {
          credentials.addToken(updatedToken.getService(), updatedToken);
          return new Token<?>[] { updatedToken };
        }
      };
    }
  };
  // simulate restarting the RM
  rm2.start();
  // check that the NM can retrieve the token
  final MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm2.getResourceTrackerService());
  nm1.registerNode();
  NodeHeartbeatResponse response = nm1.nodeHeartbeat(true);
  ByteBuffer tokenBuffer = response.getSystemCredentialsForApps().get(app.getApplicationId());
  Assert.assertNotNull(tokenBuffer);
  Credentials appCredentials = new Credentials();
  DataInputByteBuffer buf = new DataInputByteBuffer();
  tokenBuffer.rewind();
  buf.reset(tokenBuffer);
  appCredentials.readTokenStorageStream(buf);
  Assert.assertTrue(firstRenewInvoked.get() && secondRenewInvoked.get());
  Assert.assertTrue(appCredentials.getAllTokens().contains(updatedToken));
}
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
The class TestDelegationTokenRenewer, method testCancelWithMultipleAppSubmissions.
// Test submitting an application with a token obtained by a previously
// submitted application that is set to be cancelled. The token should be
// renewed while any of the apps is running, and cancelled only once all of
// them complete (a simplified reference-counting sketch of this bookkeeping
// follows the test).
@Test(timeout = 30000)
public void testCancelWithMultipleAppSubmissions() throws Exception {
  MockRM rm = new TestSecurityMockRM(conf, null);
  rm.start();
  final MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService());
  nm1.registerNode();
  // create token1
  Text userText1 = new Text("user");
  DelegationTokenIdentifier dtId1 =
      new DelegationTokenIdentifier(userText1, new Text("renewer1"), userText1);
  final Token<DelegationTokenIdentifier> token1 = new Token<DelegationTokenIdentifier>(
      dtId1.getBytes(), "password1".getBytes(), dtId1.getKind(), new Text("service1"));
  Credentials credentials = new Credentials();
  credentials.addToken(token1.getService(), token1);
  DelegationTokenRenewer renewer = rm.getRMContext().getDelegationTokenRenewer();
  Assert.assertTrue(renewer.getAllTokens().isEmpty());
  Assert.assertFalse(Renewer.cancelled);
  Resource resource = Records.newRecord(Resource.class);
  resource.setMemorySize(200);
  RMApp app1 = rm.submitApp(resource, "name", "user", null, false, null, 2, credentials,
      null, true, false, false, null, 0, null, true, null);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
  rm.waitForState(app1.getApplicationId(), RMAppState.RUNNING);
  DelegationTokenToRenew dttr = renewer.getAllTokens().get(token1);
  Assert.assertNotNull(dttr);
  Assert.assertTrue(dttr.referringAppIds.contains(app1.getApplicationId()));
  RMApp app2 = rm.submitApp(resource, "name", "user", null, false, null, 2, credentials,
      null, true, false, false, null, 0, null, true, null);
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1);
  rm.waitForState(app2.getApplicationId(), RMAppState.RUNNING);
  Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
  // both running apps should now refer to the shared token
  Assert.assertTrue(dttr.referringAppIds.contains(app1.getApplicationId()));
  Assert.assertTrue(dttr.referringAppIds.contains(app2.getApplicationId()));
  Assert.assertFalse(Renewer.cancelled);
  finishAMAndWaitForComplete(app2, rm, nm1, am2, dttr);
  // app2 completes, app1 is still running; check the token is not cancelled
  Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
  Assert.assertTrue(dttr.referringAppIds.contains(app1.getApplicationId()));
  Assert.assertFalse(dttr.referringAppIds.contains(app2.getApplicationId()));
  Assert.assertFalse(dttr.isTimerCancelled());
  Assert.assertFalse(Renewer.cancelled);
  RMApp app3 = rm.submitApp(resource, "name", "user", null, false, null, 2, credentials,
      null, true, false, false, null, 0, null, true, null);
  MockAM am3 = MockRM.launchAndRegisterAM(app3, rm, nm1);
  rm.waitForState(app3.getApplicationId(), RMAppState.RUNNING);
  Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
  Assert.assertTrue(dttr.referringAppIds.contains(app1.getApplicationId()));
  Assert.assertTrue(dttr.referringAppIds.contains(app3.getApplicationId()));
  Assert.assertFalse(dttr.isTimerCancelled());
  Assert.assertFalse(Renewer.cancelled);
  finishAMAndWaitForComplete(app1, rm, nm1, am1, dttr);
  Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
  Assert.assertFalse(dttr.referringAppIds.contains(app1.getApplicationId()));
  Assert.assertTrue(dttr.referringAppIds.contains(app3.getApplicationId()));
  Assert.assertFalse(dttr.isTimerCancelled());
  Assert.assertFalse(Renewer.cancelled);
  finishAMAndWaitForComplete(app3, rm, nm1, am3, dttr);
  Assert.assertFalse(renewer.getAllTokens().containsKey(token1));
  Assert.assertTrue(dttr.referringAppIds.isEmpty());
  Assert.assertTrue(dttr.isTimerCancelled());
  Assert.assertTrue(Renewer.cancelled);
  // make sure the token has also been removed from appTokens
  Assert.assertFalse(renewer.getDelegationTokens().contains(token1));
}
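As noted before the test, what the assertions verify amounts to reference counting over a shared token: the renewer keeps the token alive while any submitted app refers to it, and cancels it only when the last referring app completes. The following is a simplified, illustrative sketch of that bookkeeping; the types and names are hypothetical and this is not the actual DelegationTokenRenewer implementation.

// Illustrative only: a toy reference counter over shared delegation tokens,
// approximating the referringAppIds behavior the assertions above check.
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class TokenRefCounter<TOKEN, APPID> {
  private final Map<TOKEN, Set<APPID>> referringApps = new HashMap<>();

  // Called when an app is submitted with a token: record the reference.
  void appSubmitted(TOKEN token, APPID app) {
    referringApps.computeIfAbsent(token, t -> new HashSet<>()).add(app);
  }

  // Called when an app completes: drop its reference and report whether the
  // token should now be cancelled (no running app refers to it anymore).
  boolean appFinished(TOKEN token, APPID app) {
    Set<APPID> apps = referringApps.get(token);
    if (apps == null) {
      return false;
    }
    apps.remove(app);
    if (apps.isEmpty()) {
      referringApps.remove(token);
      return true; // last reference gone: safe to cancel the token
    }
    return false;
  }
}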
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
The class TestDelegationTokenRenewer, method createTokens.
/**
 * Auxiliary method: create a delegation token for testing.
 *
 * @param renewer the renewer to record in the token identifier
 * @return a new MyToken owned by "user1", with its service set to "localhost:0"
 * @throws IOException if the secret manager cannot create the token
 */
static MyToken createTokens(Text renewer) throws IOException {
  Text user1 = new Text("user1");
  MyDelegationTokenSecretManager sm = new MyDelegationTokenSecretManager(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT,
      3600000, null);
  sm.startThreads();
  DelegationTokenIdentifier dtId1 = new DelegationTokenIdentifier(user1, renewer, user1);
  MyToken token1 = new MyToken(dtId1, sm);
  token1.setService(new Text("localhost:0"));
  return token1;
}
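A minimal usage sketch of this helper (hypothetical caller, not part of the test class); it assumes decodeIdentifier() can resolve the identifier class from the token kind, as the edit-log test above does:

// Hypothetical caller: obtain a test token and inspect the identifier it was built from.
Text renewer = new Text("myRenewer");
MyToken token = createTokens(renewer);
DelegationTokenIdentifier id = token.decodeIdentifier();
System.out.println("owner=" + id.getOwner() + " renewer=" + id.getRenewer()
    + " service=" + token.getService());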