Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
In class TestDelegationTokenRenewer, method testAppSubmissionWithPreviousToken:
// Test submitting an application with the token obtained by a previously
// submitted application.
@Test(timeout = 30000)
public void testAppSubmissionWithPreviousToken() throws Exception {
  MockRM rm = new TestSecurityMockRM(conf, null);
  rm.start();
  final MockNM nm1 =
      new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService());
  nm1.registerNode();
  // create Token1:
  Text userText1 = new Text("user");
  DelegationTokenIdentifier dtId1 =
      new DelegationTokenIdentifier(userText1, new Text("renewer1"), userText1);
  final Token<DelegationTokenIdentifier> token1 =
      new Token<DelegationTokenIdentifier>(dtId1.getBytes(),
          "password1".getBytes(), dtId1.getKind(), new Text("service1"));
  Credentials credentials = new Credentials();
  credentials.addToken(userText1, token1);
  // submit app1 with a token, set cancelTokenWhenComplete to false;
  Resource resource = Records.newRecord(Resource.class);
  resource.setMemorySize(200);
  RMApp app1 = rm.submitApp(resource, "name", "user", null, false, null, 2,
      credentials, null, true, false, false, null, 0, null, false, null);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
  rm.waitForState(app1.getApplicationId(), RMAppState.RUNNING);
  DelegationTokenRenewer renewer =
      rm.getRMContext().getDelegationTokenRenewer();
  DelegationTokenToRenew dttr = renewer.getAllTokens().get(token1);
  Assert.assertNotNull(dttr);
  // submit app2 with the same token, set cancelTokenWhenComplete to true;
  RMApp app2 = rm.submitApp(resource, "name", "user", null, false, null, 2,
      credentials, null, true, false, false, null, 0, null, true, null);
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1);
  rm.waitForState(app2.getApplicationId(), RMAppState.RUNNING);
  finishAMAndWaitForComplete(app2, rm, nm1, am2, dttr);
  // app2 completes, app1 is still running, check the token is not cancelled
  Assert.assertTrue(rm.getRMContext().getDelegationTokenRenewer()
      .getAllTokens().containsKey(token1));
  finishAMAndWaitForComplete(app1, rm, nm1, am1, dttr);
  // app1 was submitted with cancelTokenWhenComplete = false, so the token
  // must still not be cancelled after app1 completes either
  Assert.assertFalse(Renewer.cancelled);
}
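The token setup at the top of this test is the pattern both submissions reuse: build a DelegationTokenIdentifier, wrap it in a Token, and hand it to the application through a Credentials object. Below is a minimal standalone sketch of that pattern, not taken from the Hadoop sources; the class name and the owner/password/service strings are placeholders.

// Minimal sketch (assumptions noted above): create a DelegationTokenIdentifier,
// wrap it in a Token, and register it in a Credentials object.
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class DelegationTokenSetupSketch {
  public static Credentials credentialsWithToken() {
    Text owner = new Text("user");
    // identifier carries owner, renewer and real user
    DelegationTokenIdentifier dtId =
        new DelegationTokenIdentifier(owner, new Text("renewer1"), owner);
    // identifier bytes + arbitrary password, kind and service make a Token
    Token<DelegationTokenIdentifier> token =
        new Token<DelegationTokenIdentifier>(dtId.getBytes(),
            "password1".getBytes(), dtId.getKind(), new Text("service1"));
    Credentials credentials = new Credentials();
    // the alias only matters for lookups inside this Credentials object
    credentials.addToken(owner, token);
    return credentials;
  }
}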
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
In class TestDelegationTokenRenewer, method testConcurrentAddApplication:
@Test(timeout = 20000)
public void testConcurrentAddApplication() throws IOException, InterruptedException, BrokenBarrierException {
final CyclicBarrier startBarrier = new CyclicBarrier(2);
final CyclicBarrier endBarrier = new CyclicBarrier(2);
// this token uses barriers to block during renew
final Credentials creds1 = new Credentials();
final Token<DelegationTokenIdentifier> token1 = mock(Token.class);
when(token1.getKind()).thenReturn(KIND);
DelegationTokenIdentifier dtId1 = new DelegationTokenIdentifier(new Text("user1"), new Text("renewer"), new Text("user1"));
when(token1.decodeIdentifier()).thenReturn(dtId1);
creds1.addToken(new Text("token"), token1);
doReturn(true).when(token1).isManaged();
doAnswer(new Answer<Long>() {
public Long answer(InvocationOnMock invocation) throws InterruptedException, BrokenBarrierException {
startBarrier.await();
endBarrier.await();
return Long.MAX_VALUE;
}
}).when(token1).renew(any(Configuration.class));
// this dummy token fakes renewing
final Credentials creds2 = new Credentials();
final Token<DelegationTokenIdentifier> token2 = mock(Token.class);
when(token2.getKind()).thenReturn(KIND);
when(token2.decodeIdentifier()).thenReturn(dtId1);
creds2.addToken(new Text("token"), token2);
doReturn(true).when(token2).isManaged();
doReturn(Long.MAX_VALUE).when(token2).renew(any(Configuration.class));
// fire up the renewer
final DelegationTokenRenewer dtr = createNewDelegationTokenRenewer(conf, counter);
RMContext mockContext = mock(RMContext.class);
when(mockContext.getSystemCredentialsForApps()).thenReturn(new ConcurrentHashMap<ApplicationId, ByteBuffer>());
ClientRMService mockClientRMService = mock(ClientRMService.class);
when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
InetSocketAddress sockAddr = InetSocketAddress.createUnresolved("localhost", 1234);
when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
dtr.setRMContext(mockContext);
when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr);
dtr.init(conf);
dtr.start();
// submit a job that blocks during renewal
Thread submitThread = new Thread() {
@Override
public void run() {
dtr.addApplicationAsync(mock(ApplicationId.class), creds1, false, "user", new Configuration());
}
};
submitThread.start();
// wait till 1st submit blocks, then submit another
startBarrier.await();
dtr.addApplicationAsync(mock(ApplicationId.class), creds2, false, "user", new Configuration());
// signal 1st to complete
endBarrier.await();
submitThread.join();
}
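The two CyclicBarriers are what make this test deterministic: the stubbed renew() parks the renewer thread on startBarrier, so the test knows the first renewal is in flight before it submits the second application, and endBarrier then releases it. Here is a generic sketch of that barrier-in-a-Mockito-stub technique, not taken from the Hadoop tests; it uses a mocked Callable instead of a Token, and all names are illustrative.

// Generic sketch (assumptions noted above): block inside a Mockito stub on
// one barrier so the test can run concurrent work, then release it on another.
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.concurrent.Callable;
import java.util.concurrent.CyclicBarrier;

public class BlockingStubSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws Exception {
    final CyclicBarrier startBarrier = new CyclicBarrier(2);
    final CyclicBarrier endBarrier = new CyclicBarrier(2);
    final Callable<Long> blocking = mock(Callable.class);
    doAnswer(invocation -> {
      startBarrier.await();  // signals "the call is now in progress"
      endBarrier.await();    // waits until the test says "finish"
      return Long.MAX_VALUE;
    }).when(blocking).call();

    Thread worker = new Thread(() -> {
      try {
        blocking.call();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    });
    worker.start();

    startBarrier.await();    // the first call is provably blocked in the stub
    // ... perform the concurrent work under test here ...
    endBarrier.await();      // let the blocked call return
    worker.join();
  }
}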
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
In class DFSClient, method getDelegationToken:
/**
* @see ClientProtocol#getDelegationToken(Text)
*/
public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
    throws IOException {
  assert dtService != null;
  try (TraceScope ignored = tracer.newScope("getDelegationToken")) {
    Token<DelegationTokenIdentifier> token =
        namenode.getDelegationToken(renewer);
    if (token != null) {
      token.setService(this.dtService);
      LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(token));
    } else {
      LOG.info("Cannot get delegation token from " + renewer);
    }
    return token;
  }
}
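Application code normally does not call DFSClient directly; the same token is reachable through the public FileSystem API, which delegates to the method above on HDFS. The sketch below shows that route under stated assumptions: the NameNode URI, renewer name, and output path are placeholders.

// Minimal sketch (assumptions noted above): fetch HDFS delegation tokens via
// the FileSystem API instead of DFSClient.
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class FetchDelegationTokenSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs =
        FileSystem.get(URI.create("hdfs://nn.example.com:8020"), conf);

    // single token; on HDFS this is a Token<DelegationTokenIdentifier>
    // whose service is filled in by DFSClient#getDelegationToken as above
    Token<?> token = fs.getDelegationToken("renewer-user");
    System.out.println("Got token: " + token);

    // or collect tokens for a whole job into Credentials and persist them
    Credentials creds = new Credentials();
    fs.addDelegationTokens("renewer-user", creds);
    creds.writeTokenStorageFile(new Path("/tmp/tokens"), conf);
  }
}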
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
In class HAUtil, method cloneDelegationTokenForLogicalUri:
/**
* Locate a delegation token associated with the given HA cluster URI, and if
* one is found, clone it to also represent the underlying namenode address.
* @param ugi the UGI to modify
* @param haUri the logical URI for the cluster
* @param nnAddrs collection of NNs in the cluster to which the token
* applies
*/
public static void cloneDelegationTokenForLogicalUri(
    UserGroupInformation ugi, URI haUri,
    Collection<InetSocketAddress> nnAddrs) {
  // this cloning logic is only used by hdfs
  Text haService = HAUtilClient.buildTokenServiceForLogicalUri(haUri,
      HdfsConstants.HDFS_URI_SCHEME);
  Token<DelegationTokenIdentifier> haToken =
      tokenSelector.selectToken(haService, ugi.getTokens());
  if (haToken != null) {
    for (InetSocketAddress singleNNAddr : nnAddrs) {
      // this is a minor hack to prevent physical HA tokens from being
      // exposed to the user via UGI.getCredentials(), otherwise these
      // cloned tokens may be inadvertently propagated to jobs
      Token<DelegationTokenIdentifier> specificToken =
          haToken.privateClone(buildTokenService(singleNNAddr));
      Text alias = new Text(
          HAUtilClient.buildTokenServicePrefixForLogicalUri(
              HdfsConstants.HDFS_URI_SCHEME)
          + "//" + specificToken.getService());
      ugi.addToken(alias, specificToken);
      if (LOG.isDebugEnabled()) {
        LOG.debug("Mapped HA service delegation token for logical URI " + haUri
            + " to namenode " + singleNNAddr);
      }
    }
  } else {
    if (LOG.isDebugEnabled()) {
      LOG.debug("No HA service delegation token found for logical URI "
          + haUri);
    }
  }
}
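The method bridges two kinds of token "service" names: the logical HA service derived from the cluster URI (what the client's credentials are keyed by) and the per-NameNode physical services (what each cloned token is keyed by). The sketch below, not taken from the Hadoop sources, just prints both forms; the cluster name and the use of localhost ports to stand in for two NameNodes are assumptions.

// Minimal sketch (assumptions noted above): compare the logical HA token
// service with the physical per-NameNode services.
import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.hdfs.HAUtilClient;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SecurityUtil;

public class TokenServiceNamesSketch {
  public static void main(String[] args) {
    URI logicalUri = URI.create("hdfs://mycluster");
    Text logicalService = HAUtilClient.buildTokenServiceForLogicalUri(
        logicalUri, HdfsConstants.HDFS_URI_SCHEME);
    System.out.println("logical service:  " + logicalService);

    // localhost:8020 and localhost:8021 stand in for two NameNode addresses
    for (int port : new int[] {8020, 8021}) {
      InetSocketAddress nnAddr = new InetSocketAddress("localhost", port);
      // this is the per-NameNode service the cloned tokens are keyed by
      System.out.println("physical service: "
          + SecurityUtil.buildTokenService(nnAddr));
    }
  }
}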
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
In class JspHelper, method getTokenUGI:
private static UserGroupInformation getTokenUGI(ServletContext context,
    HttpServletRequest request, String tokenString, Configuration conf)
    throws IOException {
  final Token<DelegationTokenIdentifier> token =
      new Token<DelegationTokenIdentifier>();
  token.decodeFromUrlString(tokenString);
  InetSocketAddress serviceAddress = getNNServiceAddress(context, request);
  if (serviceAddress != null) {
    SecurityUtil.setTokenService(token, serviceAddress);
    token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
  }
  ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
  DataInputStream in = new DataInputStream(buf);
  DelegationTokenIdentifier id = new DelegationTokenIdentifier();
  id.readFields(in);
  if (context != null) {
    final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
    if (nn != null) {
      // Verify the token.
      nn.getNamesystem().verifyToken(id, token.getPassword());
    }
  }
  UserGroupInformation ugi = id.getUser();
  ugi.addToken(token);
  return ugi;
}
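The core of this method is a decode path: the token arrives as a URL-safe string, its identifier bytes are re-read into a DelegationTokenIdentifier, and the identifier yields the UGI. The sketch below round-trips a token through the same encode/decode steps; it is not from the Hadoop sources, the owner/renewer/password/service strings are placeholders, and it performs no server-side verification.

// Minimal sketch (assumptions noted above): encode a token to a URL string,
// decode it, and recover the owner UGI from the identifier bytes.
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;

import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class DecodeTokenSketch {
  public static void main(String[] args) throws Exception {
    // build a token the same way the tests earlier on this page do
    DelegationTokenIdentifier dtId =
        new DelegationTokenIdentifier(new Text("user"), new Text("renewer"),
            new Text("user"));
    Token<DelegationTokenIdentifier> token =
        new Token<DelegationTokenIdentifier>(dtId.getBytes(),
            "password".getBytes(), dtId.getKind(), new Text("service"));

    // encode for a URL parameter, then decode on the "other side"
    String urlString = token.encodeToUrlString();
    Token<DelegationTokenIdentifier> decoded =
        new Token<DelegationTokenIdentifier>();
    decoded.decodeFromUrlString(urlString);

    // re-read the identifier and derive the UGI, as getTokenUGI does
    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    id.readFields(new DataInputStream(
        new ByteArrayInputStream(decoded.getIdentifier())));
    UserGroupInformation ugi = id.getUser();
    ugi.addToken(decoded);
    System.out.println("Token owner: " + ugi.getUserName());
  }
}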