Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager in project hadoop by apache.
From the class TestWebHdfsUrl, method getWebHdfsFileSystem:
private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
    Configuration conf) throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    // Mint a delegation token for the test user against a mocked namesystem.
    DelegationTokenIdentifier dtId =
        new DelegationTokenIdentifier(new Text(ugi.getUserName()), null, null);
    FSNamesystem namesystem = mock(FSNamesystem.class);
    DelegationTokenSecretManager dtSecretManager =
        new DelegationTokenSecretManager(86400000, 86400000, 86400000,
            86400000, namesystem);
    dtSecretManager.startThreads();
    Token<DelegationTokenIdentifier> token =
        new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
    // 'uri' is a field of the enclosing test class.
    SecurityUtil.setTokenService(token,
        NetUtils.createSocketAddr(uri.getAuthority()));
    token.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
    ugi.addToken(token);
  }
  return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
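For reference, the four numeric arguments to the DelegationTokenSecretManager constructor are the key update interval, the token max lifetime, the token renew interval, and the token remover scan interval, all in milliseconds; here each is set to one day (86400000 ms). A minimal sketch with the values named for readability follows; ONE_DAY_MS and newTestSecretManager are illustrative, not part of the test, and the argument order assumed here follows the upstream AbstractDelegationTokenSecretManager constructor:

private static final long ONE_DAY_MS = 24 * 60 * 60 * 1000L; // 86400000

// Sketch: the same secret-manager setup with the constructor arguments
// labeled (names assumed from the upstream constructor declaration).
private static DelegationTokenSecretManager newTestSecretManager(
    FSNamesystem namesystem) {
  return new DelegationTokenSecretManager(
      ONE_DAY_MS,   // delegationKeyUpdateInterval
      ONE_DAY_MS,   // delegationTokenMaxLifetime
      ONE_DAY_MS,   // delegationTokenRenewInterval
      ONE_DAY_MS,   // delegationTokenRemoverScanInterval
      namesystem);
}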
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager in project hadoop by apache.
From the class TestSecurityTokenEditLog, method testEditsForCancelOnTokenExpire:
@Test(timeout = 10000)
public void testEditsForCancelOnTokenExpire() throws IOException,
    InterruptedException {
  long renewInterval = 2000;
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  conf.setLong(DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, renewInterval);
  conf.setLong(DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, renewInterval * 2);
  Text renewer = new Text(UserGroupInformation.getCurrentUser().getUserName());
  FSImage fsImage = mock(FSImage.class);
  FSEditLog log = mock(FSEditLog.class);
  doReturn(log).when(fsImage).getEditLog();
  FSNamesystem fsn = new FSNamesystem(conf, fsImage);
  DelegationTokenSecretManager dtsm = fsn.getDelegationTokenSecretManager();
  try {
    dtsm.startThreads();
    // get two tokens
    Token<DelegationTokenIdentifier> token1 = fsn.getDelegationToken(renewer);
    Token<DelegationTokenIdentifier> token2 = fsn.getDelegationToken(renewer);
    DelegationTokenIdentifier ident1 = token1.decodeIdentifier();
    DelegationTokenIdentifier ident2 = token2.decodeIdentifier();
    // verify we got the tokens
    verify(log, times(1)).logGetDelegationToken(eq(ident1), anyLong());
    verify(log, times(1)).logGetDelegationToken(eq(ident2), anyLong());
    // This is a little tricky because the DTSM doesn't let us set the scan
    // interval, so we periodically sleep, then stop/start the threads to
    // force a scan.
    // Renew the second token halfway to its expiration.
    Thread.sleep(renewInterval / 2);
    fsn.renewDelegationToken(token2);
    verify(log, times(1)).logRenewDelegationToken(eq(ident2), anyLong());
    // force a scan and give it a little time to complete
    dtsm.stopThreads();
    dtsm.startThreads();
    Thread.sleep(250);
    // no token has expired yet
    verify(log, times(0)).logCancelDelegationToken(eq(ident1));
    verify(log, times(0)).logCancelDelegationToken(eq(ident2));
    // sleep past the expiration of the first (non-renewed) token
    Thread.sleep(renewInterval / 2);
    dtsm.stopThreads();
    dtsm.startThreads();
    Thread.sleep(250);
    // the non-renewed token should have been implicitly cancelled
    verify(log, times(1)).logCancelDelegationToken(eq(ident1));
    verify(log, times(0)).logCancelDelegationToken(eq(ident2));
    // sleep past the expiration of the second (renewed) token
    Thread.sleep(renewInterval / 2);
    dtsm.stopThreads();
    dtsm.startThreads();
    Thread.sleep(250);
    // both tokens should have been implicitly cancelled by now
    verify(log, times(1)).logCancelDelegationToken(eq(ident1));
    verify(log, times(1)).logCancelDelegationToken(eq(ident2));
  } finally {
    dtsm.stopThreads();
  }
}
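Since the remover's scan interval cannot be changed on a live secret manager, the test forces a fresh scan by bouncing the background threads and then sleeping briefly. That repeated pattern, extracted into a hypothetical helper (forceTokenRemoverScan is an illustrative name, not part of the test):

// Hypothetical helper: restarting the background threads makes the token
// remover begin a new scan immediately; the short sleep gives it time to
// cancel any tokens that have already expired.
private static void forceTokenRemoverScan(DelegationTokenSecretManager dtsm)
    throws IOException, InterruptedException {
  dtsm.stopThreads();
  dtsm.startThreads();
  Thread.sleep(250);
}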
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager in project hadoop by apache.
From the class TestDelegationToken, method testDTManagerInSafeMode:
/**
* Test that the delegation token secret manager only runs when the
* NN is out of safe mode. This is because the secret manager
* has to log to the edit log, which should not be written in
* safe mode. Regression test for HDFS-2579.
*/
@Test
public void testDTManagerInSafeMode() throws Exception {
  cluster.startDataNodes(config, 1, true, StartupOption.REGULAR, null);
  FileSystem fs = cluster.getFileSystem();
  for (int i = 0; i < 5; i++) {
    DFSTestUtil.createFile(fs, new Path("/test-" + i), 100, (short) 1, 1L);
  }
  cluster.getConfiguration(0).setInt(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY, 500);
  cluster.getConfiguration(0).setInt(
      DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 30000);
  cluster.setWaitSafeMode(false);
  cluster.restartNameNode();
  NameNode nn = cluster.getNameNode();
  assertTrue(nn.isInSafeMode());
  DelegationTokenSecretManager sm =
      NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
  assertFalse("Secret manager should not run in safe mode", sm.isRunning());
  NameNodeAdapter.leaveSafeMode(nn);
  assertTrue("Secret manager should start when safe mode is exited",
      sm.isRunning());
  LOG.info("========= entering safemode again");
  NameNodeAdapter.enterSafeMode(nn, false);
  assertFalse("Secret manager should stop again when safe mode is manually entered",
      sm.isRunning());
  // Set the cluster to leave safe mode quickly on its own.
  cluster.getConfiguration(0).setInt(
      DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
  cluster.setWaitSafeMode(true);
  cluster.restartNameNode();
  nn = cluster.getNameNode();
  sm = NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
  assertFalse(nn.isInSafeMode());
  assertTrue(sm.isRunning());
}
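The invariant this test exercises is that the secret manager runs exactly when the NameNode is out of safe mode, so its edit-log writes never happen while edits are forbidden. A condensed sketch of that invariant (checkSecretManagerMirrorsSafeMode is an illustrative helper, not part of the test):

// Illustrative invariant from HDFS-2579: the secret manager should be
// running if and only if the NameNode is out of safe mode.
private static void checkSecretManagerMirrorsSafeMode(
    NameNode nn, DelegationTokenSecretManager sm) {
  assertEquals(!nn.isInSafeMode(), sm.isRunning());
}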
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager in project hadoop by apache.
From the class TestDataNodeUGIProvider, method getWebHdfsFileSystem:
private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
    Configuration conf, List<Token<DelegationTokenIdentifier>> tokens)
    throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    DelegationTokenIdentifier dtId =
        new DelegationTokenIdentifier(new Text(ugi.getUserName()), null, null);
    FSNamesystem namesystem = mock(FSNamesystem.class);
    DelegationTokenSecretManager dtSecretManager =
        new DelegationTokenSecretManager(86400000, 86400000, 86400000,
            86400000, namesystem);
    dtSecretManager.startThreads();
    // Mint two tokens from the same identifier and attach both to the UGI.
    Token<DelegationTokenIdentifier> token1 =
        new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
    Token<DelegationTokenIdentifier> token2 =
        new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
    SecurityUtil.setTokenService(token1,
        NetUtils.createSocketAddr(uri.getAuthority()));
    SecurityUtil.setTokenService(token2,
        NetUtils.createSocketAddr(uri.getAuthority()));
    token1.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
    token2.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
    tokens.add(token1);
    tokens.add(token2);
    ugi.addToken(token1);
    ugi.addToken(token2);
  }
  return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
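This variant differs from the TestWebHdfsUrl method above only in minting two tokens from the same identifier and returning them through the tokens list. The duplicated per-token setup could be factored into a hypothetical helper (mintWebHdfsToken is an illustrative name; uri is the enclosing test class's field):

// Hypothetical helper: create a token backed by the secret manager, point
// its service at the test URI, and mark it as a WebHDFS token.
private Token<DelegationTokenIdentifier> mintWebHdfsToken(
    DelegationTokenIdentifier dtId,
    DelegationTokenSecretManager dtSecretManager) {
  Token<DelegationTokenIdentifier> token =
      new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
  SecurityUtil.setTokenService(token,
      NetUtils.createSocketAddr(uri.getAuthority()));
  token.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
  return token;
}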
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager in project hadoop by apache.
From the class TestWebHDFSForHA, method testClientFailoverWhenStandbyNNHasStaleCredentials:
@Test
public void testClientFailoverWhenStandbyNNHasStaleCredentials()
    throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();
    fs = (WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);
    final DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
    identifier.readFields(new DataInputStream(
        new ByteArrayInputStream(token.getIdentifier())));
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    final DelegationTokenSecretManager secretManager =
        NameNodeAdapter.getDtSecretManager(cluster.getNamesystem(0));
    ExceptionHandler eh = new ExceptionHandler();
    eh.initResponse(mock(HttpServletResponse.class));
    Response resp = null;
    try {
      secretManager.retrievePassword(identifier);
    } catch (IOException e) {
      // Mimic the UserProvider class logic (server side) by throwing
      // SecurityException here.
      Assert.assertTrue(e instanceof SecretManager.InvalidToken);
      resp = eh.toResponse(new SecurityException(e));
    }
    // The Response (resp) below is what the server will send to the client.
    //
    // BEFORE the HDFS-6475 fix, resp.entity is
    //   {"RemoteException":{"exception":"SecurityException",
    //    "javaClassName":"java.lang.SecurityException",
    //    "message":"Failed to obtain user group information:
    //    org.apache.hadoop.security.token.SecretManager$InvalidToken:
    //    StandbyException"}}
    // AFTER the fix, resp.entity is
    //   {"RemoteException":{"exception":"StandbyException",
    //    "javaClassName":"org.apache.hadoop.ipc.StandbyException",
    //    "message":"Operation category READ is not supported in
    //    state standby"}}
    //
    // Mimic the client-side logic by parsing the response from the server.
    Map<?, ?> m = (Map<?, ?>) JSON.parse(resp.getEntity().toString());
    RemoteException re = JsonUtilClient.toRemoteException(m);
    Exception unwrapped = re.unwrapRemoteException(StandbyException.class);
    Assert.assertTrue(unwrapped instanceof StandbyException);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
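The manual readFields step above deserializes the token's raw identifier bytes into a DelegationTokenIdentifier so the test can replay it against the now-stale secret manager of the standby NameNode. Extracted as a hypothetical helper (decodeHdfsIdentifier is an illustrative name, not part of the test):

// Hypothetical helper: deserialize a DelegationTokenIdentifier from the
// raw identifier bytes carried by a Token, exactly as the test does inline.
private static DelegationTokenIdentifier decodeHdfsIdentifier(Token<?> token)
    throws IOException {
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  identifier.readFields(new DataInputStream(
      new ByteArrayInputStream(token.getIdentifier())));
  return identifier;
}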