Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
The class JspHelper, method getTokenUGI.
/**
 * Reconstructs a {@link UserGroupInformation} from the delegation token
 * string supplied with an HTTP request, verifying the token against the
 * local NameNode when one is available in the servlet context.
 *
 * @param context servlet context, used to locate the NameNode (may be null)
 * @param request the incoming HTTP request
 * @param tokenString URL-encoded delegation token
 * @param conf configuration (currently unused here; kept for callers)
 * @return the UGI of the token's owner, with the token attached
 * @throws IOException if the token cannot be decoded or fails verification
 */
private static UserGroupInformation getTokenUGI(ServletContext context,
    HttpServletRequest request, String tokenString, Configuration conf)
    throws IOException {
  final Token<DelegationTokenIdentifier> token =
      new Token<DelegationTokenIdentifier>();
  token.decodeFromUrlString(tokenString);

  InetSocketAddress nnAddress = getNNServiceAddress(context, request);
  if (nnAddress != null) {
    // Point the token at the resolved NameNode service and mark it as an
    // HDFS delegation token.
    SecurityUtil.setTokenService(token, nnAddress);
    token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
  }

  // Deserialize the token identifier from its raw byte representation.
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  identifier.readFields(new DataInputStream(
      new ByteArrayInputStream(token.getIdentifier())));

  if (context != null) {
    NameNode namenode = NameNodeHttpServer.getNameNodeFromContext(context);
    if (namenode != null) {
      // Verify the token against the NameNode's secret manager.
      namenode.getNamesystem().verifyToken(identifier, token.getPassword());
    }
  }

  UserGroupInformation ugi = identifier.getUser();
  ugi.addToken(token);
  return ugi;
}
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
The class TestCheckPointForSecurityTokens, method testSaveNamespace.
/**
 * Tests save namespace: verifies that saveNamespace truncates the edit log
 * to a single START transaction, and that delegation tokens issued before
 * and between restarts can still be renewed and cancelled afterwards.
 */
@Test
public void testSaveNamespace() throws IOException {
  DistributedFileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    FSNamesystem namesystem = cluster.getNamesystem();
    String renewer = UserGroupInformation.getLoginUser().getUserName();
    Token<DelegationTokenIdentifier> token1 = namesystem.getDelegationToken(new Text(renewer));
    Token<DelegationTokenIdentifier> token2 = namesystem.getDelegationToken(new Text(renewer));
    // Saving image without safe mode should fail
    DFSAdmin admin = new DFSAdmin(conf);
    String[] args = new String[] { "-saveNamespace" };
    // verify that the edits file is NOT empty
    NameNode nn = cluster.getNameNode();
    for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.scanLog(Long.MAX_VALUE, true);
      long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
      // Fixed: removed a stray empty statement (';') that followed this assert.
      assertEquals("In-progress log " + log + " should have 5 transactions", 5, numTransactions);
    }
    // Saving image in safe mode should succeed
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      admin.run(args);
    } catch (Exception e) {
      // Preserve the original exception as the cause instead of flattening
      // it to just its message, so failures keep their stack trace.
      throw new IOException(e.getMessage(), e);
    }
    // verify that the edits file is empty except for the START txn
    for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.scanLog(Long.MAX_VALUE, true);
      long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
      assertEquals("In-progress log " + log + " should only have START txn", 1, numTransactions);
    }
    // restart cluster
    restartClusterKeepingState(conf);
    // Should be able to renew & cancel the delegation token after cluster restart
    try {
      renewToken(token1);
      renewToken(token2);
    } catch (IOException e) {
      // Include the exception so a failure is diagnosable from the report.
      fail("Could not renew or cancel the token: " + e);
    }
    namesystem = cluster.getNamesystem();
    Token<DelegationTokenIdentifier> token3 = namesystem.getDelegationToken(new Text(renewer));
    Token<DelegationTokenIdentifier> token4 = namesystem.getDelegationToken(new Text(renewer));
    // restart cluster again
    restartClusterKeepingState(conf);
    namesystem = cluster.getNamesystem();
    Token<DelegationTokenIdentifier> token5 = namesystem.getDelegationToken(new Text(renewer));
    try {
      renewToken(token1);
      renewToken(token2);
      renewToken(token3);
      renewToken(token4);
      renewToken(token5);
    } catch (IOException e) {
      fail("Could not renew or cancel the token: " + e);
    }
    // restart cluster again
    restartClusterKeepingState(conf);
    namesystem = cluster.getNamesystem();
    try {
      renewToken(token1);
      cancelToken(token1);
      renewToken(token2);
      cancelToken(token2);
      renewToken(token3);
      cancelToken(token3);
      renewToken(token4);
      cancelToken(token4);
      renewToken(token5);
      cancelToken(token5);
    } catch (IOException e) {
      fail("Could not renew or cancel the token: " + e);
    }
  } finally {
    if (fs != null)
      fs.close();
    if (cluster != null)
      cluster.shutdown();
  }
}

/**
 * Shuts down the running mini-cluster and brings it back up without
 * reformatting, so existing namesystem state (including delegation tokens)
 * is reloaded from disk.
 */
private void restartClusterKeepingState(Configuration conf) throws IOException {
  cluster.shutdown();
  cluster = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
  cluster.waitActive();
}
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
The class DataNodeUGIProvider, method ugi.
/**
 * Resolves the {@link UserGroupInformation} for the current request,
 * consulting the UGI cache first and building a new entry on a miss.
 * With security enabled the request's delegation token keys the cache;
 * otherwise the (doAs, remote user) pair does.
 *
 * @return the UGI for the requesting user
 * @throws IOException if the underlying UGI construction fails
 */
UserGroupInformation ugi() throws IOException {
  try {
    if (!UserGroupInformation.isSecurityEnabled()) {
      final String queryUser = params.userName();
      final String queryDoAsUser = params.doAsUser();
      // Fall back to the configured default web user when the request
      // did not name one.
      final String effectiveUser = (queryUser != null)
          ? queryUser
          : JspHelper.getDefaultWebUserName(params.conf());
      return ugiCache.get(
          buildNonTokenCacheKey(queryDoAsUser, effectiveUser),
          new Callable<UserGroupInformation>() {
            @Override
            public UserGroupInformation call() throws Exception {
              return nonTokenUGI(queryUser, queryDoAsUser, effectiveUser);
            }
          });
    }
    final Token<DelegationTokenIdentifier> token = params.delegationToken();
    return ugiCache.get(buildTokenCacheKey(token),
        new Callable<UserGroupInformation>() {
          @Override
          public UserGroupInformation call() throws Exception {
            return tokenUGI(token);
          }
        });
  } catch (ExecutionException e) {
    // The cache wraps loader failures; rethrow IOExceptions as-is and
    // wrap anything else.
    Throwable cause = e.getCause();
    throw (cause instanceof IOException)
        ? (IOException) cause
        : new IOException(cause);
  }
}
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
The class ParameterParser, method delegationToken.
/**
 * Decodes the delegation token carried in the request parameters and
 * stamps it with the correct service name for the target NameNode.
 *
 * @return the decoded token with its service field set
 * @throws IOException if the token string cannot be decoded
 */
Token<DelegationTokenIdentifier> delegationToken() throws IOException {
  final Token<DelegationTokenIdentifier> token =
      new Token<DelegationTokenIdentifier>();
  token.decodeFromUrlString(param(DelegationParam.NAME));
  URI nnUri = URI.create(HDFS_URI_SCHEME + "://" + namenodeId());
  // A logical (HA) URI needs a synthetic service name; a physical address
  // uses the standard host:port service string.
  token.setService(HAUtilClient.isLogicalUri(conf, nnUri)
      ? HAUtilClient.buildTokenServiceForLogicalUri(nnUri, HDFS_URI_SCHEME)
      : SecurityUtil.buildTokenService(nnUri));
  return token;
}
Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier in project hadoop by apache.
The class FSNamesystem, method cancelDelegationToken.
/**
 * Cancels a delegation token on behalf of the remote caller and records
 * the cancellation in the edit log.
 *
 * @param token token to cancel
 * @throws IOException on error (e.g. the NameNode is in safe mode, or the
 *         caller is not permitted to cancel this token)
 */
void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    // Re-check under the lock: state may have changed since the first check.
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot cancel delegation token");
    final String cancelerName = getRemoteUser().getUserName();
    final DelegationTokenIdentifier ident =
        dtSecretManager.cancelToken(token, cancelerName);
    getEditLog().logCancelDelegationToken(ident);
  } finally {
    writeUnlock("cancelDelegationToken");
  }
  // Sync the edit log outside the write lock to avoid blocking other writers.
  getEditLog().logSync();
}
Aggregations