Use of org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier in project hbase by apache.
The class TestTableMapReduceUtil, method testInitCredentialsForCluster4.
@Test
@SuppressWarnings("unchecked")
public void testInitCredentialsForCluster4() throws Exception {
  HBaseTestingUtil util1 = new HBaseTestingUtil();
  // Assume util1 is an insecure cluster.
  // Do not start util1: a secure and an insecure mini cluster cannot be booted at the same time.
  HBaseTestingUtil util2 = new HBaseTestingUtil();
  File keytab = new File(util2.getDataTestDir("keytab").toUri().getPath());
  MiniKdc kdc = util2.setupMiniKdc(keytab);
  try {
    String username = UserGroupInformation.getLoginUser().getShortUserName();
    String userPrincipal = username + "/localhost";
    kdc.createPrincipal(keytab, userPrincipal, HTTP_PRINCIPAL);
    loginUserFromKeytab(userPrincipal + '@' + kdc.getRealm(), keytab.getAbsolutePath());
    try (Closeable util2Closeable = startSecureMiniCluster(util2, kdc, userPrincipal)) {
      Configuration conf1 = util1.getConfiguration();
      Job job = Job.getInstance(conf1);
      TableMapReduceUtil.initCredentialsForCluster(job, util2.getConfiguration());
      Credentials credentials = job.getCredentials();
      Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
      assertEquals(1, tokens.size());
      String clusterId = ZKClusterId.readClusterIdZNode(util2.getZooKeeperWatcher());
      Token<AuthenticationTokenIdentifier> tokenForCluster =
        (Token<AuthenticationTokenIdentifier>) credentials.getToken(new Text(clusterId));
      assertEquals(userPrincipal + '@' + kdc.getRealm(),
        tokenForCluster.decodeIdentifier().getUsername());
    }
  } finally {
    kdc.stop();
  }
}
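The assertion above looks the token up under the peer cluster's id and decodes its identifier. As a hedged illustration (not part of the HBase test), a small helper along these lines could print the fields an AuthenticationTokenIdentifier carries once such a token has landed in a job's Credentials; the TokenInspector class name is hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public final class TokenInspector {
  private TokenInspector() {
  }

  // Looks up the HBase auth token stored under the given cluster id and prints
  // the fields carried by its AuthenticationTokenIdentifier.
  @SuppressWarnings("unchecked")
  public static void describe(Credentials credentials, String clusterId) throws IOException {
    Token<AuthenticationTokenIdentifier> token =
      (Token<AuthenticationTokenIdentifier>) credentials.getToken(new Text(clusterId));
    if (token == null) {
      System.out.println("no HBase token stored under cluster id " + clusterId);
      return;
    }
    AuthenticationTokenIdentifier id = token.decodeIdentifier();
    System.out.println("kind=" + token.getKind() + " user=" + id.getUsername() + " keyId="
      + id.getKeyId() + " issued=" + id.getIssueDate() + " expires=" + id.getExpirationDate());
  }
}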
Use of org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier in project oozie by apache.
The class HbaseCredentials, method obtainToken.
private void obtainToken(Credentials credentials, final Configuration jobConf, Context context)
  throws IOException, InterruptedException {
  String user = context.getWorkflow().getUser();
  UserGroupInformation ugi = UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
  User u = User.create(ugi);
  // A direct doAs is required here vs. User#obtainAuthTokenForJob(...)
  // See OOZIE-2419 for more
  XLog.getLog(getClass()).debug("Getting Hbase token for user {0}", user);
  Token<AuthenticationTokenIdentifier> token =
    u.runAs(new PrivilegedExceptionAction<Token<AuthenticationTokenIdentifier>>() {
      public Token<AuthenticationTokenIdentifier> run() throws Exception {
        Token<AuthenticationTokenIdentifier> newToken = null;
        try (Connection connection = ConnectionFactory.createConnection(jobConf)) {
          newToken = TokenUtil.obtainToken(connection);
        }
        return newToken;
      }
    });
  XLog.getLog(getClass()).debug("Got token, adding it to credentials.");
  credentials.addToken(CredentialsProviderFactory.getUniqueAlias(token), token);
}
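Once obtainToken has stored the token in the action's Credentials, downstream code typically finds it by token kind rather than by alias. A minimal sketch of that lookup follows; it is not Oozie code and assumes the token kind is AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE ("HBASE_AUTH_TOKEN").

import java.io.IOException;
import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public final class HbaseTokenLookup {
  private HbaseTokenLookup() {
  }

  // Scans the current user's tokens for the HBase authentication token,
  // matching by kind rather than by the alias it was stored under.
  public static Token<? extends TokenIdentifier> findHbaseToken() throws IOException {
    for (Token<? extends TokenIdentifier> t : UserGroupInformation.getCurrentUser().getTokens()) {
      if (AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(t.getKind())) {
        return t;
      }
    }
    return null;
  }
}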
Use of org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier in project hbase by apache.
The class TestVerifyReplicationSecureClusterCredentials, method testJobCredentials.
@Test
@SuppressWarnings("unchecked")
public void testJobCredentials() throws Exception {
  Job job = new VerifyReplication().createSubmittableJob(new Configuration(UTIL1.getConfiguration()),
    new String[] { peer.get(), "table" });
  Credentials credentials = job.getCredentials();
  Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
  assertEquals(2, tokens.size());
  String clusterId1 = ZKClusterId.readClusterIdZNode(UTIL1.getZooKeeperWatcher());
  Token<AuthenticationTokenIdentifier> tokenForCluster1 =
    (Token<AuthenticationTokenIdentifier>) credentials.getToken(new Text(clusterId1));
  assertEquals(FULL_USER_PRINCIPAL, tokenForCluster1.decodeIdentifier().getUsername());
  String clusterId2 = ZKClusterId.readClusterIdZNode(UTIL2.getZooKeeperWatcher());
  Token<AuthenticationTokenIdentifier> tokenForCluster2 =
    (Token<AuthenticationTokenIdentifier>) credentials.getToken(new Text(clusterId2));
  assertEquals(FULL_USER_PRINCIPAL, tokenForCluster2.decodeIdentifier().getUsername());
}
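For reference, the pattern this test exercises, one HBase auth token per cluster, can be sketched roughly as below. The sketch is not the VerifyReplication implementation; the class name and the peer cluster key format ("zk1,zk2,zk3:2181:/hbase") are illustrative assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public final class DualClusterCredentials {
  private DualClusterCredentials() {
  }

  // Attaches one HBase auth token for the local cluster and one for the peer
  // cluster identified by its ZooKeeper cluster key, e.g. "zk1,zk2,zk3:2181:/hbase".
  public static Job createJob(Configuration localConf, String peerClusterKey) throws IOException {
    Job job = Job.getInstance(localConf, "dual-cluster-credentials-sketch");
    TableMapReduceUtil.initCredentialsForCluster(job, localConf);
    Configuration peerConf = HBaseConfiguration.createClusterConf(localConf, peerClusterKey);
    TableMapReduceUtil.initCredentialsForCluster(job, peerConf);
    return job;
  }
}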
Use of org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier in project hbase by apache.
The class TestTableMapReduceUtil, method testInitCredentialsForCluster2.
@Test
@SuppressWarnings("unchecked")
public void testInitCredentialsForCluster2() throws Exception {
  HBaseTestingUtil util1 = new HBaseTestingUtil();
  HBaseTestingUtil util2 = new HBaseTestingUtil();
  File keytab = new File(util1.getDataTestDir("keytab").toUri().getPath());
  MiniKdc kdc = util1.setupMiniKdc(keytab);
  try {
    String username = UserGroupInformation.getLoginUser().getShortUserName();
    String userPrincipal = username + "/localhost";
    kdc.createPrincipal(keytab, userPrincipal, HTTP_PRINCIPAL);
    loginUserFromKeytab(userPrincipal + '@' + kdc.getRealm(), keytab.getAbsolutePath());
    try (Closeable util1Closeable = startSecureMiniCluster(util1, kdc, userPrincipal);
      Closeable util2Closeable = startSecureMiniCluster(util2, kdc, userPrincipal)) {
      Configuration conf1 = util1.getConfiguration();
      Job job = Job.getInstance(conf1);
      TableMapReduceUtil.initCredentialsForCluster(job, util2.getConfiguration());
      Credentials credentials = job.getCredentials();
      Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
      assertEquals(1, tokens.size());
      String clusterId = ZKClusterId.readClusterIdZNode(util2.getZooKeeperWatcher());
      Token<AuthenticationTokenIdentifier> tokenForCluster =
        (Token<AuthenticationTokenIdentifier>) credentials.getToken(new Text(clusterId));
      assertEquals(userPrincipal + '@' + kdc.getRealm(),
        tokenForCluster.decodeIdentifier().getUsername());
    }
  } finally {
    kdc.stop();
  }
}
Use of org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier in project hbase by apache.
The class SecureBulkLoadManager, method secureBulkLoadHFiles.
public Map<byte[], List<Path>> secureBulkLoadHFiles(final HRegion region,
  final BulkLoadHFileRequest request, List<String> clusterIds) throws IOException {
  final List<Pair<byte[], String>> familyPaths = new ArrayList<>(request.getFamilyPathCount());
  for (ClientProtos.BulkLoadHFileRequest.FamilyPath el : request.getFamilyPathList()) {
    familyPaths.add(new Pair<>(el.getFamily().toByteArray(), el.getPath()));
  }
  Token<AuthenticationTokenIdentifier> userToken = null;
  if (userProvider.isHadoopSecurityEnabled()) {
    userToken = new Token<>(request.getFsToken().getIdentifier().toByteArray(),
      request.getFsToken().getPassword().toByteArray(), new Text(request.getFsToken().getKind()),
      new Text(request.getFsToken().getService()));
  }
  final String bulkToken = request.getBulkToken();
  User user = getActiveUser();
  final UserGroupInformation ugi = user.getUGI();
  if (userProvider.isHadoopSecurityEnabled()) {
    try {
      Token<AuthenticationTokenIdentifier> tok = ClientTokenUtil.obtainToken(conn).get();
      if (tok != null) {
        boolean b = ugi.addToken(tok);
        LOG.debug("token added " + tok + " for user " + ugi + " return=" + b);
      }
    } catch (Exception ioe) {
      LOG.warn("unable to add token", ioe);
    }
  }
  if (userToken != null) {
    ugi.addToken(userToken);
  } else if (userProvider.isHadoopSecurityEnabled()) {
    // A null user token is only tolerated in non-secure mode (for mini cluster testing);
    // with Hadoop security enabled it is an error.
    throw new DoNotRetryIOException("User token cannot be null");
  }
  if (region.getCoprocessorHost() != null) {
    region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
  }
  Map<byte[], List<Path>> map = null;
  try {
    incrementUgiReference(ugi);
    // Two fs tokens may be needed: one from the requesting user ('request user'),
    // another for the target fs (HBase region server principal).
    if (userProvider.isHadoopSecurityEnabled()) {
      FsDelegationToken targetfsDelegationToken = new FsDelegationToken(userProvider, "renewer");
      targetfsDelegationToken.acquireDelegationToken(fs);
      Token<?> targetFsToken = targetfsDelegationToken.getUserToken();
      if (targetFsToken != null
        && (userToken == null || !targetFsToken.getService().equals(userToken.getService()))) {
        ugi.addToken(targetFsToken);
      }
    }
    map = ugi.doAs(new PrivilegedAction<Map<byte[], List<Path>>>() {
      @Override
      public Map<byte[], List<Path>> run() {
        FileSystem fs = null;
        try {
          /*
           * This is creating and caching a new FileSystem instance. Other code called "beneath"
           * this method will rely on this FileSystem instance being in the cache. This is
           * important as those methods make _no_ attempt to close this FileSystem instance. It
           * is critical that here, in SecureBulkLoadManager, we are tracking the lifecycle and
           * closing the FS when safe to do so.
           */
          fs = FileSystem.get(conf);
          for (Pair<byte[], String> el : familyPaths) {
            Path stageFamily = new Path(bulkToken, Bytes.toString(el.getFirst()));
            if (!fs.exists(stageFamily)) {
              fs.mkdirs(stageFamily);
              fs.setPermission(stageFamily, PERM_ALL_ACCESS);
            }
          }
          if (fsCreatedListener != null) {
            fsCreatedListener.accept(region);
          }
          // Run bulkLoadHFiles as the requesting user, to enable access prior to staging.
          return region.bulkLoadHFiles(familyPaths, true,
            new SecureBulkLoadListener(fs, bulkToken, conf), request.getCopyFile(), clusterIds,
            request.getReplicate());
        } catch (Exception e) {
          LOG.error("Failed to complete bulk load", e);
        }
        return null;
      }
    });
  } finally {
    decrementUgiReference(ugi);
    try {
      if (!UserGroupInformation.getLoginUser().equals(ugi) && !isUserReferenced(ugi)) {
        FileSystem.closeAllForUGI(ugi);
      }
    } catch (IOException e) {
      LOG.error("Failed to close FileSystem for: {}", ugi, e);
    }
    if (region.getCoprocessorHost() != null) {
      region.getCoprocessorHost().postBulkLoadHFile(familyPaths, map);
    }
  }
  return map;
}
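The userToken above is rebuilt on the server from four serialized fields of the request's fsToken (identifier, password, kind, service). As a hedged sketch of the inverse operation, the snippet below reduces a token to those fields; the TokenFields holder class is hypothetical, since the real client carries these values inside the BulkLoadHFileRequest protobuf rather than in a Java class.

import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public final class TokenTransport {
  // Hypothetical holder for the four fields a Token is reduced to on the wire.
  public static final class TokenFields {
    public final byte[] identifier;
    public final byte[] password;
    public final String kind;
    public final String service;

    TokenFields(byte[] identifier, byte[] password, String kind, String service) {
      this.identifier = identifier;
      this.password = password;
      this.kind = kind;
      this.service = service;
    }
  }

  // Extracts identifier, password, kind and service, mirroring how
  // secureBulkLoadHFiles reassembles them into a new Token<>(...).
  public static TokenFields toFields(Token<AuthenticationTokenIdentifier> token) {
    return new TokenFields(token.getIdentifier(), token.getPassword(),
      token.getKind().toString(), token.getService().toString());
  }
}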