Use of org.apache.hadoop.security.HadoopKerberosName in project drill (by axbaretto): class AbstractServerConnection, method finalizeSaslSession.
@Override
public void finalizeSaslSession() throws IOException {
    // The remote drillbit authenticated via SASL/Kerberos; its authorization ID
    // is its service principal. The 'primary' (short name) of that principal
    // must match this drillbit's own login user, otherwise reject the session.
    final String authzId = getSaslServer().getAuthorizationID();
    final String remotePrimary = new HadoopKerberosName(authzId).getShortName();
    final String localPrimary = UserGroupInformation.getLoginUser().getShortUserName();
    if (!localPrimary.equals(remotePrimary)) {
        throw new SaslException(String.format(
            "'primary' part of remote drillbit's service principal "
                + "does not match with this drillbit's. Expected: '%s' Actual: '%s'",
            localPrimary, remotePrimary));
    }
    getLogger().debug("Authenticated connection for {}", authzId);
}
Use of org.apache.hadoop.security.HadoopKerberosName in project hadoop (by apache): class TestSecureLogins, method testValidKerberosName.
@Test
public void testValidKerberosName() throws Throwable {
    // Each of these principal forms must resolve to a short name via the
    // configured auth_to_local rules without throwing.
    for (String principal : new String[] {ZOOKEEPER, ZOOKEEPER_LOCALHOST, ZOOKEEPER_REALM}) {
        new HadoopKerberosName(principal).getShortName();
    }
    // standard rules don't pick this up
    // new HadoopKerberosName(ZOOKEEPER_LOCALHOST_REALM).getShortName();
}
Use of org.apache.hadoop.security.HadoopKerberosName in project ozone (by apache): class OzoneDelegationTokenSecretManager, method cancelToken.
/**
 * Cancel a token by removing it from store and cache.
 *
 * @param token the delegation token to cancel
 * @param canceller name of the user requesting the cancellation
 * @return Identifier of the canceled token
 * @throws InvalidToken for invalid token
 * @throws AccessControlException if the user isn't allowed to cancel
 */
@Override
public OzoneTokenIdentifier cancelToken(Token<OzoneTokenIdentifier> token, String canceller) throws IOException {
    OzoneTokenIdentifier tokenId = OzoneTokenIdentifier.readProtoBuf(token.getIdentifier());
    if (LOG.isDebugEnabled()) {
        LOG.debug("Token cancellation requested for identifier: {}", formatTokenId(tokenId));
    }
    if (tokenId.getUser() == null) {
        throw new InvalidToken("Token with no owner " + formatTokenId(tokenId));
    }
    String owner = tokenId.getUser().getUserName();
    Text renewer = tokenId.getRenewer();
    String cancellerShortName = new HadoopKerberosName(canceller).getShortName();
    // Cancellation is permitted only for the token's owner, or for a caller
    // whose Kerberos short name matches the token's designated renewer.
    boolean isOwner = canceller.equals(owner);
    boolean isRenewer = renewer != null && !renewer.toString().isEmpty()
        && cancellerShortName.equals(renewer.toString());
    if (!isOwner && !isRenewer) {
        throw new AccessControlException(canceller + " is not authorized to cancel the token " + formatTokenId(tokenId));
    }
    // This check will be removed, when HA/Non-HA code is merged.
    if (isRatisEnabled) {
        // On the OM leader the token must still exist in the in-memory map.
        if (currentTokens.get(tokenId) == null) {
            throw new InvalidToken("Token not found in-memory map of tokens" + formatTokenId(tokenId));
        }
    } else {
        // Best-effort removal from the persistent store: a store failure is
        // logged but does not abort the cancellation.
        try {
            store.removeToken(tokenId);
        } catch (IOException e) {
            LOG.error("Unable to remove token " + tokenId.getSequenceNumber(), e);
        }
        if (currentTokens.remove(tokenId) == null) {
            throw new InvalidToken("Token not found " + formatTokenId(tokenId));
        }
    }
    return tokenId;
}
Use of org.apache.hadoop.security.HadoopKerberosName in project pxf (by greenplum-db): class BaseSecurityService, method doAs.
/**
 * If user impersonation is configured, examines the request for the
 * presence of the expected security headers and create a proxy user to
 * execute further request chain. If security is enabled for the
 * configuration server used for the requests, makes sure that a login
 * UGI for the Kerberos principal is created.
 *
 * <p>Responds with an HTTP error if the header is missing or the chain
 * processing throws an exception.
 *
 * @param <T> type of the result produced by the action
 * @param context the context for the given request
 * @param action the action to be executed
 * @return the result of running {@code action} as the resolved user
 * @throws Exception if the operation fails
 */
public <T> T doAs(RequestContext context, PrivilegedAction<T> action) throws Exception {
// retrieve user header and make sure header is present and is not empty
final String gpdbUser = context.getUser();
final String serverName = context.getServerName();
final String configDirectory = context.getConfig();
final Configuration configuration = context.getConfiguration();
final boolean isConstrainedDelegationEnabled = secureLogin.isConstrainedDelegationEnabled(configuration);
final boolean isUserImpersonationEnabled = secureLogin.isUserImpersonationEnabled(configuration);
final boolean isSecurityEnabled = Utilities.isSecurityEnabled(configuration);
// Establish the UGI for the login user or the Kerberos principal for the given server, if applicable
boolean exceptionDetected = false;
UserGroupInformation userGroupInformation = null;
try {
/*
get a login user that is either of:
- (non-secured cluster) - a user running the PXF process
- (non-secured cluster) - a user specified by pxf.service.user.name
- (secured cluster) - a Kerberos principal
*/
UserGroupInformation loginUser = secureLogin.getLoginUser(serverName, configDirectory, configuration);
// start with assuming identity of the PXF service to be the same as the login user
String serviceUser = loginUser.getUserName();
// for security without impersonation, allow the service user to be configurable
if (!isUserImpersonationEnabled && isSecurityEnabled) {
// if pxf.service.user.name property was provided we use the value as the remote user instead of
// the principal defined in pxf.service.kerberos.principal. However, either the principal will need
// to have proxy privileges on hadoop or Kerberos constrained delegation needs to be enabled.
String pxfServiceUserName = configuration.get(SecureLogin.CONFIG_KEY_SERVICE_USER_NAME);
if (StringUtils.isNotBlank(pxfServiceUserName)) {
serviceUser = pxfServiceUserName;
}
}
// establish the identity of the remote user that will be presented to the backend system:
// with impersonation the end user from GPDB, otherwise the (possibly overridden) service user
String remoteUser = (isUserImpersonationEnabled ? gpdbUser : serviceUser);
// ensure the remote user name is a fully qualified Kerberos principal, if applicable
if (isSecurityEnabled) {
// derive realm from the logged in user, rather than parsing principal info ourselves
String realm = (new HadoopKerberosName(loginUser.getUserName())).getRealm();
// include realm in the principal name, if required
remoteUser = expandRemoteUserName(configuration, remoteUser, realm, isConstrainedDelegationEnabled);
}
// set remote user so that it can be retrieved in the downstream logic, e.g. by PxfSaslPropertiesResolver
configuration.set(ConfigurationFactory.PXF_SESSION_REMOTE_USER_PROPERTY, remoteUser);
// validate and set properties required for enabling Kerberos constrained delegation, if necessary
processConstrainedDelegation(configuration, isSecurityEnabled, isConstrainedDelegationEnabled, remoteUser, loginUser.getUserName());
// Retrieve proxy user UGI from the UGI of the logged in user
if (isUserImpersonationEnabled || isConstrainedDelegationEnabled) {
LOG.debug("Creating proxy user = {}", remoteUser);
userGroupInformation = ugiProvider.createProxyUser(remoteUser, loginUser);
} else {
LOG.debug("Creating remote user = {}", remoteUser);
userGroupInformation = ugiProvider.createRemoteUser(remoteUser, loginUser, isSecurityEnabled);
}
LOG.debug("Retrieved proxy user {} for server {}", userGroupInformation, serverName);
LOG.debug("Performing request for gpdb_user = {} as [remote_user={}, service_user={}, login_user={}] with{} impersonation", gpdbUser, remoteUser, serviceUser, loginUser.getUserName(), isUserImpersonationEnabled ? "" : "out");
// Execute the servlet chain as that user
return userGroupInformation.doAs(action);
} catch (Exception e) {
// record the failure for the log message below, then propagate unchanged
exceptionDetected = true;
throw e;
} finally {
LOG.debug("Releasing UGI resources. {}", exceptionDetected ? " Exception while processing." : "");
// Always destroy the per-request UGI (if one was created), even on failure;
// cleanup errors are logged and suppressed so they never mask the real outcome.
try {
if (userGroupInformation != null) {
ugiProvider.destroy(userGroupInformation);
}
} catch (Throwable t) {
LOG.warn("Error releasing UGI resources, ignored.", t);
}
}
}
Aggregations