Use of org.apache.hadoop.security.UserGroupInformation in project hbase by apache: class RegionServerQuotaManager, method checkQuota.
/**
 * Check the quota for the current (rpc-context) user.
 * Returns the OperationQuota used to get the available quota and
 * to report the data/usage of the operation.
 * @param region the region where the operation will be performed
 * @param numWrites number of writes to perform
 * @param numReads number of short-reads to perform
 * @param numScans number of scans to perform
 * @return the OperationQuota
 * @throws ThrottlingException if the operation cannot be executed because the quota has been exceeded
 */
private OperationQuota checkQuota(final Region region, final int numWrites,
    final int numReads, final int numScans) throws IOException, ThrottlingException {
  User user = RpcServer.getRequestUser();
  UserGroupInformation ugi;
  if (user != null) {
    ugi = user.getUGI();
  } else {
    ugi = User.getCurrent().getUGI();
  }
  TableName table = region.getTableDesc().getTableName();
  OperationQuota quota = getQuota(ugi, table);
  try {
    quota.checkQuota(numWrites, numReads, numScans);
  } catch (ThrottlingException e) {
    LOG.debug("Throttling exception for user=" + ugi.getUserName()
        + " table=" + table + " numWrites=" + numWrites
        + " numReads=" + numReads + " numScans=" + numScans
        + ": " + e.getMessage());
    throw e;
  }
  return quota;
}
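For context, a minimal caller sketch (hypothetical, not from the HBase source) of how the returned OperationQuota might be used: the quota is checked before the operation, the actual data consumed is reported back, and the quota is closed afterwards. The names region and row are assumed, and this relies on the standard OperationQuota methods addGetResult and close.

// Hypothetical caller: one short read under quota (names `region`, `row` assumed).
void getUnderQuota(Region region, byte[] row) throws IOException {
  OperationQuota quota = checkQuota(region, 0, 1, 0); // 0 writes, 1 read, 0 scans
  try {
    Result result = region.get(new Get(row));
    quota.addGetResult(result); // report the data size actually read
  } finally {
    quota.close();              // return any over-estimated, pre-charged quota
  }
}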
Use of org.apache.hadoop.security.UserGroupInformation in project hbase by apache: class TokenProvider, method getAuthenticationToken.
@Override
public void getAuthenticationToken(RpcController controller,
    AuthenticationProtos.GetAuthenticationTokenRequest request,
    RpcCallback<AuthenticationProtos.GetAuthenticationTokenResponse> done) {
  AuthenticationProtos.GetAuthenticationTokenResponse.Builder response =
      AuthenticationProtos.GetAuthenticationTokenResponse.newBuilder();
  try {
    if (secretManager == null) {
      throw new IOException("No secret manager configured for token authentication");
    }
    User currentUser = RpcServer.getRequestUser();
    if (currentUser == null) {
      throw new AccessDeniedException("No authenticated user for request!");
    }
    UserGroupInformation ugi = currentUser.getUGI();
    if (!isAllowedDelegationTokenOp(ugi)) {
      LOG.warn("Token generation denied for user=" + currentUser.getName()
          + ", authMethod=" + ugi.getAuthenticationMethod());
      throw new AccessDeniedException("Token generation only allowed for Kerberos authenticated clients");
    }
    Token<AuthenticationTokenIdentifier> token = secretManager.generateToken(currentUser.getName());
    response.setToken(TokenUtil.toToken(token));
  } catch (IOException ioe) {
    CoprocessorRpcUtils.setControllerException(controller, ioe);
  }
  done.run(response.build());
}
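For the other side of this RPC, a hedged client-side sketch of how this endpoint is typically reached: TokenUtil.obtainToken invokes the TokenProvider coprocessor over a Kerberos-authenticated connection, and the returned token is attached to the caller's UGI. Connection configuration details are assumed.

// Hypothetical client-side usage of the TokenProvider endpoint above.
void obtainAndAttachToken() throws IOException {
  Configuration conf = HBaseConfiguration.create();
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    Token<AuthenticationTokenIdentifier> token = TokenUtil.obtainToken(connection);
    UserGroupInformation.getCurrentUser().addToken(token); // use it for later requests
  }
}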
Use of org.apache.hadoop.security.UserGroupInformation in project hive by apache: class Initiator, method checkForCompaction.
private CompactionType checkForCompaction(final CompactionInfo ci, final ValidTxnList txns,
    final StorageDescriptor sd, final Map<String, String> tblproperties, final String runAs)
    throws IOException, InterruptedException {
  // If it's marked as having too many aborted transactions, we already know we need to compact
  if (ci.tooManyAborts) {
    LOG.debug("Found too many aborted transactions for " + ci.getFullPartitionName()
        + ", initiating major compaction");
    return CompactionType.MAJOR;
  }
  if (runJobAsSelf(runAs)) {
    return determineCompactionType(ci, txns, sd, tblproperties);
  } else {
    LOG.info("Going to initiate as user " + runAs);
    UserGroupInformation ugi =
        UserGroupInformation.createProxyUser(runAs, UserGroupInformation.getLoginUser());
    CompactionType compactionType = ugi.doAs(new PrivilegedExceptionAction<CompactionType>() {
      @Override
      public CompactionType run() throws Exception {
        return determineCompactionType(ci, txns, sd, tblproperties);
      }
    });
    try {
      FileSystem.closeAllForUGI(ugi);
    } catch (IOException exception) {
      LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for "
          + ci.getFullPartitionName(), exception);
    }
    return compactionType;
  }
}
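The closeAllForUGI call matters here: FileSystem.get() caches one FileSystem instance per UGI, so a long-running service that impersonates many users would otherwise accumulate handles. A stripped-down sketch of the same pattern, with a hypothetical user name and path:

// Minimal sketch (hypothetical values): do the work as a proxy user, then
// drop that user's cached FileSystem handles.
long sizeAsProxyUser() throws IOException, InterruptedException {
  UserGroupInformation ugi = UserGroupInformation.createProxyUser(
      "tableOwner", UserGroupInformation.getLoginUser());
  try {
    return ugi.doAs((PrivilegedExceptionAction<Long>) () ->
        FileSystem.get(new Configuration())
            .getFileStatus(new Path("/warehouse/db/t")).getLen());
  } finally {
    FileSystem.closeAllForUGI(ugi); // each UGI gets its own cache entry
  }
}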
Use of org.apache.hadoop.security.UserGroupInformation in project hive by apache: class TempletonControllerJob, method buildHcatDelegationToken.
private String buildHcatDelegationToken(String user)
    throws IOException, InterruptedException, TException {
  final HiveConf c = new HiveConf();
  LOG.debug("Creating hive metastore delegation token for user " + user);
  final UserGroupInformation ugi = UgiFactory.getUgi(user);
  UserGroupInformation real = ugi.getRealUser();
  return real.doAs(new PrivilegedExceptionAction<String>() {
    @Override
    public String run() throws IOException, TException, InterruptedException {
      final IMetaStoreClient client = HCatUtil.getHiveMetastoreClient(c);
      return ugi.doAs(new PrivilegedExceptionAction<String>() {
        @Override
        public String run() throws IOException, TException, InterruptedException {
          String u = ugi.getUserName();
          return client.getDelegationToken(c.getUser(), u);
        }
      });
    }
  });
}
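The nested doAs works because a proxy UGI carries a reference to the real (login) user that created it: the outer doAs runs as the service principal, which is what the metastore authenticates, while the token is issued in the proxied user's name. A hedged illustration of that relationship, assuming the service is logged in via Kerberos and may impersonate a hypothetical user "alice":

// Hypothetical illustration of the real-user/proxy-user split used above.
void showProxyRelationship() throws IOException {
  UserGroupInformation login = UserGroupInformation.getLoginUser();
  UserGroupInformation proxy = UserGroupInformation.createProxyUser("alice", login);
  System.out.println(proxy.getUserName());               // alice
  System.out.println(proxy.getRealUser().getUserName()); // the service principal
}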
Use of org.apache.hadoop.security.UserGroupInformation in project hbase by apache: class ThriftHttpServlet, method doPost.
@Override
protected void doPost(HttpServletRequest request, HttpServletResponse response)
    throws ServletException, IOException {
  String effectiveUser = request.getRemoteUser();
  if (securityEnabled) {
    try {
      // As the Thrift HTTP transport doesn't support SPNEGO yet (THRIFT-889),
      // Kerberos authentication is done at the servlet level.
      effectiveUser = doKerberosAuth(request);
      // It is standard for client applications to expect this header.
      // Please see http://tools.ietf.org/html/rfc4559 for more details.
      response.addHeader(WWW_AUTHENTICATE, NEGOTIATE + " " + outToken);
    } catch (HttpAuthenticationException e) {
      LOG.error("Kerberos authentication failed", e);
      // Send a 401 to the client
      response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
      response.addHeader(WWW_AUTHENTICATE, NEGOTIATE);
      response.getWriter().println("Authentication Error: " + e.getMessage());
      return;
    }
  }
  String doAsUserFromQuery = request.getHeader("doAs");
  if (effectiveUser == null) {
    effectiveUser = realUser.getShortUserName();
  }
  if (doAsUserFromQuery != null) {
    if (!doAsEnabled) {
      throw new ServletException("Support for proxyuser is not configured");
    }
    // The authenticated remote user is attempting a 'doAs' proxy-user request.
    UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser(effectiveUser);
    // Create and attempt to authorize a proxy user on behalf of the client.
    UserGroupInformation ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, remoteUser);
    // Validate the proxy-user authorization.
    try {
      ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
    } catch (AuthorizationException e) {
      throw new ServletException(e.getMessage());
    }
    effectiveUser = doAsUserFromQuery;
  }
  hbaseHandler.setEffectiveUser(effectiveUser);
  super.doPost(request, response);
}
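The ProxyUsers.authorize call enforces the standard Hadoop impersonation rules, normally configured in core-site.xml via hadoop.proxyuser.<user>.hosts and hadoop.proxyuser.<user>.groups. A hedged sketch of the same check with the rules set programmatically; the user names and address are made up:

// Hypothetical sketch of the impersonation check performed above.
void checkImpersonation() throws IOException, AuthorizationException {
  Configuration conf = new Configuration();
  conf.set("hadoop.proxyuser.thrift.hosts", "*");
  conf.set("hadoop.proxyuser.thrift.groups", "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

  UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser("thrift");
  UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser("alice", remoteUser);
  // Throws AuthorizationException if "thrift" may not impersonate "alice"
  // from this address.
  ProxyUsers.authorize(proxyUgi, "127.0.0.1");
}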