Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project hbase by apache.
The class AccessController, method postOpen.
@Override
public void postOpen(ObserverContext<RegionCoprocessorEnvironment> c) {
  RegionCoprocessorEnvironment env = c.getEnvironment();
  final Region region = env.getRegion();
  if (region == null) {
    LOG.error("NULL region from RegionCoprocessorEnvironment in postOpen()");
    return;
  }
  if (AccessControlLists.isAclRegion(region)) {
    aclRegion = true;
    // When this region is under recovering state, initialize will be handled by postLogReplay
    if (!region.isRecovering()) {
      try {
        initialize(env);
      } catch (IOException ex) {
        // If we can't load the permissions, it's better to fail
        // than perform checks incorrectly
        throw new RuntimeException("Failed to initialize permissions cache", ex);
      }
    }
  } else {
    initialized = true;
  }
}
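For comparison, a minimal sketch of a custom RegionObserver hooking postOpen the same way. RegionOpenLogger and its log messages are illustrative names, not part of the HBase source; the sketch assumes the HBase 1.x coprocessor API, where BaseRegionObserver is the usual base class:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Region;

// Hypothetical observer that logs every region as it finishes opening.
public class RegionOpenLogger extends BaseRegionObserver {
  private static final Log LOG = LogFactory.getLog(RegionOpenLogger.class);

  @Override
  public void postOpen(ObserverContext<RegionCoprocessorEnvironment> c) {
    // Same defensive null check as AccessController.postOpen() above.
    Region region = c.getEnvironment().getRegion();
    if (region == null) {
      LOG.error("NULL region from RegionCoprocessorEnvironment in postOpen()");
      return;
    }
    LOG.info("Region opened: " + region.getRegionInfo().getRegionNameAsString());
  }
}

Such an observer would typically be installed through the hbase.coprocessor.region.classes property, the same mechanism used to load AccessController itself.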
Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project hbase by apache.
The class AccessController, method preCheckAndDelete.
@Override
public boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
    final byte[] row, final byte[] family, final byte[] qualifier,
    final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator,
    final Delete delete, final boolean result) throws IOException {
  // An ACL on a delete is useless, we shouldn't allow it
  if (delete.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL) != null) {
    throw new DoNotRetryIOException("ACL on checkAndDelete has no effect: " + delete.toString());
  }
  // Require READ and WRITE permissions on the table, CF, and the KV covered
  // by the delete
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[], ? extends Collection<byte[]>> families = makeFamilyMap(family, qualifier);
  User user = getActiveUser(c);
  AuthResult authResult = permissionGranted(OpType.CHECK_AND_DELETE, user, env, families,
      Action.READ, Action.WRITE);
  logResult(authResult);
  if (!authResult.isAllowed()) {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      delete.setAttribute(CHECK_COVERING_PERM, TRUE);
    } else if (authorizationEnabled) {
      throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
    }
  }
  return result;
}
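To see what this hook guards, here is a sketch of the client-side call it intercepts, against the HBase 1.x client API. The table name t1, the family/qualifier f1:q1, and the row and comparison values are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndDeleteExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      Delete delete = new Delete(Bytes.toBytes("row1"));
      delete.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("q1"));
      // Deletes f1:q1 only if it currently equals "expected". Before the
      // region executes this, preCheckAndDelete() requires READ and WRITE
      // on the covered cell, and rejects any ACL attribute on the Delete.
      boolean deleted = table.checkAndDelete(Bytes.toBytes("row1"),
          Bytes.toBytes("f1"), Bytes.toBytes("q1"),
          Bytes.toBytes("expected"), delete);
      System.out.println("deleted = " + deleted);
    }
  }
}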
Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project hbase by apache.
The class AccessController, method preOpen.
/* ---- RegionObserver implementation ---- */
@Override
public void preOpen(ObserverContext<RegionCoprocessorEnvironment> c) throws IOException {
  RegionCoprocessorEnvironment env = c.getEnvironment();
  final Region region = env.getRegion();
  if (region == null) {
    LOG.error("NULL region from RegionCoprocessorEnvironment in preOpen()");
  } else {
    HRegionInfo regionInfo = region.getRegionInfo();
    if (regionInfo.getTable().isSystemTable()) {
      checkSystemOrSuperUser(getActiveUser(c));
    } else {
      requirePermission(getActiveUser(c), "preOpen", Action.ADMIN);
    }
  }
}
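preOpen demands the ADMIN action for non-system tables, so a user must have been granted it beforehand. A minimal sketch of issuing that grant with the HBase 1.x AccessControlClient; the user bob and table t1 are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantAdminExample {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection()) {
      // Grant ADMIN to user "bob" on table "t1"; null family and qualifier
      // mean the grant covers the whole table. This is the kind of grant
      // that satisfies requirePermission(..., Action.ADMIN) in preOpen().
      AccessControlClient.grant(conn, TableName.valueOf("t1"), "bob",
          null, null, Permission.Action.ADMIN);
    }
  }
}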
Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project hbase by apache.
The class AccessController, method preAppend.
@Override
public Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append)
    throws IOException {
  User user = getActiveUser(c);
  checkForReservedTagPresence(user, append);
  // Require WRITE permission to the table, CF, and the KV to be appended
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[], ? extends Collection<Cell>> families = append.getFamilyCellMap();
  AuthResult authResult = permissionGranted(OpType.APPEND, user, env, families, Action.WRITE);
  logResult(authResult);
  if (!authResult.isAllowed()) {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      append.setAttribute(CHECK_COVERING_PERM, TRUE);
    } else if (authorizationEnabled) {
      throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
    }
  }
  byte[] bytes = append.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL);
  if (bytes != null) {
    if (cellFeaturesEnabled) {
      addCellPermissions(bytes, append.getFamilyCellMap());
    } else {
      throw new DoNotRetryIOException("Cell ACLs cannot be persisted");
    }
  }
  return null;
}
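The tail of preAppend is what makes client-supplied cell ACLs work: Mutation.setACL() serializes the permission into the OP_ATTRIBUTE_ACL attribute that addCellPermissions() reads back. A client-side sketch, assuming cell features are enabled server-side; the table t1, user alice, and cell coordinates are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendWithCellAclExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      Append append = new Append(Bytes.toBytes("row1"));
      append.add(Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("-suffix"));
      // Attach a READ grant for "alice" to the appended cells; preAppend()
      // copies it onto the new cells via addCellPermissions(), or rejects
      // the request with DoNotRetryIOException if cell ACLs are disabled.
      append.setACL("alice", new Permission(Permission.Action.READ));
      table.append(append);
    }
  }
}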
Use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project hbase by apache.
The class AccessController, method internalPreRead.
private void internalPreRead(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Query query, OpType opType) throws IOException {
  Filter filter = query.getFilter();
  // Don't wrap an AccessControlFilter
  if (filter != null && filter instanceof AccessControlFilter) {
    return;
  }
  User user = getActiveUser(c);
  RegionCoprocessorEnvironment env = c.getEnvironment();
  Map<byte[], ? extends Collection<byte[]>> families = null;
  switch (opType) {
    case GET:
    case EXISTS:
      families = ((Get) query).getFamilyMap();
      break;
    case SCAN:
      families = ((Scan) query).getFamilyMap();
      break;
    default:
      throw new RuntimeException("Unhandled operation " + opType);
  }
  AuthResult authResult = permissionGranted(opType, user, env, families, Action.READ);
  Region region = getRegion(env);
  TableName table = getTableName(region);
  Map<ByteRange, Integer> cfVsMaxVersions = Maps.newHashMap();
  for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) {
    cfVsMaxVersions.put(new SimpleMutableByteRange(hcd.getName()), hcd.getMaxVersions());
  }
  if (!authResult.isAllowed()) {
    if (!cellFeaturesEnabled || compatibleEarlyTermination) {
      // Old behavior: proceed if the user has a partial (family/qualifier)
      // grant, narrowing results server-side. This can hide data the request
      // nominally asked for (anything rejected by the access control
      // filter) but that's the price of backwards compatibility.
      if (hasFamilyQualifierPermission(user, Action.READ, env, families)) {
        authResult.setAllowed(true);
        authResult.setReason("Access allowed with filter");
        // Only wrap the filter if we are enforcing authorizations
        if (authorizationEnabled) {
          Filter ourFilter = new AccessControlFilter(authManager, user, table,
              AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY, cfVsMaxVersions);
          // wrap any existing filter
          if (filter != null) {
            ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL,
                Lists.newArrayList(ourFilter, filter));
          }
          switch (opType) {
            case GET:
            case EXISTS:
              ((Get) query).setFilter(ourFilter);
              break;
            case SCAN:
              ((Scan) query).setFilter(ourFilter);
              break;
            default:
              throw new RuntimeException("Unhandled operation " + opType);
          }
        }
      }
    } else {
      // New behavior: Any access we might be granted is more fine-grained
      // than whole table or CF. Simply inject a filter and return what is
      // allowed. We will not throw an AccessDeniedException. This is a
      // behavioral change since 0.96.
      authResult.setAllowed(true);
      authResult.setReason("Access allowed with filter");
      // Only wrap the filter if we are enforcing authorizations
      if (authorizationEnabled) {
        Filter ourFilter = new AccessControlFilter(authManager, user, table,
            AccessControlFilter.Strategy.CHECK_CELL_DEFAULT, cfVsMaxVersions);
        // wrap any existing filter
        if (filter != null) {
          ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL,
              Lists.newArrayList(ourFilter, filter));
        }
        switch (opType) {
          case GET:
          case EXISTS:
            ((Get) query).setFilter(ourFilter);
            break;
          case SCAN:
            ((Scan) query).setFilter(ourFilter);
            break;
          default:
            throw new RuntimeException("Unhandled operation " + opType);
        }
      }
    }
  }
  logResult(authResult);
  if (authorizationEnabled && !authResult.isAllowed()) {
    throw new AccessDeniedException("Insufficient permissions for user '"
        + (user != null ? user.getShortName() : "null") + "' (table=" + table + ", action=READ)");
  }
}
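The practical effect of the post-0.96 branch is that a partially-authorized read returns a filtered view instead of failing outright. A sketch of a Scan that would pass through internalPreRead; the table and family names are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FilteredScanExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("f1"));
      // With cell features enabled, a user holding only fine-grained grants
      // sees results narrowed by the injected AccessControlFilter rather
      // than receiving an AccessDeniedException.
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          System.out.println(Bytes.toString(r.getRow()));
        }
      }
    }
  }
}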