Use of org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment in project ranger by apache.
The class RangerAuthorizationCoprocessor, method start.
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  String appType = "unknown";
  if (env instanceof MasterCoprocessorEnvironment) {
    coprocessorType = MASTER_COPROCESSOR_TYPE;
    appType = "hbaseMaster";
  } else if (env instanceof RegionServerCoprocessorEnvironment) {
    coprocessorType = REGIONAL_SERVER_COPROCESSOR_TYPE;
    appType = "hbaseRegional";
  } else if (env instanceof RegionCoprocessorEnvironment) {
    regionEnv = (RegionCoprocessorEnvironment) env;
    coprocessorType = REGIONAL_COPROCESSOR_TYPE;
    appType = "hbaseRegional";
  }
  Configuration conf = env.getConfiguration();
  HbaseFactory.initialize(conf);
  // create and initialize the plugin class
  RangerHBasePlugin plugin = hbasePlugin;
  if (plugin == null) {
    synchronized (RangerAuthorizationCoprocessor.class) {
      plugin = hbasePlugin;
      if (plugin == null) {
        plugin = new RangerHBasePlugin(appType);
        plugin.init();
        UpdateRangerPoliciesOnGrantRevoke = RangerConfiguration.getInstance().getBoolean(
            RangerHadoopConstants.HBASE_UPDATE_RANGER_POLICIES_ON_GRANT_REVOKE_PROP,
            RangerHadoopConstants.HBASE_UPDATE_RANGER_POLICIES_ON_GRANT_REVOKE_DEFAULT_VALUE);
        hbasePlugin = plugin;
      }
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Start of Coprocessor: [" + coprocessorType + "]");
  }
}
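The initialization above is the classic double-checked locking idiom: one cheap unsynchronized read, then a re-check under the class lock, so the plugin is created and initialized exactly once no matter how many coprocessor instances call start. A minimal sketch of the idiom in isolation (PluginHolder is a hypothetical name; the pattern is only safe when the shared field is declared volatile):

class PluginHolder {
  // Must be volatile so a fully initialized instance is published safely.
  private static volatile RangerHBasePlugin hbasePlugin;

  static RangerHBasePlugin get(String appType) {
    RangerHBasePlugin plugin = hbasePlugin; // first read, no lock taken
    if (plugin == null) {
      synchronized (PluginHolder.class) {
        plugin = hbasePlugin; // re-check while holding the lock
        if (plugin == null) {
          plugin = new RangerHBasePlugin(appType);
          plugin.init(); // initialize before publishing
          hbasePlugin = plugin;
        }
      }
    }
    return plugin;
  }
}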
Use of org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment in project phoenix by apache.
The class PhoenixAccessController, method preCreateTable.
@Override
public void preCreateTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
    String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType,
    Set<byte[]> familySet, Set<TableName> indexes) throws IOException {
  if (!accessCheckEnabled) {
    return;
  }
  if (tableType != PTableType.VIEW) {
    final HTableDescriptor htd = new HTableDescriptor(physicalTableName);
    for (byte[] familyName : familySet) {
      htd.addFamily(new HColumnDescriptor(familyName));
    }
    for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
      observer.preCreateTable(new ObserverContext<MasterCoprocessorEnvironment>(), htd, null);
    }
  }
  // Index and view creation require read access on the parent physical table.
  Set<TableName> physicalTablesChecked = new HashSet<TableName>();
  if (tableType == PTableType.VIEW || tableType == PTableType.INDEX) {
    physicalTablesChecked.add(parentPhysicalTableName);
    requireAccess("Create" + tableType, parentPhysicalTableName, Action.READ, Action.EXEC);
  }
  if (tableType == PTableType.VIEW) {
    Action[] requiredActions = { Action.READ, Action.EXEC };
    for (TableName index : indexes) {
      if (!physicalTablesChecked.add(index)) {
        // Skip re-checking the same physical table, e.g. the shared view index table.
        continue;
      }
      User user = getActiveUser();
      List<UserPermission> permissionForUser =
          getPermissionForUser(getUserPermissions(index), Bytes.toBytes(user.getShortName()));
      Set<Action> requireAccess = new HashSet<>();
      Set<Action> accessExists = new HashSet<>();
      if (permissionForUser != null) {
        for (UserPermission userPermission : permissionForUser) {
          for (Action action : Arrays.asList(requiredActions)) {
            if (!userPermission.implies(action)) {
              requireAccess.add(action);
            }
          }
        }
        if (!requireAccess.isEmpty()) {
          for (UserPermission userPermission : permissionForUser) {
            accessExists.addAll(Arrays.asList(userPermission.getActions()));
          }
        }
      } else {
        requireAccess.addAll(Arrays.asList(requiredActions));
      }
      if (!requireAccess.isEmpty()) {
        byte[] indexPhysicalTable = index.getName();
        handleRequireAccessOnDependentTable("Create" + tableType, user.getName(),
            TableName.valueOf(indexPhysicalTable), tableName, requireAccess, accessExists);
      }
    }
  }
  if (tableType == PTableType.INDEX) {
    // Skip the check for local indexes (same physical table as the parent) and view index tables.
    if (physicalTableName != null && !parentPhysicalTableName.equals(physicalTableName)
        && !MetaDataUtil.isViewIndex(physicalTableName.getNameAsString())) {
      authorizeOrGrantAccessToUsers("Create" + tableType, parentPhysicalTableName,
          Arrays.asList(Action.READ, Action.WRITE, Action.CREATE, Action.EXEC, Action.ADMIN),
          physicalTableName);
    }
  }
}
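The inner loop flags a required action unless every one of the user's existing grants implies it; only when the resulting set is non-empty does the code collect the existing actions and call handleRequireAccessOnDependentTable. A hedged distillation of that diff logic (missingActions is our hypothetical name, not a Phoenix method; it reuses the same UserPermission.implies call as the loop above):

// Returns the required actions not implied by all of the user's permissions.
// A null permission list means the user has no grants, so everything is missing.
static Set<Action> missingActions(List<UserPermission> perms, Action... required) {
  Set<Action> missing = new HashSet<>();
  if (perms == null) {
    missing.addAll(Arrays.asList(required));
    return missing;
  }
  for (UserPermission perm : perms) {
    for (Action action : required) {
      if (!perm.implies(action)) {
        missing.add(action); // mirrors the loop above: any non-implying grant flags the action
      }
    }
  }
  return missing;
}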
Use of org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment in project hbase by apache.
The class AccessController, method start.
/* ---- MasterObserver implementation ---- */
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  CompoundConfiguration conf = new CompoundConfiguration();
  conf.add(env.getConfiguration());
  authorizationEnabled = AccessChecker.isAuthorizationSupported(conf);
  if (!authorizationEnabled) {
    LOG.warn("AccessController has been loaded with authorization checks DISABLED!");
  }
  shouldCheckExecPermission = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY,
      AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
  cellFeaturesEnabled = (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS);
  if (!cellFeaturesEnabled) {
    LOG.info("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
        + " is required to persist cell ACLs. Consider setting " + HFile.FORMAT_VERSION_KEY
        + " accordingly.");
  }
  if (env instanceof MasterCoprocessorEnvironment) {
    // if running on HMaster
    MasterCoprocessorEnvironment mEnv = (MasterCoprocessorEnvironment) env;
    if (mEnv instanceof HasMasterServices) {
      MasterServices masterServices = ((HasMasterServices) mEnv).getMasterServices();
      zkPermissionWatcher = masterServices.getZKPermissionWatcher();
      accessChecker = masterServices.getAccessChecker();
    }
  } else if (env instanceof RegionServerCoprocessorEnvironment) {
    RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env;
    if (rsEnv instanceof HasRegionServerServices) {
      RegionServerServices rsServices = ((HasRegionServerServices) rsEnv).getRegionServerServices();
      zkPermissionWatcher = rsServices.getZKPermissionWatcher();
      accessChecker = rsServices.getAccessChecker();
    }
  } else if (env instanceof RegionCoprocessorEnvironment) {
    // if running at region
    regionEnv = (RegionCoprocessorEnvironment) env;
    conf.addBytesMap(regionEnv.getRegion().getTableDescriptor().getValues());
    compatibleEarlyTermination = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT,
        AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
    if (regionEnv instanceof HasRegionServerServices) {
      RegionServerServices rsServices = ((HasRegionServerServices) regionEnv).getRegionServerServices();
      zkPermissionWatcher = rsServices.getZKPermissionWatcher();
      accessChecker = rsServices.getAccessChecker();
    }
  }
  Preconditions.checkState(zkPermissionWatcher != null, "ZKPermissionWatcher is null");
  Preconditions.checkState(accessChecker != null, "AccessChecker is null");
  // set the user provider
  this.userProvider = UserProvider.instantiate(env.getConfiguration());
  tableAcls = new MapMaker().weakValues().makeMap();
}
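None of this wiring matters unless authorization is enabled, and cell ACLs additionally need HFile format version 3 (HFile.MIN_FORMAT_VERSION_WITH_TAGS). A minimal sketch of the relevant settings expressed programmatically, assuming the standard HBase property names; in practice these live in hbase-site.xml:

Configuration conf = HBaseConfiguration.create();
// Turn on authorization checks cluster-wide.
conf.setBoolean("hbase.security.authorization", true);
// HFile v3 persists cells with tags, which is how cell ACLs are stored.
conf.setInt("hfile.format.version", 3);
// Load the AccessController in all three environments handled by start().
conf.set("hbase.coprocessor.master.classes",
    "org.apache.hadoop.hbase.security.access.AccessController");
conf.set("hbase.coprocessor.regionserver.classes",
    "org.apache.hadoop.hbase.security.access.AccessController");
conf.set("hbase.coprocessor.region.classes",
    "org.apache.hadoop.hbase.security.access.AccessController");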
Use of org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment in project hbase by apache.
The class SnapshotScannerHDFSAclController, method postGrant.
@Override
public void postGrant(ObserverContext<MasterCoprocessorEnvironment> c, UserPermission userPermission,
    boolean mergeExistingPermissions) throws IOException {
  if (!checkInitialized(
      "grant " + userPermission + ", merge existing permissions " + mergeExistingPermissions)) {
    return;
  }
  try (Table aclTable = c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
    Configuration conf = c.getEnvironment().getConfiguration();
    String userName = userPermission.getUser();
    switch (userPermission.getAccessScope()) {
      case GLOBAL:
        UserPermission perm = getUserGlobalPermission(conf, userName);
        if (perm != null && hdfsAclHelper.containReadAction(perm)) {
          if (!isHdfsAclSet(aclTable, userName)) {
            // 1. Get the namespaces and tables whose acls are already synced for this global user
            Pair<Set<String>, Set<TableName>> skipNamespaceAndTables =
                SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName);
            Set<String> skipNamespaces = skipNamespaceAndTables.getFirst();
            Set<TableName> skipTables = skipNamespaceAndTables.getSecond().stream()
                .filter(t -> !skipNamespaces.contains(t.getNamespaceAsString()))
                .collect(Collectors.toSet());
            // 2. Add HDFS acls (skip namespace and table directories whose acl is already set)
            hdfsAclHelper.grantAcl(userPermission, skipNamespaces, skipTables);
            // 3. Record that the global acl is synced to HDFS
            SnapshotScannerHDFSAclStorage.addUserGlobalHdfsAcl(aclTable, userName);
          }
        } else {
          // The merged user permission doesn't contain READ, so remove the user's global HDFS acls
          // if they are set
          removeUserGlobalHdfsAcl(aclTable, userName, userPermission);
        }
        break;
      case NAMESPACE:
        String namespace = ((NamespacePermission) userPermission.getPermission()).getNamespace();
        UserPermission nsPerm = getUserNamespacePermission(conf, userName, namespace);
        if (nsPerm != null && hdfsAclHelper.containReadAction(nsPerm)) {
          if (!isHdfsAclSet(aclTable, userName, namespace)) {
            // 1. Get the tables whose acls are already synced for this namespace user
            Set<TableName> skipTables =
                SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName).getSecond();
            // 2. Add HDFS acls (skip table directories whose acl is already set)
            hdfsAclHelper.grantAcl(userPermission, new HashSet<>(0), skipTables);
          }
          // 3. Record that the namespace acl is synced to HDFS
          SnapshotScannerHDFSAclStorage.addUserNamespaceHdfsAcl(aclTable, userName, namespace);
        } else {
          // The merged user permission doesn't contain READ, so remove the user's namespace HDFS acls
          // if they are set
          removeUserNamespaceHdfsAcl(aclTable, userName, namespace, userPermission);
        }
        break;
      case TABLE:
        TablePermission tablePerm = (TablePermission) userPermission.getPermission();
        if (needHandleTableHdfsAcl(tablePerm)) {
          TableName tableName = tablePerm.getTableName();
          UserPermission tPerm = getUserTablePermission(conf, userName, tableName);
          if (tPerm != null && hdfsAclHelper.containReadAction(tPerm)) {
            if (!isHdfsAclSet(aclTable, userName, tableName)) {
              // 1. Create the table directories
              hdfsAclHelper.createTableDirectories(tableName);
              // 2. Add HDFS acls
              hdfsAclHelper.grantAcl(userPermission, new HashSet<>(0), new HashSet<>(0));
            }
            // 3. Record that the table acl is synced to HDFS
            SnapshotScannerHDFSAclStorage.addUserTableHdfsAcl(aclTable, userName, tableName);
          } else {
            // The merged user permission doesn't contain READ, so remove the user's table HDFS acls
            // if they are set
            removeUserTableHdfsAcl(aclTable, userName, tableName, userPermission);
          }
        }
        break;
      default:
        throw new IllegalArgumentException("Illegal user permission scope " + userPermission.getAccessScope());
    }
  }
}
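Each scope follows the same shape: if the merged permission still implies READ, sync HDFS ACLs (skipping directories already covered) and record the sync in the acl table; otherwise remove any previously synced ACLs. As a hedged illustration of what triggers the hook, a namespace-scoped grant through the client API ("ns1" and "alice" are placeholder values, and AccessControlClient.grant declares throws Throwable):

try (Connection conn = ConnectionFactory.createConnection(conf)) {
  // Once the permission is persisted to hbase:acl, the master fires postGrant.
  AccessControlClient.grant(conn, "ns1", "alice", Permission.Action.READ);
} catch (Throwable t) {
  throw new IOException("grant failed", t);
}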
Use of org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment in project hbase by apache.
The class JMXListener, method start.
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  int rmiRegistryPort = -1;
  int rmiConnectorPort = -1;
  Configuration conf = env.getConfiguration();
  if (env instanceof MasterCoprocessorEnvironment) {
    // running on Master
    rmiRegistryPort = conf.getInt("master" + RMI_REGISTRY_PORT_CONF_KEY, defMasterRMIRegistryPort);
    rmiConnectorPort = conf.getInt("master" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort);
    LOG.info("Master rmiRegistryPort:" + rmiRegistryPort + ",Master rmiConnectorPort:" + rmiConnectorPort);
  } else if (env instanceof RegionServerCoprocessorEnvironment) {
    // running on RegionServer
    rmiRegistryPort =
        conf.getInt("regionserver" + RMI_REGISTRY_PORT_CONF_KEY, defRegionserverRMIRegistryPort);
    rmiConnectorPort = conf.getInt("regionserver" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort);
    LOG.info("RegionServer rmiRegistryPort:" + rmiRegistryPort
        + ",RegionServer rmiConnectorPort:" + rmiConnectorPort);
  } else if (env instanceof RegionCoprocessorEnvironment) {
    LOG.error("JMXListener should not be loaded in Region Environment!");
    return;
  }
  synchronized (JMXListener.class) {
    if (JMX_CS != null) {
      LOG.info("JMXListener has been started at Registry port " + rmiRegistryPort);
    } else {
      startConnectorServer(rmiRegistryPort, rmiConnectorPort);
    }
  }
}
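The lookup concatenates a per-process prefix ("master" or "regionserver") with the shared key suffixes, so the effective property names should be master.rmi.registry.port, master.rmi.connector.port, and their regionserver counterparts; the connector port falls back to the registry port when unset. A minimal sketch of enabling the listener on a master (the exact key names are assumptions tied to the suffix constants above; 10101 is the master default suggested by the HBase reference guide):

Configuration conf = HBaseConfiguration.create();
// Register the JMXListener coprocessor on the master process.
conf.set("hbase.coprocessor.master.classes", "org.apache.hadoop.hbase.JMXListener");
conf.setInt("master.rmi.registry.port", 10101);
conf.setInt("master.rmi.connector.port", 10101); // defaults to the registry port if omitted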