
Example 1 with MasterCoprocessorEnvironment

Use of org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment in project ranger by apache.

The class RangerAuthorizationCoprocessor, method start.

@Override
public void start(CoprocessorEnvironment env) throws IOException {
    String appType = "unknown";
    if (env instanceof MasterCoprocessorEnvironment) {
        coprocessorType = MASTER_COPROCESSOR_TYPE;
        appType = "hbaseMaster";
    } else if (env instanceof RegionServerCoprocessorEnvironment) {
        coprocessorType = REGIONAL_SERVER_COPROCESSOR_TYPE;
        appType = "hbaseRegional";
    } else if (env instanceof RegionCoprocessorEnvironment) {
        regionEnv = (RegionCoprocessorEnvironment) env;
        coprocessorType = REGIONAL_COPROCESSOR_TYPE;
        appType = "hbaseRegional";
    }
    Configuration conf = env.getConfiguration();
    HbaseFactory.initialize(conf);
    // create and initialize the plugin class
    RangerHBasePlugin plugin = hbasePlugin;
    if (plugin == null) {
        synchronized (RangerAuthorizationCoprocessor.class) {
            plugin = hbasePlugin;
            if (plugin == null) {
                plugin = new RangerHBasePlugin(appType);
                plugin.init();
                UpdateRangerPoliciesOnGrantRevoke = RangerConfiguration.getInstance().getBoolean(RangerHadoopConstants.HBASE_UPDATE_RANGER_POLICIES_ON_GRANT_REVOKE_PROP, RangerHadoopConstants.HBASE_UPDATE_RANGER_POLICIES_ON_GRANT_REVOKE_DEFAULT_VALUE);
                hbasePlugin = plugin;
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Start of Coprocessor: [" + coprocessorType + "]");
    }
}
Also used : RegionServerCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Configuration(org.apache.hadoop.conf.Configuration) RangerConfiguration(org.apache.ranger.authorization.hadoop.config.RangerConfiguration) MasterCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment)
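
Below, a hedged sketch (not taken from Ranger) of the environment-detection pattern this example relies on: start() receives a CoprocessorEnvironment, and its concrete subtype tells the coprocessor whether it was loaded on the HMaster, on a RegionServer, or per region. The class name EnvAwareCoprocessor and the appType strings are illustrative.

import java.io.IOException;

import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;

public class EnvAwareCoprocessor implements Coprocessor {
    private String appType = "unknown";

    @Override
    public void start(CoprocessorEnvironment env) throws IOException {
        // The runtime subtype of env is the only signal for where we were loaded.
        if (env instanceof MasterCoprocessorEnvironment) {
            appType = "hbaseMaster";
        } else if (env instanceof RegionServerCoprocessorEnvironment) {
            appType = "hbaseRegionServer";
        } else if (env instanceof RegionCoprocessorEnvironment) {
            appType = "hbaseRegional";
        }
    }

    @Override
    public void stop(CoprocessorEnvironment env) throws IOException {
        // Nothing to release in this sketch.
    }
}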

Example 2 with MasterCoprocessorEnvironment

Use of org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment in project phoenix by apache.

The class PhoenixAccessController, method preCreateTable.

@Override
public void preCreateTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId, String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType, Set<byte[]> familySet, Set<TableName> indexes) throws IOException {
    if (!accessCheckEnabled) {
        return;
    }
    if (tableType != PTableType.VIEW) {
        final HTableDescriptor htd = new HTableDescriptor(physicalTableName);
        for (byte[] familyName : familySet) {
            htd.addFamily(new HColumnDescriptor(familyName));
        }
        for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
            observer.preCreateTable(new ObserverContext<MasterCoprocessorEnvironment>(), htd, null);
        }
    }
    // Index and view require read access on parent physical table.
    Set<TableName> physicalTablesChecked = new HashSet<TableName>();
    if (tableType == PTableType.VIEW || tableType == PTableType.INDEX) {
        physicalTablesChecked.add(parentPhysicalTableName);
        requireAccess("Create" + tableType, parentPhysicalTableName, Action.READ, Action.EXEC);
    }
    if (tableType == PTableType.VIEW) {
        Action[] requiredActions = { Action.READ, Action.EXEC };
        for (TableName index : indexes) {
            if (!physicalTablesChecked.add(index)) {
                // Skip a physical table we have already checked (the same physical table can appear multiple times, e.g. as a view index table)
                continue;
            }
            User user = getActiveUser();
            List<UserPermission> permissionForUser = getPermissionForUser(getUserPermissions(index), Bytes.toBytes(user.getShortName()));
            Set<Action> requireAccess = new HashSet<>();
            Set<Action> accessExists = new HashSet<>();
            if (permissionForUser != null) {
                for (UserPermission userPermission : permissionForUser) {
                    for (Action action : Arrays.asList(requiredActions)) {
                        if (!userPermission.implies(action)) {
                            requireAccess.add(action);
                        }
                    }
                }
                if (!requireAccess.isEmpty()) {
                    for (UserPermission userPermission : permissionForUser) {
                        accessExists.addAll(Arrays.asList(userPermission.getActions()));
                    }
                }
            } else {
                requireAccess.addAll(Arrays.asList(requiredActions));
            }
            if (!requireAccess.isEmpty()) {
                byte[] indexPhysicalTable = index.getName();
                handleRequireAccessOnDependentTable("Create" + tableType, user.getName(), TableName.valueOf(indexPhysicalTable), tableName, requireAccess, accessExists);
            }
        }
    }
    if (tableType == PTableType.INDEX) {
        // skip check for local index
        if (physicalTableName != null && !parentPhysicalTableName.equals(physicalTableName) && !MetaDataUtil.isViewIndex(physicalTableName.getNameAsString())) {
            authorizeOrGrantAccessToUsers("Create" + tableType, parentPhysicalTableName, Arrays.asList(Action.READ, Action.WRITE, Action.CREATE, Action.EXEC, Action.ADMIN), physicalTableName);
        }
    }
}
Also used : PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) Action(org.apache.hadoop.hbase.security.access.Permission.Action) User(org.apache.hadoop.hbase.security.User) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) BaseMasterAndRegionObserver(org.apache.hadoop.hbase.coprocessor.BaseMasterAndRegionObserver) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) MasterCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment) HashSet(java.util.HashSet) UserPermission(org.apache.hadoop.hbase.security.access.UserPermission)
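
The core of this example is the required-versus-granted diffing: an action ends up in requireAccess when at least one of the user's permission entries does not imply it. A self-contained sketch of that computation with plain JDK collections follows; the enum and the method name missingActions are stand-ins, not Phoenix or HBase APIs.

import java.util.EnumSet;
import java.util.List;
import java.util.Set;

public class PermissionDiff {
    // Stand-in for org.apache.hadoop.hbase.security.access.Permission.Action.
    enum Action { READ, WRITE, EXEC }

    // Mirrors the loop in preCreateTable: an action is "missing" as soon as any
    // single permission entry fails to grant it, even if another entry does.
    static Set<Action> missingActions(Set<Action> requiredActions,
                                      List<Set<Action>> permissionEntries) {
        Set<Action> missing = EnumSet.noneOf(Action.class);
        if (permissionEntries == null || permissionEntries.isEmpty()) {
            // No grants found for the user: everything required is missing.
            missing.addAll(requiredActions);
            return missing;
        }
        for (Set<Action> granted : permissionEntries) {
            for (Action action : requiredActions) {
                if (!granted.contains(action)) {
                    missing.add(action);
                }
            }
        }
        return missing;
    }
}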

Example 3 with MasterCoprocessorEnvironment

Use of org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment in project hbase by apache.

The class AccessController, method start.

/* ---- MasterObserver implementation ---- */
@Override
public void start(CoprocessorEnvironment env) throws IOException {
    CompoundConfiguration conf = new CompoundConfiguration();
    conf.add(env.getConfiguration());
    authorizationEnabled = AccessChecker.isAuthorizationSupported(conf);
    if (!authorizationEnabled) {
        LOG.warn("AccessController has been loaded with authorization checks DISABLED!");
    }
    shouldCheckExecPermission = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
    cellFeaturesEnabled = (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS);
    if (!cellFeaturesEnabled) {
        LOG.info("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS + " is required to persist cell ACLs. Consider setting " + HFile.FORMAT_VERSION_KEY + " accordingly.");
    }
    if (env instanceof MasterCoprocessorEnvironment) {
        // if running on HMaster
        MasterCoprocessorEnvironment mEnv = (MasterCoprocessorEnvironment) env;
        if (mEnv instanceof HasMasterServices) {
            MasterServices masterServices = ((HasMasterServices) mEnv).getMasterServices();
            zkPermissionWatcher = masterServices.getZKPermissionWatcher();
            accessChecker = masterServices.getAccessChecker();
        }
    } else if (env instanceof RegionServerCoprocessorEnvironment) {
        RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env;
        if (rsEnv instanceof HasRegionServerServices) {
            RegionServerServices rsServices = ((HasRegionServerServices) rsEnv).getRegionServerServices();
            zkPermissionWatcher = rsServices.getZKPermissionWatcher();
            accessChecker = rsServices.getAccessChecker();
        }
    } else if (env instanceof RegionCoprocessorEnvironment) {
        // if running at region
        regionEnv = (RegionCoprocessorEnvironment) env;
        conf.addBytesMap(regionEnv.getRegion().getTableDescriptor().getValues());
        compatibleEarlyTermination = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
        if (regionEnv instanceof HasRegionServerServices) {
            RegionServerServices rsServices = ((HasRegionServerServices) regionEnv).getRegionServerServices();
            zkPermissionWatcher = rsServices.getZKPermissionWatcher();
            accessChecker = rsServices.getAccessChecker();
        }
    }
    Preconditions.checkState(zkPermissionWatcher != null, "ZKPermissionWatcher is null");
    Preconditions.checkState(accessChecker != null, "AccessChecker is null");
    // set the user-provider.
    this.userProvider = UserProvider.instantiate(env.getConfiguration());
    tableAcls = new MapMaker().weakValues().makeMap();
}
Also used : HasMasterServices(org.apache.hadoop.hbase.coprocessor.HasMasterServices) RegionServerCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) HasRegionServerServices(org.apache.hadoop.hbase.coprocessor.HasRegionServerServices) RegionServerServices(org.apache.hadoop.hbase.regionserver.RegionServerServices) MapMaker(org.apache.hbase.thirdparty.com.google.common.collect.MapMaker) CompoundConfiguration(org.apache.hadoop.hbase.CompoundConfiguration) MasterCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment) MasterServices(org.apache.hadoop.hbase.master.MasterServices)
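
Worth isolating from this start() is the CompoundConfiguration layering: the environment's configuration is added first, and at region scope the table descriptor's values are layered on top, so per-table attributes shadow site-wide settings. A minimal sketch follows; CompoundConfiguration is an HBase-internal class, so treat this as illustrative rather than public API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.access.AccessControlConstants;

public class LayeredConfSketch {
    public static void main(String[] args) {
        Configuration site = HBaseConfiguration.create(); // hbase-site.xml plus defaults
        CompoundConfiguration conf = new CompoundConfiguration();
        conf.add(site); // base layer, as in start() above
        // At region scope, AccessController additionally layers table attributes:
        // conf.addBytesMap(regionEnv.getRegion().getTableDescriptor().getValues());
        boolean earlyOut = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT,
                AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
        System.out.println("early-out enabled: " + earlyOut);
    }
}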

Example 4 with MasterCoprocessorEnvironment

Use of org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment in project hbase by apache.

The class SnapshotScannerHDFSAclController, method postGrant.

@Override
public void postGrant(ObserverContext<MasterCoprocessorEnvironment> c, UserPermission userPermission, boolean mergeExistingPermissions) throws IOException {
    if (!checkInitialized("grant " + userPermission + ", merge existing permissions " + mergeExistingPermissions)) {
        return;
    }
    try (Table aclTable = c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
        Configuration conf = c.getEnvironment().getConfiguration();
        String userName = userPermission.getUser();
        switch(userPermission.getAccessScope()) {
            case GLOBAL:
                UserPermission perm = getUserGlobalPermission(conf, userName);
                if (perm != null && hdfsAclHelper.containReadAction(perm)) {
                    if (!isHdfsAclSet(aclTable, userName)) {
                        // 1. Get namespaces and tables for which the user's global acls are already synced
                        Pair<Set<String>, Set<TableName>> skipNamespaceAndTables = SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName);
                        Set<String> skipNamespaces = skipNamespaceAndTables.getFirst();
                        Set<TableName> skipTables = skipNamespaceAndTables.getSecond().stream().filter(t -> !skipNamespaces.contains(t.getNamespaceAsString())).collect(Collectors.toSet());
                        // 2. Add HDFS acls (skip namespace and table directories whose acls are already set)
                        hdfsAclHelper.grantAcl(userPermission, skipNamespaces, skipTables);
                        // 3. Record that the global acl is synced to HDFS
                        SnapshotScannerHDFSAclStorage.addUserGlobalHdfsAcl(aclTable, userName);
                    }
                } else {
                    // The merged user permission doesn't contain READ, so remove user global HDFS acls if
                    // it's set
                    removeUserGlobalHdfsAcl(aclTable, userName, userPermission);
                }
                break;
            case NAMESPACE:
                String namespace = ((NamespacePermission) userPermission.getPermission()).getNamespace();
                UserPermission nsPerm = getUserNamespacePermission(conf, userName, namespace);
                if (nsPerm != null && hdfsAclHelper.containReadAction(nsPerm)) {
                    if (!isHdfsAclSet(aclTable, userName, namespace)) {
                        // 1. Get tables for which the user's namespace acls are already synced
                        Set<TableName> skipTables = SnapshotScannerHDFSAclStorage.getUserNamespaceAndTable(aclTable, userName).getSecond();
                        // 2. Add HDFS acls (skip table directories whose acls are already set)
                        hdfsAclHelper.grantAcl(userPermission, new HashSet<>(0), skipTables);
                    }
                    // 3. Record namespace acl is synced to HDFS
                    SnapshotScannerHDFSAclStorage.addUserNamespaceHdfsAcl(aclTable, userName, namespace);
                } else {
                    // The merged user permission doesn't contain READ, so remove user namespace HDFS acls
                    // if it's set
                    removeUserNamespaceHdfsAcl(aclTable, userName, namespace, userPermission);
                }
                break;
            case TABLE:
                TablePermission tablePerm = (TablePermission) userPermission.getPermission();
                if (needHandleTableHdfsAcl(tablePerm)) {
                    TableName tableName = tablePerm.getTableName();
                    UserPermission tPerm = getUserTablePermission(conf, userName, tableName);
                    if (tPerm != null && hdfsAclHelper.containReadAction(tPerm)) {
                        if (!isHdfsAclSet(aclTable, userName, tableName)) {
                            // 1. create table dirs
                            hdfsAclHelper.createTableDirectories(tableName);
                            // 2. Add HDFS acl
                            hdfsAclHelper.grantAcl(userPermission, new HashSet<>(0), new HashSet<>(0));
                        }
                        // 3. Record table acl is synced to HDFS
                        SnapshotScannerHDFSAclStorage.addUserTableHdfsAcl(aclTable, userName, tableName);
                    } else {
                        // The merged user permission doesn't contain READ, so remove user table HDFS acls if
                        // it's set
                        removeUserTableHdfsAcl(aclTable, userName, tableName, userPermission);
                    }
                }
                break;
            default:
                throw new IllegalArgumentException("Illegal user permission scope " + userPermission.getAccessScope());
        }
    }
}
Also used : Arrays(java.util.Arrays) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) UserProvider(org.apache.hadoop.hbase.security.UserProvider) Result(org.apache.hadoop.hbase.client.Result) LoggerFactory(org.slf4j.LoggerFactory) NamespaceDescriptor(org.apache.hadoop.hbase.NamespaceDescriptor) ArrayList(java.util.ArrayList) User(org.apache.hadoop.hbase.security.User) HashSet(java.util.HashSet) MasterCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment) Delete(org.apache.hadoop.hbase.client.Delete) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) Configuration(org.apache.hadoop.conf.Configuration) CoreCoprocessor(org.apache.hadoop.hbase.coprocessor.CoreCoprocessor) HasMasterServices(org.apache.hadoop.hbase.coprocessor.HasMasterServices) MasterServices(org.apache.hadoop.hbase.master.MasterServices) Path(org.apache.hadoop.fs.Path) Cell(org.apache.hadoop.hbase.Cell) Bytes(org.apache.hadoop.hbase.util.Bytes) Pair(org.apache.hadoop.hbase.util.Pair) TableName(org.apache.hadoop.hbase.TableName) Logger(org.slf4j.Logger) Put(org.apache.hadoop.hbase.client.Put) Get(org.apache.hadoop.hbase.client.Get) Set(java.util.Set) PathHelper(org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclHelper.PathHelper) Sets(org.apache.hbase.thirdparty.com.google.common.collect.Sets) IOException(java.io.IOException) HBaseInterfaceAudience(org.apache.hadoop.hbase.HBaseInterfaceAudience) Collectors(java.util.stream.Collectors) Scan(org.apache.hadoop.hbase.client.Scan) CellUtil(org.apache.hadoop.hbase.CellUtil) MasterObserver(org.apache.hadoop.hbase.coprocessor.MasterObserver) List(java.util.List) SnapshotDescription(org.apache.hadoop.hbase.client.SnapshotDescription) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) Admin(org.apache.hadoop.hbase.client.Admin) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) MasterCoprocessor(org.apache.hadoop.hbase.coprocessor.MasterCoprocessor) Connection(org.apache.hadoop.hbase.client.Connection) ObserverContext(org.apache.hadoop.hbase.coprocessor.ObserverContext) Optional(java.util.Optional) Table(org.apache.hadoop.hbase.client.Table) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)
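
Across all three scopes the branch structure is the same: if the merged permission still implies READ, grant the HDFS acls (skipping directories already covered) and record the sync; otherwise remove whatever was recorded. A scope-agnostic reduction of that decision is sketched below; the class and method names are illustrative, and implies(READ) stands in for the snippet's hdfsAclHelper.containReadAction check.

import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class HdfsAclSyncDecision {
    // True: (re)grant HDFS acls and record the sync; false: remove recorded acls.
    static boolean shouldGrantHdfsAcl(UserPermission merged) {
        return merged != null
                && merged.getPermission().implies(Permission.Action.READ);
    }
}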

Example 5 with MasterCoprocessorEnvironment

Use of org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment in project hbase by apache.

The class JMXListener, method start.

@Override
public void start(CoprocessorEnvironment env) throws IOException {
    int rmiRegistryPort = -1;
    int rmiConnectorPort = -1;
    Configuration conf = env.getConfiguration();
    if (env instanceof MasterCoprocessorEnvironment) {
        // running on Master
        rmiRegistryPort = conf.getInt("master" + RMI_REGISTRY_PORT_CONF_KEY, defMasterRMIRegistryPort);
        rmiConnectorPort = conf.getInt("master" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort);
        LOG.info("Master rmiRegistryPort:" + rmiRegistryPort + ",Master rmiConnectorPort:" + rmiConnectorPort);
    } else if (env instanceof RegionServerCoprocessorEnvironment) {
        // running on RegionServer
        rmiRegistryPort = conf.getInt("regionserver" + RMI_REGISTRY_PORT_CONF_KEY, defRegionserverRMIRegistryPort);
        rmiConnectorPort = conf.getInt("regionserver" + RMI_CONNECTOR_PORT_CONF_KEY, rmiRegistryPort);
        LOG.info("RegionServer rmiRegistryPort:" + rmiRegistryPort + ",RegionServer rmiConnectorPort:" + rmiConnectorPort);
    } else if (env instanceof RegionCoprocessorEnvironment) {
        LOG.error("JMXListener should not be loaded in Region Environment!");
        return;
    }
    synchronized (JMXListener.class) {
        if (JMX_CS != null) {
            LOG.info("JMXListener has been started at Registry port " + rmiRegistryPort);
        } else {
            startConnectorServer(rmiRegistryPort, rmiConnectorPort);
        }
    }
}
Also used : RegionServerCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Configuration(org.apache.hadoop.conf.Configuration) MasterCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment)
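
As a usage sketch: JMXListener derives its configuration keys by prefixing "master" or "regionserver" onto RMI_REGISTRY_PORT_CONF_KEY and RMI_CONNECTOR_PORT_CONF_KEY, and the connector port falls back to the registry port when unset. Enabling it on the master might look like the following; the port value is an example, and the exact key strings are version-dependent, so check the JMXListener constants for your release.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.JMXListener;

public class JmxListenerSetup {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Register the coprocessor on the master.
        conf.set("hbase.coprocessor.master.classes", JMXListener.class.getName());
        // Resolves to "master.rmi.registry.port", matching the lookup in start().
        conf.setInt("master" + JMXListener.RMI_REGISTRY_PORT_CONF_KEY, 10101);
        // The connector port defaults to the registry port when not set explicitly.
    }
}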

Aggregations

MasterCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment): 9 uses
HasMasterServices (org.apache.hadoop.hbase.coprocessor.HasMasterServices): 4 uses
RegionServerCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment): 4 uses
Configuration (org.apache.hadoop.conf.Configuration): 3 uses
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 3 uses
IOException (java.io.IOException): 2 uses
HashSet (java.util.HashSet): 2 uses
TableName (org.apache.hadoop.hbase.TableName): 2 uses
MasterServices (org.apache.hadoop.hbase.master.MasterServices): 2 uses
User (org.apache.hadoop.hbase.security.User): 2 uses
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 1 use
ArrayList (java.util.ArrayList): 1 use
Arrays (java.util.Arrays): 1 use
List (java.util.List): 1 use
Optional (java.util.Optional): 1 use
Set (java.util.Set): 1 use
Collectors (java.util.stream.Collectors): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
Cell (org.apache.hadoop.hbase.Cell): 1 use
CellUtil (org.apache.hadoop.hbase.CellUtil): 1 use