
Example 6 with RegionServerCoprocessorHost

Use of org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost in project hbase by apache, from the class TestScanEarlyTermination, method setupBeforeClass:

@BeforeClass
public static void setupBeforeClass() throws Exception {
    // setup configuration
    conf = TEST_UTIL.getConfiguration();
    conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
    // Enable security
    enableSecurity(conf);
    // Verify enableSecurity sets up what we require
    verifyConfiguration(conf);
    TEST_UTIL.startMiniCluster();
    MasterCoprocessorHost cpHost = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
    cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
    AccessController ac = (AccessController) cpHost.findCoprocessor(AccessController.class.getName());
    cpHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf);
    RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost();
    rsHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf);
    // Wait for the ACL table to become available
    TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
    // create a set of test users
    USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
    USER_OTHER = User.createUserForTesting(conf, "other", new String[0]);
}
Also used : MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) RegionServerCoprocessorHost(org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost) BeforeClass(org.junit.BeforeClass)
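
After the AccessController has been wired into both the master and region server coprocessor hosts, a test of this kind usually grants table or column-family permissions before issuing scans. The snippet below is only a sketch of that next step, not code from TestScanEarlyTermination itself: the table and family names are illustrative, and it assumes the SecureTestUtil.grantOnTable helper from the HBase access-control tests plus the usual imports (TableName, Bytes, Permission).

// Illustrative follow-up: grant READ on one column family of a hypothetical test table
// to USER_OTHER, so a scan by that user terminates early on the unauthorized family.
TableName table = TableName.valueOf("testEarlyTermination");   // hypothetical table name
SecureTestUtil.grantOnTable(TEST_UTIL, USER_OTHER.getShortName(), table,
    Bytes.toBytes("f1"), null, Permission.Action.READ);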

Example 7 with RegionServerCoprocessorHost

Use of org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost in project hbase by apache, from the class TestAccessController3, method setupBeforeClass:

@BeforeClass
public static void setupBeforeClass() throws Exception {
    // setup configuration
    conf = TEST_UTIL.getConfiguration();
    // Enable security
    enableSecurity(conf);
    String accessControllerClassName = FaultyAccessController.class.getName();
    // In this particular test case, we can't use SecureBulkLoadEndpoint because its doAs will fail
    // to move a file for a random user
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, accessControllerClassName);
    // Verify enableSecurity sets up what we require
    verifyConfiguration(conf);
    // Enable EXEC permission checking
    conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true);
    TEST_UTIL.startMiniCluster();
    MasterCoprocessorHost cpHost = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
    cpHost.load(FaultyAccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
    ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor(accessControllerClassName);
    CP_ENV = cpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf);
    RegionServerCoprocessorHost rsHost;
    // The region server may not have finished initializing yet, so poll until its
    // coprocessor host becomes available.
    do {
        rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost();
    } while (rsHost == null);
    RSCP_ENV = rsHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf);
    // Wait for the ACL table to become available
    TEST_UTIL.waitUntilAllRegionsAssigned(AccessControlLists.ACL_TABLE_NAME);
    // create a set of test users
    SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" });
    USER_ADMIN = User.createUserForTesting(conf, "admin2", new String[0]);
    USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);
    USER_RO = User.createUserForTesting(conf, "rouser", new String[0]);
    USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
    USER_CREATE = User.createUserForTesting(conf, "tbl_create", new String[0]);
    USER_NONE = User.createUserForTesting(conf, "nouser", new String[0]);
    USER_ADMIN_CF = User.createUserForTesting(conf, "col_family_admin", new String[0]);
    USER_GROUP_ADMIN = User.createUserForTesting(conf, "user_group_admin", new String[] { GROUP_ADMIN });
    USER_GROUP_CREATE = User.createUserForTesting(conf, "user_group_create", new String[] { GROUP_CREATE });
    USER_GROUP_READ = User.createUserForTesting(conf, "user_group_read", new String[] { GROUP_READ });
    USER_GROUP_WRITE = User.createUserForTesting(conf, "user_group_write", new String[] { GROUP_WRITE });
    systemUserConnection = TEST_UTIL.getConnection();
    setUpTableAndUserPermissions();
}
Also used : MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) RegionServerCoprocessorHost(org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost) BeforeClass(org.junit.BeforeClass)
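
The do/while loop above spins without a timeout while waiting for the region server's coprocessor host. A bounded variant using the Waiter utility built into HBaseTestingUtility would look like the sketch below; this is not the code the test uses, the 30-second timeout is arbitrary, and it assumes an import of org.apache.hadoop.hbase.Waiter.

// Sketch of a bounded alternative to the unbounded spin above: give up after
// 30 seconds (arbitrary) instead of looping forever.
TEST_UTIL.waitFor(30000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
        return TEST_UTIL.getMiniHBaseCluster().getRegionServer(0)
            .getRegionServerCoprocessorHost() != null;
    }
});
RegionServerCoprocessorHost rsHost =
    TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost();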

Example 8 with RegionServerCoprocessorHost

Use of org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost in project hbase by apache, from the class TestCellACLWithMultipleVersions, method setupBeforeClass:

@BeforeClass
public static void setupBeforeClass() throws Exception {
    // setup configuration
    conf = TEST_UTIL.getConfiguration();
    // Enable security
    enableSecurity(conf);
    // Verify enableSecurity sets up what we require
    verifyConfiguration(conf);
    // We expect 0.98 cell ACL semantics
    conf.setBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, false);
    TEST_UTIL.startMiniCluster();
    MasterCoprocessorHost cpHost = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
    cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
    AccessController ac = (AccessController) cpHost.findCoprocessor(AccessController.class.getName());
    cpHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf);
    RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost();
    rsHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf);
    // Wait for the ACL table to become available
    TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
    // create a set of test users
    USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
    USER_OTHER = User.createUserForTesting(conf, "other", new String[0]);
    USER_OTHER2 = User.createUserForTesting(conf, "other2", new String[0]);
    GROUP_USER = User.createUserForTesting(conf, "group_user", new String[] { GROUP });
    usersAndGroups = new String[] { USER_OTHER.getShortName(), AuthUtil.toGroupEntry(GROUP) };
}
Also used : MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) RegionServerCoprocessorHost(org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost) BeforeClass(org.junit.BeforeClass)
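
With CF_ATTRIBUTE_EARLY_OUT disabled, per-cell ACLs are evaluated even when the column-family check would deny access, which is what the 0.98 semantics require. As a rough illustration of how a cell-level ACL is attached (a sketch, not code from this test: the table, family, qualifier, and row names are made up, and the usual imports of Put, Table, TableName, Bytes, Permission, AuthUtil, Map, and HashMap are assumed):

// Illustrative only: attach a per-cell ACL so USER_OTHER and the test group can
// read just this cell, regardless of column-family permissions.
Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
Map<String, Permission> cellAcl = new HashMap<>();
cellAcl.put(USER_OTHER.getShortName(), new Permission(Permission.Action.READ));
cellAcl.put(AuthUtil.toGroupEntry(GROUP), new Permission(Permission.Action.READ));
put.setACL(cellAcl);
try (Table t = TEST_UTIL.getConnection().getTable(TableName.valueOf("testCellACL"))) {
    t.put(put);   // "testCellACL" is a hypothetical table name
}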

Example 9 with RegionServerCoprocessorHost

Use of org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost in project hbase by apache, from the class ReplicationSourceManager, method getReplicationSource:

/**
   * Factory method to create a replication source.
   * @param conf the configuration to use
   * @param fs the file system to use
   * @param manager the replication source manager to use
   * @param replicationQueues the replication queues for this region server
   * @param replicationPeers the replication peers known to this cluster
   * @param server the server object for this region server
   * @param peerId the id of the peer cluster
   * @param clusterId the UUID of this cluster
   * @param peerConfig the configuration of the peer cluster
   * @param replicationPeer the peer to replicate to
   * @return the created source
   * @throws IOException if the replication endpoint cannot be created or initialized
   */
protected ReplicationSourceInterface getReplicationSource(final Configuration conf,
        final FileSystem fs, final ReplicationSourceManager manager,
        final ReplicationQueues replicationQueues, final ReplicationPeers replicationPeers,
        final Server server, final String peerId, final UUID clusterId,
        final ReplicationPeerConfig peerConfig, final ReplicationPeer replicationPeer)
        throws IOException {
    RegionServerCoprocessorHost rsServerHost = null;
    TableDescriptors tableDescriptors = null;
    // Coprocessor hooks and table descriptors are only available when this runs inside a real region server
    if (server instanceof HRegionServer) {
        rsServerHost = ((HRegionServer) server).getRegionServerCoprocessorHost();
        tableDescriptors = ((HRegionServer) server).getTableDescriptors();
    }
    ReplicationSourceInterface src;
    try {
        @SuppressWarnings("rawtypes") Class c = Class.forName(conf.get("replication.replicationsource.implementation", ReplicationSource.class.getCanonicalName()));
        src = (ReplicationSourceInterface) c.newInstance();
    } catch (Exception e) {
        LOG.warn("Passed replication source implementation throws errors, " + "defaulting to ReplicationSource", e);
        src = new ReplicationSource();
    }
    ReplicationEndpoint replicationEndpoint = null;
    try {
        String replicationEndpointImpl = peerConfig.getReplicationEndpointImpl();
        if (replicationEndpointImpl == null) {
            // Default to HBase inter-cluster replication endpoint
            replicationEndpointImpl = HBaseInterClusterReplicationEndpoint.class.getName();
        }
        @SuppressWarnings("rawtypes") Class c = Class.forName(replicationEndpointImpl);
        replicationEndpoint = (ReplicationEndpoint) c.newInstance();
        if (rsServerHost != null) {
            ReplicationEndpoint newReplicationEndPoint = rsServerHost.postCreateReplicationEndPoint(replicationEndpoint);
            if (newReplicationEndPoint != null) {
                // Override the newly created endpoint from the hook with configured end point
                replicationEndpoint = newReplicationEndPoint;
            }
        }
    } catch (Exception e) {
        LOG.warn("Passed replication endpoint implementation throws errors" + " while initializing ReplicationSource for peer: " + peerId, e);
        throw new IOException(e);
    }
    MetricsSource metrics = new MetricsSource(peerId);
    // init replication source
    src.init(conf, fs, manager, replicationQueues, replicationPeers, server, peerId, clusterId, replicationEndpoint, metrics);
    // init replication endpoint
    replicationEndpoint.init(new ReplicationEndpoint.Context(replicationPeer.getConfiguration(), fs, peerId, clusterId, replicationPeer, metrics, tableDescriptors, server));
    return src;
}
Also used : IOException(java.io.IOException) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) RegionServerCoprocessorHost(org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost) ReplicationEndpoint(org.apache.hadoop.hbase.replication.ReplicationEndpoint) TableDescriptors(org.apache.hadoop.hbase.TableDescriptors)
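
The endpoint class instantiated above comes from the peer configuration; when none is set, HBaseInterClusterReplicationEndpoint is used. A peer with a custom endpoint would typically be defined along the lines of the sketch below. All names are placeholders (the quorum string, the peer id, and the endpoint class), and it assumes the ReplicationAdmin client and a context allowed to throw checked exceptions.

// Placeholder values throughout; this only shows where
// peerConfig.getReplicationEndpointImpl() in the factory method above gets its value.
ReplicationPeerConfig peerConfig = new ReplicationPeerConfig()
    .setClusterKey("zk1,zk2,zk3:2181:/hbase")                          // placeholder quorum
    .setReplicationEndpointImpl("org.example.MyReplicationEndpoint");  // hypothetical class
try (ReplicationAdmin admin = new ReplicationAdmin(conf)) {
    admin.addPeer("1", peerConfig);   // peer id "1" is arbitrary
}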

Aggregations

RegionServerCoprocessorHost (org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost): 9 usages
MasterCoprocessorHost (org.apache.hadoop.hbase.master.MasterCoprocessorHost): 7 usages
BeforeClass (org.junit.BeforeClass): 6 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages
Test (org.junit.Test): 2 usages
IOException (java.io.IOException): 1 usage
RejectedExecutionException (java.util.concurrent.RejectedExecutionException): 1 usage
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 1 usage
TableDescriptors (org.apache.hadoop.hbase.TableDescriptors): 1 usage
MasterCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment): 1 usage
RegionServerCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment): 1 usage
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer): 1 usage
RegionServerServices (org.apache.hadoop.hbase.regionserver.RegionServerServices): 1 usage
ReplicationEndpoint (org.apache.hadoop.hbase.replication.ReplicationEndpoint): 1 usage
ReplicationException (org.apache.hadoop.hbase.replication.ReplicationException): 1 usage