Use of org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost in project hbase by apache:
the class TestCoprocessorInterface, method reopenRegion.
/**
 * Reopens a previously closed region and wires up a coprocessor host by hand.
 * <p>
 * In a real deployment the coprocessor host is created by OpenRegionHandler
 * when the region server opens the region. No region server runs in this
 * test, so the host is constructed manually, attached to the region, and the
 * requested coprocessor classes are loaded into it.
 *
 * @param closedRegion the region to reopen
 * @param implClasses coprocessor implementations to load at user priority
 * @return the reopened region with the coprocessor host attached
 * @throws IOException if the region cannot be opened or a coprocessor fails to load
 */
Region reopenRegion(final Region closedRegion, Class<?>... implClasses) throws IOException {
  Region reopened = HRegion.openHRegion(closedRegion, null);
  Configuration conf = TEST_UTIL.getConfiguration();
  // Manually build the coprocessor host and attach it, since no region
  // server (and therefore no OpenRegionHandler) is involved here.
  RegionCoprocessorHost cpHost = new RegionCoprocessorHost(reopened, null, conf);
  ((HRegion) reopened).setCoprocessorHost(cpHost);
  for (Class<?> cpClass : implClasses) {
    cpHost.load(cpClass, Coprocessor.PRIORITY_USER, conf);
  }
  // load() above bypasses the normal CP loading paths (configuration or
  // HTableDescriptor), so the pre/postOpen hooks are not triggered
  // automatically; fire them explicitly to mimic a real open.
  cpHost.preOpen();
  cpHost.postOpen();
  return reopened;
}
Use of org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost in project phoenix by apache:
the class PhoenixAccessController, method getAccessControllers.
/**
 * Lazily builds and caches the list of coprocessors that implement the
 * AccessControlService interface, discovered from this environment's
 * region coprocessor host.
 * <p>
 * Uses double-checked locking: the cache field is assigned only after the
 * list is fully populated, so unsynchronized readers never observe a
 * partially built list.
 * NOTE(review): for the double-checked locking to be fully safe, the
 * {@code accessControllers} field must be declared {@code volatile} — the
 * declaration is outside this view; confirm it.
 *
 * @return the cached list of access-controlling coprocessors
 * @throws IOException if coprocessor discovery fails
 */
private List<BaseMasterAndRegionObserver> getAccessControllers() throws IOException {
    if (accessControllers == null) {
        synchronized (this) {
            if (accessControllers == null) {
                // Build into a local list first; assigning the field last
                // prevents publishing a partially populated list to readers
                // that hit the unsynchronized fast path above.
                List<BaseMasterAndRegionObserver> controllers =
                        new ArrayList<BaseMasterAndRegionObserver>();
                RegionCoprocessorHost cpHost = this.env.getCoprocessorHost();
                List<BaseMasterAndRegionObserver> coprocessors =
                        cpHost.findCoprocessors(BaseMasterAndRegionObserver.class);
                for (BaseMasterAndRegionObserver cp : coprocessors) {
                    // Only keep observers that also expose the AccessControl RPC service.
                    if (cp instanceof AccessControlService.Interface) {
                        controllers.add(cp);
                    }
                }
                accessControllers = controllers;
            }
        }
    }
    return accessControllers;
}
Use of org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost in project hbase by apache:
the class TestCoreRegionCoprocessor, method testCoreRegionCoprocessor.
/**
 * Verifies that only coprocessors annotated with CoreCoprocessor can reach a
 * RegionServerServices instance through their environment, and that
 * unannotated coprocessors cannot. Exercised against RegionCoprocessors.
 * @throws IOException if a coprocessor fails to load
 */
@Test
public void testCoreRegionCoprocessor() throws IOException {
  RegionCoprocessorHost host = region.getCoprocessorHost();
  // Without the CoreCoprocessor annotation the environment must NOT expose
  // RegionServerServices.
  RegionCoprocessorEnvironment cpEnv =
      host.load(null, NotCoreRegionCoprocessor.class.getName(), 0, HTU.getConfiguration());
  assertFalse(cpEnv instanceof HasRegionServerServices);
  // With the annotation, the environment exposes the services of this server.
  cpEnv = host.load(null, CoreRegionCoprocessor.class.getName(), 1, HTU.getConfiguration());
  assertTrue(cpEnv instanceof HasRegionServerServices);
  assertEquals(this.rss, ((HasRegionServerServices) cpEnv).getRegionServerServices());
}
Use of org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost in project hbase by apache:
the class TestWithDisabledAuthorization, method setUp.
@Before
public void setUp() throws Exception {
// Create the test table (owner added to the _acl_ table); one column family
// keeping up to 100 versions, pre-split at row key "s".
TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(testTable.getTableName()).setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(TEST_FAMILY).setMaxVersions(100).build()).build();
createTable(TEST_UTIL, USER_OWNER, tableDescriptor, new byte[][] { Bytes.toBytes("s") });
TEST_UTIL.waitUntilAllRegionsAssigned(testTable.getTableName());
// Build a region coprocessor environment around the AccessController so the
// test can invoke its hooks directly against a live region.
HRegion region = TEST_UTIL.getHBaseCluster().getRegions(testTable.getTableName()).get(0);
RegionCoprocessorHost rcpHost = region.getCoprocessorHost();
RCP_ENV = rcpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, TEST_UTIL.getConfiguration());
// Set up initial grants at decreasing scope: global admin, then
// table/family/qualifier-level grants for the remaining test users.
grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), Permission.Action.ADMIN, Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE);
grantOnTable(TEST_UTIL, USER_RW.getShortName(), testTable.getTableName(), TEST_FAMILY, null, Permission.Action.READ, Permission.Action.WRITE);
// USER_CREATE is USER_RW plus CREATE permissions
grantOnTable(TEST_UTIL, USER_CREATE.getShortName(), testTable.getTableName(), null, null, Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE);
grantOnTable(TEST_UTIL, USER_RO.getShortName(), testTable.getTableName(), TEST_FAMILY, null, Permission.Action.READ);
grantOnTable(TEST_UTIL, USER_QUAL.getShortName(), testTable.getTableName(), TEST_FAMILY, TEST_Q1, Permission.Action.READ, Permission.Action.WRITE);
// Sanity check: exactly the five table-level grants issued above (OWNER is
// tracked separately) must now be stored for this table.
assertEquals(5, PermissionStorage.getTablePermissions(TEST_UTIL.getConfiguration(), testTable.getTableName()).size());
}
Use of org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost in project hbase by apache:
the class TestRegionObserverScannerOpenHook, method testRegionObserverFlushTimeStacking.
/**
 * Loads a flush-time observer (NoDataFromFlush) stacked with a no-op
 * observer and verifies the flush hooks compose correctly: after a put and
 * a flush, a Get must return no cells because the observer suppresses all
 * data at flush time.
 */
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };
  // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking
  Configuration conf = new HBaseTestingUtil().getConfiguration();
  HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  // try/finally ensures the region and its WAL are closed even when an
  // assertion or region operation fails (previously leaked on failure).
  try {
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
    // put a row and flush it to disk
    Put put = new Put(ROW);
    put.addColumn(A, A, A);
    region.put(put);
    region.flush(true);
    Get get = new Get(ROW);
    Result r = region.get(get);
    // Message fixed: the coprocessor under test here is NoDataFromFlush,
    // not NoDataFromScan (copy-paste from the scan-time test).
    assertNull("Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: " + r, r.listCells());
  } finally {
    HBaseTestingUtil.closeRegionAndWAL(region);
  }
}
Aggregations