Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
The class TestCoprocessorConfiguration, method testMasterCoprocessorHostDefaults.
@Test
public void testMasterCoprocessorHostDefaults() throws Exception {
  Configuration conf = new Configuration(CONF);
  MasterServices masterServices = mock(MasterServices.class);
  systemCoprocessorLoaded.set(false);
  new MasterCoprocessorHost(masterServices, conf);
  assertEquals("System coprocessors loading default was not honored",
    CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get());
}
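For contrast, here is a minimal sketch of the companion check: explicitly disabling system coprocessor loading should leave the flag unset. This is hedged, not the verbatim test; it assumes the CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY constant ("hbase.coprocessor.enabled") and reuses the test's own CONF and systemCoprocessorLoaded fields from the snippet above.

// Hedged sketch: turn system coprocessor loading off and verify the host
// honors it. COPROCESSORS_ENABLED_CONF_KEY is assumed from CoprocessorHost.
Configuration conf = new Configuration(CONF);
conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, false);
systemCoprocessorLoaded.set(false);
new MasterCoprocessorHost(mock(MasterServices.class), conf);
assertFalse("System coprocessors should not load when explicitly disabled",
  systemCoprocessorLoaded.get());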
Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
The class TestCoreMasterCoprocessor, method before.
@Before
public void before() throws IOException {
  String methodName = this.name.getMethodName();
  this.ms = new MockMasterServices(HTU.getConfiguration(), null);
  this.mch = new MasterCoprocessorHost(this.ms, HTU.getConfiguration());
  this.mch.preMasterInitialization();
}
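A plausible matching teardown, sketched here as an assumption rather than quoted from the class: stop the mock master services after each test so state does not leak between methods. It assumes MockMasterServices exposes a stop(String) shutdown method.

// Hedged sketch of a matching @After; stop(String) is an assumed API.
@After
public void after() throws IOException {
  this.ms.stop(this.name.getMethodName());
}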
Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
The class TestNamespaceAuditor, method testRegionMerge.
@Test
public void testRegionMerge() throws Exception {
  String nsp1 = prefix + "_regiontest";
  final int initialRegions = 3;
  NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1)
    .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "" + initialRegions)
    .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
  ADMIN.createNamespace(nspDesc);
  final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
  byte[] columnFamily = Bytes.toBytes("info");
  TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableTwo)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnFamily)).build();
  ADMIN.createTable(tableDescriptor, Bytes.toBytes("0"), Bytes.toBytes("9"), initialRegions);
  try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
      Table table = connection.getTable(tableTwo)) {
    UTIL.loadNumericRows(table, columnFamily, 1000, 1999);
  }
  ADMIN.flush(tableTwo);
  List<RegionInfo> hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  // Merge the first two regions and wait for the merge to complete.
  Future<?> f = ADMIN.mergeRegionsAsync(hris.get(0).getEncodedNameAsBytes(),
    hris.get(1).getEncodedNameAsBytes(), false);
  f.get(10, TimeUnit.SECONDS);
  hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions - 1, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  byte[] splitKey = Bytes.toBytes("3");
  HRegion regionToSplit = UTIL.getMiniHBaseCluster().getRegions(tableTwo).stream()
    .filter(r -> r.getRegionInfo().containsRow(splitKey)).findFirst().get();
  regionToSplit.compact(true);
  // Wait for the compaction to finish.
  UTIL.waitFor(30000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return CompactionState.NONE ==
        ADMIN.getCompactionStateForRegion(regionToSplit.getRegionInfo().getRegionName());
    }
  });
  // Clean up compacted references so the split can proceed.
  regionToSplit.getStores().stream().forEach(s -> {
    try {
      s.closeAndArchiveCompactedFiles();
    } catch (IOException e1) {
      LOG.error("Error while cleaning compacted files", e1);
    }
  });
  // The compact above may quit immediately if a compaction is already ongoing,
  // so wait a while to let any ongoing compaction finish.
  UTIL.waitFor(10000, regionToSplit::isSplittable);
  ADMIN.splitRegionAsync(regionToSplit.getRegionInfo().getRegionName(), splitKey)
    .get(10, TimeUnit.SECONDS);
  hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  // Fail region merges through the coprocessor hook.
  SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
  MasterCoprocessorHost cpHost = cluster.getMaster().getMasterCoprocessorHost();
  Coprocessor coprocessor = cpHost.findCoprocessor(CPMasterObserver.class);
  CPMasterObserver masterObserver = (CPMasterObserver) coprocessor;
  masterObserver.failMerge(true);
  f = ADMIN.mergeRegionsAsync(hris.get(1).getEncodedNameAsBytes(),
    hris.get(2).getEncodedNameAsBytes(), false);
  try {
    f.get(10, TimeUnit.SECONDS);
    fail("Merge was supposed to fail!");
  } catch (ExecutionException ee) {
    // Expected.
  }
  hris = ADMIN.getRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris, RegionInfo.COMPARATOR);
  // Verify that we cannot split: the namespace region quota is exhausted.
  try {
    ADMIN.split(tableTwo, Bytes.toBytes("6"));
    fail("Split was supposed to fail!");
  } catch (DoNotRetryRegionException e) {
    // Expected.
  }
  // Give any stray split a moment before re-checking the region count.
  Thread.sleep(2000);
  assertEquals(initialRegions, ADMIN.getRegions(tableTwo).size());
}
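The failMerge(true) call above only works because the test registers a master observer that vetoes merges on demand. A minimal sketch of such a hook follows (hedged: the actual CPMasterObserver in TestNamespaceAuditor may differ in detail; imports from org.apache.hadoop.hbase.coprocessor are elided, matching the snippets above).

// Hedged sketch of a merge-vetoing observer. Throwing from the
// preMergeRegionsAction hook surfaces to the client as the
// ExecutionException the test expects.
public static class CPMasterObserver implements MasterCoprocessor, MasterObserver {
  private volatile boolean shouldFailMerge = false;

  public void failMerge(boolean fail) {
    this.shouldFailMerge = fail;
  }

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void preMergeRegionsAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
      RegionInfo[] regionsToMerge) throws IOException {
    if (shouldFailMerge) {
      throw new IOException("fail merge");
    }
  }
}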
Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
The class TestAccessController2, method testCoprocessorLoading.
@Test
public void testCoprocessorLoading() throws Exception {
  MasterCoprocessorHost cpHost =
    TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
  cpHost.load(MyAccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
  AccessController ACCESS_CONTROLLER = cpHost.findCoprocessor(MyAccessController.class);
  MasterCoprocessorEnvironment CP_ENV =
    cpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf);
  RegionServerCoprocessorHost rsHost =
    TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost();
  RegionServerCoprocessorEnvironment RSCP_ENV =
    rsHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf);
}
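MyAccessController here is presumably a trivial subclass that exists only so the test can load and look up a coprocessor class distinct from the default AccessController. A minimal sketch under that assumption:

// Hedged sketch: a no-op subclass whose sole purpose is to give the
// coprocessor host a distinct class to load and find.
public static class MyAccessController extends AccessController {
}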
Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
The class TestAccessController3, method setupBeforeClass.
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // Set up configuration
  conf = TEST_UTIL.getConfiguration();
  // Enable security
  enableSecurity(conf);
  String accessControllerClassName = FaultyAccessController.class.getName();
  // In this particular test case, we can't use SecureBulkLoadEndpoint because its doAs will fail
  // to move a file for a random user
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, accessControllerClassName);
  // Verify enableSecurity sets up what we require
  verifyConfiguration(conf);
  // Enable EXEC permission checking
  conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true);
  TEST_UTIL.startMiniCluster();
  MasterCoprocessorHost cpHost =
    TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
  cpHost.load(FaultyAccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
  ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor(accessControllerClassName);
  CP_ENV = cpHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf);
  RegionServerCoprocessorHost rsHost;
  do {
    // The region server's coprocessor host may not be initialized yet; spin until it is.
    rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost();
  } while (rsHost == null);
  RSCP_ENV = rsHost.createEnvironment(ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf);
  // Wait for the ACL table to become available
  TEST_UTIL.waitUntilAllRegionsAssigned(PermissionStorage.ACL_TABLE_NAME);
  // Create a set of test users
  SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" });
  USER_ADMIN = User.createUserForTesting(conf, "admin2", new String[0]);
  USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);
  USER_RO = User.createUserForTesting(conf, "rouser", new String[0]);
  USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
  USER_CREATE = User.createUserForTesting(conf, "tbl_create", new String[0]);
  USER_NONE = User.createUserForTesting(conf, "nouser", new String[0]);
  USER_ADMIN_CF = User.createUserForTesting(conf, "col_family_admin", new String[0]);
  USER_GROUP_ADMIN =
    User.createUserForTesting(conf, "user_group_admin", new String[] { GROUP_ADMIN });
  USER_GROUP_CREATE =
    User.createUserForTesting(conf, "user_group_create", new String[] { GROUP_CREATE });
  USER_GROUP_READ =
    User.createUserForTesting(conf, "user_group_read", new String[] { GROUP_READ });
  USER_GROUP_WRITE =
    User.createUserForTesting(conf, "user_group_write", new String[] { GROUP_WRITE });
  // Grant table creation permission to USER_OWNER
  grantGlobal(TEST_UTIL, USER_OWNER.getShortName(), Permission.Action.CREATE);
  systemUserConnection = TEST_UTIL.getConnection();
  setUpTableAndUserPermissions();
}
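A plausible class-level teardown to pair with this setup (hedged sketch; the real test may additionally assert on or unload the faulty coprocessor before shutdown):

// Hedged sketch of a matching @AfterClass: release the mini cluster so
// ports and data directories are freed after the test class finishes.
@AfterClass
public static void tearDownAfterClass() throws Exception {
  TEST_UTIL.shutdownMiniCluster();
}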