Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
The class TestScanEarlyTermination, method setupBeforeClass.
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // setup configuration
  conf = TEST_UTIL.getConfiguration();
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  // Enable security
  enableSecurity(conf);
  // Verify enableSecurity sets up what we require
  verifyConfiguration(conf);
  TEST_UTIL.startMiniCluster();
  MasterCoprocessorHost cpHost =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
  cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
  AccessController ac = (AccessController) cpHost.findCoprocessor(AccessController.class.getName());
  cpHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf);
  RegionServerCoprocessorHost rsHost =
      TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost();
  rsHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf);
  // Wait for the ACL table to become available
  TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
  // create a set of test users
  USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
  USER_OTHER = User.createUserForTesting(conf, "other", new String[0]);
}
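The snippet above registers the AccessController dynamically through cpHost.load() on an already running master. An alternative is to list the coprocessor class in the configuration before the mini cluster starts, so the MasterCoprocessorHost instantiates it itself; the host can then still be queried with findCoprocessor. The following is a minimal sketch of that alternative, not what this test does, and it assumes the same TEST_UTIL and conf fields:

// Sketch (assumption): declarative registration via configuration instead of cpHost.load().
conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
TEST_UTIL.startMiniCluster();
MasterCoprocessorHost cpHost =
    TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
AccessController ac =
    (AccessController) cpHost.findCoprocessor(AccessController.class.getName());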
Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
The class TestAccessController3, method setupBeforeClass.
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // setup configuration
  conf = TEST_UTIL.getConfiguration();
  // Enable security
  enableSecurity(conf);
  String accessControllerClassName = FaultyAccessController.class.getName();
  // In this particular test case, we can't use SecureBulkLoadEndpoint because its doAs will fail
  // to move a file for a random user
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, accessControllerClassName);
  // Verify enableSecurity sets up what we require
  verifyConfiguration(conf);
  // Enable EXEC permission checking
  conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true);
  TEST_UTIL.startMiniCluster();
  MasterCoprocessorHost cpHost =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
  cpHost.load(FaultyAccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
  ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor(accessControllerClassName);
  CP_ENV = cpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER,
      Coprocessor.PRIORITY_HIGHEST, 1, conf);
  RegionServerCoprocessorHost rsHost;
  do {
    rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost();
  } while (rsHost == null);
  RSCP_ENV = rsHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER,
      Coprocessor.PRIORITY_HIGHEST, 1, conf);
  // Wait for the ACL table to become available
  TEST_UTIL.waitUntilAllRegionsAssigned(AccessControlLists.ACL_TABLE_NAME);
  // create a set of test users
  SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" });
  USER_ADMIN = User.createUserForTesting(conf, "admin2", new String[0]);
  USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);
  USER_RO = User.createUserForTesting(conf, "rouser", new String[0]);
  USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
  USER_CREATE = User.createUserForTesting(conf, "tbl_create", new String[0]);
  USER_NONE = User.createUserForTesting(conf, "nouser", new String[0]);
  USER_ADMIN_CF = User.createUserForTesting(conf, "col_family_admin", new String[0]);
  USER_GROUP_ADMIN = User.createUserForTesting(conf, "user_group_admin", new String[] { GROUP_ADMIN });
  USER_GROUP_CREATE = User.createUserForTesting(conf, "user_group_create", new String[] { GROUP_CREATE });
  USER_GROUP_READ = User.createUserForTesting(conf, "user_group_read", new String[] { GROUP_READ });
  USER_GROUP_WRITE = User.createUserForTesting(conf, "user_group_write", new String[] { GROUP_WRITE });
  systemUserConnection = TEST_UTIL.getConnection();
  setUpTableAndUserPermissions();
}
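The do/while loop above spins without pause until the region server publishes its RegionServerCoprocessorHost. A bounded alternative, sketched here with the Waiter utility that other HBase tests on this page already use (the 30-second timeout is an arbitrary assumption):

// Sketch: wait up to 30s for the RegionServerCoprocessorHost instead of busy-looping.
TEST_UTIL.waitFor(30000, new Waiter.Predicate<Exception>() {
  @Override
  public boolean evaluate() throws Exception {
    return TEST_UTIL.getMiniHBaseCluster().getRegionServer(0)
        .getRegionServerCoprocessorHost() != null;
  }
});
RegionServerCoprocessorHost rsHost =
    TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost();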
Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
The class TestCellACLWithMultipleVersions, method setupBeforeClass.
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // setup configuration
  conf = TEST_UTIL.getConfiguration();
  // Enable security
  enableSecurity(conf);
  // Verify enableSecurity sets up what we require
  verifyConfiguration(conf);
  // We expect 0.98 cell ACL semantics
  conf.setBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, false);
  TEST_UTIL.startMiniCluster();
  MasterCoprocessorHost cpHost =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
  cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
  AccessController ac = (AccessController) cpHost.findCoprocessor(AccessController.class.getName());
  cpHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf);
  RegionServerCoprocessorHost rsHost =
      TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getRegionServerCoprocessorHost();
  rsHost.createEnvironment(AccessController.class, ac, Coprocessor.PRIORITY_HIGHEST, 1, conf);
  // Wait for the ACL table to become available
  TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
  // create a set of test users
  USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
  USER_OTHER = User.createUserForTesting(conf, "other", new String[0]);
  USER_OTHER2 = User.createUserForTesting(conf, "other2", new String[0]);
  GROUP_USER = User.createUserForTesting(conf, "group_user", new String[] { GROUP });
  usersAndGroups = new String[] { USER_OTHER.getShortName(), AuthUtil.toGroupEntry(GROUP) };
}
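The usersAndGroups array collects the grantees (a user short name and a group entry) that later receive per-cell permissions. For context, here is a hedged sketch of how such cell ACLs are typically attached to a mutation; the table, family, qualifier, and row names below are hypothetical and not taken from this test:

// Sketch (hypothetical names): grant READ on the cells of a single Put to every entry
// in usersAndGroups, using the cell-ACL attribute carried by the mutation.
Map<String, Permission> perms = new HashMap<>();
for (String grantee : usersAndGroups) {
  perms.put(grantee, new Permission(Permission.Action.READ));
}
Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
put.setACL(perms);
try (Table table = TEST_UTIL.getConnection().getTable(TableName.valueOf("testtable"))) {
  table.put(put);
}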
Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
The class TestNamespaceAuditor, method testRegionMerge.
@Test
public void testRegionMerge() throws Exception {
  String nsp1 = prefix + "_regiontest";
  NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1)
      .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "3")
      .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
  ADMIN.createNamespace(nspDesc);
  final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
  byte[] columnFamily = Bytes.toBytes("info");
  HTableDescriptor tableDescOne = new HTableDescriptor(tableTwo);
  tableDescOne.addFamily(new HColumnDescriptor(columnFamily));
  final int initialRegions = 3;
  ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("2000"), initialRegions);
  Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
  try (Table table = connection.getTable(tableTwo)) {
    UTIL.loadNumericRows(table, Bytes.toBytes("info"), 1000, 1999);
  }
  ADMIN.flush(tableTwo);
  List<HRegionInfo> hris = ADMIN.getTableRegions(tableTwo);
  Collections.sort(hris);
  // merge the two regions
  final Set<String> encodedRegionNamesToMerge =
      Sets.newHashSet(hris.get(0).getEncodedName(), hris.get(1).getEncodedName());
  ADMIN.mergeRegionsAsync(hris.get(0).getEncodedNameAsBytes(),
      hris.get(1).getEncodedNameAsBytes(), false);
  UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate<Exception>() {

    @Override
    public boolean evaluate() throws Exception {
      RegionStates regionStates =
          UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
      for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
        if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
          return false;
        }
        if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
          return false;
        }
      }
      return true;
    }

    @Override
    public String explainFailure() throws Exception {
      RegionStates regionStates =
          UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
      for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
        if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
          return hri + " which is expected to be merged is still online";
        }
        if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
          return hri + " is still not open";
        }
      }
      return "Unknown";
    }
  });
  hris = ADMIN.getTableRegions(tableTwo);
  assertEquals(initialRegions - 1, hris.size());
  Collections.sort(hris);
  final HRegionInfo hriToSplit = hris.get(1);
  ADMIN.split(tableTwo, Bytes.toBytes("500"));
  UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate<Exception>() {

    @Override
    public boolean evaluate() throws Exception {
      RegionStates regionStates =
          UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
      for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
        if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
          return false;
        }
        if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
          return false;
        }
      }
      return true;
    }

    @Override
    public String explainFailure() throws Exception {
      RegionStates regionStates =
          UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
      for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
        if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
          return hriToSplit + " which is expected to be split is still online";
        }
        if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
          return hri + " is still not open";
        }
      }
      return "Unknown";
    }
  });
  hris = ADMIN.getTableRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris);
  // fail region merge through Coprocessor hook
  MiniHBaseCluster cluster = UTIL.getHBaseCluster();
  MasterCoprocessorHost cpHost = cluster.getMaster().getMasterCoprocessorHost();
  Coprocessor coprocessor = cpHost.findCoprocessor(CPMasterObserver.class.getName());
  CPMasterObserver masterObserver = (CPMasterObserver) coprocessor;
  masterObserver.failMerge(true);
  masterObserver.triggered = false;
  ADMIN.mergeRegionsAsync(hris.get(1).getEncodedNameAsBytes(),
      hris.get(2).getEncodedNameAsBytes(), false);
  masterObserver.waitUtilTriggered();
  hris = ADMIN.getTableRegions(tableTwo);
  assertEquals(initialRegions, hris.size());
  Collections.sort(hris);
  // verify that we cannot split
  HRegionInfo hriToSplit2 = hris.get(1);
  ADMIN.split(tableTwo,
      TableInputFormatBase.getSplitKey(hriToSplit2.getStartKey(), hriToSplit2.getEndKey(), true));
  Thread.sleep(2000);
  assertEquals(initialRegions, ADMIN.getTableRegions(tableTwo).size());
}
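The failMerge(), triggered, and waitUtilTriggered() members belong to the test's CPMasterObserver, which is found through the MasterCoprocessorHost above but not shown on this page. The following is a rough sketch of the coordination it relies on; in the real test these fields live inside a MasterObserver registered on the master, and it is one of its merge hooks (whose exact signature is omitted here, as it depends on the HBase version) that marks the observer as triggered and injects the failure:

// Sketch (assumption): the flag/latch mechanics behind failMerge() and waitUtilTriggered().
volatile boolean shouldFailMerge = false;
boolean triggered = false;

public void failMerge(boolean fail) {
  shouldFailMerge = fail;
}

public synchronized void waitUtilTriggered() throws InterruptedException {
  while (!triggered) {
    wait();
  }
}

// Called from the observer's merge hook (hypothetical helper name):
synchronized void onMergeRequested() throws IOException {
  triggered = true;
  notifyAll();
  if (shouldFailMerge) {
    throw new IOException("fail merge");
  }
}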
Use of org.apache.hadoop.hbase.master.MasterCoprocessorHost in project hbase by apache.
The class CloneSnapshotProcedure, method postCloneSnapshot.
/**
 * Action after cloning from snapshot.
 * @param env MasterProcedureEnv
 * @throws IOException
 * @throws InterruptedException
 */
private void postCloneSnapshot(final MasterProcedureEnv env)
    throws IOException, InterruptedException {
  final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
  if (cpHost != null) {
    final HRegionInfo[] regions = (newRegions == null) ? null
        : newRegions.toArray(new HRegionInfo[newRegions.size()]);
    cpHost.postCompletedCreateTableAction(hTableDescriptor, regions, getUser());
  }
}
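Procedures like this one usually pair such a post-hook with a pre-hook invoked before the clone begins, guarded by the same null check on the coprocessor host. A sketch of what that counterpart might look like; the hook name preCreateTableAction is an assumption mirroring the post-variant above, not code quoted from this class:

// Sketch (assumption): a pre-clone counterpart with the same null-guard on the
// MasterCoprocessorHost, notifying master coprocessors before table creation starts.
private void preCloneSnapshot(final MasterProcedureEnv env)
    throws IOException, InterruptedException {
  final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
  if (cpHost != null) {
    // Regions are not known yet at this point, so none are passed to the hook.
    cpHost.preCreateTableAction(hTableDescriptor, null, getUser());
  }
}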