Example usage of org.apache.hadoop.hbase.NamespaceDescriptor in the Apache HBase project: class RSGroupAdminServer, method removeRSGroup.
@Override
public void removeRSGroup(String name) throws IOException {
  // Hold the manager lock for the whole check-then-remove sequence so that no
  // other writer can change our state while we are working.
  synchronized (rsGroupInfoManager) {
    if (master.getMasterCoprocessorHost() != null) {
      master.getMasterCoprocessorHost().preRemoveRSGroup(name);
    }
    RSGroupInfo rsGroupInfo = rsGroupInfoManager.getRSGroup(name);
    if (rsGroupInfo == null) {
      throw new ConstraintException("RSGroup " + name + " does not exist");
    }
    // A group may only be removed once it no longer owns any tables or servers.
    int tableCount = rsGroupInfo.getTables().size();
    if (tableCount > 0) {
      throw new ConstraintException("RSGroup " + name + " has " + tableCount
          + " tables; you must remove these tables from the rsgroup before "
          + "the rsgroup can be removed.");
    }
    int serverCount = rsGroupInfo.getServers().size();
    if (serverCount > 0) {
      // Fixed message: original concatenation produced "...before" + "the..."
      // with no separating space.
      throw new ConstraintException("RSGroup " + name + " has " + serverCount
          + " servers; you must remove these servers from the RSGroup before "
          + "the RSGroup can be removed.");
    }
    // Refuse removal while any namespace configuration still references this group.
    // Note: NAMESPACE_DESC_PROP_GROUP is a static constant, so reference it via the
    // class rather than through an instance.
    for (NamespaceDescriptor ns : master.getClusterSchema().getNamespaces()) {
      String nsGroup = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
      if (nsGroup != null && nsGroup.equals(name)) {
        throw new ConstraintException(
            "RSGroup " + name + " is referenced by namespace: " + ns.getName());
      }
    }
    rsGroupInfoManager.removeRSGroup(name);
    if (master.getMasterCoprocessorHost() != null) {
      master.getMasterCoprocessorHost().postRemoveRSGroup(name);
    }
  }
}
Example usage of org.apache.hadoop.hbase.NamespaceDescriptor in the Apache HBase project: class AccessControlClient, method getUserPermissions.
/**
 * List all the userPermissions matching the given pattern. If pattern is null, the behavior is
 * dependent on whether user has global admin privileges or not. If yes, the global permissions
 * along with the list of superusers would be returned. Else, no rows get returned.
 * @param connection The Connection instance to use
 * @param tableRegex The regular expression string to match against; null or empty requests
 *          global permissions, a leading '@' marks the remainder as a namespace regex, and
 *          anything else is treated as a table-name regex
 * @return the list of matching UserPermissions (never null, possibly empty)
 * @throws Throwable if the coprocessor invocation or admin lookup fails
 */
public static List<UserPermission> getUserPermissions(Connection connection, String tableRegex)
    throws Throwable {
  /** TODO: Pass an rpcController
  HBaseRpcController controller
    = ((ClusterConnection) connection).getRpcControllerFactory().newController();
  */
  List<UserPermission> permList = new ArrayList<>();
  // One try-with-resources for both resources; they are closed in reverse order
  // (admin first, then table), matching the original nested-try behavior.
  try (Table table = connection.getTable(ACL_TABLE_NAME);
      Admin admin = connection.getAdmin()) {
    CoprocessorRpcChannel service = table.coprocessorService(HConstants.EMPTY_START_ROW);
    BlockingInterface protocol =
        AccessControlProtos.AccessControlService.newBlockingStub(service);
    if (tableRegex == null || tableRegex.isEmpty()) {
      // No pattern: global permissions (privilege-dependent, see Javadoc above).
      permList = AccessControlUtil.getUserPermissions(null, protocol);
    } else if (tableRegex.charAt(0) == '@') {
      // '@' prefix: the rest of the pattern is a namespace regex.
      String namespaceRegex = tableRegex.substring(1);
      for (NamespaceDescriptor nsds : admin.listNamespaceDescriptors()) {
        String namespace = nsds.getName();
        if (namespace.matches(namespaceRegex)) {
          permList.addAll(
              AccessControlUtil.getUserPermissions(null, protocol, Bytes.toBytes(namespace)));
        }
      }
    } else {
      // Otherwise the pattern is a table-name regex; system tables are included (true).
      for (HTableDescriptor hd : admin.listTables(Pattern.compile(tableRegex), true)) {
        permList.addAll(
            AccessControlUtil.getUserPermissions(null, protocol, hd.getTableName()));
      }
    }
  }
  return permList;
}
Example usage of org.apache.hadoop.hbase.NamespaceDescriptor in the Apache HBase project: class TestMobCompactor, method testMinorCompaction.
@Test(timeout = 300000)
public void testMinorCompaction() throws Exception {
// Verifies a minor (partitioned) MOB compaction on family1 of a table that lives
// in a namespace: only files smaller than the merge threshold are merged, del
// files are left in place, and family2 is untouched.
resetConf();
int mergeSize = 5000;
// change the mob compaction merge size
conf.setLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD, mergeSize);
// create a table with namespace
NamespaceDescriptor namespaceDescriptor = NamespaceDescriptor.create("ns").build();
String tableNameAsString = "ns:testMinorCompaction";
admin.createNamespace(namespaceDescriptor);
setUp(tableNameAsString);
int count = 4;
// generate mob files
loadData(admin, bufMut, tableName, count, rowNumPerFile);
int rowNumPerRegion = count * rowNumPerFile;
// Sanity-check the loaded data before any deletes or compaction run.
assertEquals("Before deleting: mob rows count", regionNum * rowNumPerRegion, countMobRows(table));
assertEquals("Before deleting: mob cells count", regionNum * cellNumPerRow * rowNumPerRegion, countMobCells(table));
assertEquals("Before deleting: mob file count", regionNum * count, countFiles(tableName, true, family1));
// Files at or above mergeSize are expected to survive the compaction unmerged.
int largeFilesCount = countLargeFiles(mergeSize, tableName, family1);
createDelFile(table, tableName, Bytes.toBytes(family1), Bytes.toBytes(qf1));
// State after the delete markers are written but before compaction.
assertEquals("Before compaction: mob rows count", regionNum * (rowNumPerRegion - delRowNum), countMobRows(table));
assertEquals("Before compaction: mob cells count", regionNum * (cellNumPerRow * rowNumPerRegion - delCellNum), countMobCells(table));
assertEquals("Before compaction: family1 mob file count", regionNum * count, countFiles(tableName, true, family1));
assertEquals("Before compaction: family2 mob file count", regionNum * count, countFiles(tableName, true, family2));
assertEquals("Before compaction: family1 del file count", regionNum, countFiles(tableName, false, family1));
assertEquals("Before compaction: family2 del file count", regionNum, countFiles(tableName, false, family2));
// do the mob file compaction (family1 only; family2 serves as a control group)
MobCompactor compactor = new PartitionedMobCompactor(conf, fs, tableName, hcd1, pool);
compactor.compact();
// Row/cell counts are unchanged by a minor compaction; only file layout changes.
assertEquals("After compaction: mob rows count", regionNum * (rowNumPerRegion - delRowNum), countMobRows(table));
assertEquals("After compaction: mob cells count", regionNum * (cellNumPerRow * rowNumPerRegion - delCellNum), countMobCells(table));
// After the compaction, the files smaller than the mob compaction merge size
// is merge to one file
assertEquals("After compaction: family1 mob file count", largeFilesCount + regionNum, countFiles(tableName, true, family1));
assertEquals("After compaction: family2 mob file count", regionNum * count, countFiles(tableName, true, family2));
// Minor compaction does not purge del files for either family.
assertEquals("After compaction: family1 del file count", regionNum, countFiles(tableName, false, family1));
assertEquals("After compaction: family2 del file count", regionNum, countFiles(tableName, false, family2));
}
Example usage of org.apache.hadoop.hbase.NamespaceDescriptor in the Apache HBase project: class TestSimpleRegionNormalizerOnCluster, method testRegionNormalizationSplitOnCluster.
/**
 * Drives a region-normalizer split on a 5-region table and verifies the outcome.
 * When {@code limitedByQuota} is true the table is created in a namespace whose
 * region quota is already at the limit, so the normalizer must skip the split
 * plan; otherwise the split is expected to actually produce new regions.
 * @param limitedByQuota whether to cap the namespace so the split is skipped
 * @throws Exception on any cluster, admin, or interruption failure
 */
void testRegionNormalizationSplitOnCluster(boolean limitedByQuota) throws Exception {
  TableName tableName;
  if (limitedByQuota) {
    // Namespace quota of 5 regions / 2 tables: the normalizer's split plan
    // would exceed the region quota and should therefore be skipped.
    String nsp = "np2";
    NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp)
        .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5")
        .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
    admin.createNamespace(nspDesc);
    tableName = TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + name.getMethodName());
  } else {
    tableName = TableName.valueOf(name.getMethodName());
  }
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  HMaster m = cluster.getMaster();
  try (Table ht = TEST_UTIL.createMultiRegionTable(tableName, FAMILYNAME, 5)) {
    // Need to get sorted list of regions here so the data-size skew below is
    // deterministic: the last region is made much larger than the others.
    List<HRegion> generatedRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
    Collections.sort(generatedRegions, new Comparator<HRegion>() {
      @Override
      public int compare(HRegion o1, HRegion o2) {
        return o1.getRegionInfo().compareTo(o2.getRegionInfo());
      }
    });
    HRegion region = generatedRegions.get(0);
    generateTestData(region, 1);
    region.flush(true);
    region = generatedRegions.get(1);
    generateTestData(region, 1);
    region.flush(true);
    region = generatedRegions.get(2);
    generateTestData(region, 2);
    region.flush(true);
    region = generatedRegions.get(3);
    generateTestData(region, 2);
    region.flush(true);
    region = generatedRegions.get(4);
    generateTestData(region, 5);
    region.flush(true);
  }
  HTableDescriptor htd = admin.getTableDescriptor(tableName);
  htd.setNormalizationEnabled(true);
  admin.modifyTable(tableName, htd);
  admin.flush(tableName);
  assertEquals(5, MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tableName));
  // Now trigger a split and stop when the split is in progress
  // to let region load to update
  Thread.sleep(5000);
  m.normalizeRegions();
  if (limitedByQuota) {
    long skippedSplitcnt = 0;
    do {
      skippedSplitcnt = m.getRegionNormalizer().getSkippedCount(PlanType.SPLIT);
      Thread.sleep(100);
    } while (skippedSplitcnt == 0L);
    assert (skippedSplitcnt > 0);
  } else {
    // Wait until the split has produced at least two daughter regions.
    // Sleep between polls so this does not busy-spin a CPU core; the
    // surrounding test timeout bounds the total wait.
    while (true) {
      List<HRegion> regions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
      int cnt = 0;
      for (HRegion region : regions) {
        String regionName = region.getRegionInfo().getRegionNameAsString();
        if (regionName.startsWith("testRegionNormalizationSplitOnCluster,zzzzz")) {
          cnt++;
        }
      }
      if (cnt >= 2) {
        break;
      }
      Thread.sleep(100);
    }
  }
  admin.disableTable(tableName);
  admin.deleteTable(tableName);
}
Example usage of org.apache.hadoop.hbase.NamespaceDescriptor in the Apache HBase project: class TableNamespaceManager, method list.
/**
 * Returns every namespace descriptor persisted in the namespace table, sorted by
 * the standard {@code NamespaceDescriptor} comparator.
 * @return a sorted, mutable set of all stored namespace descriptors
 * @throws IOException if scanning the namespace table or parsing a descriptor fails
 */
public synchronized NavigableSet<NamespaceDescriptor> list() throws IOException {
  NavigableSet<NamespaceDescriptor> ret =
      Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR);
  // try-with-resources guarantees the scanner is closed even if parsing throws,
  // replacing the manual try/finally of the original.
  try (ResultScanner scanner =
      getNamespaceTable().getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES)) {
    for (Result r : scanner) {
      // Each row stores one serialized descriptor in the info:d column.
      byte[] val = CellUtil.cloneValue(r.getColumnLatestCell(
          HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
          HTableDescriptor.NAMESPACE_COL_DESC_BYTES));
      ret.add(ProtobufUtil.toNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.parseFrom(val)));
    }
  }
  return ret;
}
Aggregations