Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
From class TestSplitTableRegionProcedure, the method testSplitTableRegion:
@Test
public void testSplitTableRegion() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  RegionInfo[] regions = MasterProcedureTestingUtility.createTable(procExec, tableName, null,
    columnFamilyName1, columnFamilyName2);
  insertData(UTIL, tableName, rowCount, startRowNum, columnFamilyName1, columnFamilyName2);
  int splitRowNum = startRowNum + rowCount / 2;
  byte[] splitKey = Bytes.toBytes(String.valueOf(splitRowNum));
  assertNotNull("not able to find a splittable region", regions);
  assertEquals("not able to find a splittable region", 1, regions.length);
  // Collect AM metrics before the test so the assertions below can check deltas
  collectAssignmentManagerMetrics();
  // Split a region of the table
  long procId = procExec.submitProcedure(
    new SplitTableRegionProcedure(procExec.getEnvironment(), regions[0], splitKey));
  // Wait for completion and make sure the procedure did not fail
  ProcedureTestingUtility.waitProcedure(procExec, procId);
  ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
  verify(tableName, splitRowNum);
  // One split submitted, two daughters assigned, one parent unassigned
  assertEquals(splitSubmittedCount + 1, splitProcMetrics.getSubmittedCounter().getCount());
  assertEquals(splitFailedCount, splitProcMetrics.getFailedCounter().getCount());
  assertEquals(assignSubmittedCount + 2, assignProcMetrics.getSubmittedCounter().getCount());
  assertEquals(assignFailedCount, assignProcMetrics.getFailedCounter().getCount());
  assertEquals(unassignSubmittedCount + 1, unassignProcMetrics.getSubmittedCounter().getCount());
  assertEquals(unassignFailedCount, unassignProcMetrics.getFailedCounter().getCount());
}
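The metric assertions above compare each counter against a baseline captured by collectAssignmentManagerMetrics() before the split. A minimal sketch of such a baseline helper, assuming the *Count fields and *ProcMetrics objects referenced in the assertions are members of the test class:

  // Hedged sketch of the baseline helper: snapshot each counter before the
  // operation so the test can assert on the delta afterwards. Field and metric
  // names are taken from the assertions above; the helper body is assumed.
  private void collectAssignmentManagerMetrics() {
    splitSubmittedCount = splitProcMetrics.getSubmittedCounter().getCount();
    splitFailedCount = splitProcMetrics.getFailedCounter().getCount();
    assignSubmittedCount = assignProcMetrics.getSubmittedCounter().getCount();
    assignFailedCount = assignProcMetrics.getFailedCounter().getCount();
    unassignSubmittedCount = unassignProcMetrics.getSubmittedCounter().getCount();
    unassignFailedCount = unassignProcMetrics.getFailedCounter().getCount();
  }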
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
From class TestAccessController, the method testGetProcedures:
@Test
public void testGetProcedures() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  final ProcedureExecutor<MasterProcedureEnv> procExec =
    TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
  Procedure<MasterProcedureEnv> proc = new TestTableDDLProcedure(procExec.getEnvironment(), tableName);
  proc.setOwner(USER_OWNER);
  procExec.submitProcedure(proc);
  final List<Procedure<MasterProcedureEnv>> procList = procExec.getProcedures();
  AccessTestAction getProceduresAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      ACCESS_CONTROLLER.postGetProcedures(ObserverContextImpl.createAndPrepare(CP_ENV));
      return null;
    }
  };
  // Admins and the procedure owner may list procedures; the other users may not
  verifyAllowed(getProceduresAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN);
  verifyAllowed(getProceduresAction, USER_OWNER);
  verifyIfNull(getProceduresAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE);
}
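The verifyAllowed/verifyIfNull helpers ultimately run the action under each user's security context. A minimal sketch of that mechanism, assuming the users are org.apache.hadoop.hbase.security.User test instances as elsewhere in TestAccessController (User.runAs is HBase's standard way to execute code as a given user; the exact wiring inside the helpers is assumed):

  // Hedged sketch: execute the action as one specific test user.
  // Needs java.security.PrivilegedExceptionAction on the classpath (JDK).
  Object result = USER_RO.runAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      // verifyIfNull would assert this result is null for unauthorized users
      return getProceduresAction.run();
    }
  });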
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
From class RSGroupInfoManagerImpl, the method migrate:
private void migrate(Collection<RSGroupInfo> groupList) {
  TableDescriptors tds = masterServices.getTableDescriptors();
  ProcedureExecutor<MasterProcedureEnv> procExec = masterServices.getMasterProcedureExecutor();
  for (RSGroupInfo groupInfo : groupList) {
    if (groupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
      continue;
    }
    SortedSet<TableName> failedTables = new TreeSet<>();
    List<MigrateRSGroupProcedure> procs = new ArrayList<>();
    for (TableName tableName : groupInfo.getTables()) {
      LOG.debug("Migrating {} in group {}", tableName, groupInfo.getName());
      TableDescriptor oldTd;
      try {
        oldTd = tds.get(tableName);
      } catch (IOException e) {
        LOG.warn("Failed to migrate {} in group {}", tableName, groupInfo.getName(), e);
        failedTables.add(tableName);
        continue;
      }
      if (oldTd == null) {
        continue;
      }
      if (oldTd.getRegionServerGroup().isPresent()) {
        // Either we have already migrated it, or the user has set the rs group with the new
        // code, which stores the group directly on the table descriptor; skip it.
        LOG.debug("Skip migrating {} since it is already in group {}", tableName,
          oldTd.getRegionServerGroup().get());
        continue;
      }
      // This is a bit tricky. Since we know that the region server group config in
      // TableDescriptor is only used on the master side, it is fine to just update the table
      // descriptor on the file system, and also the cache, without reopening all the regions.
      // This is much faster than a normal modifyTable. And when upgrading, the master is
      // updated first and then the region servers, so after all the region servers have been
      // restarted, the new TableDescriptor will be loaded.
      MigrateRSGroupProcedure proc =
        new MigrateRSGroupProcedure(procExec.getEnvironment(), tableName);
      procExec.submitProcedure(proc);
      procs.add(proc);
    }
    for (MigrateRSGroupProcedure proc : procs) {
      try {
        ProcedureSyncWait.waitForProcedureToComplete(procExec, proc, 60000);
      } catch (IOException e) {
        LOG.warn("Failed to migrate rs group {} for table {}", groupInfo.getName(),
          proc.getTableName(), e);
        failedTables.add(proc.getTableName());
      }
    }
    LOG.debug("Done migrating {}, failed tables {}", groupInfo.getName(), failedTables);
    synchronized (RSGroupInfoManagerImpl.this) {
      Map<String, RSGroupInfo> rsGroupMap = holder.groupName2Group;
      RSGroupInfo currentInfo = rsGroupMap.get(groupInfo.getName());
      if (currentInfo != null) {
        RSGroupInfo newInfo =
          new RSGroupInfo(currentInfo.getName(), currentInfo.getServers(), failedTables);
        Map<String, RSGroupInfo> newGroupMap = new HashMap<>(rsGroupMap);
        newGroupMap.put(groupInfo.getName(), newInfo);
        try {
          flushConfig(newGroupMap);
        } catch (IOException e) {
          LOG.warn("Failed to persist rs group {}", newInfo.getName(), e);
        }
      }
    }
  }
}
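For context, migrate(Collection) is a one-shot pass over the loaded groups; a driver would typically run it in the background so master startup is not blocked. A hedged sketch of such a driver, with the thread name and the retry-free shape assumed rather than taken from the actual HBase implementation:

  // Hedged sketch: run the migration asynchronously once the groups are loaded.
  private void migrate() {
    Thread migrateThread = new Thread("Migrate-RSGroup-Tables") {
      @Override
      public void run() {
        LOG.info("Start migrating table rs group config");
        migrate(holder.groupName2Group.values());
        LOG.info("Done migrating table rs group config");
      }
    };
    migrateThread.setDaemon(true);
    migrateThread.start();
  }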
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
From class ServerManager, the method processDeadServer:
public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitWal) {
  // If failover cleanup is not done yet we cannot schedule the crash handling for the
  // downed server now, so we queue it up here and process it later.
  if (!master.getAssignmentManager().isFailoverCleanupDone()) {
    requeuedDeadServers.put(serverName, shouldSplitWal);
    return;
  }
  this.deadservers.add(serverName);
  ProcedureExecutor<MasterProcedureEnv> procExec = this.master.getMasterProcedureExecutor();
  procExec.submitProcedure(
    new ServerCrashProcedure(procExec.getEnvironment(), serverName, shouldSplitWal, false));
}
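The requeued servers must be drained once failover cleanup finishes. A minimal sketch of such a drain step, with the method name assumed; it simply replays the queue through processDeadServer, which now takes the submit path because isFailoverCleanupDone() returns true:

  // Hedged sketch: replay the queued dead servers after failover cleanup.
  synchronized void processQueuedDeadServers() {
    for (Map.Entry<ServerName, Boolean> entry : requeuedDeadServers.entrySet()) {
      processDeadServer(entry.getKey(), entry.getValue());
    }
    requeuedDeadServers.clear();
  }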
Use of org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv in project hbase by apache.
From class TestHBaseFsckOneRS, the method testCleanUpDaughtersNotInMetaAfterFailedSplit:
@Test(timeout = 180000)
public void testCleanUpDaughtersNotInMetaAfterFailedSplit() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  try {
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
    createTable(TEST_UTIL, desc, null);
    tbl = connection.getTable(desc.getTableName());
    for (int i = 0; i < 5; i++) {
      Put p1 = new Put(Bytes.toBytes("r" + i));
      p1.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q1"), Bytes.toBytes("v"));
      tbl.put(p1);
    }
    admin.flush(desc.getTableName());
    List<HRegion> regions = cluster.getRegions(desc.getTableName());
    int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
    HRegionServer regionServer = cluster.getRegionServer(serverWith);
    byte[] parentRegionName = regions.get(0).getRegionInfo().getRegionName();
    cluster.getServerWith(parentRegionName);
    // Create the daughter regions without adding them to the META table
    MasterProcedureEnv env = cluster.getMaster().getMasterProcedureExecutor().getEnvironment();
    SplitTableRegionProcedure splitR =
      new SplitTableRegionProcedure(env, regions.get(0).getRegionInfo(), Bytes.toBytes("r3"));
    splitR.prepareSplitRegion(env);
    splitR.setRegionStateToSplitting(env);
    splitR.closeParentRegionForSplit(env);
    splitR.createDaughterRegions(env);
    AssignmentManager am = cluster.getMaster().getAssignmentManager();
    for (RegionState state : am.getRegionStates().getRegionsInTransition()) {
      am.regionOffline(state.getRegion());
    }
    Map<HRegionInfo, ServerName> regionsMap = new HashMap<>();
    regionsMap.put(regions.get(0).getRegionInfo(), regionServer.getServerName());
    am.assign(regionsMap);
    am.waitForAssignment(regions.get(0).getRegionInfo());
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
      HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
      HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
    // Holes are reported separately from overlap groups
    assertEquals(0, hbck.getOverlapGroups(tableName).size());
    // Fix the hole: re-run fsck with one fix option enabled (the second positional flag)
    assertErrors(
      doFsck(conf, false, true, false, false, false, false, false, false, false, false, false,
        false, null),
      new HBaseFsck.ErrorReporter.ERROR_CODE[] {
        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
        HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
    // Check that the hole has been fixed
    assertNoErrors(doFsck(conf, false));
    assertEquals(5, countRows());
  } finally {
    if (tbl != null) {
      tbl.close();
      tbl = null;
    }
    cleanupTable(tableName);
  }
}
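The long positional boolean list in the doFsck call above is easy to misread. A hedged restatement with named locals, assuming the flag order follows the fixAssignments-first convention of the test utility's doFsck overload (verify against the actual signature before relying on this):

  // Hypothetical readability rewrite of the fix-hole call; only the second
  // positional flag (assumed to be fixMeta) is enabled.
  boolean fixAssignments = false;
  boolean fixMeta = true;
  HBaseFsck fixer = doFsck(conf, fixAssignments, fixMeta,
    false, false, false, false, false, false, false, false, false, false, null);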