Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
The class ServerCrashProcedure, method executeFromState.
@Override
protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState state)
    throws ProcedureSuspendedException, ProcedureYieldException {
  final MasterServices services = env.getMasterServices();
  final AssignmentManager am = env.getAssignmentManager();
  updateProgress(true);
  // Server gets removed from processing list below on procedure successful finish.
  if (!notifiedDeadServer) {
    notifiedDeadServer = true;
  }
  switch (state) {
    case SERVER_CRASH_START:
    case SERVER_CRASH_SPLIT_META_LOGS:
    case SERVER_CRASH_DELETE_SPLIT_META_WALS_DIR:
    case SERVER_CRASH_ASSIGN_META:
      break;
    default:
      // If hbase:meta is not assigned, yield.
      if (env.getAssignmentManager().waitMetaLoaded(this)) {
        throw new ProcedureSuspendedException();
      }
  }
  try {
    switch (state) {
      case SERVER_CRASH_START:
        LOG.info("Start " + this);
        // If carrying meta, process it first. Else, get list of regions on crashed server.
        if (this.carryingMeta) {
          setNextState(ServerCrashState.SERVER_CRASH_SPLIT_META_LOGS);
        } else {
          setNextState(ServerCrashState.SERVER_CRASH_GET_REGIONS);
        }
        break;
      case SERVER_CRASH_SPLIT_META_LOGS:
        if (env.getMasterConfiguration().getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK,
            DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) {
          zkCoordinatedSplitMetaLogs(env);
          setNextState(ServerCrashState.SERVER_CRASH_ASSIGN_META);
        } else {
          am.getRegionStates().metaLogSplitting(serverName);
          addChildProcedure(createSplittingWalProcedures(env, true));
          setNextState(ServerCrashState.SERVER_CRASH_DELETE_SPLIT_META_WALS_DIR);
        }
        break;
      case SERVER_CRASH_DELETE_SPLIT_META_WALS_DIR:
        if (isSplittingDone(env, true)) {
          setNextState(ServerCrashState.SERVER_CRASH_ASSIGN_META);
          am.getRegionStates().metaLogSplit(serverName);
        } else {
          setNextState(ServerCrashState.SERVER_CRASH_SPLIT_META_LOGS);
        }
        break;
      case SERVER_CRASH_ASSIGN_META:
        assignRegions(env, Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO));
        setNextState(ServerCrashState.SERVER_CRASH_GET_REGIONS);
        break;
      case SERVER_CRASH_GET_REGIONS:
        this.regionsOnCrashedServer = getRegionsOnCrashedServer(env);
        // if we should do distributed log splitting.
        if (regionsOnCrashedServer != null) {
          LOG.info("{} had {} regions", serverName, regionsOnCrashedServer.size());
          if (LOG.isTraceEnabled()) {
            this.regionsOnCrashedServer.stream().forEach(ri -> LOG.trace(ri.getShortNameToLog()));
          }
        }
        if (!this.shouldSplitWal) {
          setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
        } else {
          setNextState(ServerCrashState.SERVER_CRASH_SPLIT_LOGS);
        }
        break;
      case SERVER_CRASH_SPLIT_LOGS:
        if (env.getMasterConfiguration().getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK,
            DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) {
          zkCoordinatedSplitLogs(env);
          setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
        } else {
          am.getRegionStates().logSplitting(this.serverName);
          addChildProcedure(createSplittingWalProcedures(env, false));
          setNextState(ServerCrashState.SERVER_CRASH_DELETE_SPLIT_WALS_DIR);
        }
        break;
      case SERVER_CRASH_DELETE_SPLIT_WALS_DIR:
        if (isSplittingDone(env, false)) {
          cleanupSplitDir(env);
          setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
          am.getRegionStates().logSplit(this.serverName);
        } else {
          setNextState(ServerCrashState.SERVER_CRASH_SPLIT_LOGS);
        }
        break;
      case SERVER_CRASH_ASSIGN:
        // Filter changes this.regionsOnCrashedServer.
        if (filterDefaultMetaRegions()) {
          if (LOG.isTraceEnabled()) {
            LOG.trace("Assigning regions " + RegionInfo.getShortNameToLog(regionsOnCrashedServer) +
              ", " + this + "; cycles=" + getCycles());
          }
          assignRegions(env, regionsOnCrashedServer);
        }
        setNextState(ServerCrashState.SERVER_CRASH_CLAIM_REPLICATION_QUEUES);
        break;
      case SERVER_CRASH_HANDLE_RIT2:
        // Noop. Left in place because we used to call handleRIT here for a second time
        // but no longer necessary since HBASE-20634.
        setNextState(ServerCrashState.SERVER_CRASH_CLAIM_REPLICATION_QUEUES);
        break;
      case SERVER_CRASH_CLAIM_REPLICATION_QUEUES:
        addChildProcedure(new ClaimReplicationQueuesProcedure(serverName));
        setNextState(ServerCrashState.SERVER_CRASH_FINISH);
        break;
      case SERVER_CRASH_FINISH:
        LOG.info("removed crashed server {} after splitting done", serverName);
        services.getAssignmentManager().getRegionStates().removeServer(serverName);
        updateProgress(true);
        return Flow.NO_MORE_STATE;
      default:
        throw new UnsupportedOperationException("unhandled state=" + state);
    }
  } catch (IOException e) {
    LOG.warn("Failed state=" + state + ", retry " + this + "; cycles=" + getCycles(), e);
  }
  return Flow.HAS_MORE_STATE;
}
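The method above is the heart of a state-machine procedure: each invocation handles the current state, schedules the next one via setNextState, and returns Flow.HAS_MORE_STATE until the terminal state returns Flow.NO_MORE_STATE (suspending or spawning child procedures along the way). Below is a minimal, self-contained sketch of that control flow; the RecoverySketch class, its State and Flow enums, and the driver loop are hypothetical stand-ins, not HBase classes.

// Hypothetical stand-alone illustration of the execute-from-state pattern used above.
// None of these types are HBase classes; they only mirror the control flow.
public class RecoverySketch {

  enum Flow { HAS_MORE_STATE, NO_MORE_STATE }

  enum State { START, SPLIT_LOGS, ASSIGN, FINISH }

  private State state = State.START;

  // One call handles the current state, picks the next one, and reports whether
  // the driver (the procedure executor in HBase) should call again.
  Flow executeFromState(State state) {
    switch (state) {
      case START:
        System.out.println("start recovery");
        setNextState(State.SPLIT_LOGS);
        break;
      case SPLIT_LOGS:
        System.out.println("split write-ahead logs");
        setNextState(State.ASSIGN);
        break;
      case ASSIGN:
        System.out.println("reassign regions");
        setNextState(State.FINISH);
        break;
      case FINISH:
        System.out.println("done");
        return Flow.NO_MORE_STATE;
    }
    return Flow.HAS_MORE_STATE;
  }

  void setNextState(State next) {
    this.state = next;
  }

  public static void main(String[] args) {
    RecoverySketch sketch = new RecoverySketch();
    // The driver loop: keep re-invoking until the terminal state is reached.
    while (sketch.executeFromState(sketch.state) == Flow.HAS_MORE_STATE) {
      // in HBase, each step may also suspend, yield, or add child procedures
    }
  }
}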
Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
The class RSGroupUtil, method listTablesInRSGroup.
public static List<TableName> listTablesInRSGroup(MasterServices master, String groupName)
    throws IOException {
  List<TableName> tables = new ArrayList<>();
  boolean isDefaultGroup = RSGroupInfo.DEFAULT_GROUP.equals(groupName);
  for (TableDescriptor td : master.getTableDescriptors().getAll().values()) {
    // no config means in default group
    if (RSGroupUtil.getRSGroupInfo(master, master.getRSGroupInfoManager(), td.getTableName())
        .map(g -> g.getName().equals(groupName)).orElse(isDefaultGroup)) {
      tables.add(td.getTableName());
    }
  }
  return tables;
}
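A hedged usage sketch: code that already holds a MasterServices reference on the master side can call the helper directly. The 'master' variable and the group name "my_group" below are illustrative assumptions, not names from the source.

// Illustrative only: 'master' is an assumed MasterServices reference that master-side
// code (for example a MasterObserver or another master service) already holds;
// "my_group" is a hypothetical RegionServer group name.
List<TableName> tablesInGroup = RSGroupUtil.listTablesInRSGroup(master, "my_group");
List<TableName> tablesInDefault =
  RSGroupUtil.listTablesInRSGroup(master, RSGroupInfo.DEFAULT_GROUP);

Tables with no RS group configuration match only when groupName is the default group, which is what the orElse(isDefaultGroup) fallback in the loop above encodes.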
Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
The class TestCoprocessorConfiguration, method testMasterCoprocessorHostDefaults.
@Test
public void testMasterCoprocessorHostDefaults() throws Exception {
  Configuration conf = new Configuration(CONF);
  MasterServices masterServices = mock(MasterServices.class);
  systemCoprocessorLoaded.set(false);
  new MasterCoprocessorHost(masterServices, conf);
  assertEquals("System coprocessors loading default was not honored",
    CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get());
}
Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
The class TestMasterQuotaManager, method testUninitializedQuotaManangerDoesNotFail.
@Test
public void testUninitializedQuotaManangerDoesNotFail() {
  MasterServices masterServices = mock(MasterServices.class);
  MasterQuotaManager manager = new MasterQuotaManager(masterServices);
  manager.addRegionSize(null, 0, 0);
  assertNotNull(manager.snapshotRegionSizes());
}
Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
The class TestMasterQuotaManager, method testOldEntriesRemoved.
@Test
public void testOldEntriesRemoved() {
  MasterServices masterServices = mock(MasterServices.class);
  MasterQuotaManager manager = new MasterQuotaManager(masterServices);
  manager.initializeRegionSizes();
  // Mock out some regions
  TableName tableName = TableName.valueOf("foo");
  RegionInfo region1 = createRegionInfo(tableName, null, toBytes("a"));
  RegionInfo region2 = createRegionInfo(tableName, toBytes("a"), toBytes("b"));
  RegionInfo region3 = createRegionInfo(tableName, toBytes("b"), toBytes("c"));
  RegionInfo region4 = createRegionInfo(tableName, toBytes("c"), toBytes("d"));
  RegionInfo region5 = createRegionInfo(tableName, toBytes("d"), null);
  final long size = 0;
  long time1 = 10;
  manager.addRegionSize(region1, size, time1);
  manager.addRegionSize(region2, size, time1);
  long time2 = 20;
  manager.addRegionSize(region3, size, time2);
  manager.addRegionSize(region4, size, time2);
  long time3 = 30;
  manager.addRegionSize(region5, size, time3);
  assertEquals(5, manager.snapshotRegionSizes().size());
  QuotaObserverChore chore = mock(QuotaObserverChore.class);
  // Prune nothing
  assertEquals(0, manager.pruneEntriesOlderThan(0, chore));
  assertEquals(5, manager.snapshotRegionSizes().size());
  assertEquals(0, manager.pruneEntriesOlderThan(10, chore));
  assertEquals(5, manager.snapshotRegionSizes().size());
  // Prune the elements at time1
  assertEquals(2, manager.pruneEntriesOlderThan(15, chore));
  assertEquals(3, manager.snapshotRegionSizes().size());
  // Prune the elements at time2
  assertEquals(2, manager.pruneEntriesOlderThan(30, chore));
  assertEquals(1, manager.snapshotRegionSizes().size());
}
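The test relies on two small helpers from its test class that are not shown here: toBytes is a static import of Bytes.toBytes, and createRegionInfo builds a RegionInfo for the given key range. A plausible sketch of the latter, assuming it is a thin wrapper over RegionInfoBuilder, is:

// Assumed shape of the helper used above: a thin wrapper over RegionInfoBuilder.
// (The actual helper in TestMasterQuotaManager may differ.)
private static RegionInfo createRegionInfo(TableName tableName, byte[] startKey, byte[] endKey) {
  return RegionInfoBuilder.newBuilder(tableName)
    .setStartKey(startKey)
    .setEndKey(endKey)
    .build();
}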