Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
From class ServerCrashProcedure, method executeFromState:
@Override
protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState state) throws ProcedureYieldException {
  if (LOG.isTraceEnabled()) {
    LOG.trace(state);
  }
  // Keep running count of cycles
  if (state.ordinal() != this.previousState) {
    this.previousState = state.ordinal();
    this.cycles = 0;
  } else {
    this.cycles++;
  }
  MasterServices services = env.getMasterServices();
  // Is master fully online? If not, yield. No processing of servers unless master is up.
  if (!services.getAssignmentManager().isFailoverCleanupDone()) {
    throwProcedureYieldException("Waiting on master failover to complete");
  }
  // If we have not yet notified that we are processing a dead server, do so now.
  if (!notifiedDeadServer) {
    services.getServerManager().getDeadServers().notifyServer(serverName);
    notifiedDeadServer = true;
  }
  try {
    switch (state) {
      case SERVER_CRASH_START:
        LOG.info("Start processing crashed " + this.serverName);
        start(env);
        // If carrying meta, process it first. Else, get list of regions on crashed server.
        if (this.carryingMeta)
          setNextState(ServerCrashState.SERVER_CRASH_PROCESS_META);
        else
          setNextState(ServerCrashState.SERVER_CRASH_GET_REGIONS);
        break;
      case SERVER_CRASH_GET_REGIONS:
        // If hbase:meta is not assigned, yield.
        if (!isMetaAssignedQuickTest(env)) {
          // isMetaAssignedQuickTest does not really wait. Let's delay a little before
          // another round of execution.
          long wait = env.getMasterConfiguration().getLong(KEY_SHORT_WAIT_ON_META, DEFAULT_SHORT_WAIT_ON_META);
          wait = wait / 10;
          Thread.sleep(wait);
          throwProcedureYieldException("Waiting on hbase:meta assignment");
        }
        this.regionsOnCrashedServer = services.getAssignmentManager().getRegionStates().getServerRegions(this.serverName);
        // distributed log splitting (DLS) vs distributed log replay (DLR).
        if (!this.shouldSplitWal) {
          setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
        } else if (this.distributedLogReplay) {
          setNextState(ServerCrashState.SERVER_CRASH_PREPARE_LOG_REPLAY);
        } else {
          setNextState(ServerCrashState.SERVER_CRASH_SPLIT_LOGS);
        }
        break;
      case SERVER_CRASH_PROCESS_META:
        // If we fail processing hbase:meta, yield.
        if (!processMeta(env)) {
          throwProcedureYieldException("Waiting on regions-in-transition to clear");
        }
        setNextState(ServerCrashState.SERVER_CRASH_GET_REGIONS);
        break;
      case SERVER_CRASH_PREPARE_LOG_REPLAY:
        prepareLogReplay(env, this.regionsOnCrashedServer);
        setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
        break;
      case SERVER_CRASH_SPLIT_LOGS:
        splitLogs(env);
        // If DLR, go to FINISH. Otherwise (DLS), go to SERVER_CRASH_ASSIGN.
        if (this.distributedLogReplay)
          setNextState(ServerCrashState.SERVER_CRASH_FINISH);
        else
          setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
        break;
      case SERVER_CRASH_ASSIGN:
        List<HRegionInfo> regionsToAssign = calcRegionsToAssign(env);
        // Assign may not be idempotent. SSH used to requeue the SSH if we got an IOE assigning,
        // which is what we are mimicking here, but it looks prone to double assignment if assign
        // fails midway. TODO: Test.
        // If no regions to assign, skip assign and skip to the finish.
        boolean regions = regionsToAssign != null && !regionsToAssign.isEmpty();
        if (regions) {
          this.regionsAssigned = regionsToAssign;
          if (!assign(env, regionsToAssign)) {
            throwProcedureYieldException("Failed assign; will retry");
          }
        }
        if (this.shouldSplitWal && distributedLogReplay) {
          // Take this route even if there are apparently no regions assigned. This may be our
          // second time through here; i.e. we assigned and crashed just about here. On second
          // time through, there will be no regions because we assigned them in the previous step.
          // Even though no regions, we need to go through here to clean up the DLR zk markers.
          setNextState(ServerCrashState.SERVER_CRASH_WAIT_ON_ASSIGN);
        } else {
          setNextState(ServerCrashState.SERVER_CRASH_FINISH);
        }
        break;
      case SERVER_CRASH_WAIT_ON_ASSIGN:
        // If the wait on assign failed, yield -- if we have regions to assign.
        if (this.regionsAssigned != null && !this.regionsAssigned.isEmpty()) {
          if (!waitOnAssign(env, this.regionsAssigned)) {
            throwProcedureYieldException("Waiting on region assign");
          }
        }
        setNextState(ServerCrashState.SERVER_CRASH_SPLIT_LOGS);
        break;
      case SERVER_CRASH_FINISH:
        LOG.info("Finished processing of crashed " + serverName);
        services.getServerManager().getDeadServers().finish(serverName);
        return Flow.NO_MORE_STATE;
      default:
        throw new UnsupportedOperationException("unhandled state=" + state);
    }
  } catch (IOException e) {
    LOG.warn("Failed serverName=" + this.serverName + ", state=" + state + "; retry", e);
  } catch (InterruptedException e) {
    // TODO: Make executor allow IEs coming up out of execute.
    LOG.warn("Interrupted serverName=" + this.serverName + ", state=" + state + "; retry", e);
    Thread.currentThread().interrupt();
  }
  return Flow.HAS_MORE_STATE;
}
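The procedure above reaches MasterServices through MasterProcedureEnv and then drills into the assignment manager, region states, and dead-server bookkeeping. Below is a minimal sketch, not part of the HBase code base, that isolates those same lookups in a standalone helper; the class name, method name, and null-return convention are illustrative assumptions.

import java.util.List;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.MasterServices;

// Hypothetical helper; only the MasterServices calls mirrored from the procedure above are real.
public class CrashedServerInspector {

  /**
   * Returns the regions that were hosted on the crashed server, or null if master
   * failover cleanup is not done yet (the procedure above yields in that case).
   */
  public static List<HRegionInfo> regionsOnCrashedServer(MasterServices services, ServerName crashed) {
    if (!services.getAssignmentManager().isFailoverCleanupDone()) {
      return null; // caller should retry later, mirroring the yield in the procedure
    }
    // Let the dead-server bookkeeping know this server is being processed,
    // as the procedure does before entering its switch.
    services.getServerManager().getDeadServers().notifyServer(crashed);
    return services.getAssignmentManager().getRegionStates().getServerRegions(crashed);
  }
}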
Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
From class TestBaseLoadBalancer, method testRandomAssignment:
private void testRandomAssignment(int numberOfIdleServers) throws Exception {
  assert numberOfIdleServers > 0;
  List<ServerName> idleServers = new ArrayList<>(numberOfIdleServers);
  for (int i = 0; i != numberOfIdleServers; ++i) {
    idleServers.add(ServerName.valueOf("server-" + i, 1000, 1L));
  }
  List<ServerName> allServers = new ArrayList<>(idleServers.size() + 1);
  allServers.add(ServerName.valueOf("server-" + numberOfIdleServers, 1000, 1L));
  allServers.addAll(idleServers);
  LoadBalancer balancer = new MockBalancer() {
    @Override
    public boolean shouldBeOnMaster(HRegionInfo region) {
      return false;
    }
  };
  Configuration conf = HBaseConfiguration.create();
  conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
  balancer.setConf(conf);
  ServerManager sm = Mockito.mock(ServerManager.class);
  Mockito.when(sm.getOnlineServersListWithPredicator(allServers, BaseLoadBalancer.IDLE_SERVER_PREDICATOR)).thenReturn(idleServers);
  MasterServices services = Mockito.mock(MasterServices.class);
  Mockito.when(services.getServerManager()).thenReturn(sm);
  balancer.setMasterServices(services);
  HRegionInfo hri1 = new HRegionInfo(TableName.valueOf(name.getMethodName()), "key1".getBytes(), "key2".getBytes(), false, 100);
  assertNull(balancer.randomAssignment(hri1, Collections.EMPTY_LIST));
  assertNull(balancer.randomAssignment(hri1, null));
  for (int i = 0; i != 3; ++i) {
    ServerName sn = balancer.randomAssignment(hri1, allServers);
    assertTrue("actual:" + sn + ", expected one of:" + idleServers, idleServers.contains(sn));
  }
}
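The core of this test is a two-level Mockito setup: a ServerManager mock that returns only the idle servers, wrapped in a MasterServices mock that is handed to the balancer. A minimal sketch of that wiring, with an illustrative helper class and method name, might look like this:

import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.ServerManager;
import org.mockito.Mockito;

// Hypothetical test helper; the mocking calls are the same ones used in the test above.
public class BalancerMockSupport {

  /** Wraps a mocked ServerManager in a MasterServices mock, as the test above does. */
  public static MasterServices mockMasterServices(ServerManager serverManager) {
    MasterServices services = Mockito.mock(MasterServices.class);
    Mockito.when(services.getServerManager()).thenReturn(serverManager);
    return services;
  }
}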
Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
From class TestBaseLoadBalancer, method beforeAllTests:
@BeforeClass
public static void beforeAllTests() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
  loadBalancer = new MockBalancer();
  loadBalancer.setConf(conf);
  MasterServices st = Mockito.mock(MasterServices.class);
  Mockito.when(st.getServerName()).thenReturn(master);
  loadBalancer.setMasterServices(st);
  // Set up the rack topologies (5 machines per rack)
  rackManager = Mockito.mock(RackManager.class);
  for (int i = 0; i < NUM_SERVERS; i++) {
    servers[i] = ServerName.valueOf("foo" + i + ":1234", -1);
    if (i < 5) {
      Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack1");
    }
    if (i >= 5 && i < 10) {
      Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack2");
    }
    if (i >= 10) {
      Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack3");
    }
  }
}
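The rack assignment above is hard-coded in blocks of five servers per rack. A small hedged sketch, where the helper class name and the serversPerRack parameter are assumptions, stubs the same RackManager.getRack(ServerName) call for an arbitrary rack size:

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.RackManager;
import org.mockito.Mockito;

// Hypothetical helper; only RackManager.getRack(ServerName) is taken from the test above.
public class RackTopologyStub {

  /** Stubs getRack() so every consecutive block of serversPerRack servers shares a rack. */
  public static RackManager mockRacks(ServerName[] servers, int serversPerRack) {
    RackManager rackManager = Mockito.mock(RackManager.class);
    for (int i = 0; i < servers.length; i++) {
      String rack = "rack" + (i / serversPerRack + 1);
      Mockito.when(rackManager.getRack(servers[i])).thenReturn(rack);
    }
    return rackManager;
  }
}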
Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
From class TestSimpleRegionNormalizer, method setupMocksForNormalizer:
protected void setupMocksForNormalizer(Map<byte[], Integer> regionSizes, List<HRegionInfo> hris) {
  masterServices = Mockito.mock(MasterServices.class, RETURNS_DEEP_STUBS);
  masterRpcServices = Mockito.mock(MasterRpcServices.class, RETURNS_DEEP_STUBS);
  // for simplicity all regions are assumed to be on one server; doesn't matter to us
  ServerName sn = ServerName.valueOf("localhost", 0, 1L);
  when(masterServices.getAssignmentManager().getRegionStates().getRegionsOfTable(any(TableName.class))).thenReturn(hris);
  when(masterServices.getAssignmentManager().getRegionStates().getRegionServerOfRegion(any(HRegionInfo.class))).thenReturn(sn);
  for (Map.Entry<byte[], Integer> region : regionSizes.entrySet()) {
    RegionLoad regionLoad = Mockito.mock(RegionLoad.class);
    when(regionLoad.getName()).thenReturn(region.getKey());
    when(regionLoad.getStorefileSizeMB()).thenReturn(region.getValue());
    when(masterServices.getServerManager().getLoad(sn).getRegionsLoad().get(region.getKey())).thenReturn(regionLoad);
  }
  try {
    when(masterRpcServices.isSplitOrMergeEnabled(any(RpcController.class), any(IsSplitOrMergeEnabledRequest.class))).thenReturn(IsSplitOrMergeEnabledResponse.newBuilder().setEnabled(true).build());
  } catch (ServiceException se) {
    LOG.debug("error setting isSplitOrMergeEnabled switch", se);
  }
  normalizer.setMasterServices(masterServices);
  normalizer.setMasterRpcServices(masterRpcServices);
}
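The key trick above is RETURNS_DEEP_STUBS, which lets a single MasterServices mock answer chained calls such as getAssignmentManager().getRegionStates() without stubbing each intermediate object by hand. A minimal sketch of that pattern, with an illustrative class and method name:

import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.List;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.MasterServices;

// Hypothetical helper showing the deep-stub pattern used by the test above.
public class NormalizerMockSketch {

  /** Deep-stubbed MasterServices whose region list for any table is the given list. */
  public static MasterServices mockWithRegions(List<HRegionInfo> hris) {
    MasterServices services = mock(MasterServices.class, RETURNS_DEEP_STUBS);
    when(services.getAssignmentManager().getRegionStates().getRegionsOfTable(any(TableName.class)))
        .thenReturn(hris);
    return services;
  }
}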
Use of org.apache.hadoop.hbase.master.MasterServices in project hbase by apache.
From class TestCoprocessorConfiguration, method testMasterCoprocessorHostDefaults:
@Test
public void testMasterCoprocessorHostDefaults() throws Exception {
  Configuration conf = new Configuration(CONF);
  MasterServices masterServices = mock(MasterServices.class);
  systemCoprocessorLoaded.set(false);
  new MasterCoprocessorHost(masterServices, conf);
  assertEquals("System coprocessors loading default was not honored", systemCoprocessorLoaded.get(), CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED);
}
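The test above exercises the default given by CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED. Below is a minimal sketch, assuming the standard switch exposed as CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, of how a companion test could construct the host with system coprocessor loading explicitly turned off; the helper class and method names are illustrative.

import static org.mockito.Mockito.mock;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterServices;

// Hypothetical helper; the constructor call mirrors the test above.
public class CoprocessorHostSketch {

  /** Builds a MasterCoprocessorHost with system coprocessor loading switched off. */
  public static MasterCoprocessorHost hostWithCoprocessorsDisabled(Configuration base) {
    Configuration conf = new Configuration(base);
    // Assumed key: CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY ("hbase.coprocessor.enabled").
    conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, false);
    return new MasterCoprocessorHost(mock(MasterServices.class), conf);
  }
}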