Use of org.apache.hadoop.hbase.procedure2.ProcedureEvent in project hbase by apache.
The class TestSyncReplicationReplayWALManager, method setUp.
@Before
public void setUp() throws IOException, ReplicationException {
  wokenProcedures = new ArrayDeque<>();
  onlineServers = new HashSet<>();
  listeners = new ArrayList<>();
  ServerManager serverManager = mock(ServerManager.class);
  doAnswer(inv -> listeners.add(inv.getArgument(0))).when(serverManager)
    .registerListener(any(ServerListener.class));
  ServerMetrics serverMetrics = mock(ServerMetrics.class);
  doAnswer(
    inv -> onlineServers.stream().collect(Collectors.toMap(Function.identity(), k -> serverMetrics)))
      .when(serverManager).getOnlineServers();
  MasterFileSystem mfs = mock(MasterFileSystem.class);
  when(mfs.getFileSystem()).thenReturn(UTIL.getTestFileSystem());
  when(mfs.getWALRootDir()).thenReturn(new Path("/"));
  scheduler = mock(MasterProcedureScheduler.class);
  doAnswer(new Answer<Void>() {

    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      ProcedureEvent<?> event = ((ProcedureEvent<?>[]) invocation.getArgument(0))[0];
      event.wakeInternal(new MasterProcedureScheduler(pid -> null) {

        @Override
        public void addFront(Iterator<Procedure> procedureIterator) {
          procedureIterator.forEachRemaining(wokenProcedures::add);
        }
      });
      return null;
    }
  }).when(scheduler).wakeEvents(any(ProcedureEvent[].class));
  MasterProcedureEnv env = mock(MasterProcedureEnv.class);
  when(env.getProcedureScheduler()).thenReturn(scheduler);
  ProcedureExecutor<MasterProcedureEnv> procExec = mock(ProcedureExecutor.class);
  when(procExec.getEnvironment()).thenReturn(env);
  MasterServices services = mock(MasterServices.class);
  when(services.getServerManager()).thenReturn(serverManager);
  when(services.getMasterFileSystem()).thenReturn(mfs);
  when(services.getMasterProcedureExecutor()).thenReturn(procExec);
  manager = new SyncReplicationReplayWALManager(services);
  assertEquals(1, listeners.size());
}
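In this setup the real MasterProcedureScheduler is replaced by a mock whose wakeEvents answer calls ProcedureEvent.wakeInternal against a throwaway scheduler that overrides addFront, so every procedure released by a wake-up lands in the wokenProcedures queue where the test can inspect it. A minimal, self-contained sketch of that capture pattern (plain Java with hypothetical types, not the actual HBase classes) looks like this:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;

// Hypothetical, simplified model of the hand-off the Mockito answer above intercepts:
// waking an event pushes its suspended procedures into the scheduler via addFront(),
// and the test redirects addFront() into a plain queue it can assert on.
public class WakeCaptureSketch {

  interface Scheduler {
    // stands in for MasterProcedureScheduler#addFront
    void addFront(Iterator<String> procedures);
  }

  static class Event {
    private final List<String> suspended = new ArrayList<>();

    void park(String proc) {
      suspended.add(proc);
    }

    // roughly what ProcedureEvent#wakeInternal does: drain the suspend queue into the scheduler
    void wake(Scheduler scheduler) {
      scheduler.addFront(suspended.iterator());
      suspended.clear();
    }
  }

  public static void main(String[] args) {
    Queue<String> wokenProcedures = new ArrayDeque<>();
    Scheduler capturing = it -> it.forEachRemaining(wokenProcedures::add);

    Event event = new Event();
    event.park("proc-1");
    event.park("proc-2");
    event.wake(capturing);

    System.out.println(wokenProcedures); // prints [proc-1, proc-2]
  }
}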
Use of org.apache.hadoop.hbase.procedure2.ProcedureEvent in project hbase by apache.
The class RegionRemoteProcedureBase, method execute.
@Override
protected Procedure<MasterProcedureEnv>[] execute(MasterProcedureEnv env)
    throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
  RegionStateNode regionNode = getRegionNode(env);
  regionNode.lock();
  try {
    switch (state) {
      case REGION_REMOTE_PROCEDURE_DISPATCH: {
        // The code which wakes us up also needs to lock the RSN so here we do not need to
        // synchronize on the event.
        ProcedureEvent<?> event = regionNode.getProcedureEvent();
        try {
          env.getRemoteDispatcher().addOperationToNode(targetServer, this);
        } catch (FailedRemoteDispatchException e) {
          LOG.warn("Can not add remote operation {} for region {} to server {}, this is usually " +
            "because the server is already dead, give up and mark the procedure as complete, " +
            "the parent procedure will take care of this.", this, region, targetServer, e);
          unattach(env);
          return null;
        }
        event.suspend();
        event.suspendIfNotReady(this);
        throw new ProcedureSuspendedException();
      }
      case REGION_REMOTE_PROCEDURE_REPORT_SUCCEED:
        env.getAssignmentManager().persistToMeta(regionNode);
        unattach(env);
        return null;
      case REGION_REMOTE_PROCEDURE_DISPATCH_FAIL:
        // the remote call failed, so we do not need to change the region state, just return.
        unattach(env);
        return null;
      case REGION_REMOTE_PROCEDURE_SERVER_CRASH:
        env.getAssignmentManager().regionClosedAbnormally(regionNode);
        unattach(env);
        return null;
      default:
        throw new IllegalStateException("Unknown state: " + state);
    }
  } catch (IOException e) {
    if (retryCounter == null) {
      retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration());
    }
    long backoff = retryCounter.getBackoffTimeAndIncrementAttempts();
    LOG.warn("Failed updating meta, suspend {}secs {}; {};", backoff / 1000, this, regionNode, e);
    setTimeout(Math.toIntExact(backoff));
    setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT);
    skipPersistence();
    throw new ProcedureSuspendedException();
  } finally {
    regionNode.unlock();
  }
}
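The DISPATCH branch above shows the typical ProcedureV2 suspend pattern: the remote call is queued first, then the procedure parks itself on the region's ProcedureEvent. suspend() marks the event not ready, suspendIfNotReady(this) adds the procedure to the event's suspend queue, and throwing ProcedureSuspendedException hands control back to the executor. When the remote regionserver later reports back (or crashes), the event is woken and execute() runs again in one of the later states. A rough, self-contained model of that round trip, using hypothetical names rather than the real executor:

import java.util.ArrayDeque;
import java.util.Queue;

// Hypothetical model of the suspend/resume round trip: a procedure that parks on an
// event is only re-executed after somebody wakes the event.
public class SuspendResumeSketch {

  static class Event {
    private boolean ready = true;
    private Runnable parked;

    // roughly suspend(): mark the event not ready
    void suspend() {
      ready = false;
    }

    // roughly suspendIfNotReady(proc): park the procedure if the event is not ready
    boolean suspendIfNotReady(Runnable proc) {
      if (ready) {
        return false;
      }
      parked = proc;
      return true;
    }

    // roughly wake(scheduler): mark ready again and reschedule whatever was parked
    void wake(Queue<Runnable> runQueue) {
      ready = true;
      if (parked != null) {
        runQueue.add(parked);
        parked = null;
      }
    }
  }

  public static void main(String[] args) {
    Queue<Runnable> runQueue = new ArrayDeque<>();
    Event regionEvent = new Event();

    // the "procedure" that would be re-executed after the remote call reports back
    Runnable reExecute = () -> System.out.println("execute() runs again after the wake-up");

    regionEvent.suspend();
    regionEvent.suspendIfNotReady(reExecute); // park; the executor moves on to other work

    // ... later: the report (success, failure, or server crash) wakes the event ...
    regionEvent.wake(runQueue);
    runQueue.poll().run();
  }
}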
Use of org.apache.hadoop.hbase.procedure2.ProcedureEvent in project hbase by apache.
The class AssignmentManager, method acceptPlan.
private void acceptPlan(final HashMap<RegionInfo, RegionStateNode> regions,
    final Map<ServerName, List<RegionInfo>> plan) throws HBaseIOException {
  final ProcedureEvent<?>[] events = new ProcedureEvent[regions.size()];
  final long st = EnvironmentEdgeManager.currentTime();
  if (plan.isEmpty()) {
    throw new HBaseIOException("unable to compute plans for regions=" + regions.size());
  }
  int evcount = 0;
  for (Map.Entry<ServerName, List<RegionInfo>> entry : plan.entrySet()) {
    final ServerName server = entry.getKey();
    for (RegionInfo hri : entry.getValue()) {
      final RegionStateNode regionNode = regions.get(hri);
      regionNode.setRegionLocation(server);
      if (server.equals(LoadBalancer.BOGUS_SERVER_NAME) && regionNode.isSystemTable()) {
        assignQueueLock.lock();
        try {
          pendingAssignQueue.add(regionNode);
        } finally {
          assignQueueLock.unlock();
        }
      } else {
        events[evcount++] = regionNode.getProcedureEvent();
      }
    }
  }
  ProcedureEvent.wakeEvents(getProcedureScheduler(), events);
  final long et = EnvironmentEdgeManager.currentTime();
  if (LOG.isTraceEnabled()) {
    LOG.trace("ASSIGN ACCEPT " + events.length + " -> " + StringUtils.humanTimeDiff(et - st));
  }
}
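Here the events are not woken one at a time: each assigned region's ProcedureEvent is collected into the events array and released in a single call to the static ProcedureEvent.wakeEvents(scheduler, events). The array is sized to regions.size(), so slots belonging to regions that were diverted to pendingAssignQueue remain null, and the batch wake presumably has to tolerate those gaps. A short sketch of such a null-tolerant batch wake-up, again with simplified hypothetical types:

import java.util.ArrayDeque;
import java.util.Queue;

// Hypothetical sketch of a batch wake: take one lock (here just a synchronized block),
// skip unused null slots, and wake every collected event in a single pass.
public class BatchWakeSketch {

  static class Event {
    private Runnable parked;

    void park(Runnable proc) {
      parked = proc;
    }

    void wakeInto(Queue<Runnable> runQueue) {
      if (parked != null) {
        runQueue.add(parked);
        parked = null;
      }
    }
  }

  static final Object schedLock = new Object();

  static void wakeEvents(Queue<Runnable> runQueue, Event... events) {
    synchronized (schedLock) {      // one lock acquisition for the whole batch
      for (Event event : events) {
        if (event == null) {        // unused trailing slots are simply skipped
          continue;
        }
        event.wakeInto(runQueue);
      }
    }
  }

  public static void main(String[] args) {
    Queue<Runnable> runQueue = new ArrayDeque<>();
    Event[] events = new Event[3];  // sized for 3 regions, only 2 actually used
    events[0] = new Event();
    events[1] = new Event();
    events[0].park(() -> System.out.println("assign region A"));
    events[1].park(() -> System.out.println("assign region B"));

    wakeEvents(runQueue, events);
    runQueue.forEach(Runnable::run);
  }
}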
Use of org.apache.hadoop.hbase.procedure2.ProcedureEvent in project hbase by apache.
The class TestMasterProcedureScheduler, method testSuspendedProcedure.
@Test
public void testSuspendedProcedure() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  queue.addBack(
    new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.READ));
  queue.addBack(
    new TestTableProcedure(2, tableName, TableProcedureInterface.TableOperationType.READ));
  Procedure<?> proc = queue.poll();
  assertEquals(1, proc.getProcId());
  // suspend
  ProcedureEvent<?> event = new ProcedureEvent<>("testSuspendedProcedureEvent");
  assertEquals(true, event.suspendIfNotReady(proc));
  proc = queue.poll();
  assertEquals(2, proc.getProcId());
  assertEquals(null, queue.poll(0));
  // resume
  event.wake(queue);
  proc = queue.poll();
  assertEquals(1, proc.getProcId());
  assertEquals(null, queue.poll(0));
}
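This last test pins down the contract the other snippets rely on: a freshly created event is not ready, so suspendIfNotReady(proc) returns true and parks procedure 1 while the queue keeps serving procedure 2; wake(queue) then hands the suspended procedure back to the scheduler (via addFront, as the first snippet showed), so the next poll() returns procedure 1 again.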