Search in sources :

Example 1 with ProcedureEvent

use of org.apache.hadoop.hbase.procedure2.ProcedureEvent in project hbase by apache.

The method setUp of the class TestSyncReplicationReplayWALManager:

@Before
public void setUp() throws IOException, ReplicationException {
    wokenProcedures = new ArrayDeque<>();
    onlineServers = new HashSet<>();
    listeners = new ArrayList<>();
    // ServerManager mock: remembers every registered listener and reports the
    // current online servers, each mapped to one shared mocked ServerMetrics.
    ServerManager mockedServerManager = mock(ServerManager.class);
    doAnswer(inv -> listeners.add(inv.getArgument(0)))
        .when(mockedServerManager).registerListener(any(ServerListener.class));
    ServerMetrics sharedMetrics = mock(ServerMetrics.class);
    doAnswer(inv -> onlineServers.stream()
        .collect(Collectors.toMap(Function.identity(), sn -> sharedMetrics)))
        .when(mockedServerManager).getOnlineServers();
    // MasterFileSystem mock backed by the test filesystem, rooted at "/".
    MasterFileSystem mfs = mock(MasterFileSystem.class);
    when(mfs.getFileSystem()).thenReturn(UTIL.getTestFileSystem());
    when(mfs.getWALRootDir()).thenReturn(new Path("/"));
    scheduler = mock(MasterProcedureScheduler.class);
    // When an event is woken, redirect the wake into a throwaway scheduler
    // whose addFront just collects the procedures into wokenProcedures.
    doAnswer(invocation -> {
        ProcedureEvent<?> wokenEvent = ((ProcedureEvent<?>[]) invocation.getArgument(0))[0];
        wokenEvent.wakeInternal(new MasterProcedureScheduler(pid -> null) {

            @Override
            public void addFront(Iterator<Procedure> procedureIterator) {
                procedureIterator.forEachRemaining(wokenProcedures::add);
            }
        });
        return null;
    }).when(scheduler).wakeEvents(any(ProcedureEvent[].class));
    // Wire the mocked scheduler/executor into a MasterServices facade.
    MasterProcedureEnv env = mock(MasterProcedureEnv.class);
    when(env.getProcedureScheduler()).thenReturn(scheduler);
    ProcedureExecutor<MasterProcedureEnv> procExec = mock(ProcedureExecutor.class);
    when(procExec.getEnvironment()).thenReturn(env);
    MasterServices services = mock(MasterServices.class);
    when(services.getServerManager()).thenReturn(mockedServerManager);
    when(services.getMasterFileSystem()).thenReturn(mfs);
    when(services.getMasterProcedureExecutor()).thenReturn(procExec);
    manager = new SyncReplicationReplayWALManager(services);
    // The manager is expected to register exactly one server listener.
    assertEquals(1, listeners.size());
}
Also used : ServerManager(org.apache.hadoop.hbase.master.ServerManager) MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) ProcedureEvent(org.apache.hadoop.hbase.procedure2.ProcedureEvent) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) MasterProcedureScheduler(org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler) MasterServices(org.apache.hadoop.hbase.master.MasterServices) InvocationOnMock(org.mockito.invocation.InvocationOnMock) Iterator(java.util.Iterator) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) ServerListener(org.apache.hadoop.hbase.master.ServerListener) Before(org.junit.Before)

Example 2 with ProcedureEvent

use of org.apache.hadoop.hbase.procedure2.ProcedureEvent in project hbase by apache.

The method execute of the class RegionRemoteProcedureBase:

/**
 * Drives the remote-procedure state machine while holding the RegionStateNode lock.
 * In DISPATCH it sends the operation to the target server and suspends on the
 * region's event; the terminal states update meta / region state, unattach, and
 * finish. IO failures while updating meta are retried with exponential backoff.
 */
@Override
protected Procedure<MasterProcedureEnv>[] execute(MasterProcedureEnv env) throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
    RegionStateNode regionNode = getRegionNode(env);
    regionNode.lock();
    try {
        switch(state) {
            case REGION_REMOTE_PROCEDURE_DISPATCH:
                {
                    // The code which wakes us up also needs to lock the RSN so here we do not need to
                    // synchronize
                    // on the event.
                    ProcedureEvent<?> event = regionNode.getProcedureEvent();
                    try {
                        env.getRemoteDispatcher().addOperationToNode(targetServer, this);
                    } catch (FailedRemoteDispatchException e) {
                        // Dispatch failure usually means the target is gone; let the
                        // parent procedure deal with the region instead of retrying here.
                        LOG.warn("Can not add remote operation {} for region {} to server {}, this usually " + "because the server is already dead, give up and mark the procedure as complete, " + "the parent procedure will take care of this.", this, region, targetServer, e);
                        unattach(env);
                        return null;
                    }
                    // Mark the event not-ready, then park this procedure on it until the
                    // remote call reports back.
                    event.suspend();
                    event.suspendIfNotReady(this);
                    throw new ProcedureSuspendedException();
                }
            case REGION_REMOTE_PROCEDURE_REPORT_SUCCEED:
                env.getAssignmentManager().persistToMeta(regionNode);
                unattach(env);
                return null;
            case REGION_REMOTE_PROCEDURE_DISPATCH_FAIL:
                // the remote call is failed so we do not need to change the region state, just return.
                unattach(env);
                return null;
            case REGION_REMOTE_PROCEDURE_SERVER_CRASH:
                env.getAssignmentManager().regionClosedAbnormally(regionNode);
                unattach(env);
                return null;
            default:
                throw new IllegalStateException("Unknown state: " + state);
        }
    } catch (IOException e) {
        // Meta update failed: schedule a timed retry with exponential backoff
        // instead of failing the procedure.
        if (retryCounter == null) {
            retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration());
        }
        long backoff = retryCounter.getBackoffTimeAndIncrementAttempts();
        LOG.warn("Failed updating meta, suspend {}secs {}; {};", backoff / 1000, this, regionNode, e);
        setTimeout(Math.toIntExact(backoff));
        setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT);
        // Skip persisting: nothing durable changed, we only wait and retry.
        skipPersistence();
        throw new ProcedureSuspendedException();
    } finally {
        regionNode.unlock();
    }
}
Also used : FailedRemoteDispatchException(org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException) ProcedureEvent(org.apache.hadoop.hbase.procedure2.ProcedureEvent) IOException(java.io.IOException) ProcedureSuspendedException(org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException)

Example 3 with ProcedureEvent

use of org.apache.hadoop.hbase.procedure2.ProcedureEvent in project hbase by apache.

The method acceptPlan of the class AssignmentManager:

/**
 * Applies an assignment plan: sets each region's target server and wakes the
 * region's procedure event so its assign procedure can proceed. Regions mapped
 * to the bogus server that belong to system tables are re-queued for a later
 * assignment round instead of being woken.
 *
 * @param regions state nodes of the regions being assigned, keyed by RegionInfo
 * @param plan    target server to regions mapping produced by the balancer
 * @throws HBaseIOException if the balancer produced an empty plan
 */
private void acceptPlan(final HashMap<RegionInfo, RegionStateNode> regions, final Map<ServerName, List<RegionInfo>> plan) throws HBaseIOException {
    final ProcedureEvent<?>[] events = new ProcedureEvent[regions.size()];
    final long st = EnvironmentEdgeManager.currentTime();
    if (plan.isEmpty()) {
        throw new HBaseIOException("unable to compute plans for regions=" + regions.size());
    }
    int evcount = 0;
    for (Map.Entry<ServerName, List<RegionInfo>> entry : plan.entrySet()) {
        final ServerName server = entry.getKey();
        for (RegionInfo hri : entry.getValue()) {
            final RegionStateNode regionNode = regions.get(hri);
            regionNode.setRegionLocation(server);
            if (server.equals(LoadBalancer.BOGUS_SERVER_NAME) && regionNode.isSystemTable()) {
                // System table with no real target: defer to the pending queue
                // rather than waking its procedure now.
                assignQueueLock.lock();
                try {
                    pendingAssignQueue.add(regionNode);
                } finally {
                    assignQueueLock.unlock();
                }
            } else {
                events[evcount++] = regionNode.getProcedureEvent();
            }
        }
    }
    // Trailing slots may remain null when regions were deferred above;
    // NOTE(review): assumes wakeEvents tolerates null entries — confirm.
    ProcedureEvent.wakeEvents(getProcedureScheduler(), events);
    final long et = EnvironmentEdgeManager.currentTime();
    if (LOG.isTraceEnabled()) {
        // Log the number of events actually woken (evcount), not the array
        // capacity, which overcounts when regions were deferred.
        LOG.trace("ASSIGN ACCEPT " + evcount + " -> " + StringUtils.humanTimeDiff(et - st));
    }
}
Also used : ProcedureEvent(org.apache.hadoop.hbase.procedure2.ProcedureEvent) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) ServerName(org.apache.hadoop.hbase.ServerName) List(java.util.List) ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) Map(java.util.Map) HashMap(java.util.HashMap)

Example 4 with ProcedureEvent

use of org.apache.hadoop.hbase.procedure2.ProcedureEvent in project hbase by apache.

The method testSuspendedProcedure of the class TestMasterProcedureScheduler:

@Test
public void testSuspendedProcedure() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Two read procedures on the same table, polled in insertion order.
    queue.addBack(new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.READ));
    queue.addBack(new TestTableProcedure(2, tableName, TableProcedureInterface.TableOperationType.READ));
    Procedure<?> polled = queue.poll();
    assertEquals(1, polled.getProcId());
    // Suspend the first procedure on a fresh event; only proc 2 remains pollable.
    ProcedureEvent<?> event = new ProcedureEvent<>("testSuspendedProcedureEvent");
    assertEquals(true, event.suspendIfNotReady(polled));
    polled = queue.poll();
    assertEquals(2, polled.getProcId());
    assertEquals(null, queue.poll(0));
    // Waking the event re-queues the suspended procedure at the front.
    event.wake(queue);
    polled = queue.poll();
    assertEquals(1, polled.getProcId());
    assertEquals(null, queue.poll(0));
}
Also used : TableName(org.apache.hadoop.hbase.TableName) ProcedureEvent(org.apache.hadoop.hbase.procedure2.ProcedureEvent) Test(org.junit.Test)

Aggregations

ProcedureEvent (org.apache.hadoop.hbase.procedure2.ProcedureEvent)4 IOException (java.io.IOException)1 ArrayList (java.util.ArrayList)1 HashMap (java.util.HashMap)1 Iterator (java.util.Iterator)1 List (java.util.List)1 Map (java.util.Map)1 Path (org.apache.hadoop.fs.Path)1 HBaseIOException (org.apache.hadoop.hbase.HBaseIOException)1 ServerMetrics (org.apache.hadoop.hbase.ServerMetrics)1 ServerName (org.apache.hadoop.hbase.ServerName)1 TableName (org.apache.hadoop.hbase.TableName)1 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)1 MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem)1 MasterServices (org.apache.hadoop.hbase.master.MasterServices)1 ServerListener (org.apache.hadoop.hbase.master.ServerListener)1 ServerManager (org.apache.hadoop.hbase.master.ServerManager)1 MasterProcedureEnv (org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv)1 MasterProcedureScheduler (org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler)1 FailedRemoteDispatchException (org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException)1