Search in sources :

Example 16 with MasterServices

Use of org.apache.hadoop.hbase.master.MasterServices in the Apache HBase project.

Example source: the ServerCrashProcedure class, method executeFromState.

@Override
protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState state) throws ProcedureSuspendedException, ProcedureYieldException {
    final MasterServices services = env.getMasterServices();
    final AssignmentManager am = env.getAssignmentManager();
    updateProgress(true);
    // Server gets removed from processing list below on procedure successful finish.
    if (!notifiedDeadServer) {
        notifiedDeadServer = true;
    }
    switch(state) {
        case SERVER_CRASH_START:
        case SERVER_CRASH_SPLIT_META_LOGS:
        case SERVER_CRASH_DELETE_SPLIT_META_WALS_DIR:
        case SERVER_CRASH_ASSIGN_META:
            break;
        default:
            // If hbase:meta is not assigned, yield.
            if (env.getAssignmentManager().waitMetaLoaded(this)) {
                throw new ProcedureSuspendedException();
            }
    }
    try {
        switch(state) {
            case SERVER_CRASH_START:
                LOG.info("Start " + this);
                // If carrying meta, process it first. Else, get list of regions on crashed server.
                if (this.carryingMeta) {
                    setNextState(ServerCrashState.SERVER_CRASH_SPLIT_META_LOGS);
                } else {
                    setNextState(ServerCrashState.SERVER_CRASH_GET_REGIONS);
                }
                break;
            case SERVER_CRASH_SPLIT_META_LOGS:
                if (env.getMasterConfiguration().getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) {
                    zkCoordinatedSplitMetaLogs(env);
                    setNextState(ServerCrashState.SERVER_CRASH_ASSIGN_META);
                } else {
                    am.getRegionStates().metaLogSplitting(serverName);
                    addChildProcedure(createSplittingWalProcedures(env, true));
                    setNextState(ServerCrashState.SERVER_CRASH_DELETE_SPLIT_META_WALS_DIR);
                }
                break;
            case SERVER_CRASH_DELETE_SPLIT_META_WALS_DIR:
                if (isSplittingDone(env, true)) {
                    setNextState(ServerCrashState.SERVER_CRASH_ASSIGN_META);
                    am.getRegionStates().metaLogSplit(serverName);
                } else {
                    setNextState(ServerCrashState.SERVER_CRASH_SPLIT_META_LOGS);
                }
                break;
            case SERVER_CRASH_ASSIGN_META:
                assignRegions(env, Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO));
                setNextState(ServerCrashState.SERVER_CRASH_GET_REGIONS);
                break;
            case SERVER_CRASH_GET_REGIONS:
                this.regionsOnCrashedServer = getRegionsOnCrashedServer(env);
                // if we should do distributed log splitting.
                if (regionsOnCrashedServer != null) {
                    LOG.info("{} had {} regions", serverName, regionsOnCrashedServer.size());
                    if (LOG.isTraceEnabled()) {
                        this.regionsOnCrashedServer.stream().forEach(ri -> LOG.trace(ri.getShortNameToLog()));
                    }
                }
                if (!this.shouldSplitWal) {
                    setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
                } else {
                    setNextState(ServerCrashState.SERVER_CRASH_SPLIT_LOGS);
                }
                break;
            case SERVER_CRASH_SPLIT_LOGS:
                if (env.getMasterConfiguration().getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) {
                    zkCoordinatedSplitLogs(env);
                    setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
                } else {
                    am.getRegionStates().logSplitting(this.serverName);
                    addChildProcedure(createSplittingWalProcedures(env, false));
                    setNextState(ServerCrashState.SERVER_CRASH_DELETE_SPLIT_WALS_DIR);
                }
                break;
            case SERVER_CRASH_DELETE_SPLIT_WALS_DIR:
                if (isSplittingDone(env, false)) {
                    cleanupSplitDir(env);
                    setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
                    am.getRegionStates().logSplit(this.serverName);
                } else {
                    setNextState(ServerCrashState.SERVER_CRASH_SPLIT_LOGS);
                }
                break;
            case SERVER_CRASH_ASSIGN:
                // Filter changes this.regionsOnCrashedServer.
                if (filterDefaultMetaRegions()) {
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Assigning regions " + RegionInfo.getShortNameToLog(regionsOnCrashedServer) + ", " + this + "; cycles=" + getCycles());
                    }
                    assignRegions(env, regionsOnCrashedServer);
                }
                setNextState(ServerCrashState.SERVER_CRASH_CLAIM_REPLICATION_QUEUES);
                break;
            case SERVER_CRASH_HANDLE_RIT2:
                // Noop. Left in place because we used to call handleRIT here for a second time
                // but no longer necessary since HBASE-20634.
                setNextState(ServerCrashState.SERVER_CRASH_CLAIM_REPLICATION_QUEUES);
                break;
            case SERVER_CRASH_CLAIM_REPLICATION_QUEUES:
                addChildProcedure(new ClaimReplicationQueuesProcedure(serverName));
                setNextState(ServerCrashState.SERVER_CRASH_FINISH);
                break;
            case SERVER_CRASH_FINISH:
                LOG.info("removed crashed server {} after splitting done", serverName);
                services.getAssignmentManager().getRegionStates().removeServer(serverName);
                updateProgress(true);
                return Flow.NO_MORE_STATE;
            default:
                throw new UnsupportedOperationException("unhandled state=" + state);
        }
    } catch (IOException e) {
        LOG.warn("Failed state=" + state + ", retry " + this + "; cycles=" + getCycles(), e);
    }
    return Flow.HAS_MORE_STATE;
}
Also used : ClaimReplicationQueuesProcedure(org.apache.hadoop.hbase.master.replication.ClaimReplicationQueuesProcedure) AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager) MasterServices(org.apache.hadoop.hbase.master.MasterServices) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) ProcedureSuspendedException(org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException)

Example 17 with MasterServices

Use of org.apache.hadoop.hbase.master.MasterServices in the Apache HBase project.

Example source: the RSGroupUtil class, method listTablesInRSGroup.

public static List<TableName> listTablesInRSGroup(MasterServices master, String groupName) throws IOException {
    List<TableName> tables = new ArrayList<>();
    boolean isDefaultGroup = RSGroupInfo.DEFAULT_GROUP.equals(groupName);
    for (TableDescriptor td : master.getTableDescriptors().getAll().values()) {
        // no config means in default group
        if (RSGroupUtil.getRSGroupInfo(master, master.getRSGroupInfoManager(), td.getTableName()).map(g -> g.getName().equals(groupName)).orElse(isDefaultGroup)) {
            tables.add(td.getTableName());
        }
    }
    return tables;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Logger(org.slf4j.Logger) Predicate(java.util.function.Predicate) Collection(java.util.Collection) LoggerFactory(org.slf4j.LoggerFactory) NamespaceDescriptor(org.apache.hadoop.hbase.NamespaceDescriptor) IOException(java.io.IOException) ArrayList(java.util.ArrayList) ClusterSchema(org.apache.hadoop.hbase.master.ClusterSchema) List(java.util.List) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) Configuration(org.apache.hadoop.conf.Configuration) MasterServices(org.apache.hadoop.hbase.master.MasterServices) Optional(java.util.Optional) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) TableName(org.apache.hadoop.hbase.TableName) ArrayList(java.util.ArrayList) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor)

Example 18 with MasterServices

Use of org.apache.hadoop.hbase.master.MasterServices in the Apache HBase project.

Example source: the TestCoprocessorConfiguration class, method testMasterCoprocessorHostDefaults.

@Test
public void testMasterCoprocessorHostDefaults() throws Exception {
    Configuration conf = new Configuration(CONF);
    MasterServices masterServices = mock(MasterServices.class);
    systemCoprocessorLoaded.set(false);
    new MasterCoprocessorHost(masterServices, conf);
    assertEquals("System coprocessors loading default was not honored", CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED, systemCoprocessorLoaded.get());
}
Also used : MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) MasterServices(org.apache.hadoop.hbase.master.MasterServices) Test(org.junit.Test)

Example 19 with MasterServices

Use of org.apache.hadoop.hbase.master.MasterServices in the Apache HBase project.

Example source: the TestMasterQuotaManager class, method testUninitializedQuotaManangerDoesNotFail.

@Test
public void testUninitializedQuotaManangerDoesNotFail() {
    MasterServices masterServices = mock(MasterServices.class);
    MasterQuotaManager manager = new MasterQuotaManager(masterServices);
    manager.addRegionSize(null, 0, 0);
    assertNotNull(manager.snapshotRegionSizes());
}
Also used : MasterServices(org.apache.hadoop.hbase.master.MasterServices) Test(org.junit.Test)

Example 20 with MasterServices

Use of org.apache.hadoop.hbase.master.MasterServices in the Apache HBase project.

Example source: the TestMasterQuotaManager class, method testOldEntriesRemoved.

@Test
public void testOldEntriesRemoved() {
    MasterServices masterServices = mock(MasterServices.class);
    MasterQuotaManager manager = new MasterQuotaManager(masterServices);
    manager.initializeRegionSizes();
    // Mock out some regions
    TableName tableName = TableName.valueOf("foo");
    RegionInfo region1 = createRegionInfo(tableName, null, toBytes("a"));
    RegionInfo region2 = createRegionInfo(tableName, toBytes("a"), toBytes("b"));
    RegionInfo region3 = createRegionInfo(tableName, toBytes("b"), toBytes("c"));
    RegionInfo region4 = createRegionInfo(tableName, toBytes("c"), toBytes("d"));
    RegionInfo region5 = createRegionInfo(tableName, toBytes("d"), null);
    final long size = 0;
    long time1 = 10;
    manager.addRegionSize(region1, size, time1);
    manager.addRegionSize(region2, size, time1);
    long time2 = 20;
    manager.addRegionSize(region3, size, time2);
    manager.addRegionSize(region4, size, time2);
    long time3 = 30;
    manager.addRegionSize(region5, size, time3);
    assertEquals(5, manager.snapshotRegionSizes().size());
    QuotaObserverChore chore = mock(QuotaObserverChore.class);
    // Prune nothing
    assertEquals(0, manager.pruneEntriesOlderThan(0, chore));
    assertEquals(5, manager.snapshotRegionSizes().size());
    assertEquals(0, manager.pruneEntriesOlderThan(10, chore));
    assertEquals(5, manager.snapshotRegionSizes().size());
    // Prune the elements at time1
    assertEquals(2, manager.pruneEntriesOlderThan(15, chore));
    assertEquals(3, manager.snapshotRegionSizes().size());
    // Prune the elements at time2
    assertEquals(2, manager.pruneEntriesOlderThan(30, chore));
    assertEquals(1, manager.snapshotRegionSizes().size());
}
Also used : TableName(org.apache.hadoop.hbase.TableName) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) MasterServices(org.apache.hadoop.hbase.master.MasterServices) Test(org.junit.Test)

Aggregations

MasterServices (org.apache.hadoop.hbase.master.MasterServices)24 IOException (java.io.IOException)9 TableName (org.apache.hadoop.hbase.TableName)8 Test (org.junit.Test)8 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)7 Map (java.util.Map)5 ServerName (org.apache.hadoop.hbase.ServerName)5 List (java.util.List)4 Configuration (org.apache.hadoop.conf.Configuration)4 Path (org.apache.hadoop.fs.Path)4 HashMap (java.util.HashMap)3 TableDescriptors (org.apache.hadoop.hbase.TableDescriptors)3 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)3 ArrayList (java.util.ArrayList)2 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)2 LogEntry (org.apache.hadoop.hbase.client.LogEntry)2 AssignmentManager (org.apache.hadoop.hbase.master.AssignmentManager)2 MasterFileSystem (org.apache.hadoop.hbase.master.MasterFileSystem)2 ServerManager (org.apache.hadoop.hbase.master.ServerManager)2 AssignmentManager (org.apache.hadoop.hbase.master.assignment.AssignmentManager)2