
Example 1 with AssignmentManager

use of org.apache.hadoop.hbase.master.assignment.AssignmentManager in project hbase by apache.

the class HMaster method getClusterMetricsWithoutCoprocessor.

public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet<Option> options) throws InterruptedIOException {
    ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder();
    // We return all information to the client if the set of Options is empty.
    if (options.isEmpty()) {
        options = EnumSet.allOf(Option.class);
    }
    // TASKS and/or LIVE_SERVERS will populate this map, which will be given to the builder if
    // not null after option processing completes.
    Map<ServerName, ServerMetrics> serverMetricsMap = null;
    for (Option opt : options) {
        switch(opt) {
            case HBASE_VERSION:
                builder.setHBaseVersion(VersionInfo.getVersion());
                break;
            case CLUSTER_ID:
                builder.setClusterId(getClusterId());
                break;
            case MASTER:
                builder.setMasterName(getServerName());
                break;
            case BACKUP_MASTERS:
                builder.setBackerMasterNames(getBackupMasters());
                break;
            case TASKS:
                {
                    // Master tasks
                    builder.setMasterTasks(TaskMonitor.get().getTasks().stream()
                        .map(task -> ServerTaskBuilder.newBuilder()
                            .setDescription(task.getDescription())
                            .setStatus(task.getStatus())
                            .setState(ServerTask.State.valueOf(task.getState().name()))
                            .setStartTime(task.getStartTime())
                            .setCompletionTime(task.getCompletionTimestamp())
                            .build())
                        .collect(Collectors.toList()));
                    // Add entries to serverMetricsMap for all live servers, if we haven't already done so
                    if (serverMetricsMap == null) {
                        serverMetricsMap = getOnlineServers();
                    }
                    break;
                }
            case LIVE_SERVERS:
                {
                    // Add entries to serverMetricsMap for all live servers, if we haven't already done so
                    if (serverMetricsMap == null) {
                        serverMetricsMap = getOnlineServers();
                    }
                    break;
                }
            case DEAD_SERVERS:
                {
                    if (serverManager != null) {
                        builder.setDeadServerNames(new ArrayList<>(serverManager.getDeadServers().copyServerNames()));
                    }
                    break;
                }
            case MASTER_COPROCESSORS:
                {
                    if (cpHost != null) {
                        builder.setMasterCoprocessorNames(Arrays.asList(getMasterCoprocessors()));
                    }
                    break;
                }
            case REGIONS_IN_TRANSITION:
                {
                    if (assignmentManager != null) {
                        builder.setRegionsInTransition(assignmentManager.getRegionStates().getRegionsStateInTransition());
                    }
                    break;
                }
            case BALANCER_ON:
                {
                    if (loadBalancerTracker != null) {
                        builder.setBalancerOn(loadBalancerTracker.isBalancerOn());
                    }
                    break;
                }
            case MASTER_INFO_PORT:
                {
                    if (infoServer != null) {
                        builder.setMasterInfoPort(infoServer.getPort());
                    }
                    break;
                }
            case SERVERS_NAME:
                {
                    if (serverManager != null) {
                        builder.setServerNames(serverManager.getOnlineServersList());
                    }
                    break;
                }
            case TABLE_TO_REGIONS_COUNT:
                {
                    if (isActiveMaster() && isInitialized() && assignmentManager != null) {
                        try {
                            Map<TableName, RegionStatesCount> tableRegionStatesCountMap = new HashMap<>();
                            Map<String, TableDescriptor> tableDescriptorMap = getTableDescriptors().getAll();
                            for (TableDescriptor tableDescriptor : tableDescriptorMap.values()) {
                                TableName tableName = tableDescriptor.getTableName();
                                RegionStatesCount regionStatesCount = assignmentManager.getRegionStatesCount(tableName);
                                tableRegionStatesCountMap.put(tableName, regionStatesCount);
                            }
                            builder.setTableRegionStatesCount(tableRegionStatesCountMap);
                        } catch (IOException e) {
                            LOG.error("Error while populating TABLE_TO_REGIONS_COUNT for Cluster Metrics..", e);
                        }
                    }
                    break;
                }
        }
    }
    if (serverMetricsMap != null) {
        builder.setLiveServerMetrics(serverMetricsMap);
    }
    return builder.build();
}
Also used : DisablePeerProcedure(org.apache.hadoop.hbase.master.replication.DisablePeerProcedure) LockManager(org.apache.hadoop.hbase.master.locking.LockManager) UserProvider(org.apache.hadoop.hbase.security.UserProvider) Server(org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server) RSGroupAdminEndpoint(org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint) GetRegionInfoResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) StringUtils(org.apache.commons.lang3.StringUtils) RetryCounterFactory(org.apache.hadoop.hbase.util.RetryCounterFactory) EnableTableProcedure(org.apache.hadoop.hbase.master.procedure.EnableTableProcedure) AddPeerProcedure(org.apache.hadoop.hbase.master.replication.AddPeerProcedure) TableDescriptorChecker(org.apache.hadoop.hbase.util.TableDescriptorChecker) Future(java.util.concurrent.Future) ProcedureExecutor(org.apache.hadoop.hbase.procedure2.ProcedureExecutor) NamedQueueRecorder(org.apache.hadoop.hbase.namequeues.NamedQueueRecorder) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) MobFileCleanerChore(org.apache.hadoop.hbase.mob.MobFileCleanerChore) ZNodePaths(org.apache.hadoop.hbase.zookeeper.ZNodePaths) ModifyTableProcedure(org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure) Closeables(org.apache.hbase.thirdparty.com.google.common.io.Closeables) EnumSet(java.util.EnumSet) MetaTableLocator(org.apache.hadoop.hbase.zookeeper.MetaTableLocator) Pair(org.apache.hadoop.hbase.util.Pair) MasterRegionFactory(org.apache.hadoop.hbase.master.region.MasterRegionFactory) CommonFSUtils(org.apache.hadoop.hbase.util.CommonFSUtils) CellBuilderType(org.apache.hadoop.hbase.CellBuilderType) WebAppContext(org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext) ModifyColumnFamilyStoreFileTrackerProcedure(org.apache.hadoop.hbase.regionserver.storefiletracker.ModifyColumnFamilyStoreFileTrackerProcedure) ModifyTableStoreFileTrackerProcedure(org.apache.hadoop.hbase.regionserver.storefiletracker.ModifyTableStoreFileTrackerProcedure) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) ExecutorType(org.apache.hadoop.hbase.executor.ExecutorType) ReplicationPeerDescription(org.apache.hadoop.hbase.replication.ReplicationPeerDescription) BaseLoadBalancer(org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer) RSGroupBasedLoadBalancer(org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer) ReplicationLoadSource(org.apache.hadoop.hbase.replication.ReplicationLoadSource) HBASE_SPLIT_WAL_COORDINATED_BY_ZK(org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK) Descriptors(org.apache.hbase.thirdparty.com.google.protobuf.Descriptors) Constructor(java.lang.reflect.Constructor) ProcedureStoreListener(org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener) ReopenTableRegionsProcedure(org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure) TaskMonitor(org.apache.hadoop.hbase.monitoring.TaskMonitor) ServerTask(org.apache.hadoop.hbase.ServerTask) Option(org.apache.hadoop.hbase.ClusterMetrics.Option) TableName(org.apache.hadoop.hbase.TableName) ServletHolder(org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder) LoadBalancerTracker(org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker) Service(org.apache.hbase.thirdparty.com.google.protobuf.Service) Sets(org.apache.hbase.thirdparty.com.google.common.collect.Sets) 
IOException(java.io.IOException) ServerConnector(org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector) SlowLogMasterService(org.apache.hadoop.hbase.master.slowlog.SlowLogMasterService) MasterAddressSyncer(org.apache.hadoop.hbase.master.zksyncer.MasterAddressSyncer) UnknownHostException(java.net.UnknownHostException) Procedure(org.apache.hadoop.hbase.procedure2.Procedure) ExecutionException(java.util.concurrent.ExecutionException) RSGroupInfoManager(org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager) TableNotDisabledException(org.apache.hadoop.hbase.TableNotDisabledException) RemoteProcedureException(org.apache.hadoop.hbase.procedure2.RemoteProcedureException) RegionStateStore(org.apache.hadoop.hbase.master.assignment.RegionStateStore) NoSuchColumnFamilyException(org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException) MonitoredTask(org.apache.hadoop.hbase.monitoring.MonitoredTask) SpaceQuotaSnapshotNotifier(org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier) CoprocessorHost(org.apache.hadoop.hbase.coprocessor.CoprocessorHost) MasterStatusServlet(org.apache.hadoop.hbase.master.http.MasterStatusServlet) TruncateTableProcedure(org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure) MASTER_HOSTNAME_KEY(org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY) EnablePeerProcedure(org.apache.hadoop.hbase.master.replication.EnablePeerProcedure) MasterProcedureManagerHost(org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) PleaseHoldException(org.apache.hadoop.hbase.PleaseHoldException) CellBuilderFactory(org.apache.hadoop.hbase.CellBuilderFactory) MergeTableRegionsProcedure(org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure) ReplicationException(org.apache.hadoop.hbase.replication.ReplicationException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Maps(org.apache.hbase.thirdparty.com.google.common.collect.Maps) CatalogJanitor(org.apache.hadoop.hbase.master.janitor.CatalogJanitor) ReplicationPeerManager(org.apache.hadoop.hbase.master.replication.ReplicationPeerManager) Collection(java.util.Collection) HBaseInterfaceAudience(org.apache.hadoop.hbase.HBaseInterfaceAudience) ZKWatcher(org.apache.hadoop.hbase.zookeeper.ZKWatcher) Collectors(java.util.stream.Collectors) SnapshotQuotaObserverChore(org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore) Objects(java.util.Objects) FutureUtils(org.apache.hadoop.hbase.util.FutureUtils) UnknownRegionException(org.apache.hadoop.hbase.UnknownRegionException) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) MasterAddressTracker(org.apache.hadoop.hbase.zookeeper.MasterAddressTracker) LogCleaner(org.apache.hadoop.hbase.master.cleaner.LogCleaner) RollingUpgradeChore(org.apache.hadoop.hbase.master.migrate.RollingUpgradeChore) MasterStoppedException(org.apache.hadoop.hbase.exceptions.MasterStoppedException) HConstants(org.apache.hadoop.hbase.HConstants) QuotaTableUtil(org.apache.hadoop.hbase.quotas.QuotaTableUtil) HBASE_MASTER_LOGCLEANER_PLUGINS(org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS) QuotaUtil(org.apache.hadoop.hbase.quotas.QuotaUtil) TransitPeerSyncReplicationStateProcedure(org.apache.hadoop.hbase.master.replication.TransitPeerSyncReplicationStateProcedure) LinkedList(java.util.LinkedList) RemovePeerProcedure(org.apache.hadoop.hbase.master.replication.RemovePeerProcedure) 
DeleteNamespaceProcedure(org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure) Threads(org.apache.hadoop.hbase.util.Threads) BalanceSwitchMode(org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode) MasterQuotaManager(org.apache.hadoop.hbase.quotas.MasterQuotaManager) Bytes(org.apache.hadoop.hbase.util.Bytes) RegionStates(org.apache.hadoop.hbase.master.assignment.RegionStates) Logger(org.slf4j.Logger) KeeperException(org.apache.zookeeper.KeeperException) DeleteTableProcedure(org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure) BalanceRequest(org.apache.hadoop.hbase.client.BalanceRequest) MasterSwitchType(org.apache.hadoop.hbase.client.MasterSwitchType) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) ProcedureSyncWait(org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait) MaintenanceLoadBalancer(org.apache.hadoop.hbase.master.balancer.MaintenanceLoadBalancer) Lists(org.apache.hbase.thirdparty.com.google.common.collect.Lists) RequestConverter(org.apache.hadoop.hbase.shaded.protobuf.RequestConverter) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) ZKClusterId(org.apache.hadoop.hbase.zookeeper.ZKClusterId) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) TransitRegionStateProcedure(org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure) HBaseServerBase(org.apache.hadoop.hbase.HBaseServerBase) ServerCrashProcedure(org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure) Comparator(java.util.Comparator) MasterDumpServlet(org.apache.hadoop.hbase.master.http.MasterDumpServlet) Arrays(java.util.Arrays) NormalizeTableFilterParams(org.apache.hadoop.hbase.client.NormalizeTableFilterParams) UpdatePeerConfigProcedure(org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure) ProcedureStore(org.apache.hadoop.hbase.procedure2.store.ProcedureStore) InetAddress(java.net.InetAddress) InvalidFamilyOperationException(org.apache.hadoop.hbase.InvalidFamilyOperationException) SpaceViolationPolicy(org.apache.hadoop.hbase.quotas.SpaceViolationPolicy) MasterProcedureScheduler(org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler) SecurityConstants(org.apache.hadoop.hbase.security.SecurityConstants) RegionNormalizerManager(org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager) MasterProcedureConstants(org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants) Cell(org.apache.hadoop.hbase.Cell) SpaceQuotaSnapshot(org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot) HttpServlet(javax.servlet.http.HttpServlet) ZKUtil(org.apache.hadoop.hbase.zookeeper.ZKUtil) SnapshotCleanupTracker(org.apache.hadoop.hbase.zookeeper.SnapshotCleanupTracker) Set(java.util.Set) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) ReplicationHFileCleaner(org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner) FSTableDescriptors(org.apache.hadoop.hbase.util.FSTableDescriptors) InvocationTargetException(java.lang.reflect.InvocationTargetException) IdLock(org.apache.hadoop.hbase.util.IdLock) ClusterMetricsBuilder(org.apache.hadoop.hbase.ClusterMetricsBuilder) NonceProcedureRunnable(org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable) ClusterId(org.apache.hadoop.hbase.ClusterId) Superusers(org.apache.hadoop.hbase.security.Superusers) AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager) 
ServerTaskBuilder(org.apache.hadoop.hbase.ServerTaskBuilder) DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK(org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK) AbstractPeerProcedure(org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) InterruptedIOException(java.io.InterruptedIOException) RegionStateNode(org.apache.hadoop.hbase.master.assignment.RegionStateNode) ArrayList(java.util.ArrayList) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) MasterProcedureUtil(org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil) HFileArchiveUtil(org.apache.hadoop.hbase.util.HFileArchiveUtil) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) SnapshotManager(org.apache.hadoop.hbase.master.snapshot.SnapshotManager) InfoServer(org.apache.hadoop.hbase.http.InfoServer) JVMClusterUtil(org.apache.hadoop.hbase.util.JVMClusterUtil) ServerName(org.apache.hadoop.hbase.ServerName) SyncReplicationState(org.apache.hadoop.hbase.replication.SyncReplicationState) RetryCounter(org.apache.hadoop.hbase.util.RetryCounter) ReplicationPeerConfig(org.apache.hadoop.hbase.replication.ReplicationPeerConfig) RegionStatesCount(org.apache.hadoop.hbase.client.RegionStatesCount) Scan(org.apache.hadoop.hbase.client.Scan) HBaseFsck(org.apache.hadoop.hbase.util.HBaseFsck) HttpServer(org.apache.hadoop.hbase.http.HttpServer) BalanceResponse(org.apache.hadoop.hbase.client.BalanceResponse) MasterFlushTableProcedureManager(org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager) SpaceQuotaStatus(org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot.SpaceQuotaStatus) CatalogFamilyFormat(org.apache.hadoop.hbase.CatalogFamilyFormat) TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) RegionNormalizerTracker(org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker) LoggerFactory(org.slf4j.LoggerFactory) TimeoutException(java.util.concurrent.TimeoutException) MasterRedirectServlet(org.apache.hadoop.hbase.master.http.MasterRedirectServlet) HFileCleaner(org.apache.hadoop.hbase.master.cleaner.HFileCleaner) ClusterStatusChore(org.apache.hadoop.hbase.master.balancer.ClusterStatusChore) ReplicationPeerNotFoundException(org.apache.hadoop.hbase.ReplicationPeerNotFoundException) DirScanPool(org.apache.hadoop.hbase.master.cleaner.DirScanPool) RemoteProcedure(org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure) MetaLocationSyncer(org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer) ReplicationBarrierCleaner(org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner) RegionProcedureStore(org.apache.hadoop.hbase.procedure2.store.region.RegionProcedureStore) SyncReplicationReplayWALManager(org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager) Path(org.apache.hadoop.fs.Path) ColumnFamilyDescriptor(org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) CoprocessorRpcUtils(org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils) RestrictedApi(com.google.errorprone.annotations.RestrictedApi) MasterQuotasObserver(org.apache.hadoop.hbase.quotas.MasterQuotasObserver) QuotaObserverChore(org.apache.hadoop.hbase.quotas.QuotaObserverChore) InetSocketAddress(java.net.InetSocketAddress) List(java.util.List) CompactionState(org.apache.hadoop.hbase.client.CompactionState) BalancerChore(org.apache.hadoop.hbase.master.balancer.BalancerChore) PleaseRestartMasterException(org.apache.hadoop.hbase.PleaseRestartMasterException) 
SpaceQuotaSnapshotNotifierFactory(org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory) EnvironmentEdgeManager(org.apache.hadoop.hbase.util.EnvironmentEdgeManager) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) MetaTableAccessor(org.apache.hadoop.hbase.MetaTableAccessor) RegionInfoBuilder(org.apache.hadoop.hbase.client.RegionInfoBuilder) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) InitMetaProcedure(org.apache.hadoop.hbase.master.procedure.InitMetaProcedure) SnapshotDescription(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription) MasterNotRunningException(org.apache.hadoop.hbase.MasterNotRunningException) ModifyRegionUtils(org.apache.hadoop.hbase.util.ModifyRegionUtils) NamespaceDescriptor(org.apache.hadoop.hbase.NamespaceDescriptor) HashMap(java.util.HashMap) TableState(org.apache.hadoop.hbase.client.TableState) LoadBalancerFactory(org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory) LockedResource(org.apache.hadoop.hbase.procedure2.LockedResource) SnapshotCleanerChore(org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore) RegionNormalizerFactory(org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory) RpcServer(org.apache.hadoop.hbase.ipc.RpcServer) ReplicationUtils(org.apache.hadoop.hbase.replication.ReplicationUtils) MasterRegion(org.apache.hadoop.hbase.master.region.MasterRegion) RSGroupUtil(org.apache.hadoop.hbase.rsgroup.RSGroupUtil) Iterator(java.util.Iterator) HBaseMarkers(org.apache.hadoop.hbase.log.HBaseMarkers) ProcedurePrepareLatch(org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch) VersionInfo(org.apache.hadoop.hbase.util.VersionInfo) Put(org.apache.hadoop.hbase.client.Put) FavoredNodesManager(org.apache.hadoop.hbase.favored.FavoredNodesManager) CreateTableProcedure(org.apache.hadoop.hbase.master.procedure.CreateTableProcedure) ProcedureEvent(org.apache.hadoop.hbase.procedure2.ProcedureEvent) MemoryBoundedLogMessageBuffer(org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer) TimeUnit(java.util.concurrent.TimeUnit) ReplicationLogCleaner(org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner) DisableTableProcedure(org.apache.hadoop.hbase.master.procedure.DisableTableProcedure) Addressing(org.apache.hadoop.hbase.util.Addressing) Collections(java.util.Collections) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) MobFileCompactionChore(org.apache.hadoop.hbase.mob.MobFileCompactionChore) ArrayList(java.util.ArrayList) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) InterruptedIOException(java.io.InterruptedIOException) RegionStatesCount(org.apache.hadoop.hbase.client.RegionStatesCount) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) TableName(org.apache.hadoop.hbase.TableName) ServerName(org.apache.hadoop.hbase.ServerName) ClusterMetricsBuilder(org.apache.hadoop.hbase.ClusterMetricsBuilder) Option(org.apache.hadoop.hbase.ClusterMetrics.Option) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) Map(java.util.Map) HashMap(java.util.HashMap)
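
For context, the metrics assembled by the switch above can be requested from the client side through Admin.getClusterMetrics(EnumSet<Option>). The sketch below is illustrative only (the class name and connection setup are not part of the snippet above); it asks for just the Options it needs and relies on the behaviour, noted in the method, that an empty set expands to all Options.

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterMetricsClientSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Request only the master name and live server metrics; an empty EnumSet
            // would be expanded to all Options (see the options.isEmpty() check above).
            ClusterMetrics metrics =
                admin.getClusterMetrics(EnumSet.of(Option.MASTER, Option.LIVE_SERVERS));
            System.out.println("Active master: " + metrics.getMasterName());
            System.out.println("Live servers: " + metrics.getLiveServerMetrics().size());
        }
    }
}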

Example 2 with AssignmentManager

use of org.apache.hadoop.hbase.master.assignment.AssignmentManager in project hbase by apache.

the class CloneSnapshotProcedure method executeFromState.

@Override
protected Flow executeFromState(final MasterProcedureEnv env, final CloneSnapshotState state) throws InterruptedException {
    LOG.trace("{} execute state={}", this, state);
    try {
        switch(state) {
            case CLONE_SNAPSHOT_PRE_OPERATION:
                // Verify if we can clone the table
                prepareClone(env);
                preCloneSnapshot(env);
                setNextState(CloneSnapshotState.CLONE_SNAPSHOT_WRITE_FS_LAYOUT);
                break;
            case CLONE_SNAPSHOT_WRITE_FS_LAYOUT:
                updateTableDescriptorWithSFT();
                newRegions = createFilesystemLayout(env, tableDescriptor, newRegions);
                env.getMasterServices().getTableDescriptors().update(tableDescriptor, true);
                setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ADD_TO_META);
                break;
            case CLONE_SNAPSHOT_ADD_TO_META:
                addRegionsToMeta(env);
                setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ASSIGN_REGIONS);
                break;
            case CLONE_SNAPSHOT_ASSIGN_REGIONS:
                CreateTableProcedure.setEnablingState(env, getTableName());
                // Separate newRegions to split regions and regions to assign
                List<RegionInfo> splitRegions = new ArrayList<>();
                List<RegionInfo> regionsToAssign = new ArrayList<>();
                newRegions.forEach(ri -> {
                    if (ri.isOffline() && (ri.isSplit() || ri.isSplitParent())) {
                        splitRegions.add(ri);
                    } else {
                        regionsToAssign.add(ri);
                    }
                });
                // For split regions, add them to RegionStates
                AssignmentManager am = env.getAssignmentManager();
                splitRegions.forEach(ri -> am.getRegionStates().updateRegionState(ri, RegionState.State.SPLIT));
                addChildProcedure(env.getAssignmentManager().createRoundRobinAssignProcedures(regionsToAssign));
                setNextState(CloneSnapshotState.CLONE_SNAPSHOT_UPDATE_DESC_CACHE);
                break;
            case CLONE_SNAPSHOT_UPDATE_DESC_CACHE:
                // XXX: this stage should be renamed to 'set table enabled', as we now cache the
                // descriptor after writing the fs layout.
                CreateTableProcedure.setEnabledState(env, getTableName());
                setNextState(CloneSnapshotState.CLONE_SNAPHOST_RESTORE_ACL);
                break;
            case CLONE_SNAPHOST_RESTORE_ACL:
                restoreSnapshotAcl(env);
                setNextState(CloneSnapshotState.CLONE_SNAPSHOT_POST_OPERATION);
                break;
            case CLONE_SNAPSHOT_POST_OPERATION:
                postCloneSnapshot(env);
                MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
                metricsSnapshot.addSnapshotClone(getMonitorStatus().getCompletionTimestamp() - getMonitorStatus().getStartTime());
                getMonitorStatus().markComplete("Clone snapshot '" + snapshot.getName() + "' completed!");
                return Flow.NO_MORE_STATE;
            default:
                throw new UnsupportedOperationException("unhandled state=" + state);
        }
    } catch (IOException e) {
        if (isRollbackSupported(state)) {
            setFailure("master-clone-snapshot", e);
        } else {
            LOG.warn("Retriable error trying to clone snapshot=" + snapshot.getName() + " to table=" + getTableName() + " state=" + state, e);
        }
    }
    return Flow.HAS_MORE_STATE;
}
Also used : MetricsSnapshot(org.apache.hadoop.hbase.master.MetricsSnapshot) ArrayList(java.util.ArrayList) AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) IOException(java.io.IOException)
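
CloneSnapshotProcedure is the master-side procedure behind a clone request. A minimal client-side sketch that would trigger it is shown below; the snapshot and table names are illustrative and error handling is omitted.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CloneSnapshotClientSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Take a snapshot of an existing table, then clone it to a new table.
            // The clone request is executed on the master by CloneSnapshotProcedure.
            admin.snapshot("demo-snapshot", TableName.valueOf("demo_table"));
            admin.cloneSnapshot("demo-snapshot", TableName.valueOf("demo_table_clone"));
        }
    }
}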

Example 3 with AssignmentManager

use of org.apache.hadoop.hbase.master.assignment.AssignmentManager in project hbase by apache.

the class MasterDumpServlet method dumpRIT.

private void dumpRIT(HMaster master, PrintWriter out) {
    AssignmentManager am = master.getAssignmentManager();
    if (am == null) {
        out.println("AssignmentManager is not initialized");
        return;
    }
    for (RegionStateNode rs : am.getRegionsInTransition()) {
        String rid = rs.getRegionInfo().getEncodedName();
        out.println("Region " + rid + ": " + rs.toDescriptiveString());
    }
}
Also used : AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager) RegionStateNode(org.apache.hadoop.hbase.master.assignment.RegionStateNode)
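
A rough client-side analogue of this dump (not part of the servlet itself) can be built from the REGIONS_IN_TRANSITION option of the cluster metrics shown in Example 1; the connection setup below is illustrative.

import java.util.EnumSet;

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.master.RegionState;

public class RegionsInTransitionDumpSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            ClusterMetrics metrics =
                admin.getClusterMetrics(EnumSet.of(Option.REGIONS_IN_TRANSITION));
            // Print one line per region in transition, similar to dumpRIT above.
            for (RegionState rs : metrics.getRegionStatesInTransition()) {
                System.out.println("Region " + rs.getRegion().getEncodedName() + ": "
                    + rs.toDescriptiveString());
            }
        }
    }
}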

Example 4 with AssignmentManager

use of org.apache.hadoop.hbase.master.assignment.AssignmentManager in project hbase by apache.

the class ServerCrashProcedure method assignRegions.

/**
 * Assign the regions on the crashed RS to other RSes.
 * <p/>
 * In this method we go through the RegionStateNodes of the given regions to find out whether
 * there is already a TRSP for the region; if so, we interrupt it and let it retry on another
 * server, otherwise we schedule a new TRSP to bring the region online.
 * <p/>
 * We also check whether the table for a region is enabled; if not, we skip assigning it.
 */
private void assignRegions(MasterProcedureEnv env, List<RegionInfo> regions) throws IOException {
    AssignmentManager am = env.getMasterServices().getAssignmentManager();
    boolean retainAssignment = env.getMasterConfiguration().getBoolean(MASTER_SCP_RETAIN_ASSIGNMENT, DEFAULT_MASTER_SCP_RETAIN_ASSIGNMENT);
    for (RegionInfo region : regions) {
        RegionStateNode regionNode = am.getRegionStates().getOrCreateRegionStateNode(region);
        regionNode.lock();
        try {
            // Skip regions whose recorded location no longer matches this crashed server, so
            // stale entries do not get in the way of our clearing out 'Unknown Servers'.
            if (!isMatchingRegionLocation(regionNode)) {
                // double checking here to confirm that we do not skip assignment incorrectly.
                if (!am.isRunning()) {
                    throw new DoNotRetryIOException("AssignmentManager has been stopped, can not process assignment any more");
                }
                LOG.info("{} found {} whose regionLocation no longer matches {}, skipping assign...", this, regionNode, serverName);
                continue;
            }
            if (regionNode.getProcedure() != null) {
                LOG.info("{} found RIT {}; {}", this, regionNode.getProcedure(), regionNode);
                regionNode.getProcedure().serverCrashed(env, regionNode, getServerName(), !retainAssignment);
                continue;
            }
            if (env.getMasterServices().getTableStateManager().isTableState(regionNode.getTable(), TableState.State.DISABLING)) {
                // We need to change the state here otherwise the TRSP scheduled by DTP will try to
                // close the region from a dead server and will never succeed. Please see HBASE-23636
                // for more details.
                env.getAssignmentManager().regionClosedAbnormally(regionNode);
                LOG.info("{} found table disabling for region {}, set it state to ABNORMALLY_CLOSED.", this, regionNode);
                continue;
            }
            if (env.getMasterServices().getTableStateManager().isTableState(regionNode.getTable(), TableState.State.DISABLED)) {
                // This should not happen, table disabled but has regions on server.
                LOG.warn("Found table disabled for region {}, procDetails: {}", regionNode, this);
                continue;
            }
            TransitRegionStateProcedure proc = TransitRegionStateProcedure.assign(env, region, !retainAssignment, null);
            regionNode.setProcedure(proc);
            addChildProcedure(proc);
        } finally {
            regionNode.unlock();
        }
    }
}
Also used : TransitRegionStateProcedure(org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) RegionStateNode(org.apache.hadoop.hbase.master.assignment.RegionStateNode)
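
Whether the crashed server's regions are preferentially kept on the same host is driven by the MASTER_SCP_RETAIN_ASSIGNMENT flag read at the top of assignRegions. The sketch below only reads that switch; the key string and default are assumptions inferred from the constant names and may differ between HBase versions, so verify them against your release.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ScpRetainAssignmentSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key/default behind MASTER_SCP_RETAIN_ASSIGNMENT and
        // DEFAULT_MASTER_SCP_RETAIN_ASSIGNMENT; check your HBase version.
        boolean retainAssignment =
            conf.getBoolean("hbase.master.scp.retain.assignment", false);
        System.out.println("SCP retain assignment: " + retainAssignment);
    }
}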

Example 5 with AssignmentManager

use of org.apache.hadoop.hbase.master.assignment.AssignmentManager in project hbase by apache.

the class ServerCrashProcedure method zkCoordinatedSplitLogs.

/**
 * Split logs using 'classic' zk-based coordination.
 * Superseded by procedure-based WAL splitting.
 * @see #createSplittingWalProcedures(MasterProcedureEnv, boolean)
 */
private void zkCoordinatedSplitLogs(final MasterProcedureEnv env) throws IOException {
    LOG.debug("Splitting WALs {}", this);
    MasterWalManager mwm = env.getMasterServices().getMasterWalManager();
    AssignmentManager am = env.getMasterServices().getAssignmentManager();
    // TODO: For Matteo. Below BLOCKs!!!! Redo so can relinquish executor while it is running.
    // PROBLEM!!! WE BLOCK HERE. Can block for hours if hundreds of WALs to split and hundreds
    // of SCPs running because big cluster crashed down.
    am.getRegionStates().logSplitting(this.serverName);
    mwm.splitLog(this.serverName);
    if (!carryingMeta) {
        mwm.archiveMetaLog(this.serverName);
    }
    am.getRegionStates().logSplit(this.serverName);
    LOG.debug("Done splitting WALs {}", this);
}
Also used : MasterWalManager(org.apache.hadoop.hbase.master.MasterWalManager) AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager)
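
Whether the master takes this zk-coordinated path or the procedure-based WAL splitting that superseded it is switched by the HBASE_SPLIT_WAL_COORDINATED_BY_ZK constant (visible in the imports of Example 1). A minimal sketch of reading that switch follows; the default differs between HBase versions, so treat the printed value as informational only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class WalSplitCoordinationSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // true  -> 'classic' zk-coordinated splitting (zkCoordinatedSplitLogs above);
        // false -> procedure-based WAL splitting.
        boolean zkCoordinated = conf.getBoolean(
            HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK,
            HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK);
        System.out.println("WAL splitting coordinated by ZK: " + zkCoordinated);
    }
}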

Aggregations

AssignmentManager (org.apache.hadoop.hbase.master.assignment.AssignmentManager): 30 usages
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 13 usages
Test (org.junit.Test): 13 usages
TableName (org.apache.hadoop.hbase.TableName): 10 usages
HMaster (org.apache.hadoop.hbase.master.HMaster): 10 usages
IOException (java.io.IOException): 9 usages
RegionStates (org.apache.hadoop.hbase.master.assignment.RegionStates): 9 usages
ServerName (org.apache.hadoop.hbase.ServerName): 7 usages
RegionStateNode (org.apache.hadoop.hbase.master.assignment.RegionStateNode): 6 usages
TransitRegionStateProcedure (org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure): 5 usages
ArrayList (java.util.ArrayList): 4 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 4 usages
List (java.util.List): 3 usages
Map (java.util.Map): 3 usages
Path (org.apache.hadoop.fs.Path): 3 usages
SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster): 3 usages
Admin (org.apache.hadoop.hbase.client.Admin): 3 usages
HashMap (java.util.HashMap): 2 usages
HashSet (java.util.HashSet): 2 usages
ExecutionException (java.util.concurrent.ExecutionException): 2 usages