
Example 11 with RegionState

Use of org.apache.hadoop.hbase.master.RegionState in project hbase by apache.

From the class ProtobufUtil, method convert.

/**
   * Convert a ClusterStatus to a protobuf ClusterStatus
   *
   * @return the protobuf ClusterStatus
   */
public static ClusterStatusProtos.ClusterStatus convert(ClusterStatus status) {
    ClusterStatusProtos.ClusterStatus.Builder builder = ClusterStatusProtos.ClusterStatus.newBuilder();
    builder.setHbaseVersion(HBaseVersionFileContent.newBuilder().setVersion(status.getHBaseVersion()));
    if (status.getServers() != null) {
        for (ServerName serverName : status.getServers()) {
            LiveServerInfo.Builder lsi = LiveServerInfo.newBuilder().setServer(ProtobufUtil.toServerName(serverName));
            lsi.setServerLoad(status.getLoad(serverName).obtainServerLoadPB());
            builder.addLiveServers(lsi.build());
        }
    }
    if (status.getDeadServerNames() != null) {
        for (ServerName deadServer : status.getDeadServerNames()) {
            builder.addDeadServers(ProtobufUtil.toServerName(deadServer));
        }
    }
    if (status.getRegionsInTransition() != null) {
        for (RegionState rit : status.getRegionsInTransition()) {
            ClusterStatusProtos.RegionState rs = rit.convert();
            RegionSpecifier.Builder spec = RegionSpecifier.newBuilder().setType(RegionSpecifierType.REGION_NAME);
            spec.setValue(UnsafeByteOperations.unsafeWrap(rit.getRegion().getRegionName()));
            RegionInTransition pbRIT = RegionInTransition.newBuilder().setSpec(spec.build()).setRegionState(rs).build();
            builder.addRegionsInTransition(pbRIT);
        }
    }
    if (status.getClusterId() != null) {
        builder.setClusterId(new ClusterId(status.getClusterId()).convert());
    }
    if (status.getMasterCoprocessors() != null) {
        for (String coprocessor : status.getMasterCoprocessors()) {
            builder.addMasterCoprocessors(HBaseProtos.Coprocessor.newBuilder().setName(coprocessor));
        }
    }
    if (status.getMaster() != null) {
        builder.setMaster(ProtobufUtil.toServerName(status.getMaster()));
    }
    if (status.getBackupMasters() != null) {
        for (ServerName backup : status.getBackupMasters()) {
            builder.addBackupMasters(ProtobufUtil.toServerName(backup));
        }
    }
    if (status.getBalancerOn() != null) {
        builder.setBalancerOn(status.getBalancerOn());
    }
    return builder.build();
}
Also used : ClusterStatusProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos) ClusterId(org.apache.hadoop.hbase.ClusterId) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) RegionSpecifier(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier) RegionInTransition(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition) RegionState(org.apache.hadoop.hbase.master.RegionState) LiveServerInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.LiveServerInfo) ServerName(org.apache.hadoop.hbase.ServerName) ClusterStatus(org.apache.hadoop.hbase.ClusterStatus)
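
A minimal caller sketch (not from the hbase sources) of how this converter might be fed from a live cluster. The Connection handle and the method name snapshotAsPB are assumptions for illustration; Admin#getClusterStatus() is the API that produces the ClusterStatus being converted.

public static ClusterStatusProtos.ClusterStatus snapshotAsPB(Connection conn) throws IOException {
    // Hedged sketch: ask the cluster for its current status, then serialize it
    // with the convert(...) method shown above.
    try (Admin admin = conn.getAdmin()) {
        ClusterStatus status = admin.getClusterStatus();
        return ProtobufUtil.convert(status);
    }
}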

Example 12 with RegionState

Use of org.apache.hadoop.hbase.master.RegionState in project hbase by apache.

From the class TestHBaseFsckOneRS, method testCleanUpDaughtersNotInMetaAfterFailedSplit.

@Test(timeout = 180000)
public void testCleanUpDaughtersNotInMetaAfterFailedSplit() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    try {
        HTableDescriptor desc = new HTableDescriptor(tableName);
        desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
        createTable(TEST_UTIL, desc, null);
        tbl = connection.getTable(desc.getTableName());
        for (int i = 0; i < 5; i++) {
            Put p1 = new Put(("r" + i).getBytes());
            p1.addColumn(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes());
            tbl.put(p1);
        }
        admin.flush(desc.getTableName());
        List<HRegion> regions = cluster.getRegions(desc.getTableName());
        byte[] parentRegionName = regions.get(0).getRegionInfo().getRegionName();
        int serverWith = cluster.getServerWith(parentRegionName);
        HRegionServer regionServer = cluster.getRegionServer(serverWith);
        // Create daughters without adding to META table
        MasterProcedureEnv env = cluster.getMaster().getMasterProcedureExecutor().getEnvironment();
        SplitTableRegionProcedure splitR = new SplitTableRegionProcedure(env, regions.get(0).getRegionInfo(), Bytes.toBytes("r3"));
        splitR.prepareSplitRegion(env);
        splitR.setRegionStateToSplitting(env);
        splitR.closeParentRegionForSplit(env);
        splitR.createDaughterRegions(env);
        AssignmentManager am = cluster.getMaster().getAssignmentManager();
        for (RegionState state : am.getRegionStates().getRegionsInTransition()) {
            am.regionOffline(state.getRegion());
        }
        Map<HRegionInfo, ServerName> regionsMap = new HashMap<>();
        regionsMap.put(regions.get(0).getRegionInfo(), regionServer.getServerName());
        am.assign(regionsMap);
        am.waitForAssignment(regions.get(0).getRegionInfo());
        HBaseFsck hbck = doFsck(conf, false);
        assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED, HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
        // holes are separate from overlap groups
        assertEquals(0, hbck.getOverlapGroups(tableName).size());
        // fix hole
        assertErrors(doFsck(conf, false, true, false, false, false, false, false, false, false, false, false, false, null), new HBaseFsck.ErrorReporter.ERROR_CODE[] { HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED, HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED });
        // check that hole fixed
        assertNoErrors(doFsck(conf, false));
        assertEquals(5, countRows());
    } finally {
        if (tbl != null) {
            tbl.close();
            tbl = null;
        }
        cleanupTable(tableName);
    }
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HashMap(java.util.HashMap) AssignmentManager(org.apache.hadoop.hbase.master.AssignmentManager) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) SplitTableRegionProcedure(org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RegionState(org.apache.hadoop.hbase.master.RegionState) ServerName(org.apache.hadoop.hbase.ServerName) Test(org.junit.Test)
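
As a companion to the test above, a small illustrative helper (not part of the test class) that prints whatever the AssignmentManager currently reports as in transition, using only the calls already exercised here; the HMaster argument would come from something like cluster.getMaster().

static void dumpRegionsInTransition(HMaster master) {
    AssignmentManager am = master.getAssignmentManager();
    // Each RegionState describes one region in transition; toDescriptiveString()
    // gives a human-readable summary of its current state.
    for (RegionState state : am.getRegionStates().getRegionsInTransition()) {
        System.out.println(state.getRegion().getRegionNameAsString() + ": " + state.toDescriptiveString());
    }
}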

Example 13 with RegionState

Use of org.apache.hadoop.hbase.master.RegionState in project hbase by apache.

From the class RSGroupAdminServer, method balanceRSGroup.

@Override
public boolean balanceRSGroup(String groupName) throws IOException {
    ServerManager serverManager = master.getServerManager();
    AssignmentManager assignmentManager = master.getAssignmentManager();
    LoadBalancer balancer = master.getLoadBalancer();
    boolean balancerRan;
    synchronized (balancer) {
        if (master.getMasterCoprocessorHost() != null) {
            master.getMasterCoprocessorHost().preBalanceRSGroup(groupName);
        }
        if (getRSGroupInfo(groupName) == null) {
            throw new ConstraintException("RSGroup does not exist: " + groupName);
        }
        // Only allow one balance run at a time.
        Map<String, RegionState> groupRIT = rsGroupGetRegionsInTransition(groupName);
        if (groupRIT.size() > 0) {
            LOG.debug("Not running balancer because " + groupRIT.size() + " region(s) in transition: " + StringUtils.abbreviate(master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(), 256));
            return false;
        }
        if (serverManager.areDeadServersInProgress()) {
            LOG.debug("Not running balancer because processing dead regionserver(s): " + serverManager.getDeadServers());
            return false;
        }
        // We balance per group instead of per table
        List<RegionPlan> plans = new ArrayList<>();
        for (Map.Entry<TableName, Map<ServerName, List<HRegionInfo>>> tableMap : getRSGroupAssignmentsByTable(groupName).entrySet()) {
            LOG.info("Creating partial plan for table " + tableMap.getKey() + ": " + tableMap.getValue());
            List<RegionPlan> partialPlans = balancer.balanceCluster(tableMap.getValue());
            LOG.info("Partial plan for table " + tableMap.getKey() + ": " + partialPlans);
            if (partialPlans != null) {
                plans.addAll(partialPlans);
            }
        }
        long startTime = System.currentTimeMillis();
        balancerRan = plans != null;
        if (plans != null && !plans.isEmpty()) {
            LOG.info("RSGroup balance " + groupName + " starting with plan count: " + plans.size());
            for (RegionPlan plan : plans) {
                LOG.info("balance " + plan);
                assignmentManager.balance(plan);
            }
            LOG.info("RSGroup balance " + groupName + " completed after " + (System.currentTimeMillis() - startTime) + " seconds");
        }
        if (master.getMasterCoprocessorHost() != null) {
            master.getMasterCoprocessorHost().postBalanceRSGroup(groupName, balancerRan);
        }
    }
    return balancerRan;
}
Also used : ServerManager(org.apache.hadoop.hbase.master.ServerManager) AssignmentManager(org.apache.hadoop.hbase.master.AssignmentManager) ArrayList(java.util.ArrayList) LoadBalancer(org.apache.hadoop.hbase.master.LoadBalancer) ConstraintException(org.apache.hadoop.hbase.constraint.ConstraintException) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) RegionState(org.apache.hadoop.hbase.master.RegionState) RegionPlan(org.apache.hadoop.hbase.master.RegionPlan) HashMap(java.util.HashMap) Map(java.util.Map)
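
A hedged caller sketch for the method above. It assumes the client-side RSGroupAdminClient from the hbase-rsgroup module (class name and constructor as recalled, so treat them as assumptions), an existing Configuration conf, and a hypothetical group name.

try (Connection conn = ConnectionFactory.createConnection(conf)) {
    // The client is assumed to forward to RSGroupAdminServer#balanceRSGroup on the master.
    RSGroupAdmin rsGroupAdmin = new RSGroupAdminClient(conn);
    boolean balancerRan = rsGroupAdmin.balanceRSGroup("my_group");
    System.out.println("Balancer ran for my_group: " + balancerRan);
}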

Example 14 with RegionState

Use of org.apache.hadoop.hbase.master.RegionState in project hbase by apache.

From the class ClusterStatus, method toString.

public String toString() {
    StringBuilder sb = new StringBuilder(1024);
    sb.append("Master: " + master);
    int backupMastersSize = getBackupMastersSize();
    sb.append("\nNumber of backup masters: " + backupMastersSize);
    if (backupMastersSize > 0) {
        for (ServerName serverName : backupMasters) {
            sb.append("\n  " + serverName);
        }
    }
    int serversSize = getServersSize();
    sb.append("\nNumber of live region servers: " + serversSize);
    if (serversSize > 0) {
        for (ServerName serverName : liveServers.keySet()) {
            sb.append("\n  " + serverName.getServerName());
        }
    }
    int deadServerSize = getDeadServersSize();
    sb.append("\nNumber of dead region servers: " + deadServerSize);
    if (deadServerSize > 0) {
        for (ServerName serverName : deadServers) {
            sb.append("\n  " + serverName);
        }
    }
    sb.append("\nAverage load: " + getAverageLoad());
    sb.append("\nNumber of requests: " + getRequestsCount());
    sb.append("\nNumber of regions: " + getRegionsCount());
    int ritSize = (intransition != null) ? intransition.size() : 0;
    sb.append("\nNumber of regions in transition: " + ritSize);
    if (ritSize > 0) {
        for (RegionState state : intransition) {
            sb.append("\n  " + state.toDescriptiveString());
        }
    }
    return sb.toString();
}
Also used : RegionState(org.apache.hadoop.hbase.master.RegionState)
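
A short usage sketch tying this back to Example 11: the same regions-in-transition collection that feeds toString() can also be walked directly. The Admin handle is assumed.

ClusterStatus status = admin.getClusterStatus();
// Prints the summary built by the toString() above.
System.out.println(status);
for (RegionState state : status.getRegionsInTransition()) {
    System.out.println("RIT: " + state.toDescriptiveString());
}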

Aggregations

RegionState (org.apache.hadoop.hbase.master.RegionState) 14
ServerName (org.apache.hadoop.hbase.ServerName) 6
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) 5
AssignmentManager (org.apache.hadoop.hbase.master.AssignmentManager) 4
TableName (org.apache.hadoop.hbase.TableName) 3
RegionStates (org.apache.hadoop.hbase.master.RegionStates) 3
ArrayList (java.util.ArrayList) 2
HashMap (java.util.HashMap) 2
ClusterStatus (org.apache.hadoop.hbase.ClusterStatus) 2
ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) 2
LiveServerInfo (org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.LiveServerInfo) 2
RegionInTransition (org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition) 2
HBaseProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos) 2
Test (org.junit.Test) 2
IOException (java.io.IOException) 1
Map (java.util.Map) 1
Lock (java.util.concurrent.locks.Lock) 1
Configuration (org.apache.hadoop.conf.Configuration) 1
ClusterId (org.apache.hadoop.hbase.ClusterId) 1
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor) 1