Example 11 with ServerLoad

use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.

the class TestRegionServerReadRequestMetrics method updateMetricsMap.

private static void updateMetricsMap() throws IOException, InterruptedException {
    for (Metric metric : Metric.values()) {
        requestsMapPrev.put(metric, requestsMap.get(metric));
    }
    ServerLoad serverLoad = null;
    RegionLoad regionLoadOuter = null;
    boolean metricsUpdated = false;
    for (int i = 0; i < MAX_TRY; i++) {
        for (ServerName serverName : serverNames) {
            serverLoad = admin.getClusterStatus().getLoad(serverName);
            Map<byte[], RegionLoad> regionsLoad = serverLoad.getRegionsLoad();
            for (HRegionInfo tableRegion : tableRegions) {
                RegionLoad regionLoad = regionsLoad.get(tableRegion.getRegionName());
                if (regionLoad != null) {
                    regionLoadOuter = regionLoad;
                    for (Metric metric : Metric.values()) {
                        if (getReadRequest(serverLoad, regionLoad, metric) > requestsMapPrev.get(metric)) {
                            for (Metric metricInner : Metric.values()) {
                                requestsMap.put(metricInner, getReadRequest(serverLoad, regionLoad, metricInner));
                            }
                            metricsUpdated = true;
                            break;
                        }
                    }
                }
            }
        }
        if (metricsUpdated) {
            break;
        }
        Thread.sleep(SLEEP_MS);
    }
    if (!metricsUpdated) {
        for (Metric metric : Metric.values()) {
            requestsMap.put(metric, getReadRequest(serverLoad, regionLoadOuter, metric));
        }
    }
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) ServerLoad(org.apache.hadoop.hbase.ServerLoad) RegionLoad(org.apache.hadoop.hbase.RegionLoad) ServerName(org.apache.hadoop.hbase.ServerName)
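
The getReadRequest helper and the Metric enum called above are defined elsewhere in the test class and are not shown in this snippet. Below is a minimal sketch of what such a helper could look like, assuming a simplified Metric enum that only distinguishes server-level from region-level read counts; the real test likely tracks additional counters (for example filtered reads) that this sketch omits.

// Hypothetical reconstruction; the actual enum and helper live in
// TestRegionServerReadRequestMetrics and may differ in detail.
enum Metric {
    SERVER_READ,
    REGION_READ
}

private static long getReadRequest(ServerLoad serverLoad, RegionLoad regionLoad, Metric metric) {
    switch (metric) {
        case SERVER_READ:
            // Read requests aggregated across every region hosted by the server.
            return serverLoad.getReadRequestsCount();
        case REGION_READ:
            // Read requests served by the single region under test.
            return regionLoad.getReadRequestsCount();
        default:
            throw new IllegalArgumentException("Unknown metric: " + metric);
    }
}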

Example 12 with ServerLoad

use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.

the class Action method unbalanceRegions.

protected void unbalanceRegions(ClusterStatus clusterStatus, List<ServerName> fromServers, List<ServerName> toServers, double fractionOfRegions) throws Exception {
    List<byte[]> victimRegions = new LinkedList<>();
    for (ServerName server : fromServers) {
        ServerLoad serverLoad = clusterStatus.getLoad(server);
        // Ugh.
        List<byte[]> regions = new LinkedList<>(serverLoad.getRegionsLoad().keySet());
        int victimRegionCount = (int) Math.ceil(fractionOfRegions * regions.size());
        LOG.debug("Removing " + victimRegionCount + " regions from " + server.getServerName());
        for (int i = 0; i < victimRegionCount; ++i) {
            int victimIx = RandomUtils.nextInt(regions.size());
            String regionId = HRegionInfo.encodeRegionName(regions.remove(victimIx));
            victimRegions.add(Bytes.toBytes(regionId));
        }
    }
    LOG.info("Moving " + victimRegions.size() + " regions from " + fromServers.size() + " servers to " + toServers.size() + " different servers");
    Admin admin = this.context.getHBaseIntegrationTestingUtility().getAdmin();
    for (byte[] victimRegion : victimRegions) {
        // trying to stop the monkey.
        if (context.isStopping()) {
            break;
        }
        int targetIx = RandomUtils.nextInt(toServers.size());
        admin.move(victimRegion, Bytes.toBytes(toServers.get(targetIx).getServerName()));
    }
}
Also used : ServerLoad(org.apache.hadoop.hbase.ServerLoad) ServerName(org.apache.hadoop.hbase.ServerName) Admin(org.apache.hadoop.hbase.client.Admin) LinkedList(java.util.LinkedList)
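
The per-server region lists built above come straight from ServerLoad.getRegionsLoad(), whose keys are region name bytes; unbalanceRegions() runs them through HRegionInfo.encodeRegionName() because Admin.move() expects the encoded form. A small illustrative helper that isolates the lookup (the name regionsOnServer is invented for this sketch, and java.util.List, ArrayList, and Collections imports are assumed):

// Returns the region names currently hosted by the given server, read from
// the ServerLoad snapshot carried inside ClusterStatus.
static List<byte[]> regionsOnServer(ClusterStatus clusterStatus, ServerName server) {
    ServerLoad serverLoad = clusterStatus.getLoad(server);
    if (serverLoad == null) {
        // The server may have gone away since the status snapshot was taken.
        return Collections.emptyList();
    }
    return new ArrayList<>(serverLoad.getRegionsLoad().keySet());
}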

Example 13 with ServerLoad

use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.

the class ProtobufUtil method convert.

/**
   * Convert a protobuf ClusterStatus to a ClusterStatus
   *
   * @param proto the protobuf ClusterStatus
   * @return the converted ClusterStatus
   */
public static ClusterStatus convert(ClusterStatusProtos.ClusterStatus proto) {
    Map<ServerName, ServerLoad> servers = null;
    servers = new HashMap<>(proto.getLiveServersList().size());
    for (LiveServerInfo lsi : proto.getLiveServersList()) {
        servers.put(ProtobufUtil.toServerName(lsi.getServer()), new ServerLoad(lsi.getServerLoad()));
    }
    Collection<ServerName> deadServers = null;
    deadServers = new ArrayList<>(proto.getDeadServersList().size());
    for (HBaseProtos.ServerName sn : proto.getDeadServersList()) {
        deadServers.add(ProtobufUtil.toServerName(sn));
    }
    Collection<ServerName> backupMasters = null;
    backupMasters = new ArrayList<>(proto.getBackupMastersList().size());
    for (HBaseProtos.ServerName sn : proto.getBackupMastersList()) {
        backupMasters.add(ProtobufUtil.toServerName(sn));
    }
    Set<RegionState> rit = null;
    rit = new HashSet<>(proto.getRegionsInTransitionList().size());
    for (RegionInTransition region : proto.getRegionsInTransitionList()) {
        RegionState value = RegionState.convert(region.getRegionState());
        rit.add(value);
    }
    String[] masterCoprocessors = null;
    final int numMasterCoprocessors = proto.getMasterCoprocessorsCount();
    masterCoprocessors = new String[numMasterCoprocessors];
    for (int i = 0; i < numMasterCoprocessors; i++) {
        masterCoprocessors[i] = proto.getMasterCoprocessors(i).getName();
    }
    return new ClusterStatus(proto.getHbaseVersion().getVersion(), ClusterId.convert(proto.getClusterId()).toString(), servers, deadServers, ProtobufUtil.toServerName(proto.getMaster()), backupMasters, rit, masterCoprocessors, proto.getBalancerOn());
}
Also used : ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) HBaseProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos) ServerLoad(org.apache.hadoop.hbase.ServerLoad) RegionInTransition(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionInTransition) RegionState(org.apache.hadoop.hbase.master.RegionState) LiveServerInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.LiveServerInfo) ServerName(org.apache.hadoop.hbase.ServerName) ClusterStatus(org.apache.hadoop.hbase.ClusterStatus)
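
To show what the converted object carries, here is an illustrative consumer of the resulting ClusterStatus. The method name printClusterSummary is made up for this sketch, but the getters used are standard ClusterStatus and ServerLoad accessors.

// Illustrative only: prints a one-line summary per live region server.
static void printClusterSummary(ClusterStatus status) {
    System.out.println("HBase version: " + status.getHBaseVersion());
    System.out.println("Dead servers: " + status.getDeadServerNames().size());
    for (ServerName server : status.getServers()) {
        ServerLoad load = status.getLoad(server);
        System.out.println(server + ": " + load.getNumberOfRegions() + " regions, "
            + load.getReadRequestsCount() + " reads, "
            + load.getWriteRequestsCount() + " writes");
    }
}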

Example 14 with ServerLoad

use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.

the class FavoredNodeLoadBalancer method balanceCluster.

@Override
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
    // TODO: Look at whether the Stochastic load balancer can be integrated with this
    List<RegionPlan> plans = new ArrayList<>();
    //perform a scan of the meta to get the latest updates (if any)
    SnapshotOfRegionAssignmentFromMeta snaphotOfRegionAssignment = new SnapshotOfRegionAssignmentFromMeta(super.services.getConnection());
    try {
        snaphotOfRegionAssignment.initialize();
    } catch (IOException ie) {
        LOG.warn("Not running balancer since exception was thrown " + ie);
        return plans;
    }
    Map<ServerName, ServerName> serverNameToServerNameWithoutCode = new HashMap<>();
    Map<ServerName, ServerName> serverNameWithoutCodeToServerName = new HashMap<>();
    ServerManager serverMgr = super.services.getServerManager();
    for (ServerName sn : serverMgr.getOnlineServersList()) {
        ServerName s = ServerName.valueOf(sn.getHostname(), sn.getPort(), ServerName.NON_STARTCODE);
        serverNameToServerNameWithoutCode.put(sn, s);
        serverNameWithoutCodeToServerName.put(s, sn);
    }
    for (Map.Entry<ServerName, List<HRegionInfo>> entry : clusterState.entrySet()) {
        ServerName currentServer = entry.getKey();
        //get a server without the startcode for the currentServer
        ServerName currentServerWithoutStartCode = ServerName.valueOf(currentServer.getHostname(), currentServer.getPort(), ServerName.NON_STARTCODE);
        List<HRegionInfo> list = entry.getValue();
        for (HRegionInfo region : list) {
            if (!FavoredNodesManager.isFavoredNodeApplicable(region)) {
                continue;
            }
            List<ServerName> favoredNodes = fnm.getFavoredNodes(region);
            if (favoredNodes == null || favoredNodes.get(0).equals(currentServerWithoutStartCode)) {
                //either favorednodes does not exist or we are already on the primary node
                continue;
            }
            ServerName destination = null;
            //check whether the primary is available
            destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(0));
            if (destination == null) {
                //check whether the region is on secondary/tertiary
                if (currentServerWithoutStartCode.equals(favoredNodes.get(1)) || currentServerWithoutStartCode.equals(favoredNodes.get(2))) {
                    continue;
                }
                //the region is currently on none of the favored nodes
                //get it on one of them if possible
                ServerLoad l1 = super.services.getServerManager().getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(1)));
                ServerLoad l2 = super.services.getServerManager().getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(2)));
                if (l1 != null && l2 != null) {
                    if (l1.getLoad() > l2.getLoad()) {
                        destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(2));
                    } else {
                        destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(1));
                    }
                } else if (l1 != null) {
                    destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(1));
                } else if (l2 != null) {
                    destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(2));
                }
            }
            if (destination != null) {
                RegionPlan plan = new RegionPlan(region, currentServer, destination);
                plans.add(plan);
            }
        }
    }
    return plans;
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) IOException(java.io.IOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) ServerLoad(org.apache.hadoop.hbase.ServerLoad) ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map)
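
The only ServerLoad detail this balancer relies on is getLoad(), the number of regions a server currently hosts, which it uses to send a region to the less loaded of the secondary and tertiary favored nodes. Here is a minimal sketch of that selection in isolation; the helper name pickLessLoaded is invented, and the inputs are assumed to be the resolved favored-node ServerNames with their ServerLoad values, either of which may be null.

// Chooses the less loaded of two candidate servers; returns null when neither
// has a live ServerLoad, mirroring the fall-through logic in balanceCluster().
static ServerName pickLessLoaded(ServerName secondary, ServerLoad secondaryLoad,
                                 ServerName tertiary, ServerLoad tertiaryLoad) {
    if (secondaryLoad != null && tertiaryLoad != null) {
        // getLoad() reports the number of regions currently hosted.
        return secondaryLoad.getLoad() <= tertiaryLoad.getLoad() ? secondary : tertiary;
    }
    if (secondaryLoad != null) {
        return secondary;
    }
    if (tertiaryLoad != null) {
        return tertiary;
    }
    return null;
}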

Example 15 with ServerLoad

use of org.apache.hadoop.hbase.ServerLoad in project hbase by apache.

the class HMaster method getClusterStatus.

/**
   * @return cluster status
   */
public ClusterStatus getClusterStatus() throws InterruptedIOException {
    // Build Set of backup masters from ZK nodes
    List<String> backupMasterStrings;
    try {
        backupMasterStrings = ZKUtil.listChildrenNoWatch(this.zooKeeper, this.zooKeeper.znodePaths.backupMasterAddressesZNode);
    } catch (KeeperException e) {
        LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e);
        backupMasterStrings = null;
    }
    List<ServerName> backupMasters = null;
    if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) {
        backupMasters = new ArrayList<>(backupMasterStrings.size());
        for (String s : backupMasterStrings) {
            try {
                byte[] bytes;
                try {
                    bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(this.zooKeeper.znodePaths.backupMasterAddressesZNode, s));
                } catch (InterruptedException e) {
                    throw new InterruptedIOException();
                }
                if (bytes != null) {
                    ServerName sn;
                    try {
                        sn = ProtobufUtil.parseServerNameFrom(bytes);
                    } catch (DeserializationException e) {
                        LOG.warn("Failed parse, skipping registering backup server", e);
                        continue;
                    }
                    backupMasters.add(sn);
                }
            } catch (KeeperException e) {
                LOG.warn(this.zooKeeper.prefix("Unable to get information about " + "backup servers"), e);
            }
        }
        Collections.sort(backupMasters, new Comparator<ServerName>() {

            @Override
            public int compare(ServerName s1, ServerName s2) {
                return s1.getServerName().compareTo(s2.getServerName());
            }
        });
    }
    String clusterId = fileSystemManager != null ? fileSystemManager.getClusterId().toString() : null;
    Set<RegionState> regionsInTransition = assignmentManager != null ? assignmentManager.getRegionStates().getRegionsInTransition() : null;
    String[] coprocessors = cpHost != null ? getMasterCoprocessors() : null;
    boolean balancerOn = loadBalancerTracker != null ? loadBalancerTracker.isBalancerOn() : false;
    Map<ServerName, ServerLoad> onlineServers = null;
    Set<ServerName> deadServers = null;
    if (serverManager != null) {
        deadServers = serverManager.getDeadServers().copyServerNames();
        onlineServers = serverManager.getOnlineServers();
    }
    return new ClusterStatus(VersionInfo.getVersion(), clusterId, onlineServers, deadServers, serverName, backupMasters, regionsInTransition, coprocessors, balancerOn);
}
Also used : InterruptedIOException(java.io.InterruptedIOException) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) ServerLoad(org.apache.hadoop.hbase.ServerLoad) ServerName(org.apache.hadoop.hbase.ServerName) KeeperException(org.apache.zookeeper.KeeperException) ClusterStatus(org.apache.hadoop.hbase.ClusterStatus)
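
On the client side, the status assembled here is what Admin.getClusterStatus() returns, with the ServerManager's online-server map exposed as ServerName-to-ServerLoad entries. A hedged end-to-end sketch follows, assuming an hbase-site.xml on the classpath that points at a reachable cluster; the class name ClusterStatusClient is made up for illustration.

import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusClient {
    public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml on the classpath points at a running cluster.
        try (Connection connection =
                 ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
            ClusterStatus status = admin.getClusterStatus();
            System.out.println("Active master: " + status.getMaster());
            System.out.println("Backup masters: " + status.getBackupMasters());
            for (ServerName server : status.getServers()) {
                ServerLoad load = status.getLoad(server);
                System.out.println(server.getServerName()
                    + " heap " + load.getUsedHeapMB() + "/" + load.getMaxHeapMB() + " MB, "
                    + load.getStorefiles() + " store files");
            }
        }
    }
}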

Aggregations

ServerLoad (org.apache.hadoop.hbase.ServerLoad): 15 usages
ServerName (org.apache.hadoop.hbase.ServerName): 13 usages
ClusterStatus (org.apache.hadoop.hbase.ClusterStatus): 6 usages
RegionLoad (org.apache.hadoop.hbase.RegionLoad): 5 usages
IOException (java.io.IOException): 4 usages
Test (org.junit.Test): 4 usages
ArrayList (java.util.ArrayList): 3 usages
HashMap (java.util.HashMap): 3 usages
List (java.util.List): 3 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 3 usages
Map (java.util.Map): 2 usages
TreeMap (java.util.TreeMap): 2 usages
Admin (org.apache.hadoop.hbase.client.Admin): 2 usages
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 2 usages
InterruptedIOException (java.io.InterruptedIOException): 1 usage
ArrayDeque (java.util.ArrayDeque): 1 usage
Deque (java.util.Deque): 1 usage
LinkedList (java.util.LinkedList): 1 usage
GET (javax.ws.rs.GET): 1 usage
Produces (javax.ws.rs.Produces): 1 usage