Example 26 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

the class RegionsRecoveryChore method prepareTableToReopenRegionsMap.

private void prepareTableToReopenRegionsMap(final Map<TableName, List<byte[]>> tableToReopenRegionsMap, final byte[] regionName, final int regionStoreRefCount) {
    final RegionInfo regionInfo = hMaster.getAssignmentManager().getRegionInfo(regionName);
    final TableName tableName = regionInfo.getTable();
    if (TableName.isMetaTableName(tableName)) {
        // Do not reopen regions of the meta table even if they have a
        // high store file reference count
        return;
    }
    LOG.warn("Region {} for Table {} has high storeFileRefCount {}, considering it for reopen..", regionInfo.getRegionNameAsString(), tableName, regionStoreRefCount);
    tableToReopenRegionsMap.computeIfAbsent(tableName, (key) -> new ArrayList<>()).add(regionName);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) PerClientRandomNonceGenerator(org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator) Logger(org.slf4j.Logger) LoggerFactory(org.slf4j.LoggerFactory) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) MapUtils(org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils) IOException(java.io.IOException) HashMap(java.util.HashMap) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) Stoppable(org.apache.hadoop.hbase.Stoppable) ArrayList(java.util.ArrayList) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics) List(java.util.List) HConstants(org.apache.hadoop.hbase.HConstants) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ServerName(org.apache.hadoop.hbase.ServerName) ScheduledChore(org.apache.hadoop.hbase.ScheduledChore)
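
The method above groups region names per table with Map#computeIfAbsent, creating each per-table list lazily on first use. The following is a minimal, self-contained sketch of that grouping idiom using plain String keys in place of TableName; the class name, table names and region names are invented for illustration and are not part of the HBase code.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupRegionsByTableSketch {

    public static void main(String[] args) {
        // Tables mapped to the encoded names of regions that should be reopened.
        Map<String, List<byte[]>> tableToReopenRegions = new HashMap<>();

        addRegion(tableToReopenRegions, "t1", "t1,,1.abc".getBytes());
        addRegion(tableToReopenRegions, "t1", "t1,row5,2.def".getBytes());
        addRegion(tableToReopenRegions, "t2", "t2,,3.ghi".getBytes());

        // Prints one line per table, e.g. "t1 -> 2 regions"
        tableToReopenRegions.forEach((table, regions) ->
            System.out.println(table + " -> " + regions.size() + " regions"));
    }

    private static void addRegion(Map<String, List<byte[]>> map, String table, byte[] regionName) {
        // Same pattern as prepareTableToReopenRegionsMap: create the list on first access.
        map.computeIfAbsent(table, key -> new ArrayList<>()).add(regionName);
    }
}

The computeIfAbsent call keeps the grouping to a single statement and avoids the explicit get/null-check/put sequence that older code needed.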

Example 27 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

the class FavoredNodeAssignmentHelper method placeSecondaryAndTertiaryWithRestrictions.

/**
 * For regions that share a primary, avoid placing the secondary and tertiary
 * on the same RS. Used for generating new assignments for the
 * primary/secondary/tertiary RegionServers.
 * @param primaryRSMap map of each region to its primary region server
 * @return the map of regions to the servers the region-files should be hosted on
 */
public Map<RegionInfo, ServerName[]> placeSecondaryAndTertiaryWithRestrictions(Map<RegionInfo, ServerName> primaryRSMap) {
    Map<ServerName, Set<RegionInfo>> serverToPrimaries = mapRSToPrimaries(primaryRSMap);
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap = new HashMap<>();
    for (Entry<RegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
        // Get the target region and its primary region server rack
        RegionInfo regionInfo = entry.getKey();
        ServerName primaryRS = entry.getValue();
        try {
            // Get the rack for the primary region server
            String primaryRack = getRackOfServer(primaryRS);
            ServerName[] favoredNodes = null;
            if (getTotalNumberOfRacks() == 1) {
                // Single rack case: have to pick the secondary and tertiary
                // from the same rack
                favoredNodes = singleRackCase(regionInfo, primaryRS, primaryRack);
            } else {
                favoredNodes = multiRackCaseWithRestrictions(serverToPrimaries, secondaryAndTertiaryMap, primaryRack, primaryRS, regionInfo);
            }
            if (favoredNodes != null) {
                secondaryAndTertiaryMap.put(regionInfo, favoredNodes);
                LOG.debug("Place the secondary and tertiary region server for region " + regionInfo.getRegionNameAsString());
            }
        } catch (Exception e) {
            LOG.warn("Cannot place the favored nodes for region " + regionInfo.getRegionNameAsString() + " because " + e, e);
            continue;
        }
    }
    return secondaryAndTertiaryMap;
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) HashMap(java.util.HashMap) ServerName(org.apache.hadoop.hbase.ServerName) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) IOException(java.io.IOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException)
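
The restriction described in the javadoc is essentially rack-aware filtering: with more than one rack available, the secondary and tertiary candidates are drawn from racks other than the primary's, while the single-rack case has to fall back to the primary's own rack. The snippet below is a simplified, hypothetical illustration of that filtering step, not the HBase implementation; the rack and server names are invented.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RackRestrictionSketch {

    public static void main(String[] args) {
        Map<String, List<String>> rackToServers = new HashMap<>();
        rackToServers.put("rack1", Arrays.asList("rs1", "rs2"));
        rackToServers.put("rack2", Arrays.asList("rs3", "rs4"));

        String primary = "rs1";
        String primaryRack = "rack1";

        List<String> candidates = new ArrayList<>();
        if (rackToServers.size() == 1) {
            // Single rack case: secondary and tertiary must share the primary's rack.
            candidates.addAll(rackToServers.get(primaryRack));
            candidates.remove(primary);
        } else {
            // Multi rack case: restrict candidates to servers outside the primary's rack.
            rackToServers.forEach((rack, servers) -> {
                if (!rack.equals(primaryRack)) {
                    candidates.addAll(servers);
                }
            });
        }
        // Prints: Secondary/tertiary candidates for primary rs1: [rs3, rs4]
        System.out.println("Secondary/tertiary candidates for primary " + primary + ": " + candidates);
    }
}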

Example 28 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

the class FavoredNodeAssignmentHelper method placePrimaryRSAsRoundRobin.

// Place the regions round-robin across the racks, picking one server from each
// rack at a time. Start with a random rack, and a random server from every rack.
// If a rack doesn't have enough servers, move on to the next rack, and so on,
// when choosing a primary.
// For example, with 4 racks (r1 .. r4) of 8 servers (s1..s8) each, one possible
// placement could be r2:s5, r3:s5, r4:s5, r1:s5, r2:s6, r3:s6..
// If one rack had fewer servers, say r3 with 3 servers, one possible
// placement could be r2:s5, <skip-r3>, r4:s5, r1:s5, r2:s6, <skip-r3> ...
// The regions should be distributed proportionally to the rack sizes.
public void placePrimaryRSAsRoundRobin(Map<ServerName, List<RegionInfo>> assignmentMap, Map<RegionInfo, ServerName> primaryRSMap, List<RegionInfo> regions) {
    List<String> rackList = new ArrayList<>(rackToRegionServerMap.size());
    rackList.addAll(rackToRegionServerMap.keySet());
    int rackIndex = ThreadLocalRandom.current().nextInt(rackList.size());
    int maxRackSize = 0;
    for (Map.Entry<String, List<ServerName>> r : rackToRegionServerMap.entrySet()) {
        if (r.getValue().size() > maxRackSize) {
            maxRackSize = r.getValue().size();
        }
    }
    int numIterations = 0;
    // Initialize the current processing host index.
    int serverIndex = ThreadLocalRandom.current().nextInt(maxRackSize);
    for (RegionInfo regionInfo : regions) {
        List<ServerName> currentServerList;
        String rackName;
        while (true) {
            rackName = rackList.get(rackIndex);
            numIterations++;
            // Get the server list for the current rack
            currentServerList = rackToRegionServerMap.get(rackName);
            if (serverIndex >= currentServerList.size()) {
                // not enough machines in this rack
                if (numIterations % rackList.size() == 0) {
                    if (++serverIndex >= maxRackSize)
                        serverIndex = 0;
                }
                if ((++rackIndex) >= rackList.size()) {
                    // reset the rack index to 0
                    rackIndex = 0;
                }
            } else
                break;
        }
        // Get the current process region server
        ServerName currentServer = currentServerList.get(serverIndex);
        // Place the current region with the current primary region server
        primaryRSMap.put(regionInfo, currentServer);
        if (assignmentMap != null) {
            List<RegionInfo> regionsForServer = assignmentMap.get(currentServer);
            if (regionsForServer == null) {
                regionsForServer = new ArrayList<>();
                assignmentMap.put(currentServer, regionsForServer);
            }
            regionsForServer.add(regionInfo);
        }
        // Set the next processing index
        if (numIterations % rackList.size() == 0) {
            ++serverIndex;
        }
        if ((++rackIndex) >= rackList.size()) {
            // reset the rack index to 0
            rackIndex = 0;
        }
    }
}
Also used : ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) List(java.util.List) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) HashMap(java.util.HashMap) Map(java.util.Map)
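
The comment block above describes the placement loop in words; the stripped-down sketch below runs the same rack-cycling logic on hard-coded data so the skip-and-wrap behaviour can be observed. Rack and server names are invented, and the random starting offsets of the real method are replaced with zero for a deterministic trace.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RoundRobinRackSketch {

    public static void main(String[] args) {
        Map<String, List<String>> rackToServers = new LinkedHashMap<>();
        rackToServers.put("r1", Arrays.asList("s1", "s2", "s3"));
        // Smaller rack: it gets skipped once the server index exceeds its size.
        rackToServers.put("r2", Arrays.asList("s1", "s2"));
        rackToServers.put("r3", Arrays.asList("s1", "s2", "s3"));

        List<String> racks = new ArrayList<>(rackToServers.keySet());
        int maxRackSize = rackToServers.values().stream().mapToInt(List::size).max().orElse(0);

        int rackIndex = 0;
        int serverIndex = 0;
        int iterations = 0;
        for (int region = 0; region < 8; region++) {
            while (true) {
                String rack = racks.get(rackIndex);
                iterations++;
                List<String> servers = rackToServers.get(rack);
                if (serverIndex < servers.size()) {
                    System.out.println("region-" + region + " -> " + rack + ":" + servers.get(serverIndex));
                    break;
                }
                // Not enough servers in this rack at this index: skip it, wrapping indexes as needed.
                if (iterations % racks.size() == 0) {
                    if (++serverIndex >= maxRackSize) {
                        serverIndex = 0;
                    }
                }
                if (++rackIndex >= racks.size()) {
                    rackIndex = 0;
                }
            }
            // Advance to the next rack, and to the next server index after a full pass over the racks.
            if (iterations % racks.size() == 0) {
                serverIndex++;
            }
            if (++rackIndex >= racks.size()) {
                rackIndex = 0;
            }
        }
    }
}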

Example 29 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

the class FavoredNodeAssignmentHelper method updateMetaWithFavoredNodesInfo.

/**
 * Update meta table with favored nodes info
 * @param regionToFavoredNodes map of RegionInfos to their favored nodes
 * @param connection connection to be used
 */
public static void updateMetaWithFavoredNodesInfo(Map<RegionInfo, List<ServerName>> regionToFavoredNodes, Connection connection) throws IOException {
    List<Put> puts = new ArrayList<>();
    for (Map.Entry<RegionInfo, List<ServerName>> entry : regionToFavoredNodes.entrySet()) {
        Put put = makePut(entry.getKey(), entry.getValue());
        if (put != null) {
            puts.add(put);
        }
    }
    try (Table table = connection.getTable(TableName.META_TABLE_NAME)) {
        table.put(puts);
    }
    LOG.info("Added " + puts.size() + " region favored nodes in META");
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) Put(org.apache.hadoop.hbase.client.Put)
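
updateMetaWithFavoredNodesInfo collects every favored-node Put into a list and writes them with a single table.put(List) call inside try-with-resources, so the Table is always closed. Below is a minimal sketch of that batching pattern against an ordinary table, assuming the HBase 2.x client API; the table name, column family, and row keys are invented.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchedPutSketch {

    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("demo_table"))) {
            List<Put> puts = new ArrayList<>();
            for (int i = 0; i < 3; i++) {
                Put put = new Put(Bytes.toBytes("row-" + i));
                put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v" + i));
                puts.add(put);
            }
            // One batched call, mirroring the table.put(puts) above.
            table.put(puts);
        }
    }
}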

Example 30 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

the class FavoredNodeLoadBalancer method assignSecondaryAndTertiaryNodesForRegion.

private void assignSecondaryAndTertiaryNodesForRegion(FavoredNodeAssignmentHelper assignmentHelper, List<RegionInfo> regions, Map<RegionInfo, ServerName> primaryRSMap) throws IOException {
    // figure the secondary and tertiary RSs
    Map<RegionInfo, ServerName[]> secondaryAndTertiaryRSMap = assignmentHelper.placeSecondaryAndTertiaryRS(primaryRSMap);
    Map<RegionInfo, List<ServerName>> regionFNMap = Maps.newHashMap();
    // now record all the assignments so that we can serve queries later
    for (RegionInfo region : regions) {
        // Store the favored nodes without startCode for the ServerName objects
        // We don't care about the startcode; but only the hostname really
        List<ServerName> favoredNodesForRegion = new ArrayList<>(3);
        ServerName sn = primaryRSMap.get(region);
        favoredNodesForRegion.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), ServerName.NON_STARTCODE));
        ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(region);
        if (secondaryAndTertiaryNodes != null) {
            favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(), secondaryAndTertiaryNodes[0].getPort(), ServerName.NON_STARTCODE));
            favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(), secondaryAndTertiaryNodes[1].getPort(), ServerName.NON_STARTCODE));
        }
        regionFNMap.put(region, favoredNodesForRegion);
    }
    fnm.updateFavoredNodes(regionFNMap);
}
Also used : ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) List(java.util.List)
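
The loop above deliberately drops the startcode when recording favored nodes because, as its inline comment notes, only the hostname (and port) really matters; which incarnation of the region server is currently running is irrelevant to the favored-nodes metadata. A small sketch of that normalization step, assuming the org.apache.hadoop.hbase.ServerName API used above, is shown here; the host name, port, and startcode are invented.

import org.apache.hadoop.hbase.ServerName;

public class StripStartcodeSketch {

    public static void main(String[] args) {
        ServerName withStartcode = ServerName.valueOf("rs1.example.com", 16020, 1700000000000L);

        // Keep only hostname and port; NON_STARTCODE marks the startcode as unknown/irrelevant.
        ServerName stripped = ServerName.valueOf(
            withStartcode.getHostname(), withStartcode.getPort(), ServerName.NON_STARTCODE);

        System.out.println(withStartcode + " -> " + stripped);
    }
}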

Aggregations

RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 824 uses
Test (org.junit.Test): 416 uses
TableName (org.apache.hadoop.hbase.TableName): 311 uses
ServerName (org.apache.hadoop.hbase.ServerName): 191 uses
ArrayList (java.util.ArrayList): 175 uses
IOException (java.io.IOException): 174 uses
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 174 uses
Path (org.apache.hadoop.fs.Path): 141 uses
List (java.util.List): 118 uses
HashMap (java.util.HashMap): 90 uses
Table (org.apache.hadoop.hbase.client.Table): 90 uses
Map (java.util.Map): 81 uses
Put (org.apache.hadoop.hbase.client.Put): 81 uses
Configuration (org.apache.hadoop.conf.Configuration): 80 uses
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 67 uses
TreeMap (java.util.TreeMap): 66 uses
Result (org.apache.hadoop.hbase.client.Result): 59 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 58 uses
Cell (org.apache.hadoop.hbase.Cell): 50 uses
Scan (org.apache.hadoop.hbase.client.Scan): 46 uses