Use of org.apache.hadoop.hbase.client.RegionInfo in the Apache HBase project:
class RegionHDFSBlockLocationFinder, method refreshLocalityChangedRegions.
/**
* If locality for a region has changed, that pretty certainly means our cache is out of date.
* Compare oldStatus and newStatus, refreshing any regions which have moved or changed locality.
*/
/**
 * If locality for a region has changed, that pretty certainly means our cache is out of date.
 * Compare oldStatus and newStatus, refreshing any regions which have moved or changed locality.
 */
private void refreshLocalityChangedRegions(ClusterMetrics oldStatus, ClusterMetrics newStatus) {
  if (oldStatus == null || newStatus == null) {
    LOG.debug("Skipping locality-based refresh due to oldStatus={}, newStatus={}", oldStatus, newStatus);
    return;
  }
  Map<ServerName, ServerMetrics> previousServers = oldStatus.getLiveServerMetrics();
  Map<ServerName, ServerMetrics> currentServers = newStatus.getLiveServerMetrics();
  // Index the cached RegionInfos by encoded name so RegionMetrics entries can be matched back.
  Map<String, RegionInfo> cachedByEncodedName = new HashMap<>(cache.asMap().size());
  for (RegionInfo cached : cache.asMap().keySet()) {
    cachedByEncodedName.put(cached.getEncodedName(), cached);
  }
  for (Map.Entry<ServerName, ServerMetrics> server : currentServers.entrySet()) {
    for (Map.Entry<byte[], RegionMetrics> metricsEntry : server.getValue().getRegionMetrics().entrySet()) {
      String encoded = RegionInfo.encodeRegionName(metricsEntry.getKey());
      RegionInfo cachedRegion = cachedByEncodedName.get(encoded);
      if (cachedRegion == null) {
        // Region is not in our cache, so there is nothing to refresh.
        continue;
      }
      float currentLocality = metricsEntry.getValue().getDataLocality();
      float previousLocality = getOldLocality(server.getKey(), metricsEntry.getKey(), previousServers);
      // Only refresh when locality moved by more than the tolerance EPSILON.
      if (Math.abs(currentLocality - previousLocality) > EPSILON) {
        LOG.debug("Locality for region {} changed from {} to {}, refreshing cache", cachedRegion.getEncodedName(), previousLocality, currentLocality);
        cache.refresh(cachedRegion);
      }
    }
  }
}
Use of org.apache.hadoop.hbase.client.RegionInfo in the Apache HBase project:
class AssignmentVerificationReport, method print.
/**
 * Prints the assignment verification report to stdout: region counts per favored-node
 * position, invalid assignments, locality averages (when locality enforcement is on),
 * and region-server dispersion/balance statistics.
 * @param isDetailMode when true, also lists the individual regions and servers behind
 *                     each summary count
 */
public void print(boolean isDetailMode) {
  if (!isFilledUp) {
    // Fixed: the two concatenated literals previously ran together as "reporthasn't".
    // Note this only warns; the method still prints whatever state is present.
    System.err.println("[Error] Region assignment verification report " + "hasn't been filled up");
  }
  DecimalFormat df = new DecimalFormat("#.##");
  // Print some basic information
  System.out.println("Region Assignment Verification for Table: " + tableName + "\n\tTotal regions : " + totalRegions);
  // Print the number of regions on each kinds of the favored nodes
  System.out.println("\tTotal regions on favored nodes " + totalFavoredAssignments);
  for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
    System.out.println("\t\tTotal regions on " + p.toString() + " region servers: " + favoredNodes[p.ordinal()]);
  }
  // Print the number of regions in each kinds of invalid assignment
  System.out.println("\tTotal unassigned regions: " + unAssignedRegionsList.size());
  if (isDetailMode) {
    for (RegionInfo region : unAssignedRegionsList) {
      System.out.println("\t\t" + region.getRegionNameAsString());
    }
  }
  System.out.println("\tTotal regions NOT on favored nodes: " + nonFavoredAssignedRegionList.size());
  if (isDetailMode) {
    for (RegionInfo region : nonFavoredAssignedRegionList) {
      System.out.println("\t\t" + region.getRegionNameAsString());
    }
  }
  System.out.println("\tTotal regions without favored nodes: " + regionsWithoutValidFavoredNodes.size());
  if (isDetailMode) {
    for (RegionInfo region : regionsWithoutValidFavoredNodes) {
      System.out.println("\t\t" + region.getRegionNameAsString());
    }
  }
  // Print the locality information if enabled
  if (this.enforceLocality && totalRegions != 0) {
    // Print the actual locality for this table
    float actualLocality = 100 * this.actualLocalitySummary / (float) totalRegions;
    System.out.println("\n\tThe actual avg locality is " + df.format(actualLocality) + " %");
    // Expected locality if all regions sat on each favored-node position
    for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
      float avgLocality = 100 * (favoredNodesLocalitySummary[p.ordinal()] / (float) totalRegions);
      System.out.println("\t\tThe expected avg locality if all regions" + " on the " + p.toString() + " region servers: " + df.format(avgLocality) + " %");
    }
  }
  // Print the region balancing information
  System.out.println("\n\tTotal hosting region servers: " + totalRegionServers);
  // Print the region balance information
  if (totalRegionServers != 0) {
    System.out.println("\tAvg dispersion num: " + df.format(avgDispersionNum) + " hosts;\tMax dispersion num: " + df.format(maxDispersionNum) + " hosts;\tMin dispersion num: " + df.format(minDispersionNum) + " hosts;");
    System.out.println("\t\tThe number of the region servers with the max" + " dispersion num: " + this.maxDispersionNumServerSet.size());
    if (isDetailMode) {
      printHServerAddressSet(maxDispersionNumServerSet);
    }
    System.out.println("\t\tThe number of the region servers with the min" + " dispersion num: " + this.minDispersionNumServerSet.size());
    if (isDetailMode) {
      // Fixed copy-paste bug: this detail listing previously printed the MAX dispersion
      // set under the "min dispersion num" heading.
      printHServerAddressSet(minDispersionNumServerSet);
    }
    System.out.println("\tAvg dispersion score: " + df.format(avgDispersionScore) + ";\tMax dispersion score: " + df.format(maxDispersionScore) + ";\tMin dispersion score: " + df.format(minDispersionScore) + ";");
    System.out.println("\t\tThe number of the region servers with the max" + " dispersion score: " + this.maxDispersionScoreServerSet.size());
    if (isDetailMode) {
      printHServerAddressSet(maxDispersionScoreServerSet);
    }
    System.out.println("\t\tThe number of the region servers with the min" + " dispersion score: " + this.minDispersionScoreServerSet.size());
    if (isDetailMode) {
      printHServerAddressSet(minDispersionScoreServerSet);
    }
    System.out.println("\tAvg regions/region server: " + df.format(avgRegionsOnRS) + ";\tMax regions/region server: " + maxRegionsOnRS + ";\tMin regions/region server: " + minRegionsOnRS + ";");
    // Print the details about the most loaded region servers
    System.out.println("\t\tThe number of the most loaded region servers: " + mostLoadedRSSet.size());
    if (isDetailMode) {
      printHServerAddressSet(mostLoadedRSSet);
    }
    // Print the details about the least loaded region servers
    System.out.println("\t\tThe number of the least loaded region servers: " + leastLoadedRSSet.size());
    if (isDetailMode) {
      printHServerAddressSet(leastLoadedRSSet);
    }
  }
  System.out.println("==============================");
}
Use of org.apache.hadoop.hbase.client.RegionInfo in the Apache HBase project:
class SnapshotOfRegionAssignmentFromMeta, method addRegion.
/**
 * Records a region in the snapshot's lookup maps: indexed by its full region name,
 * and grouped under its table.
 * @param regionInfo the region to record
 */
private void addRegion(RegionInfo regionInfo) {
  // Process the region name to region info map
  regionNameToRegionInfoMap.put(regionInfo.getRegionNameAsString(), regionInfo);
  // Group the region under its table; computeIfAbsent creates the per-table list on
  // first use and avoids the redundant re-put of an already-present list.
  tableToRegionMap.computeIfAbsent(regionInfo.getTable(), table -> new ArrayList<>()).add(regionInfo);
}
Use of org.apache.hadoop.hbase.client.RegionInfo in the Apache HBase project:
class AsyncAggregationClient, method min.
/**
 * Asynchronously computes the minimum value of the scanned column across all regions
 * covered by the scan, via the Aggregate coprocessor's getMin endpoint.
 * @return a future completed with the minimum value, or completed exceptionally if the
 *         scan/interpreter arguments are invalid or the RPC fails
 */
public static <R, S, P extends Message, Q extends Message, T extends Message> CompletableFuture<R> min(AsyncTable<?> table, ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) {
  CompletableFuture<R> future = new CompletableFuture<>();
  AggregateRequest req;
  try {
    req = validateArgAndGetPB(scan, ci, false);
  } catch (IOException e) {
    // Invalid arguments: surface the failure through the future rather than throwing.
    future.completeExceptionally(e);
    return future;
  }
  AbstractAggregationCallback<R> callback = new AbstractAggregationCallback<R>(future) {
    // Running minimum across all per-region responses seen so far.
    private R smallest;
    @Override
    protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
      if (resp.getFirstPartCount() <= 0) {
        return; // this region produced no candidate value
      }
      R candidate = getCellValueFromProto(ci, resp, 0);
      if (smallest == null || (candidate != null && ci.compare(smallest, candidate) > 0)) {
        smallest = candidate;
      }
    }
    @Override
    protected R getFinalResult() {
      return smallest;
    }
  };
  table.<AggregateService, AggregateResponse>coprocessorService(AggregateService::newStub,
      (stub, controller, rpcCallback) -> stub.getMin(controller, req, rpcCallback), callback)
    .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow())
    .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow())
    .execute();
  return future;
}
Use of org.apache.hadoop.hbase.client.RegionInfo in the Apache HBase project:
class AsyncAggregationClient, method sumByRegion.
// Returns a map of per-region aggregate values; the map key is the startRow of the region.
// Used as a building block for weighted computations (the per-region values feed a second pass).
private static <R, S, P extends Message, Q extends Message, T extends Message> CompletableFuture<NavigableMap<byte[], S>> sumByRegion(AsyncTable<?> table, ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) {
CompletableFuture<NavigableMap<byte[], S>> future = new CompletableFuture<NavigableMap<byte[], S>>();
AggregateRequest req;
try {
req = validateArgAndGetPB(scan, ci, false);
} catch (IOException e) {
// Invalid scan/interpreter arguments: fail the future instead of throwing.
future.completeExceptionally(e);
return future;
}
// Index of the last column in the first scanned family; selects which part of each
// region's response is treated as the sum value.
int firstPartIndex = scan.getFamilyMap().get(scan.getFamilies()[0]).size() - 1;
AbstractAggregationCallback<NavigableMap<byte[], S>> callback = new AbstractAggregationCallback<NavigableMap<byte[], S>>(future) {
// Accumulates one promoted value per region, keyed by region start key in byte order.
private final NavigableMap<byte[], S> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
@Override
protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
if (resp.getFirstPartCount() > 0) {
map.put(region.getStartKey(), getPromotedValueFromProto(ci, resp, firstPartIndex));
}
}
@Override
protected NavigableMap<byte[], S> getFinalResult() {
return map;
}
};
// NOTE(review): despite the method name, this invokes the getMedian endpoint — presumably
// the server-side AggregateImplementation.getMedian returns per-region sums used for the
// median computation; confirm against the coprocessor implementation.
table.<AggregateService, AggregateResponse>coprocessorService(AggregateService::newStub, (stub, controller, rpcCallback) -> stub.getMedian(controller, req, rpcCallback), callback).fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow()).toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow()).execute();
return future;
}
Aggregations.