
Example 41 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

the class RegionHDFSBlockLocationFinder method refreshLocalityChangedRegions.

/**
 * If locality for a region has changed, that pretty certainly means our cache is out of date.
 * Compare oldStatus and newStatus, refreshing any regions which have moved or changed locality.
 */
private void refreshLocalityChangedRegions(ClusterMetrics oldStatus, ClusterMetrics newStatus) {
    if (oldStatus == null || newStatus == null) {
        LOG.debug("Skipping locality-based refresh due to oldStatus={}, newStatus={}", oldStatus, newStatus);
        return;
    }
    Map<ServerName, ServerMetrics> oldServers = oldStatus.getLiveServerMetrics();
    Map<ServerName, ServerMetrics> newServers = newStatus.getLiveServerMetrics();
    Map<String, RegionInfo> regionsByName = new HashMap<>(cache.asMap().size());
    for (RegionInfo regionInfo : cache.asMap().keySet()) {
        regionsByName.put(regionInfo.getEncodedName(), regionInfo);
    }
    for (Map.Entry<ServerName, ServerMetrics> serverEntry : newServers.entrySet()) {
        Map<byte[], RegionMetrics> newRegions = serverEntry.getValue().getRegionMetrics();
        for (Map.Entry<byte[], RegionMetrics> regionEntry : newRegions.entrySet()) {
            String encodedName = RegionInfo.encodeRegionName(regionEntry.getKey());
            RegionInfo region = regionsByName.get(encodedName);
            if (region == null) {
                continue;
            }
            float newLocality = regionEntry.getValue().getDataLocality();
            float oldLocality = getOldLocality(serverEntry.getKey(), regionEntry.getKey(), oldServers);
            if (Math.abs(newLocality - oldLocality) > EPSILON) {
                LOG.debug("Locality for region {} changed from {} to {}, refreshing cache", region.getEncodedName(), oldLocality, newLocality);
                cache.refresh(region);
            }
        }
    }
}
Also used : HashMap(java.util.HashMap) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) Map(java.util.Map) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics)
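
The getOldLocality helper referenced above is not part of this snippet. A minimal, hypothetical sketch of what it could look like, assuming it returns a negative sentinel when the old metrics do not know the server or the region (the names mirror the code above; the byte[] lookup assumes the region metrics map is ordered by Bytes.BYTES_COMPARATOR, as HBase's ServerMetrics maps are):

private float getOldLocality(ServerName newServer, byte[] regionName, Map<ServerName, ServerMetrics> oldServers) {
    // Sketch only: -1 means "no previous locality known", which always differs
    // from a real locality value by more than EPSILON and so triggers a refresh.
    ServerMetrics serverMetrics = oldServers.get(newServer);
    if (serverMetrics == null) {
        return -1f;
    }
    RegionMetrics regionMetrics = serverMetrics.getRegionMetrics().get(regionName);
    if (regionMetrics == null) {
        return -1f;
    }
    return regionMetrics.getDataLocality();
}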

Example 42 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

the class AssignmentVerificationReport method print.

public void print(boolean isDetailMode) {
    if (!isFilledUp) {
        System.err.println("[Error] Region assignment verification report" + "hasn't been filled up");
    }
    DecimalFormat df = new java.text.DecimalFormat("#.##");
    // Print some basic information
    System.out.println("Region Assignment Verification for Table: " + tableName + "\n\tTotal regions : " + totalRegions);
    // Print the number of regions on each kinds of the favored nodes
    System.out.println("\tTotal regions on favored nodes " + totalFavoredAssignments);
    for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
        System.out.println("\t\tTotal regions on " + p.toString() + " region servers: " + favoredNodes[p.ordinal()]);
    }
    // Print the number of regions in each kinds of invalid assignment
    System.out.println("\tTotal unassigned regions: " + unAssignedRegionsList.size());
    if (isDetailMode) {
        for (RegionInfo region : unAssignedRegionsList) {
            System.out.println("\t\t" + region.getRegionNameAsString());
        }
    }
    System.out.println("\tTotal regions NOT on favored nodes: " + nonFavoredAssignedRegionList.size());
    if (isDetailMode) {
        for (RegionInfo region : nonFavoredAssignedRegionList) {
            System.out.println("\t\t" + region.getRegionNameAsString());
        }
    }
    System.out.println("\tTotal regions without favored nodes: " + regionsWithoutValidFavoredNodes.size());
    if (isDetailMode) {
        for (RegionInfo region : regionsWithoutValidFavoredNodes) {
            System.out.println("\t\t" + region.getRegionNameAsString());
        }
    }
    // Print the locality information if enabled
    if (this.enforceLocality && totalRegions != 0) {
        // Print the actual locality for this table
        float actualLocality = 100 * this.actualLocalitySummary / (float) totalRegions;
        System.out.println("\n\tThe actual avg locality is " + df.format(actualLocality) + " %");
        // favored nodes
        for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
            float avgLocality = 100 * (favoredNodesLocalitySummary[p.ordinal()] / (float) totalRegions);
            System.out.println("\t\tThe expected avg locality if all regions" + " on the " + p.toString() + " region servers: " + df.format(avgLocality) + " %");
        }
    }
    // Print the region balancing information
    System.out.println("\n\tTotal hosting region servers: " + totalRegionServers);
    // Print the region balance information
    if (totalRegionServers != 0) {
        System.out.println("\tAvg dispersion num: " + df.format(avgDispersionNum) + " hosts;\tMax dispersion num: " + df.format(maxDispersionNum) + " hosts;\tMin dispersion num: " + df.format(minDispersionNum) + " hosts;");
        System.out.println("\t\tThe number of the region servers with the max" + " dispersion num: " + this.maxDispersionNumServerSet.size());
        if (isDetailMode) {
            printHServerAddressSet(maxDispersionNumServerSet);
        }
        System.out.println("\t\tThe number of the region servers with the min" + " dispersion num: " + this.minDispersionNumServerSet.size());
        if (isDetailMode) {
            printHServerAddressSet(minDispersionNumServerSet);
        }
        System.out.println("\tAvg dispersion score: " + df.format(avgDispersionScore) + ";\tMax dispersion score: " + df.format(maxDispersionScore) + ";\tMin dispersion score: " + df.format(minDispersionScore) + ";");
        System.out.println("\t\tThe number of the region servers with the max" + " dispersion score: " + this.maxDispersionScoreServerSet.size());
        if (isDetailMode) {
            printHServerAddressSet(maxDispersionScoreServerSet);
        }
        System.out.println("\t\tThe number of the region servers with the min" + " dispersion score: " + this.minDispersionScoreServerSet.size());
        if (isDetailMode) {
            printHServerAddressSet(minDispersionScoreServerSet);
        }
        System.out.println("\tAvg regions/region server: " + df.format(avgRegionsOnRS) + ";\tMax regions/region server: " + maxRegionsOnRS + ";\tMin regions/region server: " + minRegionsOnRS + ";");
        // Print the details about the most loaded region servers
        System.out.println("\t\tThe number of the most loaded region servers: " + mostLoadedRSSet.size());
        if (isDetailMode) {
            printHServerAddressSet(mostLoadedRSSet);
        }
        // Print the details about the least loaded region servers
        System.out.println("\t\tThe number of the least loaded region servers: " + leastLoadedRSSet.size());
        if (isDetailMode) {
            printHServerAddressSet(leastLoadedRSSet);
        }
    }
    System.out.println("==============================");
}
Also used : DecimalFormat(java.text.DecimalFormat) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) FavoredNodesPlan(org.apache.hadoop.hbase.favored.FavoredNodesPlan)
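
The printHServerAddressSet helper used in the detail-mode branches is not shown in this example. A hypothetical sketch, assuming the sets hold ServerName entries as the surrounding fields suggest:

// Illustrative only: print each server of the set on its own indented line.
private void printHServerAddressSet(Set<ServerName> serverSet) {
    if (serverSet == null || serverSet.isEmpty()) {
        return;
    }
    for (ServerName server : serverSet) {
        System.out.println("\t\t\t" + server);
    }
}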

Example 43 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

the class SnapshotOfRegionAssignmentFromMeta method addRegion.

private void addRegion(RegionInfo regionInfo) {
    // Process the region name to region info map
    regionNameToRegionInfoMap.put(regionInfo.getRegionNameAsString(), regionInfo);
    // Process the table to region map
    TableName tableName = regionInfo.getTable();
    List<RegionInfo> regionList = tableToRegionMap.get(tableName);
    if (regionList == null) {
        regionList = new ArrayList<>();
    }
    // Add the current region info into the tableToRegionMap
    regionList.add(regionInfo);
    tableToRegionMap.put(tableName, regionList);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo)
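
The get-then-put sequence above can be written more compactly with Map.computeIfAbsent. A sketch under the assumption that tableToRegionMap is a Map<TableName, List<RegionInfo>> and regionNameToRegionInfoMap is a Map<String, RegionInfo>, as the code implies:

private void addRegion(RegionInfo regionInfo) {
    // Same grouping logic; computeIfAbsent creates the per-table list on first use.
    regionNameToRegionInfoMap.put(regionInfo.getRegionNameAsString(), regionInfo);
    tableToRegionMap.computeIfAbsent(regionInfo.getTable(), tableName -> new ArrayList<>()).add(regionInfo);
}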

Example 44 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

the class AsyncAggregationClient method min.

public static <R, S, P extends Message, Q extends Message, T extends Message> CompletableFuture<R> min(AsyncTable<?> table, ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) {
    CompletableFuture<R> future = new CompletableFuture<>();
    AggregateRequest req;
    try {
        req = validateArgAndGetPB(scan, ci, false);
    } catch (IOException e) {
        future.completeExceptionally(e);
        return future;
    }
    AbstractAggregationCallback<R> callback = new AbstractAggregationCallback<R>(future) {

        private R min;

        @Override
        protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
            if (resp.getFirstPartCount() > 0) {
                R result = getCellValueFromProto(ci, resp, 0);
                if (min == null || (result != null && ci.compare(min, result) > 0)) {
                    min = result;
                }
            }
        }

        @Override
        protected R getFinalResult() {
            return min;
        }
    };
    table.<AggregateService, AggregateResponse>coprocessorService(AggregateService::newStub,
            (stub, controller, rpcCallback) -> stub.getMin(controller, req, rpcCallback), callback)
        .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow())
        .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow())
        .execute();
    return future;
}
Also used : AdvancedScanResultConsumer(org.apache.hadoop.hbase.client.AdvancedScanResultConsumer) FutureUtils.addListener(org.apache.hadoop.hbase.util.FutureUtils.addListener) CoprocessorCallback(org.apache.hadoop.hbase.client.AsyncTable.CoprocessorCallback) ColumnInterpreter(org.apache.hadoop.hbase.coprocessor.ColumnInterpreter) AggregateResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateResponse) Result(org.apache.hadoop.hbase.client.Result) CompletableFuture(java.util.concurrent.CompletableFuture) AggregateRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateRequest) Message(org.apache.hbase.thirdparty.com.google.protobuf.Message) HConstants(org.apache.hadoop.hbase.HConstants) Map(java.util.Map) AggregationHelper.validateArgAndGetPB(org.apache.hadoop.hbase.client.coprocessor.AggregationHelper.validateArgAndGetPB) NoSuchElementException(java.util.NoSuchElementException) Cell(org.apache.hadoop.hbase.Cell) Bytes(org.apache.hadoop.hbase.util.Bytes) ReflectionUtils(org.apache.hadoop.hbase.util.ReflectionUtils) IOException(java.io.IOException) NavigableSet(java.util.NavigableSet) NavigableMap(java.util.NavigableMap) AggregateService(org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateService) Scan(org.apache.hadoop.hbase.client.Scan) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) TreeMap(java.util.TreeMap) AggregationHelper.getParsedGenericInstance(org.apache.hadoop.hbase.client.coprocessor.AggregationHelper.getParsedGenericInstance) AsyncTable(org.apache.hadoop.hbase.client.AsyncTable) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo)
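
A caller-side sketch of how min might be used; the table name, column family, and qualifier are illustrative only, and it assumes the AggregateImplementation coprocessor is enabled on the table and that the stored values are longs, so LongColumnInterpreter applies:

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AsyncAggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class MinExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
            // Scan a single column; the aggregation itself runs region by region on the servers.
            Scan scan = new Scan().addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
            CompletableFuture<Long> minFuture =
                AsyncAggregationClient.min(conn.getTable(TableName.valueOf("t1")), new LongColumnInterpreter(), scan);
            System.out.println("min = " + minFuture.get());
        }
    }
}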

Example 45 with RegionInfo

use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.

the class AsyncAggregationClient method sumByRegion.

// the map key is the startRow of the region
private static <R, S, P extends Message, Q extends Message, T extends Message> CompletableFuture<NavigableMap<byte[], S>> sumByRegion(AsyncTable<?> table, ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) {
    CompletableFuture<NavigableMap<byte[], S>> future = new CompletableFuture<>();
    AggregateRequest req;
    try {
        req = validateArgAndGetPB(scan, ci, false);
    } catch (IOException e) {
        future.completeExceptionally(e);
        return future;
    }
    int firstPartIndex = scan.getFamilyMap().get(scan.getFamilies()[0]).size() - 1;
    AbstractAggregationCallback<NavigableMap<byte[], S>> callback = new AbstractAggregationCallback<NavigableMap<byte[], S>>(future) {

        private final NavigableMap<byte[], S> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);

        @Override
        protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException {
            if (resp.getFirstPartCount() > 0) {
                map.put(region.getStartKey(), getPromotedValueFromProto(ci, resp, firstPartIndex));
            }
        }

        @Override
        protected NavigableMap<byte[], S> getFinalResult() {
            return map;
        }
    };
    table.<AggregateService, AggregateResponse>coprocessorService(AggregateService::newStub,
            (stub, controller, rpcCallback) -> stub.getMedian(controller, req, rpcCallback), callback)
        .fromRow(nullToEmpty(scan.getStartRow()), scan.includeStartRow())
        .toRow(nullToEmpty(scan.getStopRow()), scan.includeStopRow())
        .execute();
    return future;
}
Also used : AdvancedScanResultConsumer(org.apache.hadoop.hbase.client.AdvancedScanResultConsumer) FutureUtils.addListener(org.apache.hadoop.hbase.util.FutureUtils.addListener) CoprocessorCallback(org.apache.hadoop.hbase.client.AsyncTable.CoprocessorCallback) ColumnInterpreter(org.apache.hadoop.hbase.coprocessor.ColumnInterpreter) AggregateResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateResponse) Result(org.apache.hadoop.hbase.client.Result) CompletableFuture(java.util.concurrent.CompletableFuture) AggregateRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateRequest) Message(org.apache.hbase.thirdparty.com.google.protobuf.Message) HConstants(org.apache.hadoop.hbase.HConstants) Map(java.util.Map) AggregationHelper.validateArgAndGetPB(org.apache.hadoop.hbase.client.coprocessor.AggregationHelper.validateArgAndGetPB) NoSuchElementException(java.util.NoSuchElementException) Cell(org.apache.hadoop.hbase.Cell) Bytes(org.apache.hadoop.hbase.util.Bytes) ReflectionUtils(org.apache.hadoop.hbase.util.ReflectionUtils) IOException(java.io.IOException) NavigableSet(java.util.NavigableSet) NavigableMap(java.util.NavigableMap) AggregateService(org.apache.hadoop.hbase.shaded.protobuf.generated.AggregateProtos.AggregateService) Scan(org.apache.hadoop.hbase.client.Scan) InterfaceAudience(org.apache.yetus.audience.InterfaceAudience) TreeMap(java.util.TreeMap) AggregationHelper.getParsedGenericInstance(org.apache.hadoop.hbase.client.coprocessor.AggregationHelper.getParsedGenericInstance) AsyncTable(org.apache.hadoop.hbase.client.AsyncTable) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo)
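
Because the returned map is keyed by region start key, code inside the same class could fold the per-region values into one total. A hypothetical follow-up sketch, assuming a LongColumnInterpreter so the promoted type S is Long, and reusing the table and scan from the surrounding context (sumByRegion is private, so this would live inside AsyncAggregationClient):

sumByRegion(table, new LongColumnInterpreter(), scan).thenAccept(perRegionSums -> {
    long total = 0;
    for (Map.Entry<byte[], Long> entry : perRegionSums.entrySet()) {
        // The key is the start row of the region that produced this partial sum.
        total += entry.getValue();
    }
    System.out.println("regions contributing: " + perRegionSums.size() + ", total: " + total);
});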

Aggregations

RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 824
Test (org.junit.Test): 416
TableName (org.apache.hadoop.hbase.TableName): 311
ServerName (org.apache.hadoop.hbase.ServerName): 191
ArrayList (java.util.ArrayList): 175
IOException (java.io.IOException): 174
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 174
Path (org.apache.hadoop.fs.Path): 141
List (java.util.List): 118
HashMap (java.util.HashMap): 90
Table (org.apache.hadoop.hbase.client.Table): 90
Map (java.util.Map): 81
Put (org.apache.hadoop.hbase.client.Put): 81
Configuration (org.apache.hadoop.conf.Configuration): 80
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 67
TreeMap (java.util.TreeMap): 66
Result (org.apache.hadoop.hbase.client.Result): 59
FileSystem (org.apache.hadoop.fs.FileSystem): 58
Cell (org.apache.hadoop.hbase.Cell): 50
Scan (org.apache.hadoop.hbase.client.Scan): 46