Search in sources :

Example 1 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in the Apache HBase project.

From the class StorageClusterStatusResource, method get().

@GET
@Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
public Response get(@Context final UriInfo uriInfo) {
    if (LOG.isTraceEnabled()) {
        LOG.trace("GET " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    try {
        // Ask the admin for just the live- and dead-server portions of the cluster status.
        final ClusterMetrics clusterMetrics =
            servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
        final StorageClusterStatusModel statusModel = new StorageClusterStatusModel();
        // Cluster-wide aggregates.
        statusModel.setRegions(clusterMetrics.getRegionCount());
        statusModel.setRequests(clusterMetrics.getRequestCount());
        statusModel.setAverageLoad(clusterMetrics.getAverageLoad());
        // One live-node entry per region server, each carrying its per-region metrics.
        for (Map.Entry<ServerName, ServerMetrics> liveEntry : clusterMetrics.getLiveServerMetrics().entrySet()) {
            final ServerName serverName = liveEntry.getKey();
            final ServerMetrics serverLoad = liveEntry.getValue();
            final StorageClusterStatusModel.Node liveNode = statusModel.addLiveNode(
                serverName.getHostname() + ":" + Integer.toString(serverName.getPort()),
                serverName.getStartcode(),
                (int) serverLoad.getUsedHeapSize().get(Size.Unit.MEGABYTE),
                (int) serverLoad.getMaxHeapSize().get(Size.Unit.MEGABYTE));
            liveNode.setRequests(serverLoad.getRequestCount());
            for (RegionMetrics regionMetrics : serverLoad.getRegionMetrics().values()) {
                // Sizes are reported in MB, index/bloom sizes in KB, matching the model's units.
                liveNode.addRegion(
                    regionMetrics.getRegionName(),
                    regionMetrics.getStoreCount(),
                    regionMetrics.getStoreFileCount(),
                    (int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE),
                    (int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE),
                    (long) regionMetrics.getStoreFileIndexSize().get(Size.Unit.KILOBYTE),
                    regionMetrics.getReadRequestCount(),
                    regionMetrics.getCpRequestCount(),
                    regionMetrics.getWriteRequestCount(),
                    (int) regionMetrics.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE),
                    (int) regionMetrics.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE),
                    (int) regionMetrics.getBloomFilterSize().get(Size.Unit.KILOBYTE),
                    regionMetrics.getCompactingCellCount(),
                    regionMetrics.getCompactedCellCount());
            }
        }
        // Dead servers are reported by name only.
        for (ServerName deadServer : clusterMetrics.getDeadServerNames()) {
            statusModel.addDeadNode(deadServer.toString());
        }
        final ResponseBuilder builder = Response.ok(statusModel);
        builder.cacheControl(cacheControl);
        servlet.getMetrics().incrementSucessfulGetRequests(1);
        return builder.build();
    } catch (IOException e) {
        servlet.getMetrics().incrementFailedGetRequests(1);
        return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT).entity("Unavailable" + CRLF).build();
    }
}
Also used : ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) ServerName(org.apache.hadoop.hbase.ServerName) StorageClusterStatusModel(org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) IOException(java.io.IOException) ResponseBuilder(org.apache.hbase.thirdparty.javax.ws.rs.core.Response.ResponseBuilder) Map(java.util.Map) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics) Produces(org.apache.hbase.thirdparty.javax.ws.rs.Produces) GET(org.apache.hbase.thirdparty.javax.ws.rs.GET)

Example 2 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in the Apache HBase project.

From the class RegionsRecoveryChore, method chore().

@Override
protected void chore() {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Starting up Regions Recovery chore for reopening regions based on storeFileRefCount...");
    }
    try {
        // A non-positive threshold means the feature is disabled; only act when it is > 0.
        if (storeFileRefCountThreshold <= 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Reopening regions with very high storeFileRefCount is disabled. " + "Provide threshold value > 0 for {} to enable it.", HConstants.STORE_FILE_REF_COUNT_THRESHOLD);
            }
        } else {
            final ClusterMetrics metrics = hMaster.getClusterMetrics();
            final Map<ServerName, ServerMetrics> liveServers = metrics.getLiveServerMetrics();
            // Collect, per table, the regions whose compacted-store-file ref count exceeds the threshold.
            final Map<TableName, List<byte[]>> regionsToReopen = getTableToRegionsByRefCount(liveServers);
            if (MapUtils.isNotEmpty(regionsToReopen)) {
                regionsToReopen.forEach((tableName, regionNames) -> {
                    try {
                        LOG.warn("Reopening regions due to high storeFileRefCount. " + "TableName: {} , noOfRegions: {}", tableName, regionNames.size());
                        hMaster.reopenRegions(tableName, regionNames, NONCE_GENERATOR.getNonceGroup(), NONCE_GENERATOR.newNonce());
                    } catch (IOException e) {
                        LOG.error("{} tableName: {}, regionNames: {}", ERROR_REOPEN_REIONS_MSG, tableName, regionNames, e);
                    }
                });
            }
        }
    } catch (Exception e) {
        // Chore boundary: never let a failure here kill the chore scheduler.
        LOG.error("Error while reopening regions based on storeRefCount threshold", e);
    }
    if (LOG.isTraceEnabled()) {
        LOG.trace("Exiting Regions Recovery chore for reopening regions based on storeFileRefCount...");
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) ArrayList(java.util.ArrayList) List(java.util.List) IOException(java.io.IOException) IOException(java.io.IOException)

Example 3 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in the Apache HBase project.

From the class RegionsRecoveryChore, method getTableToRegionsByRefCount().

private Map<TableName, List<byte[]>> getTableToRegionsByRefCount(final Map<ServerName, ServerMetrics> serverMetricsMap) {
    final Map<TableName, List<byte[]>> tableToReopenRegionsMap = new HashMap<>();
    for (final ServerMetrics server : serverMetricsMap.values()) {
        for (final RegionMetrics region : server.getRegionMetrics().values()) {
            // Each region's compacted store files can carry different ref counts; the
            // region is flagged using the MAXIMUM ref count across its compacted store
            // files (not a cumulative sum), and only when that max exceeds the threshold.
            final int maxRefCount = region.getMaxCompactedStoreFileRefCount();
            if (maxRefCount <= storeFileRefCountThreshold) {
                continue;
            }
            prepareTableToReopenRegionsMap(tableToReopenRegionsMap, region.getRegionName(), maxRefCount);
        }
    }
    return tableToReopenRegionsMap;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) List(java.util.List) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics)

Example 4 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in the Apache HBase project.

From the class FavoredNodeLoadBalancer, method balanceTable().

// Builds region move plans that push each region back onto one of its favored nodes,
// preferring the primary (index 0), falling back to whichever of secondary (index 1)
// or tertiary (index 2) currently hosts fewer regions.
// NOTE(review): indices 1 and 2 are accessed unconditionally — assumes a favored-node
// list, when present, always holds 3 entries; confirm against FavoredNodesManager.
@Override
protected List<RegionPlan> balanceTable(TableName tableName, Map<ServerName, List<RegionInfo>> loadOfOneTable) {
    // TODO. Look at is whether Stochastic loadbalancer can be integrated with this
    List<RegionPlan> plans = new ArrayList<>();
    // Favored-node lists store ServerNames without startcodes; this map recovers the
    // live server (with startcode) for a startcode-less key.
    Map<ServerName, ServerName> serverNameWithoutCodeToServerName = new HashMap<>();
    for (ServerName sn : provider.getOnlineServersList()) {
        ServerName s = ServerName.valueOf(sn.getHostname(), sn.getPort(), ServerName.NON_STARTCODE);
        // FindBugs complains about useless store! serverNameToServerNameWithoutCode.put(sn, s);
        serverNameWithoutCodeToServerName.put(s, sn);
    }
    for (Map.Entry<ServerName, List<RegionInfo>> entry : loadOfOneTable.entrySet()) {
        ServerName currentServer = entry.getKey();
        // get a server without the startcode for the currentServer
        ServerName currentServerWithoutStartCode = ServerName.valueOf(currentServer.getHostname(), currentServer.getPort(), ServerName.NON_STARTCODE);
        List<RegionInfo> list = entry.getValue();
        for (RegionInfo region : list) {
            // Regions that don't participate in favored-node placement are skipped.
            if (!FavoredNodesManager.isFavoredNodeApplicable(region)) {
                continue;
            }
            List<ServerName> favoredNodes = fnm.getFavoredNodes(region);
            if (favoredNodes == null || favoredNodes.get(0).equals(currentServerWithoutStartCode)) {
                // either favorednodes does not exist or we are already on the primary node
                continue;
            }
            // check whether the primary is available
            ServerName destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(0));
            if (destination == null) {
                // check whether the region is on secondary/tertiary
                if (currentServerWithoutStartCode.equals(favoredNodes.get(1)) || currentServerWithoutStartCode.equals(favoredNodes.get(2))) {
                    continue;
                }
                // the region is currently on none of the favored nodes
                // get it on one of them if possible
                ServerMetrics l1 = provider.getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(1)));
                ServerMetrics l2 = provider.getLoad(serverNameWithoutCodeToServerName.get(favoredNodes.get(2)));
                if (l1 != null && l2 != null) {
                    // Both candidates are live: pick the one hosting fewer regions.
                    if (l1.getRegionMetrics().size() > l2.getRegionMetrics().size()) {
                        destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(2));
                    } else {
                        destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(1));
                    }
                } else if (l1 != null) {
                    destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(1));
                } else if (l2 != null) {
                    destination = serverNameWithoutCodeToServerName.get(favoredNodes.get(2));
                }
            }
            // destination stays null when neither primary, secondary, nor tertiary is
            // available, in which case no plan is produced for this region.
            if (destination != null) {
                RegionPlan plan = new RegionPlan(region, currentServer, destination);
                plans.add(plan);
            }
        }
    }
    return plans;
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) RegionPlan(org.apache.hadoop.hbase.master.RegionPlan) ServerName(org.apache.hadoop.hbase.ServerName) ArrayList(java.util.ArrayList) List(java.util.List) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) HashMap(java.util.HashMap) Map(java.util.Map)

Example 5 with ServerMetrics

Use of org.apache.hadoop.hbase.ServerMetrics in the Apache HBase project.

From the class RegionHDFSBlockLocationFinder, method getOldLocality().

private float getOldLocality(ServerName newServer, byte[] regionName, Map<ServerName, ServerMetrics> oldServers) {
    // Returns the region's previous data locality on newServer, or -1f when either
    // the server or the region has no entry in the old metrics snapshot.
    final ServerMetrics previousLoad = oldServers.get(newServer);
    if (previousLoad != null) {
        final RegionMetrics previousRegion = previousLoad.getRegionMetrics().get(regionName);
        if (previousRegion != null) {
            return previousRegion.getDataLocality();
        }
    }
    return -1f;
}
Also used : ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics)

Aggregations

ServerMetrics (org.apache.hadoop.hbase.ServerMetrics)37 ServerName (org.apache.hadoop.hbase.ServerName)27 RegionMetrics (org.apache.hadoop.hbase.RegionMetrics)19 ClusterMetrics (org.apache.hadoop.hbase.ClusterMetrics)18 HashMap (java.util.HashMap)13 List (java.util.List)11 ArrayList (java.util.ArrayList)10 Map (java.util.Map)10 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)10 Test (org.junit.Test)10 IOException (java.io.IOException)7 TreeMap (java.util.TreeMap)6 TableName (org.apache.hadoop.hbase.TableName)6 Configuration (org.apache.hadoop.conf.Configuration)5 Collections (java.util.Collections)4 Collectors (java.util.stream.Collectors)4 InterfaceAudience (org.apache.yetus.audience.InterfaceAudience)4 InterruptedIOException (java.io.InterruptedIOException)3 Arrays (java.util.Arrays)3 HashSet (java.util.HashSet)3