Example 1 with RegionMetrics

use of org.apache.hadoop.hbase.RegionMetrics in project hbase by apache.

the class StorageClusterStatusResource method get.

@GET
@Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
public Response get(@Context final UriInfo uriInfo) {
    if (LOG.isTraceEnabled()) {
        LOG.trace("GET " + uriInfo.getAbsolutePath());
    }
    servlet.getMetrics().incrementRequests(1);
    try {
        ClusterMetrics status = servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
        StorageClusterStatusModel model = new StorageClusterStatusModel();
        model.setRegions(status.getRegionCount());
        model.setRequests(status.getRequestCount());
        model.setAverageLoad(status.getAverageLoad());
        for (Map.Entry<ServerName, ServerMetrics> entry : status.getLiveServerMetrics().entrySet()) {
            ServerName sn = entry.getKey();
            ServerMetrics load = entry.getValue();
            StorageClusterStatusModel.Node node = model.addLiveNode(
                sn.getHostname() + ":" + sn.getPort(),
                sn.getStartcode(),
                (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE),
                (int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE));
            node.setRequests(load.getRequestCount());
            for (RegionMetrics region : load.getRegionMetrics().values()) {
                node.addRegion(
                    region.getRegionName(),
                    region.getStoreCount(),
                    region.getStoreFileCount(),
                    (int) region.getStoreFileSize().get(Size.Unit.MEGABYTE),
                    (int) region.getMemStoreSize().get(Size.Unit.MEGABYTE),
                    (long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE),
                    region.getReadRequestCount(),
                    region.getCpRequestCount(),
                    region.getWriteRequestCount(),
                    (int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE),
                    (int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE),
                    (int) region.getBloomFilterSize().get(Size.Unit.KILOBYTE),
                    region.getCompactingCellCount(),
                    region.getCompactedCellCount());
            }
        }
        for (ServerName name : status.getDeadServerNames()) {
            model.addDeadNode(name.toString());
        }
        ResponseBuilder response = Response.ok(model);
        response.cacheControl(cacheControl);
        servlet.getMetrics().incrementSucessfulGetRequests(1);
        return response.build();
    } catch (IOException e) {
        servlet.getMetrics().incrementFailedGetRequests(1);
        return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT).entity("Unavailable" + CRLF).build();
    }
}
Also used : ClusterMetrics(org.apache.hadoop.hbase.ClusterMetrics) ServerName(org.apache.hadoop.hbase.ServerName) StorageClusterStatusModel(org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) IOException(java.io.IOException) ResponseBuilder(org.apache.hbase.thirdparty.javax.ws.rs.core.Response.ResponseBuilder) Map(java.util.Map) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics) Produces(org.apache.hbase.thirdparty.javax.ws.rs.Produces) GET(org.apache.hbase.thirdparty.javax.ws.rs.GET)
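
For context, here is a minimal standalone sketch of the same ClusterMetrics traversal with the REST model plumbing stripped away. The class name RegionMetricsDump is ours, and the sketch assumes an HBase 2.x client on the classpath and a cluster reachable through the default hbase-site.xml configuration:

import java.util.EnumSet;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionMetricsDump {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            ClusterMetrics status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
            for (Map.Entry<ServerName, ServerMetrics> entry : status.getLiveServerMetrics().entrySet()) {
                for (RegionMetrics region : entry.getValue().getRegionMetrics().values()) {
                    // Region names are binary; toStringBinary keeps them printable.
                    System.out.printf("%s %s storefiles=%.1f MB%n",
                        entry.getKey().getHostname(),
                        Bytes.toStringBinary(region.getRegionName()),
                        region.getStoreFileSize().get(Size.Unit.MEGABYTE));
                }
            }
        }
    }
}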

Example 2 with RegionMetrics

use of org.apache.hadoop.hbase.RegionMetrics in project hbase by apache.

the class RegionsRecoveryChore method getTableToRegionsByRefCount.

private Map<TableName, List<byte[]>> getTableToRegionsByRefCount(final Map<ServerName, ServerMetrics> serverMetricsMap) {
    final Map<TableName, List<byte[]>> tableToReopenRegionsMap = new HashMap<>();
    for (ServerMetrics serverMetrics : serverMetricsMap.values()) {
        Map<byte[], RegionMetrics> regionMetricsMap = serverMetrics.getRegionMetrics();
        for (RegionMetrics regionMetrics : regionMetricsMap.values()) {
            // Each compacted store file in a region can have a different ref count.
            // We take the maximum ref count across all compacted store files (not the
            // cumulative count), and if that maximum exceeds the threshold, the region
            // should be reopened so the compacted files can be released.
            final int maxCompactedStoreFileRefCount = regionMetrics.getMaxCompactedStoreFileRefCount();
            if (maxCompactedStoreFileRefCount > storeFileRefCountThreshold) {
                final byte[] regionName = regionMetrics.getRegionName();
                prepareTableToReopenRegionsMap(tableToReopenRegionsMap, regionName, maxCompactedStoreFileRefCount);
            }
        }
    }
    return tableToReopenRegionsMap;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) List(java.util.List) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics)
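
A hedged sketch of how the serverMetricsMap argument is typically produced, and how the owning table can be recovered from a binary region name when grouping. The admin variable is an assumption; getClusterMetrics and RegionInfo.getTable are real HBase 2.x client API:

// 'admin' is assumed to be an open org.apache.hadoop.hbase.client.Admin.
Map<ServerName, ServerMetrics> serverMetricsMap =
    admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS))
        .getLiveServerMetrics();
// A region's owning table can be recovered from its binary name, which is
// presumably how prepareTableToReopenRegionsMap derives its map keys:
TableName table = RegionInfo.getTable(regionMetrics.getRegionName());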

Example 3 with RegionMetrics

use of org.apache.hadoop.hbase.RegionMetrics in project hbase by apache.

the class RegionHDFSBlockLocationFinder method getOldLocality.

private float getOldLocality(ServerName newServer, byte[] regionName, Map<ServerName, ServerMetrics> oldServers) {
    ServerMetrics serverMetrics = oldServers.get(newServer);
    if (serverMetrics == null) {
        return -1f;
    }
    RegionMetrics regionMetrics = serverMetrics.getRegionMetrics().get(regionName);
    if (regionMetrics == null) {
        return -1f;
    }
    return regionMetrics.getDataLocality();
}
Also used : ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics)
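
The -1f sentinel is doing real work here: in the caller (Example 4 below), a region that has just moved to newServer gets oldLocality = -1f, so the difference from any non-negative newLocality exceeds EPSILON and the cache entry is refreshed. A short usage sketch; serverName, regionName, newLocality, and EPSILON are assumed to be in scope:

// -1f means "no previous metrics for this region on that server", which
// deliberately forces a refresh for regions that have moved.
float oldLocality = getOldLocality(serverName, regionName, oldServers);
boolean changed = Math.abs(newLocality - oldLocality) > EPSILON;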

Example 4 with RegionMetrics

use of org.apache.hadoop.hbase.RegionMetrics in project hbase by apache.

the class RegionHDFSBlockLocationFinder method refreshLocalityChangedRegions.

/**
 * If locality for a region has changed, our cache is almost certainly out of date.
 * Compare oldStatus and newStatus, refreshing any regions which have moved or
 * changed locality.
 */
private void refreshLocalityChangedRegions(ClusterMetrics oldStatus, ClusterMetrics newStatus) {
    if (oldStatus == null || newStatus == null) {
        LOG.debug("Skipping locality-based refresh due to oldStatus={}, newStatus={}", oldStatus, newStatus);
        return;
    }
    Map<ServerName, ServerMetrics> oldServers = oldStatus.getLiveServerMetrics();
    Map<ServerName, ServerMetrics> newServers = newStatus.getLiveServerMetrics();
    Map<String, RegionInfo> regionsByName = new HashMap<>(cache.asMap().size());
    for (RegionInfo regionInfo : cache.asMap().keySet()) {
        regionsByName.put(regionInfo.getEncodedName(), regionInfo);
    }
    for (Map.Entry<ServerName, ServerMetrics> serverEntry : newServers.entrySet()) {
        Map<byte[], RegionMetrics> newRegions = serverEntry.getValue().getRegionMetrics();
        for (Map.Entry<byte[], RegionMetrics> regionEntry : newRegions.entrySet()) {
            String encodedName = RegionInfo.encodeRegionName(regionEntry.getKey());
            RegionInfo region = regionsByName.get(encodedName);
            if (region == null) {
                continue;
            }
            float newLocality = regionEntry.getValue().getDataLocality();
            float oldLocality = getOldLocality(serverEntry.getKey(), regionEntry.getKey(), oldServers);
            if (Math.abs(newLocality - oldLocality) > EPSILON) {
                LOG.debug("Locality for region {} changed from {} to {}, refreshing cache", region.getEncodedName(), oldLocality, newLocality);
                cache.refresh(region);
            }
        }
    }
}
Also used : HashMap(java.util.HashMap) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ServerName(org.apache.hadoop.hbase.ServerName) ServerMetrics(org.apache.hadoop.hbase.ServerMetrics) HashMap(java.util.HashMap) Map(java.util.Map) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics)
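
A hedged sketch of the driver that would feed this method: two ClusterMetrics snapshots taken some time apart. The explicit polling shown here is our assumption (inside HBase the finder receives cluster status from the balancer); the getClusterMetrics call is real client API:

ClusterMetrics oldStatus = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
// ... later, after regions may have moved or compacted ...
ClusterMetrics newStatus = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS));
refreshLocalityChangedRegions(oldStatus, newStatus);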

Example 5 with RegionMetrics

use of org.apache.hadoop.hbase.RegionMetrics in project hbase by apache.

the class TestRegionSizeCalculator method mockAdmin.

/**
 * Creates a mock Admin returning the given RegionMetrics for the test table.
 */
private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception {
    Admin mockAdmin = Mockito.mock(Admin.class);
    List<RegionMetrics> regionLoads = new ArrayList<>();
    for (RegionMetrics regionLoad : regionLoadArray) {
        regionLoads.add(regionLoad);
    }
    when(mockAdmin.getConfiguration()).thenReturn(configuration);
    when(mockAdmin.getRegionMetrics(sn, TableName.valueOf("sizeTestTable"))).thenReturn(regionLoads);
    return mockAdmin;
}
Also used : ArrayList(java.util.ArrayList) Admin(org.apache.hadoop.hbase.client.Admin) RegionMetrics(org.apache.hadoop.hbase.RegionMetrics)
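
To make the mock useful, the varargs need stubbed RegionMetrics instances. A hedged sketch of building one with Mockito; the stubbed values and the region name string are illustrative only, while Size and Bytes are real HBase classes:

// Stubbing a RegionMetrics for mockAdmin(...). Values are illustrative only.
RegionMetrics region = Mockito.mock(RegionMetrics.class);
when(region.getRegionName()).thenReturn(Bytes.toBytes("sizeTestTable,,1234.abc."));
when(region.getStoreFileSize()).thenReturn(new Size(123, Size.Unit.MEGABYTE));
Admin admin = mockAdmin(region);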

Aggregations

RegionMetrics (org.apache.hadoop.hbase.RegionMetrics): 25 usages
ServerMetrics (org.apache.hadoop.hbase.ServerMetrics): 18 usages
ServerName (org.apache.hadoop.hbase.ServerName): 16 usages
ClusterMetrics (org.apache.hadoop.hbase.ClusterMetrics): 7 usages
HashMap (java.util.HashMap): 6 usages
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 5 usages
Test (org.junit.Test): 5 usages
Map (java.util.Map): 4 usages
TreeMap (java.util.TreeMap): 4 usages
Size (org.apache.hadoop.hbase.Size): 4 usages
ArrayList (java.util.ArrayList): 3 usages
Configuration (org.apache.hadoop.conf.Configuration): 3 usages
Stoppable (org.apache.hadoop.hbase.Stoppable): 3 usages
IOException (java.io.IOException): 2 usages
List (java.util.List): 2 usages
TableName (org.apache.hadoop.hbase.TableName): 2 usages
UserMetrics (org.apache.hadoop.hbase.UserMetrics): 2 usages
InterruptedIOException (java.io.InterruptedIOException): 1 usage
InvocationTargetException (java.lang.reflect.InvocationTargetException): 1 usage
UnknownHostException (java.net.UnknownHostException): 1 usage