Use of org.apache.hadoop.hbase.RegionMetrics in project hbase by apache:
class StorageClusterStatusResource, method get.
@GET
@Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
public Response get(@Context final UriInfo uriInfo) {
  if (LOG.isTraceEnabled()) {
    LOG.trace("GET " + uriInfo.getAbsolutePath());
  }
  servlet.getMetrics().incrementRequests(1);
  try {
    // Only live- and dead-server details are needed to populate the model.
    ClusterMetrics status =
      servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
    StorageClusterStatusModel model = new StorageClusterStatusModel();
    model.setRegions(status.getRegionCount());
    model.setRequests(status.getRequestCount());
    model.setAverageLoad(status.getAverageLoad());
    // One live node per region server; each node carries its per-region metrics.
    for (Map.Entry<ServerName, ServerMetrics> entry : status.getLiveServerMetrics().entrySet()) {
      ServerName serverName = entry.getKey();
      ServerMetrics serverLoad = entry.getValue();
      int usedHeapMB = (int) serverLoad.getUsedHeapSize().get(Size.Unit.MEGABYTE);
      int maxHeapMB = (int) serverLoad.getMaxHeapSize().get(Size.Unit.MEGABYTE);
      StorageClusterStatusModel.Node node = model.addLiveNode(
        serverName.getHostname() + ":" + serverName.getPort(),
        serverName.getStartcode(), usedHeapMB, maxHeapMB);
      node.setRequests(serverLoad.getRequestCount());
      for (RegionMetrics region : serverLoad.getRegionMetrics().values()) {
        // Argument order must match StorageClusterStatusModel.Node#addRegion.
        node.addRegion(
          region.getRegionName(),
          region.getStoreCount(),
          region.getStoreFileCount(),
          (int) region.getStoreFileSize().get(Size.Unit.MEGABYTE),
          (int) region.getMemStoreSize().get(Size.Unit.MEGABYTE),
          (long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE),
          region.getReadRequestCount(),
          region.getCpRequestCount(),
          region.getWriteRequestCount(),
          (int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE),
          (int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE),
          (int) region.getBloomFilterSize().get(Size.Unit.KILOBYTE),
          region.getCompactingCellCount(),
          region.getCompactedCellCount());
      }
    }
    for (ServerName deadServer : status.getDeadServerNames()) {
      model.addDeadNode(deadServer.toString());
    }
    ResponseBuilder response = Response.ok(model);
    response.cacheControl(cacheControl);
    servlet.getMetrics().incrementSucessfulGetRequests(1);
    return response.build();
  } catch (IOException e) {
    // Admin call failed; report the cluster as unavailable rather than erroring out.
    servlet.getMetrics().incrementFailedGetRequests(1);
    return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT)
      .entity("Unavailable" + CRLF).build();
  }
}
Use of org.apache.hadoop.hbase.RegionMetrics in project hbase by apache:
class RegionsRecoveryChore, method getTableToRegionsByRefCount.
/**
 * Scans per-server region metrics and collects, keyed by table, the regions whose
 * maximum compacted-store-file ref count exceeds the configured threshold.
 */
private Map<TableName, List<byte[]>> getTableToRegionsByRefCount(final Map<ServerName, ServerMetrics> serverMetricsMap) {
  final Map<TableName, List<byte[]>> tableToReopenRegionsMap = new HashMap<>();
  for (final ServerMetrics metricsForServer : serverMetricsMap.values()) {
    for (final RegionMetrics metricsForRegion : metricsForServer.getRegionMetrics().values()) {
      // Each compacted store file of a region can carry a different ref count. The
      // region is only a reopen candidate when the *maximum* of those counts (not
      // the cumulative total across store files) crosses the threshold.
      final int maxRefCount = metricsForRegion.getMaxCompactedStoreFileRefCount();
      if (maxRefCount <= storeFileRefCountThreshold) {
        continue;
      }
      prepareTableToReopenRegionsMap(tableToReopenRegionsMap, metricsForRegion.getRegionName(), maxRefCount);
    }
  }
  return tableToReopenRegionsMap;
}
Use of org.apache.hadoop.hbase.RegionMetrics in project hbase by apache:
class RegionHDFSBlockLocationFinder, method getOldLocality.
/**
 * Looks up the data locality that {@code regionName} had on {@code newServer} in the
 * previous cluster snapshot.
 * @return the old locality, or -1 when the server or the region is absent from it
 */
private float getOldLocality(ServerName newServer, byte[] regionName, Map<ServerName, ServerMetrics> oldServers) {
  ServerMetrics oldServerMetrics = oldServers.get(newServer);
  if (oldServerMetrics != null) {
    RegionMetrics oldRegionMetrics = oldServerMetrics.getRegionMetrics().get(regionName);
    if (oldRegionMetrics != null) {
      return oldRegionMetrics.getDataLocality();
    }
  }
  // Server or region not present in the old snapshot.
  return -1f;
}
Use of org.apache.hadoop.hbase.RegionMetrics in project hbase by apache:
class RegionHDFSBlockLocationFinder, method refreshLocalityChangedRegions.
/**
 * If locality for a region has changed, that pretty certainly means our cache is out of date.
 * Compare oldStatus and newStatus, refreshing any regions which have moved or changed locality.
 */
private void refreshLocalityChangedRegions(ClusterMetrics oldStatus, ClusterMetrics newStatus) {
  if (oldStatus == null || newStatus == null) {
    LOG.debug("Skipping locality-based refresh due to oldStatus={}, newStatus={}", oldStatus, newStatus);
    return;
  }
  Map<ServerName, ServerMetrics> oldServers = oldStatus.getLiveServerMetrics();
  Map<ServerName, ServerMetrics> newServers = newStatus.getLiveServerMetrics();
  // Index the cached regions by encoded name so metrics entries can be matched to them.
  Map<String, RegionInfo> cachedByEncodedName = new HashMap<>(cache.asMap().size());
  for (RegionInfo cachedRegion : cache.asMap().keySet()) {
    cachedByEncodedName.put(cachedRegion.getEncodedName(), cachedRegion);
  }
  for (Map.Entry<ServerName, ServerMetrics> serverEntry : newServers.entrySet()) {
    ServerName server = serverEntry.getKey();
    for (Map.Entry<byte[], RegionMetrics> regionEntry : serverEntry.getValue().getRegionMetrics().entrySet()) {
      byte[] regionName = regionEntry.getKey();
      RegionInfo region = cachedByEncodedName.get(RegionInfo.encodeRegionName(regionName));
      if (region == null) {
        // Region is not in our cache; nothing to refresh.
        continue;
      }
      float newLocality = regionEntry.getValue().getDataLocality();
      float oldLocality = getOldLocality(server, regionName, oldServers);
      // A move or locality drift beyond EPSILON invalidates the cached block locations.
      if (Math.abs(newLocality - oldLocality) > EPSILON) {
        LOG.debug("Locality for region {} changed from {} to {}, refreshing cache",
          region.getEncodedName(), oldLocality, newLocality);
        cache.refresh(region);
      }
    }
  }
}
Use of org.apache.hadoop.hbase.RegionMetrics in project hbase by apache:
class TestRegionSizeCalculator, method mockAdmin.
/**
 * Creates a Mockito {@link Admin} mock returning RegionLoad info about given servers.
 * @param regionLoadArray the per-region metrics the mock should report for
 *          {@code sn} / table "sizeTestTable"
 * @return the configured Admin mock
 * @throws Exception if stubbing {@code getRegionMetrics} fails
 */
private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception {
  Admin mockAdmin = Mockito.mock(Admin.class);
  // List.of snapshots the varargs array directly; no manual copy loop needed.
  List<RegionMetrics> regionLoads = List.of(regionLoadArray);
  when(mockAdmin.getConfiguration()).thenReturn(configuration);
  when(mockAdmin.getRegionMetrics(sn, TableName.valueOf("sizeTestTable"))).thenReturn(regionLoads);
  return mockAdmin;
}
Aggregations