Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
From the class RegionHDFSBlockLocationFinder, method refreshLocalityChangedRegions:
/**
 * If locality for a region has changed, that pretty certainly means our cache is out of date.
 * Compare oldStatus and newStatus, refreshing any regions which have moved or changed locality.
 */
private void refreshLocalityChangedRegions(ClusterMetrics oldStatus, ClusterMetrics newStatus) {
  if (oldStatus == null || newStatus == null) {
    LOG.debug("Skipping locality-based refresh due to oldStatus={}, newStatus={}", oldStatus,
      newStatus);
    return;
  }
  Map<ServerName, ServerMetrics> oldServers = oldStatus.getLiveServerMetrics();
  Map<ServerName, ServerMetrics> newServers = newStatus.getLiveServerMetrics();
  Map<String, RegionInfo> regionsByName = new HashMap<>(cache.asMap().size());
  for (RegionInfo regionInfo : cache.asMap().keySet()) {
    regionsByName.put(regionInfo.getEncodedName(), regionInfo);
  }
  for (Map.Entry<ServerName, ServerMetrics> serverEntry : newServers.entrySet()) {
    Map<byte[], RegionMetrics> newRegions = serverEntry.getValue().getRegionMetrics();
    for (Map.Entry<byte[], RegionMetrics> regionEntry : newRegions.entrySet()) {
      String encodedName = RegionInfo.encodeRegionName(regionEntry.getKey());
      RegionInfo region = regionsByName.get(encodedName);
      if (region == null) {
        continue;
      }
      float newLocality = regionEntry.getValue().getDataLocality();
      float oldLocality = getOldLocality(serverEntry.getKey(), regionEntry.getKey(), oldServers);
      if (Math.abs(newLocality - oldLocality) > EPSILON) {
        LOG.debug("Locality for region {} changed from {} to {}, refreshing cache",
          region.getEncodedName(), oldLocality, newLocality);
        cache.refresh(region);
      }
    }
  }
}
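The getOldLocality(...) helper referenced above is not part of this snippet. A minimal sketch of such a lookup, assuming the old ServerMetrics map keys its RegionMetrics by region name with a byte[]-aware comparator (as the test helper further down does); the real implementation in RegionHDFSBlockLocationFinder may differ:

// Hypothetical sketch of the getOldLocality helper referenced above; returns a
// sentinel when the region was not reported on that server in the old snapshot,
// so the |newLocality - oldLocality| > EPSILON check triggers a refresh.
private float getOldLocality(ServerName newServer, byte[] regionName,
  Map<ServerName, ServerMetrics> oldServers) {
  ServerMetrics serverMetrics = oldServers.get(newServer);
  if (serverMetrics == null) {
    return -1f;
  }
  RegionMetrics regionMetrics = serverMetrics.getRegionMetrics().get(regionName);
  if (regionMetrics == null) {
    return -1f;
  }
  return regionMetrics.getDataLocality();
}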
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
From the class HMaster, method getClusterMetricsWithoutCoprocessor:
public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet<Option> options)
  throws InterruptedIOException {
  ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder();
  // we return all information to client if the list of Option is empty.
  if (options.isEmpty()) {
    options = EnumSet.allOf(Option.class);
  }
  // TASKS and/or LIVE_SERVERS will populate this map, which will be given to the builder if
  // not null after option processing completes.
  Map<ServerName, ServerMetrics> serverMetricsMap = null;
  for (Option opt : options) {
    switch (opt) {
      case HBASE_VERSION:
        builder.setHBaseVersion(VersionInfo.getVersion());
        break;
      case CLUSTER_ID:
        builder.setClusterId(getClusterId());
        break;
      case MASTER:
        builder.setMasterName(getServerName());
        break;
      case BACKUP_MASTERS:
        builder.setBackerMasterNames(getBackupMasters());
        break;
      case TASKS: {
        // Master tasks
        builder.setMasterTasks(TaskMonitor.get().getTasks().stream()
          .map(task -> ServerTaskBuilder.newBuilder().setDescription(task.getDescription())
            .setStatus(task.getStatus())
            .setState(ServerTask.State.valueOf(task.getState().name()))
            .setStartTime(task.getStartTime())
            .setCompletionTime(task.getCompletionTimestamp()).build())
          .collect(Collectors.toList()));
        // Add entries to serverMetricsMap for all live servers, if we haven't already done so
        if (serverMetricsMap == null) {
          serverMetricsMap = getOnlineServers();
        }
        break;
      }
      case LIVE_SERVERS: {
        // Add entries to serverMetricsMap for all live servers, if we haven't already done so
        if (serverMetricsMap == null) {
          serverMetricsMap = getOnlineServers();
        }
        break;
      }
      case DEAD_SERVERS: {
        if (serverManager != null) {
          builder.setDeadServerNames(
            new ArrayList<>(serverManager.getDeadServers().copyServerNames()));
        }
        break;
      }
      case MASTER_COPROCESSORS: {
        if (cpHost != null) {
          builder.setMasterCoprocessorNames(Arrays.asList(getMasterCoprocessors()));
        }
        break;
      }
      case REGIONS_IN_TRANSITION: {
        if (assignmentManager != null) {
          builder.setRegionsInTransition(
            assignmentManager.getRegionStates().getRegionsStateInTransition());
        }
        break;
      }
      case BALANCER_ON: {
        if (loadBalancerTracker != null) {
          builder.setBalancerOn(loadBalancerTracker.isBalancerOn());
        }
        break;
      }
      case MASTER_INFO_PORT: {
        if (infoServer != null) {
          builder.setMasterInfoPort(infoServer.getPort());
        }
        break;
      }
      case SERVERS_NAME: {
        if (serverManager != null) {
          builder.setServerNames(serverManager.getOnlineServersList());
        }
        break;
      }
      case TABLE_TO_REGIONS_COUNT: {
        if (isActiveMaster() && isInitialized() && assignmentManager != null) {
          try {
            Map<TableName, RegionStatesCount> tableRegionStatesCountMap = new HashMap<>();
            Map<String, TableDescriptor> tableDescriptorMap = getTableDescriptors().getAll();
            for (TableDescriptor tableDescriptor : tableDescriptorMap.values()) {
              TableName tableName = tableDescriptor.getTableName();
              RegionStatesCount regionStatesCount =
                assignmentManager.getRegionStatesCount(tableName);
              tableRegionStatesCountMap.put(tableName, regionStatesCount);
            }
            builder.setTableRegionStatesCount(tableRegionStatesCountMap);
          } catch (IOException e) {
            LOG.error("Error while populating TABLE_TO_REGIONS_COUNT for Cluster Metrics..", e);
          }
        }
        break;
      }
    }
  }
  if (serverMetricsMap != null) {
    builder.setLiveServerMetrics(serverMetricsMap);
  }
  return builder.build();
}
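On the client side, the same Option filtering is exposed through Admin#getClusterMetrics(EnumSet). The following is a minimal sketch of a caller, assuming a standard Configuration; the chosen options and the printed summary are purely illustrative:

// Sketch only: requests a subset of cluster metrics and prints a per-server region
// count. Configuration handling and the option subset are illustrative choices.
public static void printLiveServerRegionCounts(Configuration conf) throws IOException {
  try (Connection connection = ConnectionFactory.createConnection(conf);
    Admin admin = connection.getAdmin()) {
    ClusterMetrics metrics =
      admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
    for (Map.Entry<ServerName, ServerMetrics> entry : metrics.getLiveServerMetrics().entrySet()) {
      System.out.println(entry.getKey() + " hosts "
        + entry.getValue().getRegionMetrics().size() + " regions");
    }
    System.out.println("Dead servers: " + metrics.getDeadServerNames().size());
  }
}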
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
From the class Action, method unbalanceRegions:
protected void unbalanceRegions(ClusterMetrics clusterStatus, List<ServerName> fromServers,
  List<ServerName> toServers, double fractionOfRegions) throws Exception {
  List<byte[]> victimRegions = new LinkedList<>();
  for (Map.Entry<ServerName, ServerMetrics> entry : clusterStatus.getLiveServerMetrics()
    .entrySet()) {
    ServerName sn = entry.getKey();
    ServerMetrics serverLoad = entry.getValue();
    // Ugh.
    List<byte[]> regions = new LinkedList<>(serverLoad.getRegionMetrics().keySet());
    int victimRegionCount = (int) Math.ceil(fractionOfRegions * regions.size());
    getLogger().debug("Removing {} regions from {}", victimRegionCount, sn);
    for (int i = 0; i < victimRegionCount; ++i) {
      int victimIx = RandomUtils.nextInt(0, regions.size());
      String regionId = RegionInfo.encodeRegionName(regions.remove(victimIx));
      victimRegions.add(Bytes.toBytes(regionId));
    }
  }
  getLogger().info("Moving {} regions from {} servers to {} different servers",
    victimRegions.size(), fromServers.size(), toServers.size());
  Admin admin = this.context.getHBaseIntegrationTestingUtility().getAdmin();
  for (byte[] victimRegion : victimRegions) {
    // trying to stop the monkey.
    if (context.isStopping()) {
      break;
    }
    int targetIx = RandomUtils.nextInt(0, toServers.size());
    admin.move(victimRegion, toServers.get(targetIx));
  }
}
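The same iteration over getLiveServerMetrics() can be used to check the resulting distribution; a small sketch with a made-up helper name:

// Hypothetical helper, for illustration only: counts regions per live server from a
// ClusterMetrics snapshot, e.g. to compare the distribution before and after
// unbalanceRegions has run.
private static Map<ServerName, Integer> regionCountsPerServer(ClusterMetrics clusterStatus) {
  Map<ServerName, Integer> counts = new HashMap<>();
  for (Map.Entry<ServerName, ServerMetrics> entry : clusterStatus.getLiveServerMetrics()
    .entrySet()) {
    counts.put(entry.getKey(), entry.getValue().getRegionMetrics().size());
  }
  return counts;
}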
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
From the class TestRegionHDFSBlockLocationFinder, method getMetricsWithLocality:
private ClusterMetrics getMetricsWithLocality(ServerName serverName, byte[] region,
  float locality) {
  RegionMetrics regionMetrics = mock(RegionMetrics.class);
  when(regionMetrics.getDataLocality()).thenReturn(locality);
  Map<byte[], RegionMetrics> regionMetricsMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  regionMetricsMap.put(region, regionMetrics);
  ServerMetrics serverMetrics = mock(ServerMetrics.class);
  when(serverMetrics.getRegionMetrics()).thenReturn(regionMetricsMap);
  Map<ServerName, ServerMetrics> serverMetricsMap = new HashMap<>();
  serverMetricsMap.put(serverName, serverMetrics);
  ClusterMetrics metrics = mock(ClusterMetrics.class);
  when(metrics.getLiveServerMetrics()).thenReturn(serverMetricsMap);
  return metrics;
}
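A test might drive the locality-refresh logic with this helper roughly as follows; "finder" and "region" are assumed test fixtures, and the locality values are arbitrary:

// Illustrative use of the helper above; "finder" and "region" are assumed fixtures
// of the test class, and the locality values are arbitrary.
ServerName testServer = ServerName.valueOf("host-1", 12345, 12345);
// Prime the finder with an initial locality for the region...
finder.setClusterMetrics(getMetricsWithLocality(testServer, region.getRegionName(), 0.1f));
// ...then report a changed locality, which should trigger a cache refresh for that region.
finder.setClusterMetrics(getMetricsWithLocality(testServer, region.getRegionName(), 0.9f));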
Use of org.apache.hadoop.hbase.ServerMetrics in project hbase by apache.
From the class TestRegionHDFSBlockLocationFinder, method testMapHostNameToServerName:
@Test
public void testMapHostNameToServerName() throws Exception {
  assertTrue(finder.mapHostNameToServerName(null).isEmpty());
  List<String> hosts = new ArrayList<>();
  for (int i = 0; i < 10; i += 2) {
    hosts.add("host-" + i);
  }
  assertTrue(finder.mapHostNameToServerName(hosts).isEmpty());
  Map<ServerName, ServerMetrics> serverMetrics = new HashMap<>();
  for (int i = 0; i < 10; i += 2) {
    ServerName sn = ServerName.valueOf("host-" + i, 12345, 12345);
    serverMetrics.put(sn, null);
  }
  ClusterMetrics metrics = mock(ClusterMetrics.class);
  when(metrics.getLiveServerMetrics()).thenReturn(serverMetrics);
  finder.setClusterMetrics(metrics);
  List<ServerName> sns = finder.mapHostNameToServerName(hosts);
  assertEquals(5, sns.size());
  for (int i = 0; i < 5; i++) {
    ServerName sn = sns.get(i);
    assertEquals("host-" + (2 * i), sn.getHostname());
    assertEquals(12345, sn.getPort());
    assertEquals(12345, sn.getStartcode());
  }
}