Example usage of org.apache.hadoop.hbase.RegionMetrics in the Apache HBase project, taken from the mockServerMetricsWithCpRequests method of the TestStochasticLoadBalancer class.
/**
 * Builds a mocked {@link ServerMetrics} for the given regions, where every region
 * reports the supplied coprocessor request count, zero read/write requests, and
 * zero-size memstore and store files.
 *
 * @param regionsOnServer regions the mocked server should host
 * @param cpRequestCount coprocessor request count stubbed for every region
 * @return the mocked server metrics
 */
private ServerMetrics mockServerMetricsWithCpRequests(List<RegionInfo> regionsOnServer, long cpRequestCount) {
  ServerMetrics metrics = mock(ServerMetrics.class);
  // Keyed by region name; byte[] keys require an explicit comparator.
  Map<byte[], RegionMetrics> loadByRegionName = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (RegionInfo region : regionsOnServer) {
    RegionMetrics load = mock(RegionMetrics.class);
    when(load.getCpRequestCount()).thenReturn(cpRequestCount);
    when(load.getReadRequestCount()).thenReturn(0L);
    when(load.getWriteRequestCount()).thenReturn(0L);
    when(load.getStoreFileSize()).thenReturn(Size.ZERO);
    when(load.getMemStoreSize()).thenReturn(Size.ZERO);
    loadByRegionName.put(region.getRegionName(), load);
  }
  when(metrics.getRegionMetrics()).thenReturn(loadByRegionName);
  return metrics;
}
Example usage of org.apache.hadoop.hbase.RegionMetrics in the Apache HBase project, taken from the mockRegion method of the TestRegionSizeCalculator class.
/**
 * Creates a mock of a region with the given name and store-file size.
 *
 * @param regionName name the mocked region reports
 * @param fileSizeMb store-file size of the region, in megabytes
 * @return the mocked region metrics
 */
private RegionMetrics mockRegion(String regionName, int fileSizeMb) {
  RegionMetrics mockedRegion = Mockito.mock(RegionMetrics.class);
  byte[] encodedName = Bytes.toBytes(regionName);
  when(mockedRegion.getNameAsString()).thenReturn(regionName);
  when(mockedRegion.getRegionName()).thenReturn(encodedName);
  when(mockedRegion.getStoreFileSize()).thenReturn(new Size(fileSizeMb, Size.Unit.MEGABYTE));
  return mockedRegion;
}
Example usage of org.apache.hadoop.hbase.RegionMetrics in the Apache HBase project, taken from the createDummyClusterMetrics method of the TestUtils class.
/**
 * Builds a synthetic {@link ClusterMetrics} for tests: two live servers carrying
 * three regions and two user-metric entries each, one dead server, and one region
 * in transition.
 *
 * @return a fully populated dummy ClusterMetrics
 */
public static ClusterMetrics createDummyClusterMetrics() {
  Map<ServerName, ServerMetrics> serverMetricsMap = new HashMap<>();
  // host1
  List<RegionMetrics> host1RegionMetrics = new ArrayList<>();
  List<UserMetrics> host1UserMetrics = new ArrayList<>();
  host1UserMetrics.add(createUserMetrics("FOO", 1, 2, 4));
  host1UserMetrics.add(createUserMetrics("BAR", 2, 3, 3));
  host1RegionMetrics.add(createRegionMetrics("table1,,1.00000000000000000000000000000000.", 100, 50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, new Size(100, Size.Unit.MEGABYTE), 0.1f, 100, 100, "2019-07-22 00:00:00"));
  host1RegionMetrics.add(createRegionMetrics("table2,1,2.00000000000000000000000000000001.", 200, 100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 0.2f, 50, 200, "2019-07-22 00:00:01"));
  host1RegionMetrics.add(createRegionMetrics("namespace:table3,,3_0001.00000000000000000000000000000002.", 300, 150, 300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, new Size(300, Size.Unit.MEGABYTE), 0.3f, 100, 300, "2019-07-22 00:00:02"));
  ServerName host1 = ServerName.valueOf("host1.apache.com", 1000, 1);
  serverMetricsMap.put(host1, createServerMetrics(host1, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 100, host1RegionMetrics, host1UserMetrics));
  // host2 — use fresh lists rather than clear()-ing host1's lists: if
  // createServerMetrics retains a reference to the list it was given (instead of
  // copying it), clearing would silently wipe host1's metrics too.
  List<RegionMetrics> host2RegionMetrics = new ArrayList<>();
  List<UserMetrics> host2UserMetrics = new ArrayList<>();
  host2UserMetrics.add(createUserMetrics("FOO", 5, 7, 3));
  host2UserMetrics.add(createUserMetrics("BAR", 4, 8, 4));
  host2RegionMetrics.add(createRegionMetrics("table1,1,4.00000000000000000000000000000003.", 100, 50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, new Size(100, Size.Unit.MEGABYTE), 0.4f, 50, 100, "2019-07-22 00:00:03"));
  host2RegionMetrics.add(createRegionMetrics("table2,,5.00000000000000000000000000000004.", 200, 100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 0.5f, 150, 200, "2019-07-22 00:00:04"));
  host2RegionMetrics.add(createRegionMetrics("namespace:table3,,6.00000000000000000000000000000005.", 300, 150, 300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, new Size(300, Size.Unit.MEGABYTE), 0.6f, 200, 300, "2019-07-22 00:00:05"));
  ServerName host2 = ServerName.valueOf("host2.apache.com", 1001, 2);
  serverMetricsMap.put(host2, createServerMetrics(host2, 200, new Size(16, Size.Unit.GIGABYTE), new Size(32, Size.Unit.GIGABYTE), 200, host2RegionMetrics, host2UserMetrics));
  // host3 is reported dead and owns the single region in transition.
  ServerName host3 = ServerName.valueOf("host3.apache.com", 1002, 3);
  return ClusterMetricsBuilder.newBuilder().setHBaseVersion("3.0.0-SNAPSHOT").setClusterId("01234567-89ab-cdef-0123-456789abcdef").setLiveServerMetrics(serverMetricsMap).setDeadServerNames(Collections.singletonList(host3)).setRegionsInTransition(Collections.singletonList(new RegionState(RegionInfoBuilder.newBuilder(TableName.valueOf("table4")).setStartKey(new byte[0]).setEndKey(new byte[0]).setOffline(true).setReplicaId(0).setRegionId(0).setSplit(false).build(), RegionState.State.OFFLINE, host3))).build();
}
Example usage of org.apache.hadoop.hbase.RegionMetrics in the Apache HBase project, taken from the updateMetricsMap method of the TestRegionServerReadRequestMetrics class.
// Polls the cluster until the tracked region's read-request metrics advance past the
// previously recorded values, then snapshots all metric values into requestsMap.
// Retries up to MAX_TRY times, sleeping SLEEP_MS between attempts; if no advance is
// ever observed, falls back to recording whatever the last poll returned.
private static void updateMetricsMap() throws IOException, InterruptedException {
// Remember the previous values so we can detect a change.
for (Metric metric : Metric.values()) {
requestsMapPrev.put(metric, requestsMap.get(metric));
}
// Hold the most recent successful lookup so the fallback below can use it.
ServerMetrics serverMetrics = null;
RegionMetrics regionMetricsOuter = null;
boolean metricsUpdated = false;
for (int i = 0; i < MAX_TRY; i++) {
for (ServerName serverName : serverNames) {
// Fresh cluster snapshot each pass; only live-server metrics are requested.
serverMetrics = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().get(serverName);
Map<byte[], RegionMetrics> regionMetrics = serverMetrics.getRegionMetrics();
// The region may not be hosted on this server (or not reported yet).
RegionMetrics regionMetric = regionMetrics.get(regionInfo.getRegionName());
if (regionMetric != null) {
regionMetricsOuter = regionMetric;
for (Metric metric : Metric.values()) {
// Any single metric exceeding its previous value counts as "updated";
// snapshot ALL metrics at that moment for consistency.
if (getReadRequest(serverMetrics, regionMetric, metric) > requestsMapPrev.get(metric)) {
for (Metric metricInner : Metric.values()) {
requestsMap.put(metricInner, getReadRequest(serverMetrics, regionMetric, metricInner));
}
metricsUpdated = true;
// Only exits the Metric loop; the server loop finishes its pass.
break;
}
}
}
}
if (metricsUpdated) {
break;
}
Thread.sleep(SLEEP_MS);
}
// Fallback: record the last observed values even though nothing advanced.
// NOTE(review): serverMetrics/regionMetricsOuter can still be null here if
// serverNames is empty or the region was never found — this would NPE inside
// getReadRequest; presumably test setup guarantees at least one hit. TODO confirm.
if (!metricsUpdated) {
for (Metric metric : Metric.values()) {
requestsMap.put(metric, getReadRequest(serverMetrics, regionMetricsOuter, metric));
}
}
}
Example usage of org.apache.hadoop.hbase.RegionMetrics in the Apache HBase project, taken from the testRegionReopensWithStoreRefConfig method of the TestRegionsRecoveryChore class.
/**
 * Verifies that RegionsRecoveryChore asks the master to reopen exactly the regions
 * whose store-file reference count exceeds the configured threshold (300): two
 * tables are affected, covering three regions in total.
 */
@Test
public void testRegionReopensWithStoreRefConfig() throws Exception {
  regionNo = 0;
  ClusterMetrics dummyMetrics = TestRegionsRecoveryChore.getClusterMetrics(4);
  final Map<ServerName, ServerMetrics> liveServers = dummyMetrics.getLiveServerMetrics();
  LOG.debug("All Region Names with refCount....");
  for (ServerMetrics server : liveServers.values()) {
    // NOTE(review): new String(bytes) uses the platform charset — consider an
    // explicit charset; harmless here since it only feeds a debug log.
    for (RegionMetrics region : server.getRegionMetrics().values()) {
      LOG.debug("name: " + new String(region.getRegionName()) + " refCount: " + region.getStoreRefCount());
    }
  }
  // Stub the master and assignment manager before the chore runs.
  Mockito.when(hMaster.getClusterMetrics()).thenReturn(dummyMetrics);
  Mockito.when(hMaster.getAssignmentManager()).thenReturn(assignmentManager);
  for (byte[] name : REGION_NAME_LIST) {
    Mockito.when(assignmentManager.getRegionInfo(name)).thenReturn(TestRegionsRecoveryChore.getRegionInfo(name));
  }
  Stoppable stopper = new StoppableImplementation();
  Configuration conf = getCustomConf();
  conf.setInt("hbase.regions.recovery.store.file.ref.count", 300);
  regionsRecoveryChore = new RegionsRecoveryChore(stopper, conf, hMaster);
  regionsRecoveryChore.chore();
  // Verify that we need to reopen regions of 2 tables
  Mockito.verify(hMaster, Mockito.times(2)).reopenRegions(Mockito.any(), Mockito.anyList(), Mockito.anyLong(), Mockito.anyLong());
  Mockito.verify(hMaster, Mockito.times(1)).getClusterMetrics();
  // Verify that we need to reopen total 3 regions that have refCount > 300
  Mockito.verify(hMaster, Mockito.times(3)).getAssignmentManager();
  Mockito.verify(assignmentManager, Mockito.times(3)).getRegionInfo(Mockito.any());
}
End of aggregated usage examples for org.apache.hadoop.hbase.RegionMetrics.