Use of org.apache.hadoop.hbase.hbtop.Record in project hbase by apache.
The class TestTopScreenModel, method testSort.
@Test
public void testSort() {
  // Sort by the LOCALITY field
  topScreenModel.setSortFieldAndFields(Field.LOCALITY, fields);

  FieldValue previous = null;

  // With the default sort order, values should be strictly descending
  topScreenModel.refreshMetricsData();
  for (Record record : topScreenModel.getRecords()) {
    FieldValue current = record.get(Field.LOCALITY);
    if (previous != null) {
      assertTrue(current.compareTo(previous) < 0);
    }
    previous = current;
  }

  // After switching the sort order, values should be strictly ascending
  topScreenModel.switchSortOrder();
  topScreenModel.refreshMetricsData();

  previous = null;
  for (Record record : topScreenModel.getRecords()) {
    FieldValue current = record.get(Field.LOCALITY);
    if (previous != null) {
      assertTrue(current.compareTo(previous) > 0);
    }
    previous = current;
  }
}
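The two loops above differ only in the direction of the comparison. As a minimal sketch (SortAssertions and isStrictlySortedBy are illustrative names, not part of hbtop; imports assume the standard hbtop packages), the check can be factored into a helper that verifies a record list is strictly ordered on a given field:

import java.util.List;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.field.Field;
import org.apache.hadoop.hbase.hbtop.field.FieldValue;

final class SortAssertions {

  // Returns true if every record's value for the given field is strictly greater
  // (ascending) or strictly less (descending) than the previous record's value.
  static boolean isStrictlySortedBy(List<Record> records, Field field, boolean ascending) {
    FieldValue previous = null;
    for (Record record : records) {
      FieldValue current = record.get(field);
      if (previous != null) {
        int cmp = current.compareTo(previous);
        if (ascending ? cmp <= 0 : cmp >= 0) {
          return false;
        }
      }
      previous = current;
    }
    return true;
  }
}

With such a helper, each loop in the test collapses to a single call, e.g. assertTrue(isStrictlySortedBy(topScreenModel.getRecords(), Field.LOCALITY, false)) for the first pass.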
Use of org.apache.hadoop.hbase.hbtop.Record in project hbase by apache.
The class TestTopScreenModel, method testFilters.
@Test
public void testFilters() {
  // Case-sensitive filter that matches
  topScreenModel.addFilter("TABLE==table1", false);
  topScreenModel.refreshMetricsData();
  for (Record record : topScreenModel.getRecords()) {
    FieldValue value = record.get(Field.TABLE);
    assertThat(value.asString(), is("table1"));
  }

  // Case-sensitive filter that matches nothing
  topScreenModel.clearFilters();
  topScreenModel.addFilter("TABLE==TABLE1", false);
  topScreenModel.refreshMetricsData();
  assertThat(topScreenModel.getRecords().size(), is(0));

  // Case-insensitive filter (ignoreCase = true)
  topScreenModel.clearFilters();
  topScreenModel.addFilter("TABLE==TABLE1", true);
  topScreenModel.refreshMetricsData();
  for (Record record : topScreenModel.getRecords()) {
    FieldValue value = record.get(Field.TABLE);
    assertThat(value.asString(), is("table1"));
  }
}
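The addFilter call takes a filter expression plus an ignore-case flag; in refreshRecords below, such filters are applied to each Record via execute. A minimal sketch of applying one expression directly to a record list (FilterExample is a hypothetical helper, and the exact RecordFilter.parse signature should be checked against your HBase version):

import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.RecordFilter;

final class FilterExample {

  // Keeps only the records matching an hbtop filter expression such as "TABLE==table1".
  // RecordFilter.parse is assumed to return null when the expression is invalid.
  static List<Record> apply(List<Record> records, String expression, boolean ignoreCase) {
    RecordFilter filter = RecordFilter.parse(expression, ignoreCase);
    if (filter == null) {
      return records;
    }
    return records.stream()
      .filter(filter::execute)
      .collect(Collectors.toList());
  }
}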
Use of org.apache.hadoop.hbase.hbtop.Record in project hbase by apache.
The class TopScreenModel, method refreshRecords.
private void refreshRecords(ClusterMetrics clusterMetrics) {
  List<Record> records = currentMode.getRecords(clusterMetrics, pushDownFilters);

  // Filter and sort
  records = records.stream()
    .filter(r -> filters.stream().allMatch(f -> f.execute(r)))
    .sorted((recordLeft, recordRight) -> {
      FieldValue left = recordLeft.get(currentSortField);
      FieldValue right = recordRight.get(currentSortField);
      return (ascendingSort ? 1 : -1) * left.compareTo(right);
    })
    .collect(Collectors.toList());

  this.records = Collections.unmodifiableList(records);
}
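refreshRecords keeps only the records that pass every filter, then sorts by the current sort field, flipping the comparison when the sort is not ascending. The same pipeline can be written with Comparator.comparing, assuming FieldValue implements Comparable as its compareTo usage above suggests; a sketch (RecordPipeline and filterAndSort are illustrative names, not hbtop API):

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.RecordFilter;
import org.apache.hadoop.hbase.hbtop.field.Field;

final class RecordPipeline {

  // Keep records that pass every filter, then order them by the sort field,
  // descending unless ascendingSort is set.
  static List<Record> filterAndSort(List<Record> records, List<RecordFilter> filters,
      Field sortField, boolean ascendingSort) {
    Comparator<Record> comparator = Comparator.comparing((Record r) -> r.get(sortField));
    if (!ascendingSort) {
      comparator = comparator.reversed();
    }
    return records.stream()
      .filter(r -> filters.stream().allMatch(f -> f.execute(r)))
      .sorted(comparator)
      .collect(Collectors.toList());
  }
}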
Use of org.apache.hadoop.hbase.hbtop.Record in project hbase by apache.
The class RegionModeStrategy, method createRecord.
private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMetrics,
    long lastReportTimestamp) {
  Record.Builder builder = Record.builder();

  String regionName = regionMetrics.getNameAsString();
  builder.put(Field.REGION_NAME, regionName);

  String namespaceName = "";
  String tableName = "";
  String region = "";
  String startKey = "";
  String startCode = "";
  String replicaId = "";
  try {
    byte[][] elements = RegionInfo.parseRegionName(regionMetrics.getRegionName());
    TableName tn = TableName.valueOf(elements[0]);
    namespaceName = tn.getNamespaceAsString();
    tableName = tn.getQualifierAsString();
    startKey = Bytes.toStringBinary(elements[1]);
    startCode = Bytes.toString(elements[2]);
    replicaId =
      elements.length == 4 ? Integer.valueOf(Bytes.toString(elements[3])).toString() : "";
    region = RegionInfo.encodeRegionName(regionMetrics.getRegionName());
  } catch (IOException ignored) {
  }

  builder.put(Field.NAMESPACE, namespaceName);
  builder.put(Field.TABLE, tableName);
  builder.put(Field.START_CODE, startCode);
  builder.put(Field.REPLICA_ID, replicaId);
  builder.put(Field.REGION, region);
  builder.put(Field.START_KEY, startKey);
  builder.put(Field.REGION_SERVER, serverMetrics.getServerName().toShortString());
  builder.put(Field.LONG_REGION_SERVER, serverMetrics.getServerName().getServerName());

  RequestCountPerSecond requestCountPerSecond = requestCountPerSecondMap.get(regionName);
  if (requestCountPerSecond == null) {
    requestCountPerSecond = new RequestCountPerSecond();
    requestCountPerSecondMap.put(regionName, requestCountPerSecond);
  }
  requestCountPerSecond.refresh(lastReportTimestamp, regionMetrics.getReadRequestCount(),
    regionMetrics.getFilteredReadRequestCount(), regionMetrics.getWriteRequestCount());

  builder.put(Field.READ_REQUEST_COUNT_PER_SECOND,
    requestCountPerSecond.getReadRequestCountPerSecond());
  builder.put(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND,
    requestCountPerSecond.getFilteredReadRequestCountPerSecond());
  builder.put(Field.WRITE_REQUEST_COUNT_PER_SECOND,
    requestCountPerSecond.getWriteRequestCountPerSecond());
  builder.put(Field.REQUEST_COUNT_PER_SECOND,
    requestCountPerSecond.getRequestCountPerSecond());

  builder.put(Field.STORE_FILE_SIZE, regionMetrics.getStoreFileSize());
  builder.put(Field.UNCOMPRESSED_STORE_FILE_SIZE, regionMetrics.getUncompressedStoreFileSize());
  builder.put(Field.NUM_STORE_FILES, regionMetrics.getStoreFileCount());
  builder.put(Field.MEM_STORE_SIZE, regionMetrics.getMemStoreSize());
  builder.put(Field.LOCALITY, regionMetrics.getDataLocality());

  long compactingCellCount = regionMetrics.getCompactingCellCount();
  long compactedCellCount = regionMetrics.getCompactedCellCount();
  float compactionProgress = 0;
  if (compactedCellCount > 0) {
    compactionProgress = 100 * ((float) compactedCellCount / compactingCellCount);
  }
  builder.put(Field.COMPACTING_CELL_COUNT, compactingCellCount);
  builder.put(Field.COMPACTED_CELL_COUNT, compactedCellCount);
  builder.put(Field.COMPACTION_PROGRESS, compactionProgress);

  FastDateFormat df = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss");
  long lastMajorCompactionTimestamp = regionMetrics.getLastMajorCompactionTimestamp();
  builder.put(Field.LAST_MAJOR_COMPACTION_TIME,
    lastMajorCompactionTimestamp == 0 ? "" : df.format(lastMajorCompactionTimestamp));

  return builder.build();
}
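createRecord is essentially a long sequence of builder.put calls followed by build(). A minimal, self-contained sketch of the same builder API (RecordBuilderExample is not part of hbtop, and the field values are chosen arbitrarily):

import org.apache.hadoop.hbase.hbtop.Record;
import org.apache.hadoop.hbase.hbtop.field.Field;

final class RecordBuilderExample {

  // Builds a small Record by hand with the builder API used in createRecord,
  // then reads one field back as a string.
  public static void main(String[] args) {
    Record.Builder builder = Record.builder();
    builder.put(Field.NAMESPACE, "default");
    builder.put(Field.TABLE, "table1");
    builder.put(Field.LOCALITY, 0.95f);
    Record record = builder.build();

    System.out.println(record.get(Field.TABLE).asString()); // prints table1
  }
}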
Use of org.apache.hadoop.hbase.hbtop.Record in project hbase by apache.
The class TopScreenPresenter, method drillDown.
public void drillDown() {
  Record selectedRecord = getSelectedRecord();
  if (selectedRecord == null) {
    return;
  }
  if (topScreenModel.drillDown(selectedRecord)) {
    reset();
    refresh(true);
  }
}