use of com.github.ambry.server.storagestats.HostPartitionClassStorageStats in project ambry by linkedin.
The class MySqlClusterAggregatorTest, method testAggregateHostPartitionClassStorageStatsWithOutdatedNode.
/**
* Test {@link MySqlClusterAggregator#aggregateHostPartitionClassStorageStatsWrappers} but with one node having outdated
* storage stats data. Outdated data shouldn't be used when aggregating valid storage stats.
* @throws Exception
*/
@Test
public void testAggregateHostPartitionClassStorageStatsWithOutdatedNode() throws Exception {
  Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> upToDateStorageStatsMap = new HashMap<>();
  upToDateStorageStatsMap.computeIfAbsent("default", k -> new HashMap<>())
      .put((long) 0, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10000L, 2, 10));
  Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> outdatedStorageStatsMap = new HashMap<>();
  outdatedStorageStatsMap.computeIfAbsent("default", k -> new HashMap<>())
      .put((long) 0, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10000L, 2, 10));
  StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE,
      TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 1, 1, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper upToDateNodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats(upToDateStorageStatsMap));
  header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 0, 1, 1, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper outdatedNodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats(outdatedStorageStatsMap));
  header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE,
      TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 0, 0, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper emptyNodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats());
  Map<String, HostPartitionClassStorageStatsWrapper> instanceToStatsMap = new LinkedHashMap<>();
  instanceToStatsMap.put("Instance_0", upToDateNodeStats);
  instanceToStatsMap.put("Instance_1", outdatedNodeStats);
  instanceToStatsMap.put("Instance_2", emptyNodeStats);
  Pair<AggregatedPartitionClassStorageStats, AggregatedPartitionClassStorageStats> aggregatedRawAndValidStats =
      clusterAggregator.aggregateHostPartitionClassStorageStatsWrappers(instanceToStatsMap);
  Map<String, Map<Short, Map<Short, ContainerStorageStats>>> expectedValid =
      clusterAggregator.aggregateHostPartitionClassStorageStats(upToDateStorageStatsMap);
  Assert.assertEquals(expectedValid, aggregatedRawAndValidStats.getSecond().getStorageStats());
}
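The distinction between the raw and the valid aggregate hinges on the timestamp carried in each StatsHeader: Instance_0 reports a timestamp two relevance periods in, Instance_1 reports 0, and only Instance_0's stats are expected in the valid result. The following minimal sketch is illustrative only and is not MySqlClusterAggregator's actual code; the class RelevanceCheckSketch, the method isRecentEnough, and the window constant are assumptions introduced here to show how a timestamp-based relevance check of this kind can be expressed.

import java.util.concurrent.TimeUnit;

// Illustrative sketch of a timestamp-based relevance check, not Ambry's aggregator logic.
public class RelevanceCheckSketch {
  // Assumed relevance window, mirroring the RELEVANT_PERIOD_IN_MINUTES constant used by the test.
  static final long RELEVANT_PERIOD_IN_MINUTES = 10;

  // A node's report counts toward the "valid" aggregate only if its StatsHeader timestamp
  // is within the relevance window of the newest report seen across all nodes.
  static boolean isRecentEnough(long reportTimestampMs, long newestTimestampMs) {
    return newestTimestampMs - reportTimestampMs <= TimeUnit.MINUTES.toMillis(RELEVANT_PERIOD_IN_MINUTES);
  }

  public static void main(String[] args) {
    long newest = TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES);
    System.out.println(isRecentEnough(newest, newest)); // true  -> like Instance_0, included
    System.out.println(isRecentEnough(0L, newest));     // false -> like Instance_1, excluded
  }
}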
use of com.github.ambry.server.storagestats.HostPartitionClassStorageStats in project ambry by linkedin.
The class AccountStatsMySqlStore, method queryHostPartitionClassStorageStatsByHost.
@Override
public HostPartitionClassStorageStatsWrapper queryHostPartitionClassStorageStatsByHost(String hostname, int port, Map<String, Set<Integer>> partitionNameAndIds) throws SQLException {
  long startTimeMs = System.currentTimeMillis();
  hostname = hostnameHelper.simplifyHostname(hostname, port);
  Map<Integer, Map<Short, Map<Short, ContainerStorageStats>>> partitionAccountContainerUsage = new HashMap<>();
  AtomicLong timestamp = new AtomicLong(0);
  accountReportsDao.queryStorageUsageForHost(clusterName, hostname,
      (partitionId, accountId, containerStats, updatedAtMs) -> {
        partitionAccountContainerUsage.computeIfAbsent(partitionId, pid -> new HashMap<>())
            .computeIfAbsent(accountId, aid -> new HashMap<>())
            .put(containerStats.getContainerId(), containerStats);
        timestamp.set(Math.max(timestamp.get(), updatedAtMs));
      });
  // Here partitionAccountContainerUsage has partition id, account id and container id as the map keys at each
  // level; the values are the container storage usage stats.
  // As indicated by the comments above, we have to know the partition class name for each partition id. Luckily, we
  // have all the partition ids, and we have a map partitionNameAndIds whose key is the partition class name and
  // whose value is the set of all partition ids belonging to that partition class.
  Set<Integer> partitionIds = partitionAccountContainerUsage.keySet();
  Map<String, Set<Integer>> partitionNameAndIdsForHost = new HashMap<>();
  for (int partitionId : partitionIds) {
    boolean found = false;
    for (Map.Entry<String, Set<Integer>> namesAndIdsEntry : partitionNameAndIds.entrySet()) {
      if (namesAndIdsEntry.getValue().contains(partitionId)) {
        partitionNameAndIdsForHost.computeIfAbsent(namesAndIdsEntry.getKey(), k -> new HashSet<>()).add(partitionId);
        found = true;
        break;
      }
    }
    if (!found) {
      storeMetrics.missingPartitionClassNameErrorCount.inc();
      logger.error("Can't find partition class name for partition id {}", partitionId);
    }
  }
  HostPartitionClassStorageStats hostPartitionClassStorageStats = new HostPartitionClassStorageStats();
  for (Map.Entry<String, Set<Integer>> nameAndIdsEntry : partitionNameAndIdsForHost.entrySet()) {
    String partitionClassName = nameAndIdsEntry.getKey();
    for (int partitionId : nameAndIdsEntry.getValue()) {
      Map<Short, Map<Short, ContainerStorageStats>> accountContainerUsage = partitionAccountContainerUsage.get(partitionId);
      for (short accountId : accountContainerUsage.keySet()) {
        Map<Short, ContainerStorageStats> containerUsage = accountContainerUsage.get(accountId);
        containerUsage.values()
            .stream()
            .forEach(containerStats -> hostPartitionClassStorageStats.addContainerStorageStats(partitionClassName,
                partitionId, accountId, containerStats));
      }
    }
  }
  storeMetrics.queryPartitionClassStatsTimeMs.update(System.currentTimeMillis() - startTimeMs);
  return new HostPartitionClassStorageStatsWrapper(
      new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, timestamp.get(),
          partitionAccountContainerUsage.size(), partitionAccountContainerUsage.size(), null),
      hostPartitionClassStorageStats);
}
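The per-partition lookup above scans every entry of partitionNameAndIds until it finds the class that contains the partition id. With many partitions, a reverse index from partition id to class name, built once up front, avoids the nested scan. The sketch below is a self-contained illustration of that inversion; the class PartitionClassIndexSketch, the method buildPartitionIdToClassName, and the sample data are hypothetical and not part of AccountStatsMySqlStore.

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class PartitionClassIndexSketch {
  // Invert a {partition class name -> partition ids} map into {partition id -> partition class name}
  // so each lookup during aggregation is O(1) instead of a scan over every class entry.
  static Map<Integer, String> buildPartitionIdToClassName(Map<String, Set<Integer>> partitionNameAndIds) {
    Map<Integer, String> partitionIdToClassName = new HashMap<>();
    for (Map.Entry<String, Set<Integer>> entry : partitionNameAndIds.entrySet()) {
      for (int partitionId : entry.getValue()) {
        partitionIdToClassName.put(partitionId, entry.getKey());
      }
    }
    return partitionIdToClassName;
  }

  public static void main(String[] args) {
    // Hypothetical sample data; class names and ids are illustrative only.
    Map<String, Set<Integer>> partitionNameAndIds = new HashMap<>();
    partitionNameAndIds.put("default", Set.of(0, 1, 2));
    partitionNameAndIds.put("max-replicas-all-datacenters", Set.of(3, 4));
    Map<Integer, String> index = buildPartitionIdToClassName(partitionNameAndIds);
    System.out.println(index.get(3)); // max-replicas-all-datacenters
    System.out.println(index.get(7)); // null -> the case counted by missingPartitionClassNameErrorCount above
  }
}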