Use of com.github.ambry.server.HostPartitionClassStorageStatsWrapper in project ambry by linkedin.
In the class MySqlClusterAggregator, the method aggregateHostPartitionClassStorageStatsWrappers.
/**
 * Aggregate all {@link HostPartitionClassStorageStatsWrapper}s to generate two {@link AggregatedPartitionClassStorageStats}s. The first
 * {@link AggregatedPartitionClassStorageStats} is the sum of all {@link HostPartitionClassStorageStatsWrapper}s. The second
 * {@link AggregatedPartitionClassStorageStats} is the valid aggregated storage stats for all replicas of each partition.
* @param statsWrappers A map from instance name to {@link HostPartitionClassStorageStatsWrapper}.
* @return A {@link Pair} of {@link AggregatedPartitionClassStorageStats}.
* @throws IOException
*/
Pair<AggregatedPartitionClassStorageStats, AggregatedPartitionClassStorageStats> aggregateHostPartitionClassStorageStatsWrappers(
    Map<String, HostPartitionClassStorageStatsWrapper> statsWrappers) throws IOException {
  Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> combinedHostPartitionClassStorageStatsMap = new HashMap<>();
  Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> selectedHostPartitionClassStorageStatsMap = new HashMap<>();
  Map<Long, Long> partitionTimestampMap = new HashMap<>();
  Map<Long, Long> partitionPhysicalStorageMap = new HashMap<>();
  for (Map.Entry<String, HostPartitionClassStorageStatsWrapper> statsWrapperEntry : statsWrappers.entrySet()) {
    if (statsWrapperEntry.getValue() == null) {
      continue;
    }
    String instanceName = statsWrapperEntry.getKey();
    HostPartitionClassStorageStatsWrapper hostPartitionClassStorageStatsWrapper = statsWrapperEntry.getValue();
    HostPartitionClassStorageStats hostPartitionClassStorageStats = hostPartitionClassStorageStatsWrapper.getStats();
    HostPartitionClassStorageStats hostPartitionClassStorageStatsCopy1 =
        new HostPartitionClassStorageStats(hostPartitionClassStorageStats);
    HostPartitionClassStorageStats hostPartitionClassStorageStatsCopy2 =
        new HostPartitionClassStorageStats(hostPartitionClassStorageStats);
    combineRawHostPartitionClassStorageStatsMap(combinedHostPartitionClassStorageStatsMap,
        hostPartitionClassStorageStatsCopy1.getStorageStats());
    selectRawHostPartitionClassStorageStatsMap(selectedHostPartitionClassStorageStatsMap,
        hostPartitionClassStorageStatsCopy2.getStorageStats(), partitionTimestampMap, partitionPhysicalStorageMap,
        hostPartitionClassStorageStatsWrapper.getHeader().getTimestamp(), instanceName);
  }
  if (logger.isTraceEnabled()) {
    logger.trace("Combined raw HostPartitionClassStorageStats {}",
        mapper.writeValueAsString(combinedHostPartitionClassStorageStatsMap));
    logger.trace("Selected raw HostPartitionClassStorageStats {}",
        mapper.writeValueAsString(selectedHostPartitionClassStorageStatsMap));
  }
  AggregatedPartitionClassStorageStats combinedAggregated =
      new AggregatedPartitionClassStorageStats(aggregateHostPartitionClassStorageStats(combinedHostPartitionClassStorageStatsMap));
  AggregatedPartitionClassStorageStats selectedAggregated =
      new AggregatedPartitionClassStorageStats(aggregateHostPartitionClassStorageStats(selectedHostPartitionClassStorageStatsMap));
  if (logger.isTraceEnabled()) {
    logger.trace("Aggregated combined {}", mapper.writeValueAsString(combinedAggregated));
    logger.trace("Aggregated selected {}", mapper.writeValueAsString(selectedAggregated));
  }
  return new Pair<>(combinedAggregated, selectedAggregated);
}
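A minimal usage sketch (not taken from the Ambry source) of how a caller might consume the returned pair; it assumes an already-constructed MySqlClusterAggregator named clusterAggregator and a per-instance stats map named statsByInstance, and uses only calls that appear in the snippets on this page.

Map<String, HostPartitionClassStorageStatsWrapper> statsByInstance = new HashMap<>();
// ... put one HostPartitionClassStorageStatsWrapper per storage node instance ...
Pair<AggregatedPartitionClassStorageStats, AggregatedPartitionClassStorageStats> rawAndValid =
    clusterAggregator.aggregateHostPartitionClassStorageStatsWrappers(statsByInstance);
// First element: raw aggregate, summed over every replica of every partition.
AggregatedPartitionClassStorageStats raw = rawAndValid.getFirst();
// Second element: valid aggregate, with one selected replica counted per partition.
AggregatedPartitionClassStorageStats valid = rawAndValid.getSecond();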
Use of com.github.ambry.server.HostPartitionClassStorageStatsWrapper in project ambry by linkedin.
In the class AccountStatsMySqlStoreIntegrationTest, the method testHostPartitionClassStorageStats.
/**
 * Test methods that store and fetch partition class names, partition name to partition id mappings, and partition class storage stats.
* @throws Exception
*/
@Test
public void testHostPartitionClassStorageStats() throws Exception {
  // First write some stats to account reports
  testMultiStoreStats();
  HostAccountStorageStatsWrapper accountStats1 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
  HostAccountStorageStatsWrapper accountStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname2, port2);
  AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
  HostAccountStorageStatsWrapper accountStats3 = mySqlStore3.queryHostAccountStorageStatsByHost(hostname3, port3);
  // From these account stats, create partition class stats.
  Set<Long> allPartitionKeys = new HashSet<Long>() {
    {
      addAll(accountStats1.getStats().getStorageStats().keySet());
      addAll(accountStats2.getStats().getStorageStats().keySet());
      addAll(accountStats3.getStats().getStorageStats().keySet());
    }
  };
  List<String> partitionClassNames = Arrays.asList("default", "new");
  Map<Long, String> partitionIdToClassName = new HashMap<>();
  int ind = 0;
  for (long partitionId : allPartitionKeys) {
    partitionIdToClassName.put(partitionId, partitionClassNames.get(ind % partitionClassNames.size()));
    ind++;
  }
  HostPartitionClassStorageStatsWrapper partitionClassStats1 =
      convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats1, partitionIdToClassName);
  HostPartitionClassStorageStatsWrapper partitionClassStats2 =
      convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats2, partitionIdToClassName);
  HostPartitionClassStorageStatsWrapper partitionClassStats3 =
      convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats3, partitionIdToClassName);
  mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats1);
  mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats2);
  mySqlStore3.storeHostPartitionClassStorageStats(partitionClassStats3);
  Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
  assertEquals(new HashSet<>(partitionClassNames), partitionNameAndIds.keySet());
  Map<Long, String> dbPartitionKeyToClassName = partitionNameAndIds.entrySet()
      .stream()
      .flatMap(ent -> ent.getValue().stream().map(pid -> new Pair<>(ent.getKey(), (long) pid)))
      .collect(Collectors.toMap(Pair::getSecond, Pair::getFirst));
  assertEquals(partitionIdToClassName, dbPartitionKeyToClassName);
  // Fetch HostPartitionClassStorageStats
  HostPartitionClassStorageStatsWrapper obtainedStats1 =
      mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname1, port1, partitionNameAndIds);
  assertEquals(partitionClassStats1.getStats().getStorageStats(), obtainedStats1.getStats().getStorageStats());
  HostPartitionClassStorageStatsWrapper obtainedStats2 =
      mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname2, port2, partitionNameAndIds);
  assertEquals(partitionClassStats2.getStats().getStorageStats(), obtainedStats2.getStats().getStorageStats());
  HostPartitionClassStorageStatsWrapper obtainedStats3 =
      mySqlStore3.queryHostPartitionClassStorageStatsByHost(hostname3, port3, partitionNameAndIds);
  assertEquals(partitionClassStats3.getStats().getStorageStats(), obtainedStats3.getStats().getStorageStats());
  // Fetch StatsSnapshot
  StatsWrapper obtainedStats = mySqlStore.queryPartitionClassStatsByHost(hostname1, port1, partitionNameAndIds);
  assertEquals(StorageStatsUtil.convertHostPartitionClassStorageStatsToStatsSnapshot(obtainedStats1.getStats(), false),
      obtainedStats.getSnapshot());
  mySqlStore3.shutdown();
}
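For readability, the dense stream above that builds dbPartitionKeyToClassName is equivalent to the following explicit loop (an illustrative rewrite, not code from the test), which inverts the class-name-to-partition-ids map returned by queryPartitionNameAndIds into a partition-id-to-class-name map.

Map<Long, String> invertedMap = new HashMap<>();
for (Map.Entry<String, Set<Integer>> entry : partitionNameAndIds.entrySet()) {
  String className = entry.getKey();
  for (int partitionId : entry.getValue()) {
    // Partition ids come back as Integer from the store but are compared as Long elsewhere.
    invertedMap.put((long) partitionId, className);
  }
}
// invertedMap should now equal partitionIdToClassName if the round trip succeeded.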
Use of com.github.ambry.server.HostPartitionClassStorageStatsWrapper in project ambry by linkedin.
In the class AccountStatsMySqlStoreIntegrationTest, the method convertHostAccountStorageStatsToHostPartitionClassStorageStats.
private HostPartitionClassStorageStatsWrapper convertHostAccountStorageStatsToHostPartitionClassStorageStats(
    HostAccountStorageStatsWrapper accountStatsWrapper, Map<Long, String> partitionIdToClassName) {
  HostPartitionClassStorageStats hostPartitionClassStorageStats = new HostPartitionClassStorageStats();
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> storageStats =
      accountStatsWrapper.getStats().getStorageStats();
  for (long partitionId : storageStats.keySet()) {
    Map<Short, Map<Short, ContainerStorageStats>> accountStorageStatsMap = storageStats.get(partitionId);
    String partitionClassName = partitionIdToClassName.get(partitionId);
    for (short accountId : accountStorageStatsMap.keySet()) {
      accountStorageStatsMap.get(accountId)
          .values()
          .forEach(containerStats -> hostPartitionClassStorageStats.addContainerStorageStats(partitionClassName,
              partitionId, accountId, containerStats));
    }
  }
  return new HostPartitionClassStorageStatsWrapper(new StatsHeader(accountStatsWrapper.getHeader()),
      hostPartitionClassStorageStats);
}
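The converted stats are keyed by partition class name, then partition id, then account id, then container id. A short illustrative sketch (hypothetical keys, reusing partitionClassStats1 from the test above) of reading a single container's stats out of that nested map:

Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> byClass =
    partitionClassStats1.getStats().getStorageStats();
// Hypothetical keys for illustration: class "default", partition 1, account 2, container 3.
ContainerStorageStats oneContainer = byClass.getOrDefault("default", Collections.emptyMap())
    .getOrDefault(1L, Collections.emptyMap())
    .getOrDefault((short) 2, Collections.emptyMap())
    .get((short) 3);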
Use of com.github.ambry.server.HostPartitionClassStorageStatsWrapper in project ambry by linkedin.
In the class MySqlClusterAggregatorTest, the method testAggregateHostPartitionClassStorageStats.
/**
* Test basic functionality of {@link MySqlClusterAggregator#aggregateHostPartitionClassStorageStats}.
* @throws Exception
*/
@Test
public void testAggregateHostPartitionClassStorageStats() throws Exception {
  int nodeCount = 3;
  int numberOfPartitions = 4;
  Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> storageStatsMap = new HashMap<>();
  String[] partitionClassNames = { "default", "newClass" };
  for (int i = 0; i < numberOfPartitions; i++) {
    String partitionClassName = partitionClassNames[i % partitionClassNames.length];
    storageStatsMap.computeIfAbsent(partitionClassName, k -> new HashMap<>())
        .put((long) i, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, i + 3, 3, 10000L, 2, 10));
  }
  StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP,
      numberOfPartitions, numberOfPartitions, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper nodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats(storageStatsMap));
  header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP, 0, 0, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper emptyStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats());
  Map<String, HostPartitionClassStorageStatsWrapper> instanceToStatsMap = new HashMap<>();
  for (int i = 0; i < nodeCount; i++) {
    instanceToStatsMap.put("Instance_" + i,
        new HostPartitionClassStorageStatsWrapper(new StatsHeader(nodeStats.getHeader()),
            new HostPartitionClassStorageStats(nodeStats.getStats())));
  }
  instanceToStatsMap.put("Instance_" + nodeCount, emptyStats);
  Pair<AggregatedPartitionClassStorageStats, AggregatedPartitionClassStorageStats> aggregatedRawAndValidStats =
      clusterAggregator.aggregateHostPartitionClassStorageStatsWrappers(instanceToStatsMap);
  Map<String, Map<Short, Map<Short, ContainerStorageStats>>> expectedAggregatedStorageStatsMap =
      clusterAggregator.aggregateHostPartitionClassStorageStats(storageStatsMap);
  Assert.assertEquals(expectedAggregatedStorageStatsMap, aggregatedRawAndValidStats.getSecond().getStorageStats());
  assertAggregatedRawStatsForPartitionClassStorageStats(aggregatedRawAndValidStats.getFirst().getStorageStats(),
      expectedAggregatedStorageStatsMap, nodeCount);
}
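Because every one of the nodeCount instances reports an identical copy of storageStatsMap, the valid aggregate should match a single node's aggregate while each raw value should be nodeCount times larger. A hedged sketch of that expectation (illustrative only; it assumes ContainerStorageStats exposes a getLogicalStorageUsage() accessor, which is not shown in the snippets on this page, and the real assertAggregatedRawStatsForPartitionClassStorageStats helper may verify it differently):

Map<String, Map<Short, Map<Short, ContainerStorageStats>>> raw =
    aggregatedRawAndValidStats.getFirst().getStorageStats();
Map<String, Map<Short, Map<Short, ContainerStorageStats>>> valid =
    aggregatedRawAndValidStats.getSecond().getStorageStats();
// Sum logical usage for one partition class in each aggregate (getter assumed, see above).
long rawLogical = raw.get("default").values().stream()
    .flatMap(byContainer -> byContainer.values().stream())
    .mapToLong(ContainerStorageStats::getLogicalStorageUsage)
    .sum();
long validLogical = valid.get("default").values().stream()
    .flatMap(byContainer -> byContainer.values().stream())
    .mapToLong(ContainerStorageStats::getLogicalStorageUsage)
    .sum();
Assert.assertEquals(nodeCount * validLogical, rawLogical);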
Use of com.github.ambry.server.HostPartitionClassStorageStatsWrapper in project ambry by linkedin.
In the class MySqlClusterAggregatorTest, the method testAggregateHostPartitionClassStorageStatsWithOutdatedNode.
/**
* Test {@link MySqlClusterAggregator#aggregateHostPartitionClassStorageStatsWrappers} but with one node having outdated
* storage stats data. Outdated data shouldn't be used when aggregating valid storage stats.
* @throws Exception
*/
@Test
public void testAggregateHostPartitionClassStorageStatsWithOutdatedNode() throws Exception {
  Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> upToDateStorageStatsMap = new HashMap<>();
  upToDateStorageStatsMap.computeIfAbsent("default", k -> new HashMap<>())
      .put((long) 0, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10000L, 2, 10));
  Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> outdatedStorageStatsMap = new HashMap<>();
  outdatedStorageStatsMap.computeIfAbsent("default", k -> new HashMap<>())
      .put((long) 0, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10000L, 2, 10));
  StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE,
      TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 1, 1, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper upToDateNodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats(upToDateStorageStatsMap));
  header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 0, 1, 1, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper outdatedNodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats(outdatedStorageStatsMap));
  header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE,
      TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 0, 0, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper emptyNodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats());
  Map<String, HostPartitionClassStorageStatsWrapper> instanceToStatsMap = new LinkedHashMap<>();
  instanceToStatsMap.put("Instance_0", upToDateNodeStats);
  instanceToStatsMap.put("Instance_1", outdatedNodeStats);
  instanceToStatsMap.put("Instance_2", emptyNodeStats);
  Pair<AggregatedPartitionClassStorageStats, AggregatedPartitionClassStorageStats> aggregatedRawAndValidStats =
      clusterAggregator.aggregateHostPartitionClassStorageStatsWrappers(instanceToStatsMap);
  Map<String, Map<Short, Map<Short, ContainerStorageStats>>> expectedValid =
      clusterAggregator.aggregateHostPartitionClassStorageStats(upToDateStorageStatsMap);
  Assert.assertEquals(expectedValid, aggregatedRawAndValidStats.getSecond().getStorageStats());
}
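The outdated node is excluded from the valid aggregate because its report timestamp is too old relative to the other reports. A minimal sketch of that kind of freshness check (an illustration of the idea only, not the actual selectRawHostPartitionClassStorageStatsMap implementation; the cutoff computation here is an assumption):

long newestTimestampMs = instanceToStatsMap.values().stream()
    .mapToLong(wrapper -> wrapper.getHeader().getTimestamp())
    .max()
    .orElse(0L);
long cutoffMs = newestTimestampMs - TimeUnit.MINUTES.toMillis(RELEVANT_PERIOD_IN_MINUTES);
for (Map.Entry<String, HostPartitionClassStorageStatsWrapper> entry : instanceToStatsMap.entrySet()) {
  boolean fresh = entry.getValue().getHeader().getTimestamp() >= cutoffMs;
  // Only fresh reports would contribute to the valid (second) aggregate;
  // every non-null report still contributes to the raw (first) aggregate.
  System.out.println(entry.getKey() + (fresh ? " is fresh" : " is outdated"));
}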