Use of com.github.ambry.server.StatsHeader in project ambry by linkedin.
The class HelixClusterAggregatorTest, method generateNodeStats.
/**
* Given a {@link List} of {@link StatsSnapshot}s and a timestamp, generate a {@link StatsWrapper} that would have been
* produced by a node.
* @param storeSnapshots a {@link List} of store level {@link StatsSnapshot}s.
* @param timestamp the timestamp to be attached to the generated {@link StatsWrapper}.
* @return the generated node level {@link StatsWrapper}.
*/
private StatsWrapper generateNodeStats(List<StatsSnapshot> storeSnapshots, long timestamp) {
  Map<String, StatsSnapshot> partitionMap = new HashMap<>();
  long total = 0;
  int numbOfPartitions = storeSnapshots.size();
  for (int i = 0; i < numbOfPartitions; i++) {
    StatsSnapshot partitionSnapshot = storeSnapshots.get(i);
    partitionMap.put(String.format("partition_%d", i), partitionSnapshot);
    total += partitionSnapshot.getValue();
  }
  StatsSnapshot nodeSnapshot = new StatsSnapshot(total, partitionMap);
  StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.QUOTA, timestamp, numbOfPartitions,
      numbOfPartitions, Collections.emptyList());
  return new StatsWrapper(header, nodeSnapshot);
}
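A minimal usage sketch, assuming a call from within the same test class; the snapshot values and the call site are illustrative, not taken from the test above:

// Two store-level snapshots with empty sub-maps, wrapped as a node-level report.
List<StatsSnapshot> storeSnapshots = new ArrayList<>();
storeSnapshots.add(new StatsSnapshot(100L, new HashMap<>())); // store 0, value 100
storeSnapshots.add(new StatsSnapshot(200L, new HashMap<>())); // store 1, value 200
StatsWrapper nodeStats = generateNodeStats(storeSnapshots, System.currentTimeMillis());
// The node-level snapshot sums its children: sub-map keys "partition_0" and "partition_1", total value 300.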
Use of com.github.ambry.server.StatsHeader in project ambry by linkedin.
The class TestUtils, method generateNodeStats.
/**
* Given a {@link List} of {@link StatsSnapshot}s and a timestamp, generate a {@link StatsWrapper} that would have been
* produced by a node.
* @param storeSnapshots a {@link List} of store level {@link StatsSnapshot}s.
* @param timestamp the timestamp to be attached to the generated {@link StatsWrapper}.
* @param type the type of stats report to generate on this node.
* @return the generated node level {@link StatsWrapper}.
*/
public static StatsWrapper generateNodeStats(List<StatsSnapshot> storeSnapshots, long timestamp,
    StatsReportType type) {
  long total = 0;
  int numbOfPartitions = storeSnapshots.size();
  Map<String, StatsSnapshot> partitionMap = new HashMap<>();
  Map<String, StatsSnapshot> partitionClassMap = new HashMap<>();
  String[] PARTITION_CLASS = new String[]{"PartitionClass1", "PartitionClass2"};
  for (int i = 0; i < numbOfPartitions; i++) {
    String partitionIdStr = Utils.statsPartitionKey(i);
    StatsSnapshot partitionSnapshot = storeSnapshots.get(i);
    partitionMap.put(partitionIdStr, partitionSnapshot);
    total += partitionSnapshot.getValue();
    if (type == StatsReportType.PARTITION_CLASS_REPORT) {
      String partitionClassStr = PARTITION_CLASS[i % PARTITION_CLASS.length];
      StatsSnapshot partitionClassSnapshot =
          partitionClassMap.getOrDefault(partitionClassStr, new StatsSnapshot(0L, new HashMap<>()));
      partitionClassSnapshot.setValue(partitionClassSnapshot.getValue() + partitionSnapshot.getValue());
      partitionClassSnapshot.getSubMap().put(partitionIdStr, partitionSnapshot);
      partitionClassMap.put(partitionClassStr, partitionClassSnapshot);
    }
  }
  StatsSnapshot nodeSnapshot = null;
  if (type == StatsReportType.ACCOUNT_REPORT) {
    nodeSnapshot = new StatsSnapshot(total, partitionMap);
  } else if (type == StatsReportType.PARTITION_CLASS_REPORT) {
    nodeSnapshot = new StatsSnapshot(total, partitionClassMap);
  }
  StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, timestamp, numbOfPartitions,
      numbOfPartitions, Collections.emptyList());
  return new StatsWrapper(header, nodeSnapshot);
}
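A hedged sketch of how the two report types shape the result; the snapshot values are illustrative, and the total is the same either way:

// Same stores, two report types: the sub-map keys differ, the total does not.
List<StatsSnapshot> storeSnapshots = Arrays.asList(
    new StatsSnapshot(100L, new HashMap<>()),
    new StatsSnapshot(200L, new HashMap<>()));
StatsWrapper accountStats = TestUtils.generateNodeStats(storeSnapshots, 0L, StatsReportType.ACCOUNT_REPORT);
StatsWrapper classStats = TestUtils.generateNodeStats(storeSnapshots, 0L, StatsReportType.PARTITION_CLASS_REPORT);
// accountStats keys its sub-map by Utils.statsPartitionKey(i); classStats interposes a grouping
// level keyed by "PartitionClass1"/"PartitionClass2". Both node snapshots report a total of 300.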
Use of com.github.ambry.server.StatsHeader in project ambry by linkedin.
The class MySqlClusterAggregatorTest, method testAggregateHostAccountStorageStatsWithDifferentNumberOfStores.
/**
* Test {@link MySqlClusterAggregator#aggregateHostAccountStorageStatsWrappers}, but with different numbers of partitions
* from different nodes.
* @throws Exception
*/
@Test
public void testAggregateHostAccountStorageStatsWithDifferentNumberOfStores() throws Exception {
  // This storage stats map has only two partitions, 0 and 1, each with a single account and container.
  String statsInJson =
      "{'0': {'0': {'0': {'containerId':0, 'logicalStorageUsage':10, 'physicalStorageUsage':20, 'numberOfBlobs':10}}}, '1': {'0': {'0': {'containerId':0, 'logicalStorageUsage':20, 'physicalStorageUsage':40, 'numberOfBlobs':20}}}}";
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> storageStatsMap1 =
      objectMapper.readValue(statsInJson.replace("'", "\""), HostAccountStorageStats.class).getStorageStats();
  // This storage stats map has three partitions, 0, 1 and 2, each with a single account and container.
  statsInJson =
      "{'0': {'0': {'0': {'containerId':0, 'logicalStorageUsage':30, 'physicalStorageUsage':60, 'numberOfBlobs':30}}}, '1': {'0': {'0': {'containerId':0, 'logicalStorageUsage':40, 'physicalStorageUsage':80, 'numberOfBlobs':40}}}, '2': {'0': {'0': {'containerId':0, 'logicalStorageUsage':50, 'physicalStorageUsage':100, 'numberOfBlobs':50}}}}";
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> storageStatsMap2 =
      objectMapper.readValue(statsInJson.replace("'", "\""), HostAccountStorageStats.class).getStorageStats();
  // The raw combined stats should have a single account and container whose usage is the sum over all nodes.
  statsInJson = "{'0': {'0': {'containerId':0, 'logicalStorageUsage':150, 'physicalStorageUsage':300, 'numberOfBlobs':150}}}";
  Map<Short, Map<Short, ContainerStorageStats>> expectedRaw = objectMapper.readValue(statsInJson.replace("'", "\""),
      new TypeReference<Map<Short, Map<Short, ContainerStorageStats>>>() {
      });
  // The aggregated (valid) stats should also have a single account and container, taken entirely from the second
  // node, since the second node's physical storage usage is larger than the first node's at every partition.
  statsInJson = "{'0': {'0': {'containerId':0, 'logicalStorageUsage':120, 'physicalStorageUsage':240, 'numberOfBlobs':120}}}";
  Map<Short, Map<Short, ContainerStorageStats>> expectedValid = objectMapper.readValue(statsInJson.replace("'", "\""),
      new TypeReference<Map<Short, Map<Short, ContainerStorageStats>>>() {
      });
  // storageStatsMap1 carries stats for only 2 stores; storageStatsMap2 carries 3.
  StatsHeader header =
      new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP, 2, 2, Collections.emptyList());
  HostAccountStorageStatsWrapper nodeStats1 =
      new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats(storageStatsMap1));
  header =
      new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP, 3, 3, Collections.emptyList());
  HostAccountStorageStatsWrapper nodeStats2 =
      new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats(storageStatsMap2));
  Map<String, HostAccountStorageStatsWrapper> instanceStatsMap = new HashMap<>();
  instanceStatsMap.put("Instance_1", nodeStats1);
  instanceStatsMap.put("Instance_2", nodeStats2);
  Pair<AggregatedAccountStorageStats, AggregatedAccountStorageStats> aggregatedRawAndValidStats =
      clusterAggregator.aggregateHostAccountStorageStatsWrappers(instanceStatsMap);
  Assert.assertEquals(expectedRaw, aggregatedRawAndValidStats.getFirst().getStorageStats());
  Assert.assertEquals(expectedValid, aggregatedRawAndValidStats.getSecond().getStorageStats());
}
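The two expected maps follow from two different rules. A short sketch of the arithmetic, inferred from this test's expectations rather than taken from the aggregator's actual code:

// Raw aggregate: every replica of every partition counts once.
long rawLogical = 10 + 20 + 30 + 40 + 50; // 150; physical usage doubles each term, giving 300
// Valid aggregate: per partition, keep the replica with the larger physical usage, then sum the winners.
long validLogical = Math.max(10, 30) + Math.max(20, 40) + 50; // 30 + 40 + 50 = 120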
Use of com.github.ambry.server.StatsHeader in project ambry by linkedin.
The class MySqlClusterAggregatorTest, method testAggregateHostAccountStorageStatsWithOutdatedNode.
/**
* Test {@link MySqlClusterAggregator#aggregateHostAccountStorageStatsWrappers} but with one node having outdated
* storage stats data. Outdated data shouldn't be used when aggregating valid storage usage.
* @throws Exception
*/
@Test
public void testAggregateHostAccountStorageStatsWithOutdatedNode() throws Exception {
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> upToDateStorageStatsMap = new HashMap<>();
  upToDateStorageStatsMap.put((long) 0,
      StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10000L, 2, 10));
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> outdatedStorageStatsMap = new HashMap<>();
  outdatedStorageStatsMap.put((long) 0,
      StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 6, 3, 10000L, 2, 10));
  StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE,
      TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 1, 1, Collections.emptyList());
  HostAccountStorageStatsWrapper upToDateNodeStats =
      new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats(upToDateStorageStatsMap));
  header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 0, 1, 1, Collections.emptyList());
  HostAccountStorageStatsWrapper outdatedNodeStats =
      new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats(outdatedStorageStatsMap));
  header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE,
      TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 0, 0, Collections.emptyList());
  HostAccountStorageStatsWrapper emptyNodeStats =
      new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats());
  Map<String, HostAccountStorageStatsWrapper> instanceToStatsMap = new LinkedHashMap<>();
  instanceToStatsMap.put("Instance_0", upToDateNodeStats);
  instanceToStatsMap.put("Instance_1", outdatedNodeStats);
  instanceToStatsMap.put("Instance_2", emptyNodeStats);
  Pair<AggregatedAccountStorageStats, AggregatedAccountStorageStats> aggregatedRawAndValidStats =
      clusterAggregator.aggregateHostAccountStorageStatsWrappers(instanceToStatsMap);
  Map<Short, Map<Short, ContainerStorageStats>> expectedValid =
      clusterAggregator.aggregateHostAccountStorageStats(upToDateStorageStatsMap);
  Assert.assertEquals(expectedValid, aggregatedRawAndValidStats.getSecond().getStorageStats());
}
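What makes Instance_1 "outdated" is its header timestamp of 0 against a newest report at 2 * RELEVANT_PERIOD_IN_MINUTES. A plausible sketch of the staleness check, assuming StatsHeader exposes its timestamp through a getTimestamp() accessor; the aggregator's real predicate may differ:

long newestMs = TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES);
long windowMs = TimeUnit.MINUTES.toMillis(RELEVANT_PERIOD_IN_MINUTES);
// A report is skipped for the valid aggregate when it lags the newest report by more than the window.
boolean outdated = newestMs - outdatedNodeStats.getHeader().getTimestamp() > windowMs; // true here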
Use of com.github.ambry.server.StatsHeader in project ambry by linkedin.
The class MySqlClusterAggregatorTest, method testAggregateHostPartitionClassStorageStats.
/**
* Test basic functionality of {@link MySqlClusterAggregator#aggregateHostPartitionClassStorageStats}.
* @throws Exception
*/
@Test
public void testAggregateHostPartitionClassStorageStats() throws Exception {
  int nodeCount = 3;
  int numberOfPartitions = 4;
  Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> storageStatsMap = new HashMap<>();
  String[] partitionClassNames = {"default", "newClass"};
  for (int i = 0; i < numberOfPartitions; i++) {
    String partitionClassName = partitionClassNames[i % partitionClassNames.length];
    storageStatsMap.computeIfAbsent(partitionClassName, k -> new HashMap<>())
        .put((long) i, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, i + 3, 3, 10000L, 2, 10));
  }
  StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP,
      numberOfPartitions, numberOfPartitions, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper nodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats(storageStatsMap));
  header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP, 0, 0,
      Collections.emptyList());
  HostPartitionClassStorageStatsWrapper emptyStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats());
  Map<String, HostPartitionClassStorageStatsWrapper> instanceToStatsMap = new HashMap<>();
  for (int i = 0; i < nodeCount; i++) {
    instanceToStatsMap.put("Instance_" + i, new HostPartitionClassStorageStatsWrapper(
        new StatsHeader(nodeStats.getHeader()), new HostPartitionClassStorageStats(nodeStats.getStats())));
  }
  instanceToStatsMap.put("Instance_" + nodeCount, emptyStats);
  Pair<AggregatedPartitionClassStorageStats, AggregatedPartitionClassStorageStats> aggregatedRawAndValidStats =
      clusterAggregator.aggregateHostPartitionClassStorageStatsWrappers(instanceToStatsMap);
  Map<String, Map<Short, Map<Short, ContainerStorageStats>>> expectedAggregatedStorageStatsMap =
      clusterAggregator.aggregateHostPartitionClassStorageStats(storageStatsMap);
  Assert.assertEquals(expectedAggregatedStorageStatsMap, aggregatedRawAndValidStats.getSecond().getStorageStats());
  assertAggregatedRawStatsForPartitionClassStorageStats(aggregatedRawAndValidStats.getFirst().getStorageStats(),
      expectedAggregatedStorageStatsMap, nodeCount);
}
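The raw-stats helper at the end is not shown on this page. Since all nodeCount instances submit identical copies of nodeStats, the property it plausibly verifies is that raw usage scales with the replica count while valid usage is de-duplicated. A hedged spot check, with accessor names inferred from the JSON field names above and assuming account 0/container 0 appear in the generated stats:

ContainerStorageStats rawEntry =
    aggregatedRawAndValidStats.getFirst().getStorageStats().get("default").get((short) 0).get((short) 0);
ContainerStorageStats validEntry = expectedAggregatedStorageStatsMap.get("default").get((short) 0).get((short) 0);
// Every container's raw usage should be exactly nodeCount times its de-duplicated valid usage.
Assert.assertEquals(validEntry.getLogicalStorageUsage() * nodeCount, rawEntry.getLogicalStorageUsage());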