Use of com.github.ambry.server.storagestats.ContainerStorageStats in the project ambry by LinkedIn.
From the class BlobStoreStatsTest, method verifyContainerStorageStatsAndGetTotalValidSize.
/**
 * Checks the per-container valid data size reported by {@link BlobStoreStats} against the
 * expected reference values and accumulates the total valid data size across every container.
 * @param blobStoreStats the {@link BlobStoreStats} under test
 * @param referenceTimeInMs the reference time in ms until which deletes and expiration are relevant
 * @return the total valid data size of all containers (from all serviceIds)
 * @throws StoreException if the storage stats cannot be obtained from the store
 */
private long verifyContainerStorageStatsAndGetTotalValidSize(BlobStoreStats blobStoreStats, long referenceTimeInMs)
    throws StoreException {
  Map<String, Pair<AtomicLong, AtomicLong>> deleteTombstoneStats = generateDeleteTombstoneStats();
  Map<Short, Map<Short, ContainerStorageStats>> actualStats =
      blobStoreStats.getContainerStorageStats(referenceTimeInMs);
  Map<Short, Map<Short, ContainerStorageStats>> expectedStats =
      getContainerStorageStats(referenceTimeInMs, state.time.milliseconds(), deleteTombstoneStats);
  long totalValidSize = 0L;
  for (Map.Entry<Short, Map<Short, ContainerStorageStats>> accountEntry : expectedStats.entrySet()) {
    short accountId = accountEntry.getKey();
    assertTrue("Expected accountId: " + accountId + " not found", actualStats.containsKey(accountId));
    Map<Short, ContainerStorageStats> expectedContainers = accountEntry.getValue();
    Map<Short, ContainerStorageStats> actualContainers = actualStats.get(accountId);
    for (Map.Entry<Short, ContainerStorageStats> containerEntry : expectedContainers.entrySet()) {
      short containerId = containerEntry.getKey();
      assertTrue("Expected containerId: " + containerId + " not found in accountId: " + accountId,
          actualContainers.containsKey(containerId));
      ContainerStorageStats expected = containerEntry.getValue();
      ContainerStorageStats actual = actualContainers.get(containerId);
      assertEquals("Storage stats mismatch for accountId: " + accountId + " containerId: " + containerId,
          expected, actual);
      totalValidSize += expected.getLogicalStorageUsage();
    }
    if (expectedContainers.size() != actualContainers.size()) {
      // any container present only in the actual map must report zero logical usage
      for (Map.Entry<Short, ContainerStorageStats> actualEntry : actualContainers.entrySet()) {
        if (!expectedContainers.containsKey(actualEntry.getKey())) {
          assertEquals("Expecting 0 value for account id " + accountId + " and container " + actualEntry.getKey(),
              0, actualEntry.getValue().getLogicalStorageUsage());
        }
      }
    }
    actualStats.remove(accountId);
  }
  // accounts left in the actual map were not expected at all; they may only carry zero usage
  for (Map.Entry<Short, Map<Short, ContainerStorageStats>> leftoverEntry : actualStats.entrySet()) {
    if (leftoverEntry.getValue().size() != 0) {
      for (Map.Entry<Short, ContainerStorageStats> mapEntry : leftoverEntry.getValue().entrySet()) {
        assertEquals(
            "Additional values found in actual container valid size map for service " + leftoverEntry.getKey(),
            0, mapEntry.getValue().getLogicalStorageUsage());
      }
    }
  }
  // verify delete tombstone stats
  verifyDeleteTombstoneStats(blobStoreStats, deleteTombstoneStats);
  return totalValidSize;
}
Use of com.github.ambry.server.storagestats.ContainerStorageStats in the project ambry by LinkedIn.
From the class MySqlClusterAggregatorTest, method assertAggregatedRawStatsForAccountStorageStats.
/**
 * Asserts that every raw aggregated {@link ContainerStorageStats} equals the expected
 * single-node stats scaled by the number of nodes that reported identical data.
 * @param raw the raw aggregated account storage stats to check
 * @param expected the expected single-node account storage stats
 * @param nodeCount the number of nodes whose stats were summed into {@code raw}
 */
private void assertAggregatedRawStatsForAccountStorageStats(Map<Short, Map<Short, ContainerStorageStats>> raw,
    Map<Short, Map<Short, ContainerStorageStats>> expected, int nodeCount) {
  Assert.assertEquals(expected.size(), raw.size());
  raw.forEach((accountId, containerMap) -> {
    Assert.assertTrue(expected.containsKey(accountId));
    Map<Short, ContainerStorageStats> expectedContainers = expected.get(accountId);
    Assert.assertEquals(expectedContainers.size(), containerMap.size());
    containerMap.forEach((containerId, rawStats) -> {
      Assert.assertTrue(expectedContainers.containsKey(containerId));
      ContainerStorageStats expectedStats = expectedContainers.get(containerId);
      // raw aggregation sums the same stats once per node, hence the nodeCount multiplier
      Assert.assertEquals(expectedStats.getLogicalStorageUsage() * nodeCount, rawStats.getLogicalStorageUsage());
      Assert.assertEquals(expectedStats.getPhysicalStorageUsage() * nodeCount, rawStats.getPhysicalStorageUsage());
      Assert.assertEquals(expectedStats.getNumberOfBlobs() * nodeCount, rawStats.getNumberOfBlobs());
    });
  });
}
Use of com.github.ambry.server.storagestats.ContainerStorageStats in the project ambry by LinkedIn.
From the class MySqlClusterAggregatorTest, method testAggregateHostAccountStorageStatsWithDifferentNumberOfStores.
/**
 * Test {@link MySqlClusterAggregator#aggregateHostAccountStorageStatsWrappers}, but with different numbers of partitions
 * from different nodes.
 * @throws Exception
 */
@Test
public void testAggregateHostAccountStorageStatsWithDifferentNumberOfStores() throws Exception {
  // Node 1 reports 2 partitions (0 and 1); each partition has a single account and container.
  String node1Json = "{'0': {'0': {'0': {'containerId':0, 'logicalStorageUsage':10, 'physicalStorageUsage':20, 'numberOfBlobs':10}}}, '1': {'0': {'0': {'containerId':0, 'logicalStorageUsage':20, 'physicalStorageUsage':40, 'numberOfBlobs':20}}}}";
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> storageStatsMap1 =
      objectMapper.readValue(node1Json.replace("'", "\""), HostAccountStorageStats.class).getStorageStats();
  // Node 2 reports 3 partitions (0, 1 and 2); each partition has a single account and container.
  String node2Json = "{'0': {'0': {'0': {'containerId':0, 'logicalStorageUsage':30, 'physicalStorageUsage':60, 'numberOfBlobs':30}}}, '1': {'0': {'0': {'containerId':0, 'logicalStorageUsage':40, 'physicalStorageUsage':80, 'numberOfBlobs':40}}}, '2': {'0': {'0': {'containerId':0, 'logicalStorageUsage':50, 'physicalStorageUsage':100, 'numberOfBlobs':50}}}}";
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> storageStatsMap2 =
      objectMapper.readValue(node2Json.replace("'", "\""), HostAccountStorageStats.class).getStorageStats();
  // Raw combined stats sum every node's usage across all partitions: 10+20+30+40+50.
  String expectedRawJson = "{'0': {'0': {'containerId':0, 'logicalStorageUsage':150, 'physicalStorageUsage':300, 'numberOfBlobs':150}}}";
  Map<Short, Map<Short, ContainerStorageStats>> expectedRaw = objectMapper.readValue(
      expectedRawJson.replace("'", "\""), new TypeReference<Map<Short, Map<Short, ContainerStorageStats>>>() {
      });
  // Valid stats take, per partition, the node with the larger physical usage — node 2 everywhere: 30+40+50.
  String expectedValidJson = "{'0': {'0': {'containerId':0, 'logicalStorageUsage':120, 'physicalStorageUsage':240, 'numberOfBlobs':120}}}";
  Map<Short, Map<Short, ContainerStorageStats>> expectedValid = objectMapper.readValue(
      expectedValidJson.replace("'", "\""), new TypeReference<Map<Short, Map<Short, ContainerStorageStats>>>() {
      });
  // Headers reflect the per-node store counts: 2 stores for node 1, 3 stores for node 2.
  StatsHeader header =
      new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP, 2, 2, Collections.emptyList());
  HostAccountStorageStatsWrapper nodeStats1 =
      new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats(storageStatsMap1));
  header =
      new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP, 3, 3, Collections.emptyList());
  HostAccountStorageStatsWrapper nodeStats2 =
      new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats(storageStatsMap2));
  Map<String, HostAccountStorageStatsWrapper> instanceStatsMap = new HashMap<>();
  instanceStatsMap.put("Instance_1", nodeStats1);
  instanceStatsMap.put("Instance_2", nodeStats2);
  Pair<AggregatedAccountStorageStats, AggregatedAccountStorageStats> aggregatedRawAndValidStats =
      clusterAggregator.aggregateHostAccountStorageStatsWrappers(instanceStatsMap);
  Assert.assertEquals(expectedRaw, aggregatedRawAndValidStats.getFirst().getStorageStats());
  Assert.assertEquals(expectedValid, aggregatedRawAndValidStats.getSecond().getStorageStats());
}
Use of com.github.ambry.server.storagestats.ContainerStorageStats in the project ambry by LinkedIn.
From the class MySqlClusterAggregatorTest, method testAggregateHostAccountStorageStatsWithOutdatedNode.
/**
 * Test {@link MySqlClusterAggregator#aggregateHostAccountStorageStatsWrappers} but with one node having outdated
 * storage stats data. Outdated data shouldn't be used when aggregating valid storage usage.
 * @throws Exception
 */
@Test
public void testAggregateHostAccountStorageStatsWithOutdatedNode() throws Exception {
  // Stats reported with a recent timestamp.
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> freshStorageStatsMap = new HashMap<>();
  freshStorageStatsMap.put((long) 0,
      StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10000L, 2, 10));
  // Stats whose header timestamp of 0 marks them as stale.
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> staleStorageStatsMap = new HashMap<>();
  staleStorageStatsMap.put((long) 0,
      StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 6, 3, 10000L, 2, 10));
  StatsHeader statsHeader = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE,
      TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 1, 1, Collections.emptyList());
  HostAccountStorageStatsWrapper freshNodeStats =
      new HostAccountStorageStatsWrapper(statsHeader, new HostAccountStorageStats(freshStorageStatsMap));
  statsHeader = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 0, 1, 1, Collections.emptyList());
  HostAccountStorageStatsWrapper staleNodeStats =
      new HostAccountStorageStatsWrapper(statsHeader, new HostAccountStorageStats(staleStorageStatsMap));
  statsHeader = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE,
      TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 0, 0, Collections.emptyList());
  HostAccountStorageStatsWrapper emptyNodeStats =
      new HostAccountStorageStatsWrapper(statsHeader, new HostAccountStorageStats());
  // LinkedHashMap keeps a deterministic iteration order over the instances.
  Map<String, HostAccountStorageStatsWrapper> instanceToStatsMap = new LinkedHashMap<>();
  instanceToStatsMap.put("Instance_0", freshNodeStats);
  instanceToStatsMap.put("Instance_1", staleNodeStats);
  instanceToStatsMap.put("Instance_2", emptyNodeStats);
  Pair<AggregatedAccountStorageStats, AggregatedAccountStorageStats> rawAndValidStats =
      clusterAggregator.aggregateHostAccountStorageStatsWrappers(instanceToStatsMap);
  // Only the up-to-date node should contribute to the valid aggregate.
  Map<Short, Map<Short, ContainerStorageStats>> expectedValid =
      clusterAggregator.aggregateHostAccountStorageStats(freshStorageStatsMap);
  Assert.assertEquals(expectedValid, rawAndValidStats.getSecond().getStorageStats());
}
Use of com.github.ambry.server.storagestats.ContainerStorageStats in the project ambry by LinkedIn.
From the class MySqlClusterAggregatorTest, method testAggregateHostPartitionClassStorageStats.
/**
 * Test basic functionality of {@link MySqlClusterAggregator#aggregateHostPartitionClassStorageStats}.
 * @throws Exception
 */
@Test
public void testAggregateHostPartitionClassStorageStats() throws Exception {
  final int nodeCount = 3;
  final int numberOfPartitions = 4;
  String[] partitionClassNames = { "default", "newClass" };
  // Build random per-partition stats, alternating the partitions between the two partition classes.
  Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> storageStatsMap = new HashMap<>();
  for (int partition = 0; partition < numberOfPartitions; partition++) {
    String className = partitionClassNames[partition % partitionClassNames.length];
    storageStatsMap.computeIfAbsent(className, k -> new HashMap<>())
        .put((long) partition,
            StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, partition + 3, 3, 10000L, 2,
                10));
  }
  StatsHeader statsHeader = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP,
      numberOfPartitions, numberOfPartitions, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper nodeStats =
      new HostPartitionClassStorageStatsWrapper(statsHeader, new HostPartitionClassStorageStats(storageStatsMap));
  statsHeader =
      new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP, 0, 0, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper emptyStats =
      new HostPartitionClassStorageStatsWrapper(statsHeader, new HostPartitionClassStorageStats());
  // Every regular node reports an identical copy of the stats; one extra node reports nothing.
  Map<String, HostPartitionClassStorageStatsWrapper> instanceToStatsMap = new HashMap<>();
  for (int node = 0; node < nodeCount; node++) {
    instanceToStatsMap.put("Instance_" + node,
        new HostPartitionClassStorageStatsWrapper(new StatsHeader(nodeStats.getHeader()),
            new HostPartitionClassStorageStats(nodeStats.getStats())));
  }
  instanceToStatsMap.put("Instance_" + nodeCount, emptyStats);
  Pair<AggregatedPartitionClassStorageStats, AggregatedPartitionClassStorageStats> rawAndValidStats =
      clusterAggregator.aggregateHostPartitionClassStorageStatsWrappers(instanceToStatsMap);
  // The valid aggregate equals the single-node aggregation; the raw aggregate is that scaled by nodeCount.
  Map<String, Map<Short, Map<Short, ContainerStorageStats>>> expectedAggregatedStorageStatsMap =
      clusterAggregator.aggregateHostPartitionClassStorageStats(storageStatsMap);
  Assert.assertEquals(expectedAggregatedStorageStatsMap, rawAndValidStats.getSecond().getStorageStats());
  assertAggregatedRawStatsForPartitionClassStorageStats(rawAndValidStats.getFirst().getStorageStats(),
      expectedAggregatedStorageStatsMap, nodeCount);
}
Aggregations