Use of com.github.ambry.server.StatsSnapshot in project ambry by linkedin.
The class HelixClusterAggregatorTest, method testStatsAggregationWithZeroValueSnapshots.
/**
 * Test removing zero-value snapshots from the aggregated result.
 * @throws IOException
 */
@Test
public void testStatsAggregationWithZeroValueSnapshots() throws IOException {
  int nodeCount = 3;
  Random random = new Random();
  // For each report type, create snapshots for 3 stores with 3, 4 and 5 accounts respectively (3 containers each).
  for (StatsReportType type : EnumSet.of(StatsReportType.ACCOUNT_REPORT, StatsReportType.PARTITION_CLASS_REPORT)) {
    List<StatsSnapshot> storeSnapshots = new ArrayList<>();
    for (int i = 3; i < 6; i++) {
      storeSnapshots.add(TestUtils.generateStoreStats(i, 3, random, type));
    }
    // Add a zero-value entry to the first snapshot.
    if (type == StatsReportType.ACCOUNT_REPORT) {
      Map<String, StatsSnapshot> accountStatsSnapshotMap = storeSnapshots.get(0).getSubMap();
      accountStatsSnapshotMap.put("A[100]", new StatsSnapshot(0L, new HashMap<String, StatsSnapshot>() {
        {
          put("C[100]", new StatsSnapshot(0L, null));
        }
      }));
    } else {
      Map<String, StatsSnapshot> accountContainerStatsSnapshotMap = storeSnapshots.get(0).getSubMap();
      accountContainerStatsSnapshotMap.put("A[100]_C[100]", new StatsSnapshot(0L, null));
    }
    StatsWrapper nodeStats = TestUtils.generateNodeStats(storeSnapshots, DEFAULT_TIMESTAMP, type);
    String nodeStatsJSON = mapper.writeValueAsString(nodeStats);
    Map<String, String> instanceToStatsMap = new HashMap<>();
    for (int i = 0; i < nodeCount; i++) {
      instanceToStatsMap.put("Instance_" + i, nodeStatsJSON);
    }
    // 1. Aggregate all snapshots into the first snapshot in the list to obtain the expected aggregated snapshot.
    // 2. Invoke clusterAggregator to aggregate stats across all instances.
    // 3. Verify both raw stats and valid stats after aggregation.
    Pair<StatsSnapshot, StatsSnapshot> aggregatedRawAndValidStats = clusterAggregator.doWork(instanceToStatsMap, type);
    // Remove the zero-value snapshots first, then build the expected snapshot.
    StatsSnapshot expectedSnapshot = null;
    switch (type) {
      case ACCOUNT_REPORT:
        storeSnapshots.get(0).getSubMap().remove("A[100]");
        // Aggregate all store snapshots into the first one to produce the expected valid aggregation.
        for (int i = 1; i < storeSnapshots.size(); i++) {
          StatsSnapshot.aggregate(storeSnapshots.get(0), storeSnapshots.get(i));
        }
        expectedSnapshot = storeSnapshots.get(0);
        break;
      case PARTITION_CLASS_REPORT:
        storeSnapshots.get(0).getSubMap().remove("A[100]_C[100]");
        // Invoke reduceByPartitionClass to drop the partition level and keep only partition class and account_container entries.
        expectedSnapshot = HelixClusterAggregator.reduceByPartitionClass(nodeStats.getSnapshot());
        break;
    }
    // Verify cluster-wide raw stats aggregation.
    StatsSnapshot rawSnapshot =
        mapper.readValue(mapper.writeValueAsString(aggregatedRawAndValidStats.getFirst()), StatsSnapshot.class);
    assertEquals("Mismatch in total value of " + type, nodeCount * expectedSnapshot.getValue(), rawSnapshot.getValue());
    if (type == StatsReportType.ACCOUNT_REPORT) {
      verifyAggregatedRawStatsForAccountReport(rawSnapshot, expectedSnapshot, nodeCount);
    } else if (type == StatsReportType.PARTITION_CLASS_REPORT) {
      verifyAggregatedRawStatsForPartitionClassReport(rawSnapshot, expectedSnapshot, nodeCount);
    }
    // Verify cluster-wide valid stats aggregation.
    StatsSnapshot actualSnapshot =
        mapper.readValue(mapper.writeValueAsString(aggregatedRawAndValidStats.getSecond()), StatsSnapshot.class);
    assertTrue("Mismatch in the aggregated snapshot", expectedSnapshot.equals(actualSnapshot));
  }
}
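For reference, the pruning this test expects can be pictured as a bottom-up walk over the snapshot tree that drops any child whose subtree total is zero. The sketch below is illustrative only: removeZeroValueEntries is a hypothetical helper, not an Ambry API; only getValue() and getSubMap() come from the snippet above, and the usual java.util imports are assumed.

// Hypothetical sketch of the zero-value pruning the test expects; not the aggregator's actual code.
static void removeZeroValueEntries(StatsSnapshot snapshot) {
  Map<String, StatsSnapshot> subMap = snapshot.getSubMap();
  if (subMap == null) {
    return;
  }
  Iterator<Map.Entry<String, StatsSnapshot>> iterator = subMap.entrySet().iterator();
  while (iterator.hasNext()) {
    StatsSnapshot child = iterator.next().getValue();
    // Prune the child's own subtree first, then drop the child if it carries no value,
    // e.g. the "A[100]"/"C[100]" entries added above.
    removeZeroValueEntries(child);
    if (child.getValue() == 0L) {
      iterator.remove();
    }
  }
}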
Use of com.github.ambry.server.StatsSnapshot in project ambry by linkedin.
The class HelixClusterAggregatorTest, method testDoWorkWithOutdatedNode.
/**
 * Tests to verify cluster-wide aggregation with outdated node stats.
 * @throws IOException
 */
@Test
public void testDoWorkWithOutdatedNode() throws IOException {
  long seed = 1111;
  for (StatsReportType type : EnumSet.of(StatsReportType.ACCOUNT_REPORT, StatsReportType.PARTITION_CLASS_REPORT)) {
    List<StatsSnapshot> upToDateStoreSnapshots = new ArrayList<>();
    List<StatsSnapshot> outdatedStoreSnapshots = new ArrayList<>();
    upToDateStoreSnapshots.add(TestUtils.generateStoreStats(5, 3, new Random(seed), type));
    outdatedStoreSnapshots.add(TestUtils.generateStoreStats(6, 3, new Random(seed), type));
    StatsWrapper upToDateNodeStats =
        TestUtils.generateNodeStats(upToDateStoreSnapshots, TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), type);
    StatsWrapper outdatedNodeStats = TestUtils.generateNodeStats(outdatedStoreSnapshots, 0, type);
    StatsWrapper emptyNodeStats =
        TestUtils.generateNodeStats(Collections.emptyList(), TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), type);
    Map<String, String> instanceToStatsMap = new LinkedHashMap<>();
    instanceToStatsMap.put("Instance_0", mapper.writeValueAsString(outdatedNodeStats));
    instanceToStatsMap.put("Instance_1", mapper.writeValueAsString(upToDateNodeStats));
    instanceToStatsMap.put("Instance_2", mapper.writeValueAsString(emptyNodeStats));
    instanceToStatsMap.put(EXCEPTION_INSTANCE_NAME, "");
    Pair<StatsSnapshot, StatsSnapshot> aggregatedRawAndValidStats = clusterAggregator.doWork(instanceToStatsMap, type);
    StatsSnapshot expectedValidSnapshot = null;
    StatsSnapshot expectedRawSnapshot = new StatsSnapshot(0L, new HashMap<>());
    switch (type) {
      case ACCOUNT_REPORT:
        expectedValidSnapshot = upToDateStoreSnapshots.get(0);
        StatsSnapshot.aggregate(expectedRawSnapshot, outdatedStoreSnapshots.get(0));
        StatsSnapshot.aggregate(expectedRawSnapshot, upToDateStoreSnapshots.get(0));
        break;
      case PARTITION_CLASS_REPORT:
        expectedValidSnapshot = HelixClusterAggregator.reduceByPartitionClass(upToDateNodeStats.getSnapshot());
        StatsSnapshot.aggregate(expectedRawSnapshot, outdatedNodeStats.getSnapshot());
        StatsSnapshot.aggregate(expectedRawSnapshot, upToDateNodeStats.getSnapshot());
        expectedRawSnapshot = HelixClusterAggregator.reduceByPartitionClass(expectedRawSnapshot);
        break;
    }
    // Verify cluster-wide aggregation on raw stats with outdated node stats.
    StatsSnapshot rawSnapshot =
        mapper.readValue(mapper.writeValueAsString(aggregatedRawAndValidStats.getFirst()), StatsSnapshot.class);
    assertTrue("Mismatch in the aggregated raw snapshot", expectedRawSnapshot.equals(rawSnapshot));
    // Verify cluster-wide aggregation on valid stats with outdated node stats.
    StatsSnapshot actualSnapshot =
        mapper.readValue(mapper.writeValueAsString(aggregatedRawAndValidStats.getSecond()), StatsSnapshot.class);
    assertTrue("Mismatch in the aggregated valid snapshot", expectedValidSnapshot.equals(actualSnapshot));
    // Verify the aggregator keeps track of instances where an exception occurred.
    assertEquals("Mismatch in instances where exception occurred", Collections.singletonList(EXCEPTION_INSTANCE_NAME),
        clusterAggregator.getExceptionOccurredInstances(type));
  }
}
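The timestamps in this test hint at how "outdated" is presumably decided: the up-to-date and empty reports carry a timestamp of 2 * RELEVANT_PERIOD_IN_MINUTES while the outdated one carries 0, so a report appears to be excluded from the valid aggregate when it lags the newest report by more than the relevant period, while still counting toward the raw aggregate. The check below is a hypothetical sketch of that rule, not the aggregator's actual code; isOutdated is an invented name.

// Hypothetical staleness rule implied by the timestamps above; not an Ambry API.
static boolean isOutdated(long reportTimestampMs, long newestReportTimestampMs, long relevantPeriodInMinutes) {
  return newestReportTimestampMs - reportTimestampMs > TimeUnit.MINUTES.toMillis(relevantPeriodInMinutes);
}

// With the values used in the test: Instance_1 and Instance_2 report at 2 * RELEVANT_PERIOD_IN_MINUTES,
// Instance_0 reports at 0, so it lags by the full 2 * period and is left out of the valid aggregate
// while still contributing to the raw aggregate.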
Use of com.github.ambry.server.StatsSnapshot in project ambry by linkedin.
The class AccountStatsMySqlStoreIntegrationTest, method testAggregatedPartitionClassStorageStats.
@Test
public void testAggregatedPartitionClassStorageStats() throws Exception {
  testHostPartitionClassStorageStats();
  Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
  AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
  // Now we should have partition class names and partition ids in the database.
  // Construct an aggregated partition class report.
  AggregatedPartitionClassStorageStats aggregatedStats = new AggregatedPartitionClassStorageStats(
      StorageStatsUtilTest.generateRandomAggregatedPartitionClassStorageStats(
          partitionNameAndIds.keySet().toArray(new String[0]), (short) 0, 10, 10, 10000L, 2, 10));
  mySqlStore.storeAggregatedPartitionClassStorageStats(aggregatedStats);
  partitionNameAndIds = mySqlStore3.queryPartitionNameAndIds();
  AggregatedPartitionClassStorageStats aggregatedStats3 = new AggregatedPartitionClassStorageStats(
      StorageStatsUtilTest.generateRandomAggregatedPartitionClassStorageStats(
          partitionNameAndIds.keySet().toArray(new String[0]), (short) 0, 10, 10, 10000L, 2, 10));
  mySqlStore3.storeAggregatedPartitionClassStorageStats(aggregatedStats3);
  AggregatedPartitionClassStorageStats obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
  assertEquals(aggregatedStats.getStorageStats(), obtained.getStorageStats());
  assertEquals(mySqlStore.queryAggregatedPartitionClassStorageStatsByClusterName("random-cluster").getStorageStats().size(), 0);
  AggregatedPartitionClassStorageStats obtained3 = mySqlStore3.queryAggregatedPartitionClassStorageStats();
  assertEquals(aggregatedStats3.getStorageStats(), obtained3.getStorageStats());
  // Fetch the StatsSnapshot representation.
  StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedPartitionClassStats();
  assertEquals(StorageStatsUtil.convertAggregatedPartitionClassStorageStatsToStatsSnapshot(obtained, false), obtainedSnapshot);
  // Change one value and store it to the MySQL database again.
  Map<String, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStatsMap = new HashMap<>(aggregatedStats.getStorageStats());
  ContainerStorageStats origin = newStorageStatsMap.get("default").get((short) 1).get((short) 1);
  newStorageStatsMap.get("default")
      .get((short) 1)
      .put((short) 1, new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
  mySqlStore.storeAggregatedPartitionClassStorageStats(new AggregatedPartitionClassStorageStats(newStorageStatsMap));
  obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
  assertEquals(newStorageStatsMap, obtained.getStorageStats());
  // Delete one account/container pair from every partition class.
  short accountId = (short) 1;
  short containerId = (short) 1;
  for (String partitionClassName : partitionNameAndIds.keySet()) {
    mySqlStore.deleteAggregatedPartitionClassStatsForAccountContainer(partitionClassName, accountId, containerId);
    newStorageStatsMap.get(partitionClassName).get(accountId).remove(containerId);
  }
  obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
  assertEquals(newStorageStatsMap, obtained.getStorageStats());
  mySqlStore3.shutdown();
}
Use of com.github.ambry.server.StatsSnapshot in project ambry by linkedin.
The class AccountStatsMySqlStoreIntegrationTest, method generateStatsWrapper.
/**
 * Generate a {@link StatsWrapper} from {@code numPartitions} randomly generated store snapshots, each with the given
 * number of accounts and containers, for the given report type.
 */
private static StatsWrapper generateStatsWrapper(int numPartitions, int numAccounts, int numContainers, StatsReportType reportType) {
  Random random = new Random();
  List<StatsSnapshot> storeSnapshots = new ArrayList<>();
  for (int i = 0; i < numPartitions; i++) {
    storeSnapshots.add(TestUtils.generateStoreStats(numAccounts, numContainers, random, reportType));
  }
  return TestUtils.generateNodeStats(storeSnapshots, 1000, reportType);
}
Use of com.github.ambry.server.StatsSnapshot in project ambry by linkedin.
The class AccountStatsMySqlStoreIntegrationTest, method testMultiStoreStats.
/**
 * Tests storing multiple stats for multiple hosts and recovering the stats from the database.
 * @throws Exception
 */
@Test
public void testMultiStoreStats() throws Exception {
  AccountStatsMySqlStore mySqlStore1 = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
  AccountStatsMySqlStore mySqlStore2 = createAccountStatsMySqlStore(clusterName1, hostname2, port2);
  AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
  // Generate StatsWrappers, store them and retrieve them.
  StatsWrapper stats1 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
  StatsWrapper stats2 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
  StatsWrapper stats3 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
  mySqlStore1.storeAccountStats(stats1);
  mySqlStore2.storeAccountStats(stats2);
  mySqlStore3.storeAccountStats(stats3);
  assertTableSize(mySqlStore1, 3 * 10 * 10);
  StatsWrapper obtainedStats1 = mySqlStore1.queryAccountStatsByHost(hostname1, port1);
  StatsWrapper obtainedStats2 = mySqlStore2.queryAccountStatsByHost(hostname2, port2);
  StatsWrapper obtainedStats3 = mySqlStore3.queryAccountStatsByHost(hostname3, port3);
  assertTwoStatsSnapshots(obtainedStats1.getSnapshot(), stats1.getSnapshot());
  assertTwoStatsSnapshots(obtainedStats2.getSnapshot(), stats2.getSnapshot());
  assertTwoStatsSnapshots(obtainedStats3.getSnapshot(), stats3.getSnapshot());
  // Generate HostAccountStorageStatsWrappers, store them and retrieve them.
  HostAccountStorageStatsWrapper hostStats1 = generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
  HostAccountStorageStatsWrapper hostStats2 = generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
  HostAccountStorageStatsWrapper hostStats3 = generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
  mySqlStore1.storeHostAccountStorageStats(hostStats1);
  mySqlStore2.storeHostAccountStorageStats(hostStats2);
  mySqlStore3.storeHostAccountStorageStats(hostStats3);
  HostAccountStorageStatsWrapper obtainedHostStats1 = mySqlStore1.queryHostAccountStorageStatsByHost(hostname1, port1);
  HostAccountStorageStatsWrapper obtainedHostStats2 = mySqlStore2.queryHostAccountStorageStatsByHost(hostname2, port2);
  HostAccountStorageStatsWrapper obtainedHostStats3 = mySqlStore3.queryHostAccountStorageStatsByHost(hostname3, port3);
  assertEquals(hostStats1.getStats().getStorageStats(), obtainedHostStats1.getStats().getStorageStats());
  assertEquals(hostStats2.getStats().getStorageStats(), obtainedHostStats2.getStats().getStorageStats());
  assertEquals(hostStats3.getStats().getStorageStats(), obtainedHostStats3.getStats().getStorageStats());
  // Retrieve StatsWrappers again; they should match the storage stats stored above once converted.
  obtainedStats1 = mySqlStore1.queryAccountStatsByHost(hostname1, port1);
  StatsSnapshot converted = StorageStatsUtil.convertHostAccountStorageStatsToStatsSnapshot(hostStats1.getStats(), false);
  assertTwoStatsSnapshots(converted, obtainedStats1.getSnapshot());
  mySqlStore1.shutdown();
  mySqlStore2.shutdown();
  mySqlStore3.shutdown();
}
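Condensed, the round trip this test exercises is: write stats with storeAccountStats or storeHostAccountStorageStats, read them back with the matching queryAccountStatsByHost or queryHostAccountStorageStatsByHost, and bridge the two representations with StorageStatsUtil.convertHostAccountStorageStatsToStatsSnapshot. The sketch below is a minimal illustration using only calls that appear above; the store/config setup and checked exceptions are as in the test, and the specific arguments are just the test's values.

// Minimal round trip, assuming a store created the same way as in the test above.
AccountStatsMySqlStore store = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper hostStats = generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
store.storeHostAccountStorageStats(hostStats);
// The legacy StatsWrapper view and the storage-stats view describe the same data.
StatsWrapper legacyView = store.queryAccountStatsByHost(hostname1, port1);
StatsSnapshot converted = StorageStatsUtil.convertHostAccountStorageStatsToStatsSnapshot(hostStats.getStats(), false);
assertTwoStatsSnapshots(converted, legacyView.getSnapshot());
store.shutdown();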