Use of com.github.ambry.server.StatsHeader in project ambry by linkedin.
From class AccountStatsMySqlStoreTest, method testLocalBackupFile.
@Test
public void testLocalBackupFile() throws IOException {
  // First, make sure there is no local backup file.
  Path tempDir = Files.createTempDirectory("AccountStatsMySqlStoreTest");
  Path localBackupFilePath = tempDir.resolve("localbackup");
  Properties prop = new Properties();
  prop.setProperty(AccountStatsMySqlConfig.LOCAL_BACKUP_FILE_PATH, localBackupFilePath.toString());
  AccountStatsMySqlConfig accountStatsMySqlConfig = new AccountStatsMySqlConfig(new VerifiableProperties(prop));
  AccountStatsMySqlStore store =
      new AccountStatsMySqlStore(accountStatsMySqlConfig, mockDataSource, clusterName, hostname, null,
          new MetricRegistry());
  assertNull(store.getPreviousHostAccountStorageStatsWrapper());
  // Second, save a backup file.
  HostAccountStorageStats hostAccountStorageStats = new HostAccountStorageStats(
      StorageStatsUtilTest.generateRandomHostAccountStorageStats(10, 10, 10, 10000L, 2, 10));
  StatsHeader header =
      new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, System.currentTimeMillis(), 10, 10, null);
  HostAccountStorageStatsWrapper statsWrapper = new HostAccountStorageStatsWrapper(header, hostAccountStorageStats);
  ObjectMapper objectMapper = new ObjectMapper();
  objectMapper.writeValue(localBackupFilePath.toFile(), statsWrapper);
  store = new AccountStatsMySqlStore(accountStatsMySqlConfig, mockDataSource, clusterName, hostname, null,
      new MetricRegistry());
  HostAccountStorageStatsWrapper backupWrapper = store.getPreviousHostAccountStorageStatsWrapper();
  assertNotNull(backupWrapper);
  assertStatsHeader(backupWrapper.getHeader(), 10, 10);
  Assert.assertEquals(hostAccountStorageStats.getStorageStats(), backupWrapper.getStats().getStorageStats());
}
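The assertStatsHeader helper called above is defined elsewhere in the test class. A minimal sketch of what it plausibly verifies follows; the StatsHeader getter names are assumptions, not confirmed against ambry's API.

// Hypothetical sketch of the assertStatsHeader helper used above; the real
// implementation lives elsewhere in AccountStatsMySqlStoreTest, and the
// getter names on StatsHeader are assumed.
private void assertStatsHeader(StatsHeader header, int expectedStoresContacted, int expectedStoresResponded) {
  assertNotNull(header);
  Assert.assertEquals(expectedStoresContacted, header.getStoresContactedCount());
  Assert.assertEquals(expectedStoresResponded, header.getStoresRespondedCount());
}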
Use of com.github.ambry.server.StatsHeader in project ambry by linkedin.
From class HelixClusterAggregator, method doWorkOnStatsWrapperMap.
Pair<StatsSnapshot, StatsSnapshot> doWorkOnStatsWrapperMap(Map<String, StatsWrapper> statsWrappers,
    StatsReportType type, boolean removeExceptionOnType) throws IOException {
  StatsSnapshot partitionSnapshot = new StatsSnapshot(0L, new HashMap<>());
  Map<String, Long> partitionTimestampMap = new HashMap<>();
  StatsSnapshot rawPartitionSnapshot = new StatsSnapshot(0L, new HashMap<>());
  if (removeExceptionOnType) {
    exceptionOccurredInstances.remove(type);
  }
  for (Map.Entry<String, StatsWrapper> statsWrapperEntry : statsWrappers.entrySet()) {
    if (statsWrapperEntry != null && statsWrapperEntry.getValue() != null) {
      try {
        StatsWrapper snapshotWrapper = statsWrapperEntry.getValue();
        StatsWrapper snapshotWrapperCopy = new StatsWrapper(new StatsHeader(snapshotWrapper.getHeader()),
            new StatsSnapshot(snapshotWrapper.getSnapshot()));
        combineRawStats(rawPartitionSnapshot, snapshotWrapper);
        switch (type) {
          case ACCOUNT_REPORT:
            combineValidStatsByAccount(partitionSnapshot, snapshotWrapperCopy, statsWrapperEntry.getKey(),
                partitionTimestampMap);
            break;
          case PARTITION_CLASS_REPORT:
            combineValidStatsByPartitionClass(partitionSnapshot, snapshotWrapperCopy, statsWrapperEntry.getKey(),
                partitionTimestampMap);
            break;
          default:
            throw new IllegalArgumentException("Unrecognized stats report type: " + type);
        }
      } catch (Exception e) {
        logger.error("Exception occurred while processing stats from {}", statsWrapperEntry.getKey(), e);
        exceptionOccurredInstances.computeIfAbsent(type, key -> new ArrayList<>()).add(statsWrapperEntry.getKey());
      }
    }
  }
  if (logger.isTraceEnabled()) {
    logger.trace("Combined raw snapshot {}", mapper.writeValueAsString(rawPartitionSnapshot));
    logger.trace("Combined valid snapshot {}", mapper.writeValueAsString(partitionSnapshot));
  }
  StatsSnapshot reducedRawSnapshot;
  StatsSnapshot reducedSnapshot;
  switch (type) {
    case ACCOUNT_REPORT:
      reducedRawSnapshot = reduceByAccount(rawPartitionSnapshot);
      reducedSnapshot = reduceByAccount(partitionSnapshot);
      break;
    case PARTITION_CLASS_REPORT:
      reducedRawSnapshot = reduceByPartitionClass(rawPartitionSnapshot);
      reducedSnapshot = reduceByPartitionClass(partitionSnapshot);
      break;
    default:
      throw new IllegalArgumentException("Unrecognized stats report type: " + type);
  }
  reducedRawSnapshot.removeZeroValueSnapshots();
  reducedSnapshot.removeZeroValueSnapshots();
  if (logger.isTraceEnabled()) {
    logger.trace("Reduced raw snapshot {}", mapper.writeValueAsString(reducedRawSnapshot));
    logger.trace("Reduced valid snapshot {}", mapper.writeValueAsString(reducedSnapshot));
  }
  return new Pair<>(reducedRawSnapshot, reducedSnapshot);
}
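A hedged sketch of a call site: the method returns the raw aggregation first and the valid aggregation second. Everything here other than doWorkOnStatsWrapperMap, StatsReportType.ACCOUNT_REPORT, and the Pair accessors (all visible in the code above) is illustrative.

// Illustrative call site; aggregator and fetchStatsFromInstances are
// hypothetical names, not from the source.
Map<String, StatsWrapper> statsWrappers = fetchStatsFromInstances();
Pair<StatsSnapshot, StatsSnapshot> rawAndValid =
    aggregator.doWorkOnStatsWrapperMap(statsWrappers, StatsReportType.ACCOUNT_REPORT, true);
StatsSnapshot rawSnapshot = rawAndValid.getFirst();    // combines every instance's stats, duplicates included
StatsSnapshot validSnapshot = rawAndValid.getSecond(); // deduplicated per the combine/reduce steps above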
Use of com.github.ambry.server.StatsHeader in project ambry by linkedin.
From class MySqlClusterAggregatorTest, method testAggregateHostAccountStorageStatsWithEarlyNode.
/**
 * Test {@link MySqlClusterAggregator#aggregateHostAccountStorageStatsWrappers} with one node having an earlier
 * timestamp but larger storage stats. The larger data should be used when aggregating valid storage stats.
 * @throws Exception
 */
@Test
public void testAggregateHostAccountStorageStatsWithEarlyNode() throws Exception {
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> upToDateStorageStatsMap = new HashMap<>();
  upToDateStorageStatsMap.put((long) 0,
      StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10L, 2, 10));
  // Use a much larger maximum value when creating the early storage stats so its values are larger.
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> earlyStorageStatsMap = new HashMap<>();
  earlyStorageStatsMap.put((long) 0,
      StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 6, 3, 10000L, 2, 10));
  StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE,
      TimeUnit.MINUTES.toMillis(RELEVANT_PERIOD_IN_MINUTES / 2), 1, 1, Collections.emptyList());
  HostAccountStorageStatsWrapper upToDateNodeStats =
      new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats(upToDateStorageStatsMap));
  header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 0, 1, 1, Collections.emptyList());
  HostAccountStorageStatsWrapper earlyNodeStats =
      new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats(earlyStorageStatsMap));
  Map<String, HostAccountStorageStatsWrapper> instanceToStatsMap = new LinkedHashMap<>();
  instanceToStatsMap.put("Instance_0", upToDateNodeStats);
  instanceToStatsMap.put("Instance_1", earlyNodeStats);
  Pair<AggregatedAccountStorageStats, AggregatedAccountStorageStats> aggregatedRawAndValidStats =
      clusterAggregator.aggregateHostAccountStorageStatsWrappers(instanceToStatsMap);
  Map<Short, Map<Short, ContainerStorageStats>> expectedValid =
      clusterAggregator.aggregateHostAccountStorageStats(earlyStorageStatsMap);
  Assert.assertEquals(expectedValid, aggregatedRawAndValidStats.getSecond().getStorageStats());
}
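The rule this test exercises can be sketched as follows: the two reports are half a relevant period apart, so both count as recent, and the larger one wins. This is a minimal sketch inferred from the test setup, not ambry's actual code, and all names in it are illustrative.

// Hedged sketch of the per-partition replica selection this test exercises;
// the real logic lives inside MySqlClusterAggregator and may differ in detail.
static <T> T pickValidReplica(T statsA, long timestampAMs, long dataSizeA,
    T statsB, long timestampBMs, long dataSizeB, long relevantPeriodMs) {
  if (Math.abs(timestampAMs - timestampBMs) < relevantPeriodMs) {
    // Both reports are recent enough: the larger one is assumed more complete.
    return dataSizeA >= dataSizeB ? statsA : statsB;
  }
  // Otherwise the newer report wins.
  return timestampAMs >= timestampBMs ? statsA : statsB;
}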
Use of com.github.ambry.server.StatsHeader in project ambry by linkedin.
From class MySqlClusterAggregatorTest, method testAggregateHostPartitionClassStorageStatsWithOutdatedNode.
/**
 * Test {@link MySqlClusterAggregator#aggregateHostPartitionClassStorageStatsWrappers} with one node having outdated
 * storage stats. Outdated data shouldn't be used when aggregating valid storage stats. A node with empty stats is
 * also included and should not affect the result.
 * @throws Exception
 */
@Test
public void testAggregateHostPartitionClassStorageStatsWithOutdatedNode() throws Exception {
  Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> upToDateStorageStatsMap = new HashMap<>();
  upToDateStorageStatsMap.computeIfAbsent("default", k -> new HashMap<>())
      .put((long) 0, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10000L, 2, 10));
  Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> outdatedStorageStatsMap = new HashMap<>();
  outdatedStorageStatsMap.computeIfAbsent("default", k -> new HashMap<>())
      .put((long) 0, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10000L, 2, 10));
  StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE,
      TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 1, 1, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper upToDateNodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats(upToDateStorageStatsMap));
  header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 0, 1, 1, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper outdatedNodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats(outdatedStorageStatsMap));
  header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE,
      TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 0, 0, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper emptyNodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats());
  Map<String, HostPartitionClassStorageStatsWrapper> instanceToStatsMap = new LinkedHashMap<>();
  instanceToStatsMap.put("Instance_0", upToDateNodeStats);
  instanceToStatsMap.put("Instance_1", outdatedNodeStats);
  instanceToStatsMap.put("Instance_2", emptyNodeStats);
  Pair<AggregatedPartitionClassStorageStats, AggregatedPartitionClassStorageStats> aggregatedRawAndValidStats =
      clusterAggregator.aggregateHostPartitionClassStorageStatsWrappers(instanceToStatsMap);
  Map<String, Map<Short, Map<Short, ContainerStorageStats>>> expectedValid =
      clusterAggregator.aggregateHostPartitionClassStorageStats(upToDateStorageStatsMap);
  Assert.assertEquals(expectedValid, aggregatedRawAndValidStats.getSecond().getStorageStats());
}
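The staleness cutoff itself can be sketched as a simple predicate. This is a hedged reading of the test setup (the outdated node's timestamp of 0 trails the up-to-date node by 2 * RELEVANT_PERIOD_IN_MINUTES), with illustrative names.

// Hedged sketch: a report is treated as outdated when it trails the newest
// report by more than the relevant period.
static boolean isOutdated(long reportTimestampMs, long newestTimestampMs, long relevantPeriodMs) {
  return newestTimestampMs - reportTimestampMs > relevantPeriodMs;
}

With the values above, isOutdated(0, TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), TimeUnit.MINUTES.toMillis(RELEVANT_PERIOD_IN_MINUTES)) is true, so only the up-to-date node's stats contribute to the valid side, which is exactly what the final assertion checks.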
Use of com.github.ambry.server.StatsHeader in project ambry by linkedin.
From class MySqlClusterAggregatorTest, method testAggregateHostAccountStorageStats.
/**
 * Test basic functionality of {@link MySqlClusterAggregator#aggregateHostAccountStorageStats}.
 * @throws Exception
 */
@Test
public void testAggregateHostAccountStorageStats() throws Exception {
  int nodeCount = 3;
  // Partition id -> account id -> container id -> container storage stats.
  Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> storageStatsMap = new HashMap<>();
  // Create storage stats for 3 partitions with 3, 4 and 5 accounts respectively.
  for (int i = 0; i < 3; i++) {
    storageStatsMap.put((long) i,
        StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, i + 3, 3, 10000L, 2, 10));
  }
  StatsHeader header =
      new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP, 3, 3, Collections.emptyList());
  HostAccountStorageStatsWrapper nodeStats =
      new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats(storageStatsMap));
  header =
      new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP, 0, 0, Collections.emptyList());
  HostAccountStorageStatsWrapper emptyStats =
      new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats());
  Map<String, HostAccountStorageStatsWrapper> instanceToStatsMap = new HashMap<>();
  // Give all three nodes the same stats, so the combined raw data is three times the aggregated valid data.
  for (int i = 0; i < nodeCount; i++) {
    instanceToStatsMap.put("Instance_" + i, new HostAccountStorageStatsWrapper(new StatsHeader(nodeStats.getHeader()),
        new HostAccountStorageStats(nodeStats.getStats())));
  }
  instanceToStatsMap.put("Instance_" + nodeCount, emptyStats);
  Pair<AggregatedAccountStorageStats, AggregatedAccountStorageStats> aggregatedRawAndValidStats =
      clusterAggregator.aggregateHostAccountStorageStatsWrappers(instanceToStatsMap);
  Map<Short, Map<Short, ContainerStorageStats>> expectedAggregatedStorageStatsMap =
      clusterAggregator.aggregateHostAccountStorageStats(storageStatsMap);
  Assert.assertEquals(expectedAggregatedStorageStatsMap, aggregatedRawAndValidStats.getSecond().getStorageStats());
  assertAggregatedRawStatsForAccountStorageStats(aggregatedRawAndValidStats.getFirst().getStorageStats(),
      expectedAggregatedStorageStatsMap, nodeCount);
}
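The assertAggregatedRawStatsForAccountStorageStats helper is defined elsewhere in the test class. A plausible sketch follows: since the three non-empty nodes report identical stats, every raw value should be nodeCount times its valid counterpart. The getLogicalStorageUsage() getter on ContainerStorageStats is an assumption.

// Hypothetical sketch of the raw-stats assertion helper; the real
// implementation lives elsewhere in MySqlClusterAggregatorTest.
private void assertAggregatedRawStatsForAccountStorageStats(Map<Short, Map<Short, ContainerStorageStats>> raw,
    Map<Short, Map<Short, ContainerStorageStats>> valid, int nodeCount) {
  Assert.assertEquals(valid.keySet(), raw.keySet());
  for (Map.Entry<Short, Map<Short, ContainerStorageStats>> accountEntry : valid.entrySet()) {
    for (Map.Entry<Short, ContainerStorageStats> containerEntry : accountEntry.getValue().entrySet()) {
      ContainerStorageStats rawStats = raw.get(accountEntry.getKey()).get(containerEntry.getKey());
      // Raw aggregation sums every node's copy; valid aggregation keeps one.
      Assert.assertEquals(containerEntry.getValue().getLogicalStorageUsage() * nodeCount,
          rawStats.getLogicalStorageUsage());
    }
  }
}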