Use of com.github.ambry.server.StatsWrapper in project ambry by LinkedIn.
In class HelixHealthReportAggregationTaskTest, method initializeNodeReports.
/**
 * Initialize the reports and create instances in helix if not exists.
 * @param type The type of reports to create
 * @param numNode The number of nodes to initiate.
 * @param startingPort The starting port number, which will then be incremented to represent different nodes.
 * @throws IOException
 */
private void initializeNodeReports(StatsReportType type, int numNode, int startingPort) throws IOException {
  boolean isAccountReport = type == StatsReportType.ACCOUNT_REPORT;
  String reportName = isAccountReport ? HEALTH_REPORT_NAME_ACCOUNT : HEALTH_REPORT_NAME_PARTITION;
  String fieldName = isAccountReport ? STATS_FIELD_NAME_ACCOUNT : STATS_FIELD_NAME_PARTITION;
  // Build three store-level snapshots (store ids 3..5) and wrap them into one node-level report.
  Random random = new Random();
  List<StatsSnapshot> storeSnapshots = new ArrayList<>();
  for (int storeId = 3; storeId < 6; storeId++) {
    storeSnapshots.add(TestUtils.generateStoreStats(storeId, 3, random, type));
  }
  StatsWrapper nodeStats = TestUtils.generateNodeStats(storeSnapshots, 1000, type);
  String nodeStatsJSON = mapper.writeValueAsString(nodeStats);
  HelixDataAccessor dataAccessor = mockHelixManager.getHelixDataAccessor();
  // Register numNode instances on consecutive ports; every instance publishes the same report payload.
  for (int node = 0, port = startingPort; node < numNode; node++, port++) {
    String instanceName = ClusterMapUtils.getInstanceName("localhost", port);
    InstanceConfig instanceConfig = new InstanceConfig(instanceName);
    instanceConfig.setHostName("localhost");
    instanceConfig.setPort(Integer.toString(port));
    mockHelixAdmin.addInstance(CLUSTER_NAME, instanceConfig);
    // Set the same reports for all instances
    ZNRecord record = new ZNRecord(instanceName);
    record.setSimpleField(fieldName, nodeStatsJSON);
    PropertyKey key = dataAccessor.keyBuilder().healthReport(instanceName, reportName);
    dataAccessor.setProperty(key, new HelixProperty(record));
  }
}
Use of com.github.ambry.server.StatsWrapper in project ambry by LinkedIn.
In class AccountStatsMySqlStoreIntegrationTest, method testHostPartitionClassStats.
/**
 * Test methods to store and fetch partition class, partition name partition id and partition class stats.
 * @throws Exception
 */
@Test
public void testHostPartitionClassStats() throws Exception {
  // First write some stats to account reports
  testMultiStoreStats();
  StatsWrapper accountStats1 = mySqlStore.queryAccountStatsByHost(hostname1, port1);
  StatsWrapper accountStats2 = mySqlStore.queryAccountStatsByHost(hostname2, port2);
  AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
  StatsWrapper accountStats3 = mySqlStore3.queryAccountStatsByHost(hostname3, port3);
  // From this account stats, create partition class stats.
  // Use a plain HashSet instead of double-brace initialization: the latter creates an
  // anonymous subclass that retains a hidden reference to the enclosing test instance.
  Set<String> allPartitionKeys = new HashSet<>();
  allPartitionKeys.addAll(accountStats1.getSnapshot().getSubMap().keySet());
  allPartitionKeys.addAll(accountStats2.getSnapshot().getSubMap().keySet());
  allPartitionKeys.addAll(accountStats3.getSnapshot().getSubMap().keySet());
  // Assign each partition a class name in round-robin fashion.
  List<String> partitionClassNames = Arrays.asList("default", "new");
  Map<String, String> partitionKeyToClassName = new HashMap<>();
  int ind = 0;
  for (String partitionKey : allPartitionKeys) {
    partitionKeyToClassName.put(partitionKey, partitionClassNames.get(ind % partitionClassNames.size()));
    ind++;
  }
  StatsWrapper partitionClassStats1 = convertAccountStatsToPartitionClassStats(accountStats1, partitionKeyToClassName);
  StatsWrapper partitionClassStats2 = convertAccountStatsToPartitionClassStats(accountStats2, partitionKeyToClassName);
  StatsWrapper partitionClassStats3 = convertAccountStatsToPartitionClassStats(accountStats3, partitionKeyToClassName);
  mySqlStore.storePartitionClassStats(partitionClassStats1);
  mySqlStore.storePartitionClassStats(partitionClassStats2);
  mySqlStore3.storePartitionClassStats(partitionClassStats3);
  // The DB should report exactly the class names we assigned, each mapping to a set of partition ids.
  Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
  assertEquals(new HashSet<>(partitionClassNames), partitionNameAndIds.keySet());
  // Invert the class-name -> ids map back to a partition-key -> class-name map and compare.
  Map<String, String> dbPartitionKeyToClassName = partitionNameAndIds.entrySet()
      .stream()
      .flatMap(ent -> ent.getValue().stream().map(pid -> new Pair<>(ent.getKey(), "Partition[" + pid + "]")))
      .collect(Collectors.toMap(Pair::getSecond, Pair::getFirst));
  assertEquals(partitionKeyToClassName, dbPartitionKeyToClassName);
  // Round-trip: what we stored must equal what we fetch back, per host.
  StatsWrapper obtainedStats1 = mySqlStore.queryPartitionClassStatsByHost(hostname1, port1, partitionNameAndIds);
  assertEquals(partitionClassStats1.getSnapshot(), obtainedStats1.getSnapshot());
  StatsWrapper obtainedStats2 = mySqlStore.queryPartitionClassStatsByHost(hostname2, port2, partitionNameAndIds);
  assertEquals(partitionClassStats2.getSnapshot(), obtainedStats2.getSnapshot());
  StatsWrapper obtainedStats3 = mySqlStore3.queryPartitionClassStatsByHost(hostname3, port3, partitionNameAndIds);
  assertEquals(partitionClassStats3.getSnapshot(), obtainedStats3.getSnapshot());
  mySqlStore3.shutdown();
}
Use of com.github.ambry.server.StatsWrapper in project ambry by LinkedIn.
In class AccountStatsMySqlStoreIntegrationTest, method testHostPartitionClassStorageStats.
/**
 * Test methods to store and fetch partition class, partition name partition id and partition class storage stats.
 * @throws Exception
 */
@Test
public void testHostPartitionClassStorageStats() throws Exception {
  // First write some stats to account reports
  testMultiStoreStats();
  HostAccountStorageStatsWrapper accountStats1 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
  HostAccountStorageStatsWrapper accountStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname2, port2);
  AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
  HostAccountStorageStatsWrapper accountStats3 = mySqlStore3.queryHostAccountStorageStatsByHost(hostname3, port3);
  // From this account stats, create partition class stats.
  // Use a plain HashSet instead of double-brace initialization: the latter creates an
  // anonymous subclass that retains a hidden reference to the enclosing test instance.
  Set<Long> allPartitionKeys = new HashSet<>();
  allPartitionKeys.addAll(accountStats1.getStats().getStorageStats().keySet());
  allPartitionKeys.addAll(accountStats2.getStats().getStorageStats().keySet());
  allPartitionKeys.addAll(accountStats3.getStats().getStorageStats().keySet());
  // Assign each partition a class name in round-robin fashion.
  List<String> partitionClassNames = Arrays.asList("default", "new");
  Map<Long, String> partitionIdToClassName = new HashMap<>();
  int ind = 0;
  for (long partitionId : allPartitionKeys) {
    partitionIdToClassName.put(partitionId, partitionClassNames.get(ind % partitionClassNames.size()));
    ind++;
  }
  HostPartitionClassStorageStatsWrapper partitionClassStats1 =
      convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats1, partitionIdToClassName);
  HostPartitionClassStorageStatsWrapper partitionClassStats2 =
      convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats2, partitionIdToClassName);
  HostPartitionClassStorageStatsWrapper partitionClassStats3 =
      convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats3, partitionIdToClassName);
  mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats1);
  mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats2);
  mySqlStore3.storeHostPartitionClassStorageStats(partitionClassStats3);
  // The DB should report exactly the class names we assigned, each mapping to a set of partition ids.
  Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
  assertEquals(new HashSet<>(partitionClassNames), partitionNameAndIds.keySet());
  // Invert the class-name -> ids map back to a partition-id -> class-name map and compare.
  Map<Long, String> dbPartitionKeyToClassName = partitionNameAndIds.entrySet()
      .stream()
      .flatMap(ent -> ent.getValue().stream().map(pid -> new Pair<>(ent.getKey(), (long) pid)))
      .collect(Collectors.toMap(Pair::getSecond, Pair::getFirst));
  assertEquals(partitionIdToClassName, dbPartitionKeyToClassName);
  // Fetch HostPartitionClassStorageStats
  HostPartitionClassStorageStatsWrapper obtainedStats1 =
      mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname1, port1, partitionNameAndIds);
  assertEquals(partitionClassStats1.getStats().getStorageStats(), obtainedStats1.getStats().getStorageStats());
  HostPartitionClassStorageStatsWrapper obtainedStats2 =
      mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname2, port2, partitionNameAndIds);
  assertEquals(partitionClassStats2.getStats().getStorageStats(), obtainedStats2.getStats().getStorageStats());
  HostPartitionClassStorageStatsWrapper obtainedStats3 =
      mySqlStore3.queryHostPartitionClassStorageStatsByHost(hostname3, port3, partitionNameAndIds);
  assertEquals(partitionClassStats3.getStats().getStorageStats(), obtainedStats3.getStats().getStorageStats());
  // Fetch StatsSnapshot
  StatsWrapper obtainedStats = mySqlStore.queryPartitionClassStatsByHost(hostname1, port1, partitionNameAndIds);
  assertEquals(StorageStatsUtil.convertHostPartitionClassStorageStatsToStatsSnapshot(obtainedStats1.getStats(), false),
      obtainedStats.getSnapshot());
  mySqlStore3.shutdown();
}
Use of com.github.ambry.server.StatsWrapper in project ambry by LinkedIn.
In class AccountStatsMySqlStoreIntegrationTest, method convertAccountStatsToPartitionClassStats.
/**
 * Convert a host-level account {@link StatsWrapper} (partition -> account -> container) into a
 * partition-class {@link StatsWrapper} (class name -> partition -> account_container).
 * @param accountStats the host-level account stats to convert.
 * @param partitionKeyToClassName mapping from partition key (e.g. "Partition[1]") to its class name.
 * @return a new {@link StatsWrapper} keyed by partition class name; the input is not modified.
 */
private StatsWrapper convertAccountStatsToPartitionClassStats(StatsWrapper accountStats,
    Map<String, String> partitionKeyToClassName) {
  Map<String, StatsSnapshot> partitionClassSubMap = new HashMap<>();
  StatsSnapshot originHostStats = accountStats.getSnapshot();
  // Iterate entries directly instead of keySet()+get() to avoid a redundant map lookup per partition.
  for (Map.Entry<String, StatsSnapshot> partitionEntry : originHostStats.getSubMap().entrySet()) {
    String partitionKey = partitionEntry.getKey();
    StatsSnapshot originPartitionStats = partitionEntry.getValue();
    String currentClassName = partitionKeyToClassName.get(partitionKey);
    StatsSnapshot partitionClassStats =
        partitionClassSubMap.computeIfAbsent(currentClassName, k -> new StatsSnapshot(0L, new HashMap<>()));
    // Flatten account -> container into a single "account_container" keyed map.
    Map<String, StatsSnapshot> accountContainerSubMap = new HashMap<>();
    for (Map.Entry<String, StatsSnapshot> accountEntry : originPartitionStats.getSubMap().entrySet()) {
      String accountKey = accountEntry.getKey();
      for (Map.Entry<String, StatsSnapshot> containerEntry : accountEntry.getValue().getSubMap().entrySet()) {
        String containerKey = containerEntry.getKey();
        // Deep-copy the container snapshot so the result shares no state with the input.
        StatsSnapshot containerStats = new StatsSnapshot(containerEntry.getValue());
        String accountContainerKey = Utils.partitionClassStatsAccountContainerKey(
            Utils.accountIdFromStatsAccountKey(accountKey), Utils.containerIdFromStatsContainerKey(containerKey));
        accountContainerSubMap.put(accountContainerKey, containerStats);
      }
    }
    // The partition's value is the sum of its account-container values; roll it up into the class total.
    long accountContainerValue = accountContainerSubMap.values().stream().mapToLong(StatsSnapshot::getValue).sum();
    StatsSnapshot partitionStats = new StatsSnapshot(accountContainerValue, accountContainerSubMap);
    partitionClassStats.getSubMap().put(partitionKey, partitionStats);
    partitionClassStats.setValue(partitionClassStats.getValue() + accountContainerValue);
  }
  return new StatsWrapper(new StatsHeader(accountStats.getHeader()),
      new StatsSnapshot(originHostStats.getValue(), partitionClassSubMap));
}
Use of com.github.ambry.server.StatsWrapper in project ambry by LinkedIn.
In class HelixClusterAggregator, method doWorkOnStatsWrapperMap.
/**
 * Aggregate per-instance stats into a pair of cluster-wide snapshots.
 * @param statsWrappers map from instance name to that instance's {@link StatsWrapper}.
 * @param type which report type to aggregate; determines the combine and reduce strategy.
 * @param removeExceptionOnType whether to clear previously recorded exception instances for this type
 *                              before aggregation starts.
 * @return a {@link Pair} of (reduced raw snapshot, reduced valid snapshot).
 * @throws IOException if serializing a snapshot for trace logging fails.
 */
Pair<StatsSnapshot, StatsSnapshot> doWorkOnStatsWrapperMap(Map<String, StatsWrapper> statsWrappers,
    StatsReportType type, boolean removeExceptionOnType) throws IOException {
  StatsSnapshot validSnapshot = new StatsSnapshot(0L, new HashMap<>());
  Map<String, Long> partitionTimestampMap = new HashMap<>();
  StatsSnapshot rawSnapshot = new StatsSnapshot(0L, new HashMap<>());
  if (removeExceptionOnType) {
    exceptionOccurredInstances.remove(type);
  }
  for (Map.Entry<String, StatsWrapper> entry : statsWrappers.entrySet()) {
    if (entry == null || entry.getValue() == null) {
      continue;
    }
    try {
      StatsWrapper original = entry.getValue();
      // Deep-copy before combining valid stats so the raw combine sees the untouched original.
      StatsWrapper copy =
          new StatsWrapper(new StatsHeader(original.getHeader()), new StatsSnapshot(original.getSnapshot()));
      combineRawStats(rawSnapshot, original);
      if (type == StatsReportType.ACCOUNT_REPORT) {
        combineValidStatsByAccount(validSnapshot, copy, entry.getKey(), partitionTimestampMap);
      } else if (type == StatsReportType.PARTITION_CLASS_REPORT) {
        combineValidStatsByPartitionClass(validSnapshot, copy, entry.getKey(), partitionTimestampMap);
      } else {
        throw new IllegalArgumentException("Unrecognized stats report type: " + type);
      }
    } catch (Exception e) {
      // A bad report from one instance must not abort the whole aggregation; record and move on.
      logger.error("Exception occurred while processing stats from {}", entry.getKey(), e);
      exceptionOccurredInstances.computeIfAbsent(type, key -> new ArrayList<>()).add(entry.getKey());
    }
  }
  if (logger.isTraceEnabled()) {
    logger.trace("Combined raw snapshot {}", mapper.writeValueAsString(rawSnapshot));
    logger.trace("Combined valid snapshot {}", mapper.writeValueAsString(validSnapshot));
  }
  StatsSnapshot reducedRawSnapshot;
  StatsSnapshot reducedSnapshot;
  if (type == StatsReportType.ACCOUNT_REPORT) {
    reducedRawSnapshot = reduceByAccount(rawSnapshot);
    reducedSnapshot = reduceByAccount(validSnapshot);
  } else if (type == StatsReportType.PARTITION_CLASS_REPORT) {
    reducedRawSnapshot = reduceByPartitionClass(rawSnapshot);
    reducedSnapshot = reduceByPartitionClass(validSnapshot);
  } else {
    throw new IllegalArgumentException("Unrecognized stats report type: " + type);
  }
  reducedRawSnapshot.removeZeroValueSnapshots();
  reducedSnapshot.removeZeroValueSnapshots();
  if (logger.isTraceEnabled()) {
    logger.trace("Reduced raw snapshot {}", mapper.writeValueAsString(reducedRawSnapshot));
    logger.trace("Reduced valid snapshot {}", mapper.writeValueAsString(reducedSnapshot));
  }
  return new Pair<>(reducedRawSnapshot, reducedSnapshot);
}
Aggregations