Usage of com.github.ambry.server.StatsSnapshot in project ambry by LinkedIn: class HelixClusterAggregator, method doWorkOnStatsWrapperMap.
/**
 * Aggregates the given per-instance {@link StatsWrapper}s into a pair of cluster-wide snapshots.
 * <p>
 * Two aggregations are produced in a single pass over the input:
 * <ul>
 *   <li>a "raw" snapshot built by {@code combineRawStats}, which sums every instance's stats as-is, and</li>
 *   <li>a "valid" snapshot built by the {@code combineValidStatsBy*} helpers, which also receive a
 *       per-partition timestamp map — presumably to resolve conflicting replica reports by recency
 *       (helper implementations not shown here; confirm before relying on this).</li>
 * </ul>
 * Both snapshots are then reduced to the shape required by the requested {@link StatsReportType},
 * and zero-valued subtrees are pruned from the reduced results.
 *
 * @param statsWrappers per-instance stats, keyed by instance name.
 * @param type the {@link StatsReportType} to aggregate for; only ACCOUNT_REPORT and
 *             PARTITION_CLASS_REPORT are supported.
 * @param removeExceptionOnType whether to clear previously recorded failed-instance names for this
 *                              report type before aggregation starts.
 * @return a {@link Pair} of (reduced raw snapshot, reduced valid snapshot).
 * @throws IOException if serializing a snapshot for trace logging fails.
 * @throws IllegalArgumentException if {@code type} is not a recognized report type.
 */
Pair<StatsSnapshot, StatsSnapshot> doWorkOnStatsWrapperMap(Map<String, StatsWrapper> statsWrappers, StatsReportType type, boolean removeExceptionOnType) throws IOException {
StatsSnapshot partitionSnapshot = new StatsSnapshot(0L, new HashMap<>());
Map<String, Long> partitionTimestampMap = new HashMap<>();
StatsSnapshot rawPartitionSnapshot = new StatsSnapshot(0L, new HashMap<>());
// Optionally reset the record of instances that failed for this report type in a previous run,
// so this aggregation starts with a clean slate for `type`.
if (removeExceptionOnType) {
exceptionOccurredInstances.remove(type);
}
for (Map.Entry<String, StatsWrapper> statsWrapperEntry : statsWrappers.entrySet()) {
if (statsWrapperEntry != null && statsWrapperEntry.getValue() != null) {
try {
StatsWrapper snapshotWrapper = statsWrapperEntry.getValue();
// Deep-copy the wrapper before handing it to the valid-stats combiner: the original is used
// for the raw aggregation below, and the combiner may mutate its argument — TODO confirm
// against combineValidStatsBy* implementations.
StatsWrapper snapshotWrapperCopy = new StatsWrapper(new StatsHeader(snapshotWrapper.getHeader()), new StatsSnapshot(snapshotWrapper.getSnapshot()));
combineRawStats(rawPartitionSnapshot, snapshotWrapper);
switch(type) {
case ACCOUNT_REPORT:
combineValidStatsByAccount(partitionSnapshot, snapshotWrapperCopy, statsWrapperEntry.getKey(), partitionTimestampMap);
break;
case PARTITION_CLASS_REPORT:
combineValidStatsByPartitionClass(partitionSnapshot, snapshotWrapperCopy, statsWrapperEntry.getKey(), partitionTimestampMap);
break;
default:
throw new IllegalArgumentException("Unrecognized stats report type: " + type);
}
} catch (Exception e) {
// One bad instance report must not abort the whole aggregation: log it, remember the
// instance name under this report type, and continue with the remaining instances.
logger.error("Exception occurred while processing stats from {}", statsWrapperEntry.getKey(), e);
exceptionOccurredInstances.computeIfAbsent(type, key -> new ArrayList<>()).add(statsWrapperEntry.getKey());
}
}
}
// Serialization is expensive, so guard the trace dumps behind an explicit level check.
if (logger.isTraceEnabled()) {
logger.trace("Combined raw snapshot {}", mapper.writeValueAsString(rawPartitionSnapshot));
logger.trace("Combined valid snapshot {}", mapper.writeValueAsString(partitionSnapshot));
}
StatsSnapshot reducedRawSnapshot;
StatsSnapshot reducedSnapshot;
// Collapse the combined snapshots to the final report shape for the requested type.
switch(type) {
case ACCOUNT_REPORT:
reducedRawSnapshot = reduceByAccount(rawPartitionSnapshot);
reducedSnapshot = reduceByAccount(partitionSnapshot);
break;
case PARTITION_CLASS_REPORT:
reducedRawSnapshot = reduceByPartitionClass(rawPartitionSnapshot);
reducedSnapshot = reduceByPartitionClass(partitionSnapshot);
break;
default:
throw new IllegalArgumentException("Unrecognized stats report type: " + type);
}
// Drop zero-valued subtrees so the reports only carry entries with actual usage.
reducedRawSnapshot.removeZeroValueSnapshots();
reducedSnapshot.removeZeroValueSnapshots();
if (logger.isTraceEnabled()) {
logger.trace("Reduced raw snapshot {}", mapper.writeValueAsString(reducedRawSnapshot));
logger.trace("Reduced valid snapshot {}", mapper.writeValueAsString(reducedSnapshot));
}
return new Pair<>(reducedRawSnapshot, reducedSnapshot);
}
Usage of com.github.ambry.server.StatsSnapshot in project ambry by LinkedIn: class HelixClusterAggregator, method reduceByPartitionClass.
/**
 * Reduces a {@link StatsSnapshot} whose first level is keyed by partition class to a shallower
 * {@link StatsSnapshot} by aggregating all per-partition subtrees within each partition class.
 * The partition level is removed: after reduction, each partition-class entry maps directly to
 * the aggregate of what its partitions contained (e.g. account_container leaves summed across
 * partitions of that class).
 *
 * @param statsSnapshot the {@link StatsSnapshot} to be reduced
 * @return the reduced {@link StatsSnapshot}
 */
static StatsSnapshot reduceByPartitionClass(StatsSnapshot statsSnapshot) {
  Map<String, StatsSnapshot> reducedClassMap = new HashMap<>();
  for (Map.Entry<String, StatsSnapshot> classEntry : statsSnapshot.getSubMap().entrySet()) {
    // Fold every partition's subtree of this class into a single aggregate snapshot.
    StatsSnapshot aggregated = new StatsSnapshot(0L, null);
    for (StatsSnapshot perPartitionSnapshot : classEntry.getValue().getSubMap().values()) {
      StatsSnapshot.aggregate(aggregated, perPartitionSnapshot);
    }
    reducedClassMap.put(classEntry.getKey(), aggregated);
  }
  // Total value is carried over unchanged; only the subtree structure is flattened.
  return new StatsSnapshot(statsSnapshot.getValue(), reducedClassMap);
}
Usage of com.github.ambry.server.StatsSnapshot in project ambry by LinkedIn: class BlobStoreStats, method convertQuotaToStatsSnapshot.
/**
 * Builds the {@link StatsSnapshot} tree equivalent of a nested {@link Map} of
 * accountId to containerId to valid size: leaves hold per-container sizes, each account node
 * holds the sum of its containers, and the root holds the grand total.
 *
 * @param quotaMap the nested {@link Map} to be converted
 * @return the corresponding {@link StatsSnapshot} object.
 */
static StatsSnapshot convertQuotaToStatsSnapshot(Map<String, Map<String, Long>> quotaMap) {
  long grandTotal = 0;
  Map<String, StatsSnapshot> perAccount = new HashMap<>();
  for (Map.Entry<String, Map<String, Long>> accountEntry : quotaMap.entrySet()) {
    long accountTotal = 0;
    Map<String, StatsSnapshot> perContainer = new HashMap<>();
    // Leaf level: one snapshot per container carrying just its valid size.
    for (Map.Entry<String, Long> containerEntry : accountEntry.getValue().entrySet()) {
      long containerSize = containerEntry.getValue();
      accountTotal += containerSize;
      perContainer.put(containerEntry.getKey(), new StatsSnapshot(containerSize, null));
    }
    grandTotal += accountTotal;
    perAccount.put(accountEntry.getKey(), new StatsSnapshot(accountTotal, perContainer));
  }
  return new StatsSnapshot(grandTotal, perAccount);
}
Usage of com.github.ambry.server.StatsSnapshot in project ambry by LinkedIn: class BlobStoreStatsTest, method testConvertQuotaMapToStatsSnapshot.
/**
 * Tests the static method that converts quota stats stored in a nested Map to an
 * {@link StatsSnapshot} object: builds a random nested map alongside the expected snapshot
 * tree and verifies the conversion produces an equal snapshot.
 */
@Test
public void testConvertQuotaMapToStatsSnapshot() {
  final int accountCount = 10;
  final int containersPerAccount = 3;
  Random random = new Random();
  Map<String, Map<String, Long>> quotaMap = new HashMap<>();
  Map<String, StatsSnapshot> accountSubMap = new HashMap<>();
  long total = 0;
  for (int i = 0; i < accountCount; i++) {
    Map<String, StatsSnapshot> containerSubMap = new HashMap<>();
    Map<String, Long> innerQuotaMap = new HashMap<>();
    long subTotal = 0;
    for (int j = 0; j < containersPerAccount; j++) {
      long randValue = random.nextInt(10000);
      subTotal += randValue;
      // Mirror the same leaf in both the input map and the expected snapshot tree.
      innerQuotaMap.put(String.valueOf(j), randValue);
      containerSubMap.put(String.valueOf(j), new StatsSnapshot(randValue, null));
    }
    total += subTotal;
    quotaMap.put(String.valueOf(i), innerQuotaMap);
    accountSubMap.put(String.valueOf(i), new StatsSnapshot(subTotal, containerSubMap));
  }
  StatsSnapshot expectedStatsSnapshot = new StatsSnapshot(total, accountSubMap);
  StatsSnapshot convertedStatsSnapshot = BlobStoreStats.convertQuotaToStatsSnapshot(quotaMap);
  // assertEquals reports both snapshots on failure, unlike assertTrue(a.equals(b)).
  assertEquals("Mismatch between the converted StatsSnapshot and expected StatsSnapshot",
      expectedStatsSnapshot, convertedStatsSnapshot);
}
Usage of com.github.ambry.server.StatsSnapshot in project ambry by LinkedIn: class BlobStoreStatsTest, method testGetStatsSnapshot.
/**
 * Tests the getStatsSnapshot method by verifying that the returned {@link StatsSnapshot} tree
 * matches the nested {@link Map} from getValidDataSizeByContainer level by level: same set of
 * accounts, same containers per account, and identical leaf sizes.
 */
@Test
public void testGetStatsSnapshot() throws StoreException {
  BlobStoreStats blobStoreStats = setupBlobStoreStats(0, 0);
  long refTimeInMs = state.time.milliseconds();
  // The nested map is the source of truth the snapshot must agree with.
  Map<String, Map<String, Long>> expectedSizeMap = blobStoreStats.getValidDataSizeByContainer(refTimeInMs);
  Map<String, StatsSnapshot> accountSnapshots = blobStoreStats.getStatsSnapshot(refTimeInMs).getSubMap();
  assertEquals("Mismatch on number of accounts", expectedSizeMap.size(), accountSnapshots.size());
  for (Map.Entry<String, Map<String, Long>> accountEntry : expectedSizeMap.entrySet()) {
    Map<String, Long> expectedContainerSizes = accountEntry.getValue();
    Map<String, StatsSnapshot> containerSnapshots = accountSnapshots.get(accountEntry.getKey()).getSubMap();
    assertEquals("Mismatch on number of containers", expectedContainerSizes.size(), containerSnapshots.size());
    for (Map.Entry<String, Long> containerEntry : expectedContainerSizes.entrySet()) {
      assertEquals("Mismatch on leaf node value", containerEntry.getValue().longValue(),
          containerSnapshots.get(containerEntry.getKey()).getValue());
    }
  }
}
Aggregations