use of com.github.ambry.utils.Pair in project ambry by linkedin.
the class ConsistencyCheckerTool method getIndexProcessingResults.
/**
* Processes the indexes of each of the replicas and returns the results.
* @param replicas the replicas to process indexes for.
* @return a {@link Pair} whose first element indicates whether all results were sane and whose second element contains
* the map of individual results by replica.
* @throws Exception if there is any error in processing the indexes.
*/
private Pair<Boolean, Map<File, DumpIndexTool.IndexProcessingResults>> getIndexProcessingResults(File[] replicas) throws Exception {
  long currentTimeMs = time.milliseconds();
  Map<File, DumpIndexTool.IndexProcessingResults> results = new HashMap<>();
  boolean sane = true;
  for (File replica : replicas) {
    logger.info("Processing segment files for replica {} ", replica);
    DumpIndexTool.IndexProcessingResults result = dumpIndexTool.processIndex(replica, filterSet, currentTimeMs, throttler);
    sane = sane && result.isIndexSane();
    results.put(replica, result);
  }
  return new Pair<>(sane, results);
}
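The returned Pair is unpacked with getFirst()/getSecond(). The sketch below shows a hypothetical caller; the replicaDirs variable and the error handling are assumptions for illustration, not code from ConsistencyCheckerTool.
// Hypothetical caller of getIndexProcessingResults; replicaDirs is an assumed File[] of replica directories.
Pair<Boolean, Map<File, DumpIndexTool.IndexProcessingResults>> checkResult = getIndexProcessingResults(replicaDirs);
if (!checkResult.getFirst()) {
  // At least one replica index failed the sanity check; report the per-replica results.
  for (Map.Entry<File, DumpIndexTool.IndexProcessingResults> entry : checkResult.getSecond().entrySet()) {
    logger.error("Index processing result for {}: {}", entry.getKey(), entry.getValue());
  }
}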
use of com.github.ambry.utils.Pair in project ambry by linkedin.
the class HelixClusterAggregator method doWork.
/**
* Take a {@link Map} of instance name to JSON string representation of {@link StatsWrapper} objects and perform cluster wide
* aggregation with them.
* @param statsWrappersJSON a {@link Map} of instance name to JSON string representation of {@link StatsWrapper} objects from the
* node level
* @return a {@link Pair} of Strings whose values represent valid quota stats across all partitions. The first element
* is the raw (summed) aggregated stats and the second element is the averaged (aggregated) stats across all replicas
* of each partition.
* @throws IOException if a {@link StatsWrapper} cannot be deserialized or an aggregated snapshot cannot be serialized.
*/
Pair<String, String> doWork(Map<String, String> statsWrappersJSON) throws IOException {
  StatsSnapshot partitionSnapshot = new StatsSnapshot(0L, new HashMap<String, StatsSnapshot>());
  Map<String, Long> partitionTimestampMap = new HashMap<>();
  StatsSnapshot rawPartitionSnapshot = new StatsSnapshot(0L, new HashMap<String, StatsSnapshot>());
  for (Map.Entry<String, String> statsWrapperJSON : statsWrappersJSON.entrySet()) {
    if (statsWrapperJSON != null) {
      StatsWrapper snapshotWrapper = mapper.readValue(statsWrapperJSON.getValue(), StatsWrapper.class);
      StatsWrapper snapshotWrapperCopy = mapper.readValue(statsWrapperJSON.getValue(), StatsWrapper.class);
      combineRaw(rawPartitionSnapshot, snapshotWrapper);
      combine(partitionSnapshot, snapshotWrapperCopy, statsWrapperJSON.getKey(), partitionTimestampMap);
    }
  }
  if (logger.isTraceEnabled()) {
    logger.trace("Combined raw snapshot {}", mapper.writeValueAsString(rawPartitionSnapshot));
    logger.trace("Combined snapshot {}", mapper.writeValueAsString(partitionSnapshot));
  }
  StatsSnapshot reducedRawSnapshot = reduce(rawPartitionSnapshot);
  StatsSnapshot reducedSnapshot = reduce(partitionSnapshot);
  if (logger.isTraceEnabled()) {
    logger.trace("Reduced raw snapshot {}", mapper.writeValueAsString(reducedRawSnapshot));
    logger.trace("Reduced snapshot {}", mapper.writeValueAsString(reducedSnapshot));
  }
  return new Pair<>(mapper.writeValueAsString(reducedRawSnapshot), mapper.writeValueAsString(reducedSnapshot));
}
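A caller of doWork receives both aggregates as serialized JSON. A minimal sketch of unpacking the Pair, assuming an aggregator instance and an input map that are not shown in the snippet above:
// Hypothetical use of doWork's result; 'aggregator' and 'statsWrappersJSON' are assumed to exist.
Pair<String, String> aggregatedStats = aggregator.doWork(statsWrappersJSON);
String rawStatsJson = aggregatedStats.getFirst();    // raw (summed) stats across all replicas of each partition
String validStatsJson = aggregatedStats.getSecond(); // averaged stats, one value per partition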
use of com.github.ambry.utils.Pair in project ambry by linkedin.
the class MySqlReportAggregatorTask method removeInvalidAggregatedAccountAndContainerStats.
private void removeInvalidAggregatedAccountAndContainerStats(AggregatedAccountStorageStats currentStats) throws Exception {
  AggregatedAccountStorageStats existingStats = accountStatsStore.queryAggregatedAccountStorageStats();
  List<Pair<Short, Short>> toBeDeletedAccountAndContainers = new ArrayList<>();
  for (Map.Entry<Short, Map<Short, ContainerStorageStats>> accountEntry : existingStats.getStorageStats().entrySet()) {
    short accountId = accountEntry.getKey();
    for (short containerId : accountEntry.getValue().keySet()) {
      if (!currentStats.getStorageStats().containsKey(accountId) || !currentStats.getStorageStats().get(accountId).containsKey(containerId)) {
        toBeDeletedAccountAndContainers.add(new Pair<>(accountId, containerId));
      }
    }
  }
  // delete the account/container stats that are no longer valid
  for (Pair<Short, Short> accountContainer : toBeDeletedAccountAndContainers) {
    accountStatsStore.deleteAggregatedAccountStatsForContainer(accountContainer.getFirst(), accountContainer.getSecond());
  }
}
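Here Pair<Short, Short> serves as a lightweight, immutable (accountId, containerId) key. A minimal illustration; the IDs below are made up:
// Illustration only: Pair used as an (accountId, containerId) tuple.
Pair<Short, Short> accountContainer = new Pair<>((short) 101, (short) 5);
short accountId = accountContainer.getFirst();
short containerId = accountContainer.getSecond();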
use of com.github.ambry.utils.Pair in project ambry by linkedin.
the class MySqlReportAggregatorTask method run.
@Override
public TaskResult run() {
  Exception exception = null;
  Histogram fetchTimeMs = statsReportType == StatsReportType.ACCOUNT_REPORT ? metrics.accountStatsFetchTimeMs : metrics.partitionClassStatsFetchTimeMs;
  Histogram aggregationTimeMs = statsReportType == StatsReportType.ACCOUNT_REPORT ? metrics.accountStatsAggregationTimeMs : metrics.partitionClassStatsAggregationTimeMs;
  long startTimeMs = System.currentTimeMillis();
  StatsSnapshot accountPhysicalStorageSnapshot = null;
  try {
    List<String> instanceNames = manager.getClusterManagmentTool().getInstancesInCluster(manager.getClusterName());
    if (statsReportType == StatsReportType.ACCOUNT_REPORT) {
      Map<String, HostAccountStorageStatsWrapper> accountStatsWrappers = fetchAccountStorageStatsWrapperForInstances(instanceNames);
      fetchTimeMs.update(System.currentTimeMillis() - startTimeMs);
      logger.info("Aggregating stats from " + accountStatsWrappers.size() + " hosts");
      Pair<AggregatedAccountStorageStats, AggregatedAccountStorageStats> results = clusterAggregator.aggregateHostAccountStorageStatsWrappers(accountStatsWrappers);
      if (clusterMapConfig.clustermapEnableDeleteInvalidDataInMysqlAggregationTask) {
        removeInvalidAggregatedAccountAndContainerStats(results.getSecond());
      }
      accountStatsStore.storeAggregatedAccountStorageStats(results.getSecond());
      accountPhysicalStorageSnapshot = StorageStatsUtil.convertAggregatedAccountStorageStatsToStatsSnapshot(results.getFirst(), true);
    } else if (statsReportType == StatsReportType.PARTITION_CLASS_REPORT) {
      Map<String, HostPartitionClassStorageStatsWrapper> statsWrappers = fetchPartitionClassStorageStatsWrapperForInstances(instanceNames);
      fetchTimeMs.update(System.currentTimeMillis() - startTimeMs);
      logger.info("Aggregating stats from " + statsWrappers.size() + " hosts");
      Pair<AggregatedPartitionClassStorageStats, AggregatedPartitionClassStorageStats> results = clusterAggregator.aggregateHostPartitionClassStorageStatsWrappers(statsWrappers);
      if (clusterMapConfig.clustermapEnableDeleteInvalidDataInMysqlAggregationTask) {
        removeInvalidAggregatedPartitionClassStats(results.getSecond());
      }
      accountStatsStore.storeAggregatedPartitionClassStorageStats(results.getSecond());
    }
    // Check if there is a base report for this month or not.
    if (clusterMapConfig.clustermapEnableAggregatedMonthlyAccountReport && statsReportType == StatsReportType.ACCOUNT_REPORT) {
      // Get the month, if not the same month, then copy the aggregated stats and update the month
      String currentMonthValue = LocalDateTime.ofEpochSecond(time.seconds(), 0, ZONE_OFFSET).format(TIMESTAMP_FORMATTER);
      String recordedMonthValue = accountStatsStore.queryRecordedMonth();
      if (recordedMonthValue == null || recordedMonthValue.isEmpty() || !currentMonthValue.equals(recordedMonthValue)) {
        if (clusterMapConfig.clustermapEnableDeleteInvalidDataInMysqlAggregationTask) {
          accountStatsStore.deleteSnapshotOfAggregatedAccountStats();
        }
        logger.info("Taking snapshot of aggregated stats for month " + currentMonthValue);
        accountStatsStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(currentMonthValue);
      }
    }
    aggregationTimeMs.update(System.currentTimeMillis() - startTimeMs);
    return new TaskResult(TaskResult.Status.COMPLETED, "Aggregation success");
  } catch (Exception e) {
    logger.error("Exception thrown while aggregating stats from container stats reports across all nodes ", e);
    exception = e;
    return new TaskResult(TaskResult.Status.FAILED, "Exception thrown");
  } finally {
    if (clusterMapConfig.clustermapEnableContainerDeletionAggregation && callback != null && accountPhysicalStorageSnapshot != null && statsReportType.equals(StatsReportType.ACCOUNT_REPORT)) {
      callback.onCompletion(accountPhysicalStorageSnapshot, exception);
    }
  }
}
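The Pairs returned by the cluster aggregator follow a raw-first/valid-second convention, which is why run() persists getSecond() and converts getFirst() into the physical-storage snapshot. A hedged restatement of that unpacking, reusing the 'results' Pair from the ACCOUNT_REPORT branch above:
// Sketch of the convention used in run(); 'results' is the Pair returned by aggregateHostAccountStorageStatsWrappers.
AggregatedAccountStorageStats rawStats = results.getFirst();    // raw aggregate, fed to the physical-storage snapshot conversion
AggregatedAccountStorageStats validStats = results.getSecond(); // valid aggregate, stored via accountStatsStore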
use of com.github.ambry.utils.Pair in project ambry by linkedin.
the class MySqlClusterAggregatorTest method testAggregateHostPartitionClassStorageStatsWithOutdatedNode.
/**
* Test {@link MySqlClusterAggregator#aggregateHostPartitionClassStorageStatsWrappers} but with one node having outdated
* storage stats data. Outdated data shouldn't be used when aggregating valid storage stats.
* @throws Exception
*/
@Test
public void testAggregateHostPartitionClassStorageStatsWithOutdatedNode() throws Exception {
  Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> upToDateStorageStatsMap = new HashMap<>();
  upToDateStorageStatsMap.computeIfAbsent("default", k -> new HashMap<>())
      .put((long) 0, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10000L, 2, 10));
  Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> outdatedStorageStatsMap = new HashMap<>();
  outdatedStorageStatsMap.computeIfAbsent("default", k -> new HashMap<>())
      .put((long) 0, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10000L, 2, 10));
  StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 1, 1, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper upToDateNodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats(upToDateStorageStatsMap));
  header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 0, 1, 1, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper outdatedNodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats(outdatedStorageStatsMap));
  header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 0, 0, Collections.emptyList());
  HostPartitionClassStorageStatsWrapper emptyNodeStats =
      new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats());
  Map<String, HostPartitionClassStorageStatsWrapper> instanceToStatsMap = new LinkedHashMap<>();
  instanceToStatsMap.put("Instance_0", upToDateNodeStats);
  instanceToStatsMap.put("Instance_1", outdatedNodeStats);
  instanceToStatsMap.put("Instance_2", emptyNodeStats);
  Pair<AggregatedPartitionClassStorageStats, AggregatedPartitionClassStorageStats> aggregatedRawAndValidStats =
      clusterAggregator.aggregateHostPartitionClassStorageStatsWrappers(instanceToStatsMap);
  Map<String, Map<Short, Map<Short, ContainerStorageStats>>> expectedValid =
      clusterAggregator.aggregateHostPartitionClassStorageStats(upToDateStorageStatsMap);
  Assert.assertEquals(expectedValid, aggregatedRawAndValidStats.getSecond().getStorageStats());
}