
Example 11 with StatsHeader

Use of com.github.ambry.server.StatsHeader in project ambry by LinkedIn.

In class AccountStatsMySqlStoreTest, method testLocalBackupFile.

@Test
public void testLocalBackupFile() throws IOException {
    // First, make sure there is no local backup file.
    Path tempDir = Files.createTempDirectory("AccountStatsMySqlStoreTest");
    Path localBackupFilePath = tempDir.resolve("localbackup");
    Properties prop = new Properties();
    prop.setProperty(AccountStatsMySqlConfig.LOCAL_BACKUP_FILE_PATH, localBackupFilePath.toString());
    AccountStatsMySqlConfig accountStatsMySqlConfig = new AccountStatsMySqlConfig(new VerifiableProperties(prop));
    AccountStatsMySqlStore store = new AccountStatsMySqlStore(accountStatsMySqlConfig, mockDataSource, clusterName, hostname, null, new MetricRegistry());
    assertNull(store.getPreviousHostAccountStorageStatsWrapper());
    // Second, save a backup file.
    HostAccountStorageStats hostAccountStorageStats = new HostAccountStorageStats(StorageStatsUtilTest.generateRandomHostAccountStorageStats(10, 10, 10, 10000L, 2, 10));
    StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, System.currentTimeMillis(), 10, 10, null);
    HostAccountStorageStatsWrapper statsWrapper = new HostAccountStorageStatsWrapper(header, hostAccountStorageStats);
    ObjectMapper objectMapper = new ObjectMapper();
    objectMapper.writeValue(localBackupFilePath.toFile(), statsWrapper);
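    // Recreate the store so it can pick up the local backup file written above.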
    store = new AccountStatsMySqlStore(accountStatsMySqlConfig, mockDataSource, clusterName, hostname, null, new MetricRegistry());
    HostAccountStorageStatsWrapper backupWrapper = store.getPreviousHostAccountStorageStatsWrapper();
    assertNotNull(backupWrapper);
    assertStatsHeader(backupWrapper.getHeader(), 10, 10);
    Assert.assertEquals(hostAccountStorageStats.getStorageStats(), backupWrapper.getStats().getStorageStats());
}
Also used : Path(java.nio.file.Path) HostAccountStorageStats(com.github.ambry.server.storagestats.HostAccountStorageStats) HostAccountStorageStatsWrapper(com.github.ambry.server.HostAccountStorageStatsWrapper) VerifiableProperties(com.github.ambry.config.VerifiableProperties) StatsHeader(com.github.ambry.server.StatsHeader) MetricRegistry(com.codahale.metrics.MetricRegistry) Properties(java.util.Properties) AccountStatsMySqlConfig(com.github.ambry.config.AccountStatsMySqlConfig) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test) StorageStatsUtilTest(com.github.ambry.server.StorageStatsUtilTest)
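
The backup round trip this test exercises can be reproduced on its own. The following is a minimal sketch, assuming HostAccountStorageStatsWrapper deserializes with Jackson the same way it serializes (the test only shows the write side; the store's own read-back logic may differ):

ObjectMapper mapper = new ObjectMapper();
Path backupPath = Files.createTempDirectory("statsBackupSketch").resolve("localbackup");
// Build a wrapper the same way the test does and write it out as JSON.
StatsHeader sketchHeader = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, System.currentTimeMillis(), 10, 10, Collections.emptyList());
HostAccountStorageStatsWrapper written = new HostAccountStorageStatsWrapper(sketchHeader, new HostAccountStorageStats(StorageStatsUtilTest.generateRandomHostAccountStorageStats(10, 10, 10, 10000L, 2, 10)));
mapper.writeValue(backupPath.toFile(), written);
// Read it back; this mirrors what the store presumably does when it finds a local backup file.
HostAccountStorageStatsWrapper readBack = mapper.readValue(backupPath.toFile(), HostAccountStorageStatsWrapper.class);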

Example 12 with StatsHeader

Use of com.github.ambry.server.StatsHeader in project ambry by LinkedIn.

In class HelixClusterAggregator, method doWorkOnStatsWrapperMap.

Pair<StatsSnapshot, StatsSnapshot> doWorkOnStatsWrapperMap(Map<String, StatsWrapper> statsWrappers, StatsReportType type, boolean removeExceptionOnType) throws IOException {
    StatsSnapshot partitionSnapshot = new StatsSnapshot(0L, new HashMap<>());
    Map<String, Long> partitionTimestampMap = new HashMap<>();
    StatsSnapshot rawPartitionSnapshot = new StatsSnapshot(0L, new HashMap<>());
    if (removeExceptionOnType) {
        exceptionOccurredInstances.remove(type);
    }
    for (Map.Entry<String, StatsWrapper> statsWrapperEntry : statsWrappers.entrySet()) {
        if (statsWrapperEntry != null && statsWrapperEntry.getValue() != null) {
            try {
                StatsWrapper snapshotWrapper = statsWrapperEntry.getValue();
                StatsWrapper snapshotWrapperCopy = new StatsWrapper(new StatsHeader(snapshotWrapper.getHeader()), new StatsSnapshot(snapshotWrapper.getSnapshot()));
                combineRawStats(rawPartitionSnapshot, snapshotWrapper);
                switch(type) {
                    case ACCOUNT_REPORT:
                        combineValidStatsByAccount(partitionSnapshot, snapshotWrapperCopy, statsWrapperEntry.getKey(), partitionTimestampMap);
                        break;
                    case PARTITION_CLASS_REPORT:
                        combineValidStatsByPartitionClass(partitionSnapshot, snapshotWrapperCopy, statsWrapperEntry.getKey(), partitionTimestampMap);
                        break;
                    default:
                        throw new IllegalArgumentException("Unrecognized stats report type: " + type);
                }
            } catch (Exception e) {
                logger.error("Exception occurred while processing stats from {}", statsWrapperEntry.getKey(), e);
                exceptionOccurredInstances.computeIfAbsent(type, key -> new ArrayList<>()).add(statsWrapperEntry.getKey());
            }
        }
    }
    if (logger.isTraceEnabled()) {
        logger.trace("Combined raw snapshot {}", mapper.writeValueAsString(rawPartitionSnapshot));
        logger.trace("Combined valid snapshot {}", mapper.writeValueAsString(partitionSnapshot));
    }
    StatsSnapshot reducedRawSnapshot;
    StatsSnapshot reducedSnapshot;
    switch(type) {
        case ACCOUNT_REPORT:
            reducedRawSnapshot = reduceByAccount(rawPartitionSnapshot);
            reducedSnapshot = reduceByAccount(partitionSnapshot);
            break;
        case PARTITION_CLASS_REPORT:
            reducedRawSnapshot = reduceByPartitionClass(rawPartitionSnapshot);
            reducedSnapshot = reduceByPartitionClass(partitionSnapshot);
            break;
        default:
            throw new IllegalArgumentException("Unrecognized stats report type: " + type);
    }
    reducedRawSnapshot.removeZeroValueSnapshots();
    reducedSnapshot.removeZeroValueSnapshots();
    if (logger.isTraceEnabled()) {
        logger.trace("Reduced raw snapshot {}", mapper.writeValueAsString(reducedRawSnapshot));
        logger.trace("Reduced valid snapshot {}", mapper.writeValueAsString(reducedSnapshot));
    }
    return new Pair<>(reducedRawSnapshot, reducedSnapshot);
}
Also used : HashMap(java.util.HashMap) StatsHeader(com.github.ambry.server.StatsHeader) IOException(java.io.IOException) Map(java.util.Map) StatsWrapper(com.github.ambry.server.StatsWrapper) StatsSnapshot(com.github.ambry.server.StatsSnapshot) Pair(com.github.ambry.utils.Pair)
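
Both reduced snapshots are pruned with removeZeroValueSnapshots() before the pair is returned. A hypothetical stand-alone version of that kind of pruning over a plain nested map (StatsSnapshot's real implementation may differ) could look like the following; it needs java.util.Iterator and java.util.Map:

// Hypothetical pruning of zero-valued entries from a nested value map, approximating
// what StatsSnapshot.removeZeroValueSnapshots() does to its own subtree.
static void pruneZeroValues(Map<String, Map<String, Long>> tree) {
    for (Iterator<Map.Entry<String, Map<String, Long>>> it = tree.entrySet().iterator(); it.hasNext(); ) {
        Map<String, Long> children = it.next().getValue();
        // Drop zero-valued leaves, then drop subtrees that become empty.
        children.values().removeIf(value -> value == 0L);
        if (children.isEmpty()) {
            it.remove();
        }
    }
}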

Example 13 with StatsHeader

Use of com.github.ambry.server.StatsHeader in project ambry by LinkedIn.

In class MySqlClusterAggregatorTest, method testAggregateHostAccountStorageStatsWithEarlyNode.

/**
 * Test {@link MySqlClusterAggregator#aggregateHostAccountStorageStatsWrappers} with one node having an earlier timestamp
 * but larger storage stats data. The larger data should still be used when aggregating valid storage stats.
 * @throws Exception
 */
@Test
public void testAggregateHostAccountStorageStatsWithEarlyNode() throws Exception {
    Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> upToDateStorageStatsMap = new HashMap<>();
    upToDateStorageStatsMap.put((long) 0, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10L, 2, 10));
    // Use a much larger maximum when creating the early node's storage stats so its values are larger.
    Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> earlyStorageStatsMap = new HashMap<>();
    earlyStorageStatsMap.put((long) 0, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 6, 3, 10000L, 2, 10));
    StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, TimeUnit.MINUTES.toMillis(RELEVANT_PERIOD_IN_MINUTES / 2), 1, 1, Collections.emptyList());
    HostAccountStorageStatsWrapper upToDateNodeStats = new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats(upToDateStorageStatsMap));
    header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 0, 1, 1, Collections.emptyList());
    HostAccountStorageStatsWrapper earlyNodeStats = new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats(earlyStorageStatsMap));
    Map<String, HostAccountStorageStatsWrapper> instanceToStatsMap = new LinkedHashMap<>();
    instanceToStatsMap.put("Instance_0", upToDateNodeStats);
    instanceToStatsMap.put("Instance_1", earlyNodeStats);
    Pair<AggregatedAccountStorageStats, AggregatedAccountStorageStats> aggregatedRawAndValidStats = clusterAggregator.aggregateHostAccountStorageStatsWrappers(instanceToStatsMap);
    Map<Short, Map<Short, ContainerStorageStats>> expectedValid = clusterAggregator.aggregateHostAccountStorageStats(earlyStorageStatsMap);
    Assert.assertEquals(expectedValid, aggregatedRawAndValidStats.getSecond().getStorageStats());
}
Also used : HostAccountStorageStatsWrapper(com.github.ambry.server.HostAccountStorageStatsWrapper) HostAccountStorageStats(com.github.ambry.server.storagestats.HostAccountStorageStats) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) StatsHeader(com.github.ambry.server.StatsHeader) ContainerStorageStats(com.github.ambry.server.storagestats.ContainerStorageStats) AggregatedAccountStorageStats(com.github.ambry.server.storagestats.AggregatedAccountStorageStats) Map(java.util.Map) Test(org.junit.Test) StorageStatsUtilTest(com.github.ambry.server.StorageStatsUtilTest)
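
The behavior pinned down here is that, when both nodes report within the relevance window, the larger container value wins in the valid aggregate even though it carries the older timestamp. A hypothetical per-container merge rule along those lines (not MySqlClusterAggregator's actual code, and assuming ContainerStorageStats exposes getLogicalStorageUsage()) might be:

// Hypothetical choice between two reports for the same container, assuming both
// reporting nodes are within the relevance window of each other.
static ContainerStorageStats pickForValidAggregate(ContainerStorageStats a, ContainerStorageStats b) {
    // Prefer the report with the larger logical storage usage, regardless of which node reported first.
    return a.getLogicalStorageUsage() >= b.getLogicalStorageUsage() ? a : b;
}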

Example 14 with StatsHeader

Use of com.github.ambry.server.StatsHeader in project ambry by LinkedIn.

In class MySqlClusterAggregatorTest, method testAggregateHostPartitionClassStorageStatsWithOutdatedNode.

/**
 * Test {@link MySqlClusterAggregator#aggregateHostPartitionClassStorageStatsWrappers} with one node having outdated
 * storage stats data. Outdated data should not be used when aggregating valid storage stats.
 * @throws Exception
 */
@Test
public void testAggregateHostPartitionClassStorageStatsWithOutdatedNode() throws Exception {
    Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> upToDateStorageStatsMap = new HashMap<>();
    upToDateStorageStatsMap.computeIfAbsent("default", k -> new HashMap<>()).put((long) 0, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10000L, 2, 10));
    Map<String, Map<Long, Map<Short, Map<Short, ContainerStorageStats>>>> outdatedStorageStatsMap = new HashMap<>();
    outdatedStorageStatsMap.computeIfAbsent("default", k -> new HashMap<>()).put((long) 0, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 5, 3, 10000L, 2, 10));
    StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 1, 1, Collections.emptyList());
    HostPartitionClassStorageStatsWrapper upToDateNodeStats = new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats(upToDateStorageStatsMap));
    header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 0, 1, 1, Collections.emptyList());
    HostPartitionClassStorageStatsWrapper outdatedNodeStats = new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats(outdatedStorageStatsMap));
    header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, TimeUnit.MINUTES.toMillis(2 * RELEVANT_PERIOD_IN_MINUTES), 0, 0, Collections.emptyList());
    HostPartitionClassStorageStatsWrapper emptyNodeStats = new HostPartitionClassStorageStatsWrapper(header, new HostPartitionClassStorageStats());
    Map<String, HostPartitionClassStorageStatsWrapper> instanceToStatsMap = new LinkedHashMap<>();
    instanceToStatsMap.put("Instance_0", upToDateNodeStats);
    instanceToStatsMap.put("Instance_1", outdatedNodeStats);
    instanceToStatsMap.put("Instance_2", emptyNodeStats);
    Pair<AggregatedPartitionClassStorageStats, AggregatedPartitionClassStorageStats> aggregatedRawAndValidStats = clusterAggregator.aggregateHostPartitionClassStorageStatsWrappers(instanceToStatsMap);
    Map<String, Map<Short, Map<Short, ContainerStorageStats>>> expectedValid = clusterAggregator.aggregateHostPartitionClassStorageStats(upToDateStorageStatsMap);
    Assert.assertEquals(expectedValid, aggregatedRawAndValidStats.getSecond().getStorageStats());
}
Also used : HostPartitionClassStorageStats(com.github.ambry.server.storagestats.HostPartitionClassStorageStats) HostAccountStorageStatsWrapper(com.github.ambry.server.HostAccountStorageStatsWrapper) AggregatedPartitionClassStorageStats(com.github.ambry.server.storagestats.AggregatedPartitionClassStorageStats) Pair(com.github.ambry.utils.Pair) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) StatsHeader(com.github.ambry.server.StatsHeader) HostAccountStorageStats(com.github.ambry.server.storagestats.HostAccountStorageStats) HashMap(java.util.HashMap) Test(org.junit.Test) ContainerStorageStats(com.github.ambry.server.storagestats.ContainerStorageStats) LinkedHashMap(java.util.LinkedHashMap) TimeUnit(java.util.concurrent.TimeUnit) AggregatedAccountStorageStats(com.github.ambry.server.storagestats.AggregatedAccountStorageStats) Map(java.util.Map) TypeReference(com.fasterxml.jackson.core.type.TypeReference) Assert(org.junit.Assert) HostPartitionClassStorageStatsWrapper(com.github.ambry.server.HostPartitionClassStorageStatsWrapper) Collections(java.util.Collections) StorageStatsUtilTest(com.github.ambry.server.StorageStatsUtilTest)
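
A node counts as outdated here when its header timestamp lags the newest report by more than the relevance period, which is why Instance_1 is excluded from the valid aggregate. A hypothetical version of that check (the aggregator's real cutoff logic may differ, and a getTimestamp() accessor on StatsHeader is assumed) is:

// Hypothetical staleness check: a node is outdated if its header timestamp lags the
// most recent report by more than the configured relevance period.
static boolean isOutdated(StatsHeader header, long newestTimestampMs, long relevantPeriodInMinutes) {
    long relevantPeriodMs = TimeUnit.MINUTES.toMillis(relevantPeriodInMinutes);
    return newestTimestampMs - header.getTimestamp() > relevantPeriodMs;
}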

Example 15 with StatsHeader

Use of com.github.ambry.server.StatsHeader in project ambry by LinkedIn.

In class MySqlClusterAggregatorTest, method testAggregateHostAccountStorageStats.

/**
 * Test basic functionality of {@link MySqlClusterAggregator#aggregateHostAccountStorageStats}.
 * @throws Exception
 */
@Test
public void testAggregateHostAccountStorageStats() throws Exception {
    int nodeCount = 3;
    // Partition id to account id to container id to container storage stats.
    Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> storageStatsMap = new HashMap<>();
    // Create storage stats for 3 partitions, with 3, 4, and 5 accounts respectively.
    for (int i = 0; i < 3; i++) {
        storageStatsMap.put((long) i, StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, i + 3, 3, 10000L, 2, 10));
    }
    StatsHeader header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP, 3, 3, Collections.emptyList());
    HostAccountStorageStatsWrapper nodeStats = new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats(storageStatsMap));
    header = new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, DEFAULT_TIMESTAMP, 0, 0, Collections.emptyList());
    HostAccountStorageStatsWrapper emptyStats = new HostAccountStorageStatsWrapper(header, new HostAccountStorageStats());
    Map<String, HostAccountStorageStatsWrapper> instanceToStatsMap = new HashMap<>();
    // The combined raw data should therefore be three times the aggregated data (we have 3 nodes reporting identical stats).
    for (int i = 0; i < nodeCount; i++) {
        instanceToStatsMap.put("Instance_" + i, new HostAccountStorageStatsWrapper(new StatsHeader(nodeStats.getHeader()), new HostAccountStorageStats(nodeStats.getStats())));
    }
    instanceToStatsMap.put("Instance_" + nodeCount, emptyStats);
    Pair<AggregatedAccountStorageStats, AggregatedAccountStorageStats> aggregatedRawAndValidStats = clusterAggregator.aggregateHostAccountStorageStatsWrappers(instanceToStatsMap);
    Map<Short, Map<Short, ContainerStorageStats>> expectedAggregatedStorageStatsMap = clusterAggregator.aggregateHostAccountStorageStats(storageStatsMap);
    Assert.assertEquals(expectedAggregatedStorageStatsMap, aggregatedRawAndValidStats.getSecond().getStorageStats());
    assertAggregatedRawStatsForAccountStorageStats(aggregatedRawAndValidStats.getFirst().getStorageStats(), expectedAggregatedStorageStatsMap, nodeCount);
}
Also used : HostAccountStorageStatsWrapper(com.github.ambry.server.HostAccountStorageStatsWrapper) HostAccountStorageStats(com.github.ambry.server.storagestats.HostAccountStorageStats) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) StatsHeader(com.github.ambry.server.StatsHeader) ContainerStorageStats(com.github.ambry.server.storagestats.ContainerStorageStats) AggregatedAccountStorageStats(com.github.ambry.server.storagestats.AggregatedAccountStorageStats) Map(java.util.Map) Test(org.junit.Test) StorageStatsUtilTest(com.github.ambry.server.StorageStatsUtilTest)
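
The helper assertAggregatedRawStatsForAccountStorageStats is not shown on this page; conceptually it verifies that every raw container value is nodeCount times its valid counterpart, since the same stats were submitted by each of the three nodes. A hypothetical reimplementation of that check, assuming the usual ContainerStorageStats accessors, might look like:

// Hypothetical check that each raw aggregated value is nodeCount times the valid value,
// mirroring what assertAggregatedRawStatsForAccountStorageStats (not shown here) presumably asserts.
static void assertRawIsValidTimesNodeCount(Map<Short, Map<Short, ContainerStorageStats>> raw,
    Map<Short, Map<Short, ContainerStorageStats>> valid, int nodeCount) {
    for (Map.Entry<Short, Map<Short, ContainerStorageStats>> accountEntry : valid.entrySet()) {
        for (Map.Entry<Short, ContainerStorageStats> containerEntry : accountEntry.getValue().entrySet()) {
            ContainerStorageStats validStats = containerEntry.getValue();
            ContainerStorageStats rawStats = raw.get(accountEntry.getKey()).get(containerEntry.getKey());
            Assert.assertEquals(validStats.getLogicalStorageUsage() * nodeCount, rawStats.getLogicalStorageUsage());
            Assert.assertEquals(validStats.getPhysicalStorageUsage() * nodeCount, rawStats.getPhysicalStorageUsage());
            Assert.assertEquals(validStats.getNumberOfBlobs() * nodeCount, rawStats.getNumberOfBlobs());
        }
    }
}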

Aggregations

StatsHeader (com.github.ambry.server.StatsHeader): 17
HashMap (java.util.HashMap): 14
HostAccountStorageStatsWrapper (com.github.ambry.server.HostAccountStorageStatsWrapper): 13
HostAccountStorageStats (com.github.ambry.server.storagestats.HostAccountStorageStats): 12
Map (java.util.Map): 12
ContainerStorageStats (com.github.ambry.server.storagestats.ContainerStorageStats): 10
StorageStatsUtilTest (com.github.ambry.server.StorageStatsUtilTest): 9
Test (org.junit.Test): 9
AggregatedAccountStorageStats (com.github.ambry.server.storagestats.AggregatedAccountStorageStats): 7
LinkedHashMap (java.util.LinkedHashMap): 6
HostPartitionClassStorageStatsWrapper (com.github.ambry.server.HostPartitionClassStorageStatsWrapper): 5
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 4
StatsSnapshot (com.github.ambry.server.StatsSnapshot): 4
StatsWrapper (com.github.ambry.server.StatsWrapper): 4
HostPartitionClassStorageStats (com.github.ambry.server.storagestats.HostPartitionClassStorageStats): 4
AggregatedPartitionClassStorageStats (com.github.ambry.server.storagestats.AggregatedPartitionClassStorageStats): 3
Pair (com.github.ambry.utils.Pair): 3
Collections (java.util.Collections): 3
MetricRegistry (com.codahale.metrics.MetricRegistry): 2
TypeReference (com.fasterxml.jackson.core.type.TypeReference): 2