Use of com.github.ambry.utils.MockTime in project ambry by LinkedIn: class StatsManagerTest, method testAccountExclusion.
/**
* Test AccountExclusion configuration.
* @throws Exception
*/
@Test
public void testAccountExclusion() throws Exception {
  // Two names are listed for exclusion, but only "account0" is registered in the
  // account service below, so only its id can be resolved into the exclusion lists.
  String namesToExclude = "account0,account10";
  Properties props = new Properties();
  props.put(StatsManagerConfig.STATS_PUBLISH_EXCLUDE_ACCOUNT_NAMES, namesToExclude);
  props.put(StatsManagerConfig.STATS_HEALTH_REPORT_EXCLUDE_ACCOUNT_NAMES, namesToExclude);
  StatsManagerConfig configWithExclusions = new StatsManagerConfig(new VerifiableProperties(props));
  // Register only account0; "account10" is intentionally absent from the service.
  inMemoryAccountService.updateAccounts(
      Arrays.asList(new AccountBuilder((short) 0, "account0", Account.AccountStatus.ACTIVE).build()));
  StatsManager manager =
      new StatsManager(storageManager, replicas, new MetricRegistry(), configWithExclusions, new MockTime(), null,
          null, inMemoryAccountService);
  // Both exclusion lists should contain exactly one entry: account0's id (0).
  List<Short> healthExcluded = manager.getHealthReportExcludeAccountIds();
  assertEquals(1, healthExcluded.size());
  assertEquals((short) 0, healthExcluded.get(0).shortValue());
  List<Short> publishExcluded = manager.getPublishExcludeAccountIds();
  assertEquals(1, publishExcluded.size());
  assertEquals((short) 0, publishExcluded.get(0).shortValue());
}
Use of com.github.ambry.utils.MockTime in project ambry by LinkedIn: class StatsManagerTest, method testAddAndRemoveReplica.
/**
* Test to verify the {@link StatsManager} behaves correctly when dynamically adding/removing {@link ReplicaId}.
* @throws Exception
*/
@Test
public void testAddAndRemoveReplica() throws Exception {
// setup testing environment: three partitions, each backed by a MockStore whose
// stats come from the pre-built hostAccountStorageStats fixture.
Map<PartitionId, Store> testStoreMap = new HashMap<>();
List<ReplicaId> testReplicas = new ArrayList<>();
DataNodeId dataNodeId = new MockDataNodeId(Collections.singletonList(new Port(6667, PortType.PLAINTEXT)), Collections.singletonList("/tmp"), "DC1");
for (int i = 0; i < 3; i++) {
PartitionId partitionId = new MockPartitionId(i, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
testStoreMap.put(partitionId, new MockStore(new MockStoreStats(hostAccountStorageStats.getStorageStats().get(i), false)));
testReplicas.add(partitionId.getReplicaIds().get(0));
}
StorageManager mockStorageManager = new MockStorageManager(testStoreMap, dataNodeId);
StatsManager testStatsManager = new StatsManager(mockStorageManager, testReplicas, new MetricRegistry(), statsManagerConfig, new MockTime(), null, null, inMemoryAccountService);
// verify that adding an existing store to StatsManager should fail
assertFalse("Adding a store which already exists should fail", testStatsManager.addReplica(testReplicas.get(0)));
PartitionId partitionId3 = new MockPartitionId(3, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
testStoreMap.put(partitionId3, new MockStore(new MockStoreStats(hostAccountStorageStats.getStorageStats().get(0), false)));
// verify that partitionId3 is not in stats report before adding to statsManager
// (the store exists in the storage manager's map, but StatsManager has not been told about it)
StatsManager.AccountStatsPublisher publisher = testStatsManager.new AccountStatsPublisher(accountStatsStore);
// publisher.run() performs one synchronous aggregation pass and writes the result
// to accountStatsStore, which is then queried back by host below.
publisher.run();
HostAccountStorageStatsWrapper statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
assertFalse("Partition3 should not present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId3.getId()));
// verify that after adding into statsManager, PartitionId3 is in stats report
testStatsManager.addReplica(partitionId3.getReplicaIds().get(0));
publisher.run();
statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
assertTrue("Partition3 should present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId3.getId()));
// verify that after removing PartitionId0 (corresponding to the first replica in replicas list), PartitionId0 is not in the stats report
PartitionId partitionId0 = testReplicas.get(0).getPartitionId();
assertTrue("Partition0 should present in stats report before removal", statsWrapper.getStats().getStorageStats().containsKey(partitionId0.getId()));
testStoreMap.remove(testReplicas.get(0).getPartitionId());
testStatsManager.removeReplica(testReplicas.get(0));
publisher.run();
statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
assertFalse("Partition0 should not present in stats report after removal", statsWrapper.getStats().getStorageStats().containsKey(partitionId0.getId()));
// verify that removing the PartitionId0 should fail because it no longer exists in StatsManager
assertFalse(testStatsManager.removeReplica(testReplicas.get(0)));
// concurrent remove test: a background thread removes a not-yet-collected replica
// while the publisher is mid-aggregation. The two CountDownLatches form a handshake:
//  - getStatsCountdown1 is counted down by a MockStore when its stats are collected,
//    releasing the remover thread (so aggregation has started on an old snapshot);
//  - waitRemoveCountdown blocks the aggregation (inside MockStorageManager — presumably
//    on its first store access after the snapshot; TODO confirm against MockStorageManager)
//    until the removal has happened.
CountDownLatch getStatsCountdown1 = new CountDownLatch(1);
CountDownLatch waitRemoveCountdown = new CountDownLatch(1);
((MockStorageManager) mockStorageManager).waitOperationCountdown = waitRemoveCountdown;
((MockStorageManager) mockStorageManager).firstCall = true;
((MockStorageManager) mockStorageManager).unreachablePartitions.clear();
for (Store store : testStoreMap.values()) {
((MockStore) store).getStatsCountdown = getStatsCountdown1;
((MockStore) store).isCollected = false;
}
List<PartitionId> partitionRemoved = new ArrayList<>();
Utils.newThread(() -> {
// wait until at least one store has been collected (this ensures stats aggregation using old snapshot of map)
try {
getStatsCountdown1.await();
} catch (InterruptedException e) {
throw new IllegalStateException("CountDown await was interrupted", e);
}
// find one store which hasn't been collected
ReplicaId replicaToRemove = null;
for (Map.Entry<PartitionId, Store> partitionToStore : testStoreMap.entrySet()) {
MockStore store = (MockStore) partitionToStore.getValue();
if (!store.isCollected) {
replicaToRemove = partitionToStore.getKey().getReplicaIds().get(0);
break;
}
}
if (replicaToRemove != null) {
testStatsManager.removeReplica(replicaToRemove);
testStoreMap.remove(replicaToRemove.getPartitionId());
partitionRemoved.add(replicaToRemove.getPartitionId());
// count down to allow stats aggregation to proceed
waitRemoveCountdown.countDown();
}
}, false).start();
// this run() races with the remover thread above; the latches force the interleaving.
publisher.run();
statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
// verify that the removed store is indeed unreachable during stats aggregation
assertTrue("The removed partition should be unreachable during aggregation", ((MockStorageManager) mockStorageManager).unreachablePartitions.contains(partitionRemoved.get(0)));
// verify unreachable store list doesn't contain the store which is removed.
// (i.e. a concurrently removed replica must not be mis-reported as an unreachable store)
List<String> unreachableStores = statsWrapper.getHeader().getUnreachableStores();
assertFalse("The removed partition should not present in unreachable list", unreachableStores.contains(partitionRemoved.get(0).toPathString()));
// concurrent add test: mirror image of the remove test — a replica is added while
// aggregation runs on the old snapshot, so it should only show up next round.
CountDownLatch getStatsCountdown2 = new CountDownLatch(1);
CountDownLatch waitAddCountdown = new CountDownLatch(1);
((MockStorageManager) mockStorageManager).waitOperationCountdown = waitAddCountdown;
((MockStorageManager) mockStorageManager).firstCall = true;
((MockStorageManager) mockStorageManager).unreachablePartitions.clear();
for (Store store : testStoreMap.values()) {
((MockStore) store).getStatsCountdown = getStatsCountdown2;
((MockStore) store).isCollected = false;
}
PartitionId partitionId4 = new MockPartitionId(4, MockClusterMap.DEFAULT_PARTITION_CLASS, Collections.singletonList((MockDataNodeId) dataNodeId), 0);
Utils.newThread(() -> {
// wait until at least one store has been collected (this ensures stats aggregation using old snapshot of map)
try {
getStatsCountdown2.await();
} catch (InterruptedException e) {
throw new IllegalStateException("CountDown await was interrupted", e);
}
testStatsManager.addReplica(partitionId4.getReplicaIds().get(0));
testStoreMap.put(partitionId4, new MockStore(new MockStoreStats(hostAccountStorageStats.getStorageStats().get(0), false)));
// count down to allow stats aggregation to proceed
waitAddCountdown.countDown();
}, false).start();
publisher.run();
statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
// verify that new added PartitionId4 is not in report for this round of aggregation
assertFalse("Partition4 should not present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId4.getId()));
// verify that new added PartitionId4 will be collected for next round of aggregation
publisher.run();
statsWrapper = accountStatsStore.queryHostAccountStorageStatsByHost("localhost", 0);
assertTrue("Partition4 should present in stats report", statsWrapper.getStats().getStorageStats().containsKey(partitionId4.getId()));
}
Use of com.github.ambry.utils.MockTime in project ambry by LinkedIn: class MockBlobStoreStats, method testDifferentMessageRetentionDays.
/**
* Tests {@link CompactionManager#getCompactionDetails(BlobStore)} for different values for
* {@link StoreConfig#storeDeletedMessageRetentionHours}
*/
@Test
public void testDifferentMessageRetentionDays() throws StoreException, InterruptedException {
  // Exercise compaction-detail generation across several retention windows.
  int[] retentionHoursToTest = {1, 2, 3, 6, 9};
  List<LogSegmentName> expectedCandidates = null;
  for (int retentionHours : retentionHoursToTest) {
    time = new MockTime();
    Pair<MockBlobStore, StoreConfig> initState =
        initializeBlobStore(properties, time, -1, retentionHours, DEFAULT_MAX_BLOB_SIZE);
    StoreConfig config = initState.getSecond();
    // Rebuild the policy under test with the new retention setting, and set up the
    // matching expected best-candidate segments for whichever policy type is active.
    if (compactionPolicy instanceof StatsBasedCompactionPolicy) {
      expectedCandidates = setUpStateForStatsBasedCompactionPolicy(blobStore, mockBlobStoreStats);
      compactionPolicy = new StatsBasedCompactionPolicy(config, time);
    } else if (compactionPolicy instanceof CompactAllPolicy) {
      blobStore.logSegmentsNotInJournal = generateRandomLogSegmentName(3);
      expectedCandidates = blobStore.logSegmentsNotInJournal;
      compactionPolicy = new CompactAllPolicy(config, time);
    }
    // The reference time is "now" minus the retention window.
    long referenceTimeMs = time.milliseconds() - TimeUnit.HOURS.toMillis(retentionHours);
    verifyCompactionDetails(new CompactionDetails(referenceTimeMs, expectedCandidates, null), blobStore,
        compactionPolicy);
  }
}
Aggregations