Use of com.github.ambry.config.VerifiableProperties in project ambry by linkedin.
The class ReplicationTest, method blockDeprecatedContainerReplicationTest.
/**
 * Tests that deprecated containers are blocked during replication.
 */
@Test
public void blockDeprecatedContainerReplicationTest() throws Exception {
  Properties properties = new Properties();
  properties.setProperty("replication.container.deletion.enabled", "true");
  replicationConfig = new ReplicationConfig(new VerifiableProperties(properties));
  MockClusterMap clusterMap = new MockClusterMap();
  Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
  MockHost localHost = localAndRemoteHosts.getFirst();
  MockHost remoteHost = localAndRemoteHosts.getSecond();
  MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  storeKeyConverterFactory.setConversionMap(new HashMap<>());
  storeKeyConverterFactory.setReturnInputIfAbsent(true);
  MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter =
      storeKeyConverterFactory.getStoreKeyConverter();
  Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
  storeKeyConverter.setConversionMap(conversionMap);
  List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
  for (int i = 0; i < partitionIds.size(); i++) {
    PartitionId partitionId = partitionIds.get(i);
    BlobId b0 = generateRandomBlobId(partitionId);
    conversionMap.put(b0, b0);
    BlobId b1 = generateRandomBlobId(partitionId);
    conversionMap.put(b1, b1);
    storeKeyConverter.setConversionMap(conversionMap);
    storeKeyConverter.convert(conversionMap.keySet());
    // Add 2 messages to the remote host only.
    addPutMessagesToReplicasOfPartition(Arrays.asList(b0, b1), Collections.singletonList(remoteHost));
  }
  StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
  Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
  int batchSize = 4;
  ReplicationMetrics replicationMetrics =
      new ReplicationMetrics(new MetricRegistry(), clusterMap.getReplicaIds(localHost.dataNodeId));
  replicationMetrics.populateSingleColoMetrics(remoteHost.dataNodeId.getDatacenterName());
  List<RemoteReplicaInfo> remoteReplicaInfoList = localHost.getRemoteReplicaInfos(remoteHost, null);
  Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate =
      Collections.singletonMap(remoteHost.dataNodeId, remoteReplicaInfoList);
  storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
  Map<DataNodeId, MockHost> hosts = new HashMap<>();
  hosts.put(remoteHost.dataNodeId, remoteHost);
  MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, batchSize);
  Predicate<MessageInfo> skipPredicate = new ReplicationSkipPredicate(accountService, replicationConfig);
  ReplicaThread replicaThread =
      new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap,
          new AtomicInteger(0), localHost.dataNodeId, connectionPool, replicationConfig, replicationMetrics, null,
          storeKeyConverter, transformer, clusterMap.getMetricRegistry(), false,
          localHost.dataNodeId.getDatacenterName(), new ResponseHandler(clusterMap), time, null, skipPredicate);
  for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfoList) {
    replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
  }
  List<RemoteReplicaInfo> remoteReplicaInfos = replicasToReplicate.get(remoteHost.dataNodeId);
  DataNodeId remoteNode = remoteReplicaInfos.get(0).getReplicaId().getDataNodeId();
  ReplicaMetadataResponse response = replicaThread.getReplicaMetadataResponse(remoteReplicaInfos,
      new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteNode);
  // Case 1: DELETE_IN_PROGRESS containers whose retention time has elapsed.
  for (int i = 0; i < 2; i++) {
    RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
    new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
    for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
      short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
      short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
      Container container = Mockito.mock(Container.class);
      Account account = Mockito.mock(Account.class);
      Mockito.when(account.getContainerById(containerId)).thenReturn(container);
      Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
      Mockito.when(container.getDeleteTriggerTime()).thenReturn(System.currentTimeMillis()
          - TimeUnit.DAYS.toMillis(replicationConfig.replicationContainerDeletionRetentionDays + 1));
      Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.DELETE_IN_PROGRESS);
    }
    Set<MessageInfo> remoteMissingStoreKeys =
        replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
    assertEquals("All DELETE_IN_PROGRESS blobs past the retention time should be skipped during replication", 0,
        remoteMissingStoreKeys.size());
    Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
    replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo,
        remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
  }
  // Case 2: DELETE_IN_PROGRESS containers whose retention time has not yet elapsed.
  for (int i = 2; i < 4; i++) {
    RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
    new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
    for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
      short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
      short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
      Container container = Mockito.mock(Container.class);
      Account account = Mockito.mock(Account.class);
      Mockito.when(account.getContainerById(containerId)).thenReturn(container);
      Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
      Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.DELETE_IN_PROGRESS);
      Mockito.when(container.getDeleteTriggerTime()).thenReturn(System.currentTimeMillis());
    }
    Set<MessageInfo> remoteMissingStoreKeys =
        replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
    assertEquals("DELETE_IN_PROGRESS blobs within the retention time should not be skipped during replication", 2,
        remoteMissingStoreKeys.size());
    Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
    replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo,
        remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
  }
  // Case 3: INACTIVE containers.
  for (int i = 4; i < 6; i++) {
    RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
    new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
    for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
      short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
      short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
      Container container = Mockito.mock(Container.class);
      Account account = Mockito.mock(Account.class);
      Mockito.when(account.getContainerById(containerId)).thenReturn(container);
      Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
      Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.INACTIVE);
    }
    Set<MessageInfo> remoteMissingStoreKeys =
        replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
    assertEquals("All INACTIVE blobs should be skipped during replication", 0, remoteMissingStoreKeys.size());
    Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
    replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo,
        remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
  }
  // Case 4: ACTIVE containers.
  for (int i = 6; i < 8; i++) {
    RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
    new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
    for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
      short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
      short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
      Container container = Mockito.mock(Container.class);
      Account account = Mockito.mock(Account.class);
      Mockito.when(account.getContainerById(containerId)).thenReturn(container);
      Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
      Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.ACTIVE);
    }
    Set<MessageInfo> remoteMissingStoreKeys =
        replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
    assertEquals("Non-deprecated blobs should not be skipped during replication", 2, remoteMissingStoreKeys.size());
    Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
    replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo,
        remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
  }
}
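For clarity, here is a minimal sketch of the container-based skip decision the test above exercises. It is not the actual ReplicationSkipPredicate source; the helper name and exact structure are illustrative assumptions, and only the AccountService/ReplicationConfig inputs mirror the test setup.

// Hedged sketch, not Ambry's actual ReplicationSkipPredicate implementation.
// Assumes the same AccountService and ReplicationConfig wired up in the test.
private static boolean shouldSkipSketch(MessageInfo info, AccountService accountService, ReplicationConfig config) {
  Account account = accountService.getAccountById(info.getAccountId());
  Container container = account == null ? null : account.getContainerById(info.getContainerId());
  if (container == null) {
    // Unknown container: do not block replication.
    return false;
  }
  switch (container.getStatus()) {
    case INACTIVE:
      // INACTIVE containers are deprecated outright (case 3 above).
      return true;
    case DELETE_IN_PROGRESS: {
      // DELETE_IN_PROGRESS containers are blocked only once the retention window has elapsed (cases 1 and 2 above).
      long retentionMs = TimeUnit.DAYS.toMillis(config.replicationContainerDeletionRetentionDays);
      return container.getDeleteTriggerTime() + retentionMs <= System.currentTimeMillis();
    }
    default:
      // ACTIVE (and any other) containers replicate normally (case 4 above).
      return false;
  }
}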
Use of com.github.ambry.config.VerifiableProperties in project ambry by linkedin.
The class ReplicationTest, method addAndRemoveReplicaTest.
/**
 * Tests dynamically adding and removing a replica in {@link ReplicationManager}.
 * @throws Exception
 */
@Test
public void addAndRemoveReplicaTest() throws Exception {
  MockClusterMap clusterMap = new MockClusterMap();
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  StoreConfig storeConfig = new StoreConfig(verifiableProperties);
  DataNodeId dataNodeId = clusterMap.getDataNodeIds().get(0);
  MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  storeKeyConverterFactory.setConversionMap(new HashMap<>());
  StorageManager storageManager =
      new StorageManager(storeConfig, new DiskManagerConfig(verifiableProperties), Utils.newScheduler(1, true),
          new MetricRegistry(), null, clusterMap, dataNodeId, null, null, new MockTime(), null,
          new InMemAccountService(false, false));
  storageManager.start();
  MockReplicationManager replicationManager =
      new MockReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, clusterMap,
          dataNodeId, storeKeyConverterFactory, null);
  ReplicaId replicaToTest = clusterMap.getReplicaIds(dataNodeId).get(0);
  // Attempting to add a replica that already exists should fail.
  assertFalse("Adding an existing replica should fail", replicationManager.addReplica(replicaToTest));
  // Create a brand-new replica that sits on one of the disks of the data node, and add it to the replication manager.
  PartitionId newPartition = clusterMap.createNewPartition(clusterMap.getDataNodes());
  for (ReplicaId replicaId : newPartition.getReplicaIds()) {
    if (replicaId.getDataNodeId() == dataNodeId) {
      replicaToTest = replicaId;
      break;
    }
  }
  // Before adding the replica, partitionToPartitionInfo and mountPathToPartitionInfos should not contain the new partition.
  assertFalse("partitionToPartitionInfo should not contain new partition",
      replicationManager.getPartitionToPartitionInfoMap().containsKey(newPartition));
  for (PartitionInfo partitionInfo : replicationManager.getMountPathToPartitionInfosMap()
      .get(replicaToTest.getMountPath())) {
    assertNotSame("mountPathToPartitionInfos should not contain new partition", partitionInfo.getPartitionId(),
        newPartition);
  }
  // Add the new replica to the replication manager.
  assertTrue("Adding new replica to replication manager should succeed", replicationManager.addReplica(replicaToTest));
  // After adding the replica, partitionToPartitionInfo and mountPathToPartitionInfos should contain the new partition.
  assertTrue("partitionToPartitionInfo should contain new partition",
      replicationManager.getPartitionToPartitionInfoMap().containsKey(newPartition));
  Optional<PartitionInfo> newPartitionInfo = replicationManager.getMountPathToPartitionInfosMap()
      .get(replicaToTest.getMountPath())
      .stream()
      .filter(partitionInfo -> partitionInfo.getPartitionId() == newPartition)
      .findAny();
  assertTrue("mountPathToPartitionInfos should contain new partition info", newPartitionInfo.isPresent());
  // Verify that all remoteReplicaInfos of the newly added replica have an assigned thread.
  for (RemoteReplicaInfo remoteReplicaInfo : newPartitionInfo.get().getRemoteReplicaInfos()) {
    assertNotNull("The remote replica should be assigned to one replica thread", remoteReplicaInfo.getReplicaThread());
  }
  // Remove the replica.
  assertTrue("Removing replica from replication manager should succeed",
      replicationManager.removeReplica(replicaToTest));
  // Verify the replica is removed: partitionToPartitionInfo and mountPathToPartitionInfos should no longer contain the new partition.
  assertFalse("partitionToPartitionInfo should not contain new partition",
      replicationManager.getPartitionToPartitionInfoMap().containsKey(newPartition));
  for (PartitionInfo partitionInfo : replicationManager.getMountPathToPartitionInfosMap()
      .get(replicaToTest.getMountPath())) {
    assertNotSame("mountPathToPartitionInfos should not contain new partition", partitionInfo.getPartitionId(),
        newPartition);
  }
  // Verify that none of the remoteReplicaInfos still has an assigned thread.
  for (RemoteReplicaInfo remoteReplicaInfo : newPartitionInfo.get().getRemoteReplicaInfos()) {
    assertNull("The remote replica should not be assigned to any replica thread",
        remoteReplicaInfo.getReplicaThread());
  }
  // Removing a replica that no longer exists should be a no-op.
  ReplicationManager mockManager = Mockito.spy(replicationManager);
  assertFalse("Removing non-existent replica should return false", replicationManager.removeReplica(replicaToTest));
  verify(mockManager, never()).removeRemoteReplicaInfoFromReplicaThread(anyList());
  storageManager.shutdown();
}
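A rough sketch of the bookkeeping those assertions check follows; the field and method names below are hypothetical stand-ins, not the actual ReplicationManager internals.

// Hypothetical sketch of add/remove replica bookkeeping; names are illustrative.
// Assumes java.util.concurrent.ConcurrentHashMap is imported.
private final Map<PartitionId, PartitionInfo> partitionToPartitionInfo = new ConcurrentHashMap<>();
private final Map<String, Set<PartitionInfo>> mountPathToPartitionInfos = new ConcurrentHashMap<>();

boolean addReplicaSketch(ReplicaId replica, PartitionInfo partitionInfo) {
  if (partitionToPartitionInfo.containsKey(replica.getPartitionId())) {
    return false; // adding an already-tracked replica fails
  }
  partitionToPartitionInfo.put(replica.getPartitionId(), partitionInfo);
  mountPathToPartitionInfos.computeIfAbsent(replica.getMountPath(), k -> ConcurrentHashMap.newKeySet())
      .add(partitionInfo);
  // ...each RemoteReplicaInfo would also be assigned to a ReplicaThread here...
  return true;
}

boolean removeReplicaSketch(ReplicaId replica) {
  PartitionInfo partitionInfo = partitionToPartitionInfo.remove(replica.getPartitionId());
  if (partitionInfo == null) {
    return false; // removing an untracked replica is a no-op
  }
  mountPathToPartitionInfos.get(replica.getMountPath()).remove(partitionInfo);
  // ...remote replica infos would be detached from their replica threads here...
  return true;
}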
Use of com.github.ambry.config.VerifiableProperties in project ambry by linkedin.
The class ReplicationTest, method limitMaxPartitionCountPerRequestTest.
/**
 * Tests that the max partition count per request is honored in {@link ReplicaThread} when there are too many
 * partitions to replicate from the remote node.
 * @throws Exception
 */
@Test
public void limitMaxPartitionCountPerRequestTest() throws Exception {
  MockClusterMap clusterMap = new MockClusterMap();
  Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
  MockHost localHost = localAndRemoteHosts.getFirst();
  MockHost remoteHost = localAndRemoteHosts.getSecond();
  List<PartitionId> partitionIds = clusterMap.getAllPartitionIds(null);
  for (PartitionId partitionId : partitionIds) {
    // Add 5 messages to each partition and place them on the remote host only.
    addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 5);
  }
  StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
  MockStoreKeyConverterFactory mockStoreKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  mockStoreKeyConverterFactory.setReturnInputIfAbsent(true);
  mockStoreKeyConverterFactory.setConversionMap(new HashMap<>());
  // Set batchSize to 10 so that all messages from one partition are fetched within a single replication cycle.
  int batchSize = 10;
  StoreKeyConverter storeKeyConverter = mockStoreKeyConverterFactory.getStoreKeyConverter();
  Transformer transformer = new ValidatingTransformer(storeKeyFactory, storeKeyConverter);
  // Set the max partition count per request to 5, which forces the thread to replicate the replicas in two cycles.
  // (There are 10 partitions to replicate, so they will be replicated in two batches.)
  ReplicationConfig initialReplicationConfig = replicationConfig;
  properties.setProperty("replication.max.partition.count.per.request", String.valueOf(5));
  replicationConfig = new ReplicationConfig(new VerifiableProperties(properties));
  CountDownLatch replicationCompleted = new CountDownLatch(partitionIds.size());
  AtomicReference<Exception> exception = new AtomicReference<>();
  Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread =
      getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer,
          (store, messageInfos) -> {
            try {
              replicationCompleted.countDown();
              // For each partition, replication should complete within a single cycle (one fetch should suffice),
              // so shut down the local store once the blobs are written. This avoids unnecessary metadata requests
              // to the remote host.
              store.shutdown();
            } catch (Exception e) {
              exception.set(e);
            }
          }, null);
  ReplicaThread replicaThread = replicasAndThread.getSecond();
  Thread thread = Utils.newThread(replicaThread, false);
  thread.start();
  assertTrue("Replication didn't complete within 10 secs", replicationCompleted.await(10, TimeUnit.SECONDS));
  // Verify that the number of replicas per metadata request is limited to 5 (there are 10 replicas to replicate,
  // so they are split into 2 small batches and replicated in separate requests).
  assertEquals("There should be 2 metadata requests and each has 5 replicas to replicate", Arrays.asList(5, 5),
      remoteHost.replicaCountPerRequestTracker);
  // Shut down.
  replicaThread.shutdown();
  if (exception.get() != null) {
    throw exception.get();
  }
  replicationConfig = initialReplicationConfig;
}
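The batching behavior asserted above can be pictured with a small sketch: replicas destined for one remote node are chunked into sub-lists of at most replication.max.partition.count.per.request before each metadata request. The helper below is illustrative, not the actual ReplicaThread code.

// Hedged sketch of per-request batching; not the actual ReplicaThread logic.
static <T> List<List<T>> splitIntoBatches(List<T> replicas, int maxPerRequest) {
  List<List<T>> batches = new ArrayList<>();
  for (int start = 0; start < replicas.size(); start += maxPerRequest) {
    batches.add(replicas.subList(start, Math.min(start + maxPerRequest, replicas.size())));
  }
  return batches;
}
// With 10 replicas and a limit of 5 this yields two batches of 5 each,
// matching the Arrays.asList(5, 5) expectation in the assert above.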
Use of com.github.ambry.config.VerifiableProperties in project ambry by linkedin.
The class ReplicationTest, method replicaThreadSleepTest.
@Test
public void replicaThreadSleepTest() throws Exception {
  MockClusterMap clusterMap = new MockClusterMap();
  Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
  MockHost localHost = localAndRemoteHosts.getFirst();
  MockHost remoteHost = localAndRemoteHosts.getSecond();
  long expectedThrottleDurationMs =
      localHost.dataNodeId.getDatacenterName().equals(remoteHost.dataNodeId.getDatacenterName())
          ? replicationConfig.replicationIntraReplicaThreadThrottleSleepDurationMs
          : replicationConfig.replicationInterReplicaThreadThrottleSleepDurationMs;
  MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  storeKeyConverterFactory.setConversionMap(new HashMap<>());
  storeKeyConverterFactory.setReturnInputIfAbsent(true);
  MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter =
      storeKeyConverterFactory.getStoreKeyConverter();
  StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
  Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
  int batchSize = 4;
  Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread =
      getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer,
          null, null);
  Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate = replicasAndThread.getFirst();
  ReplicaThread replicaThread = replicasAndThread.getSecond();
  // Populate data: add 1 message to both hosts.
  for (PartitionId partitionId : clusterMap.getAllPartitionIds(null)) {
    addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(localHost, remoteHost), 1);
  }
  // This test verifies replica thread throttling and idling in the following steps:
  // 1. All replicas are in sync; thread-level sleep and replica quarantine are both enabled.
  // 2. Add put messages to some replicas and verify that replication for the replicas remains disabled.
  // 3. Forward the time so replication for the replicas is re-enabled and check that replication resumes.
  // 4. Add more put messages to ensure replication happens continuously when needed and is throttled appropriately.
  // 1. Verify that the replica thread sleeps and replicas are temporarily disabled when all replicas are in sync.
  List<List<RemoteReplicaInfo>> replicasToReplicateList = new ArrayList<>(replicasToReplicate.values());
  // Replicate is called and time is moved forward to prepare the replicas for testing.
  replicaThread.replicate();
  time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs + 1);
  long currentTimeMs = time.milliseconds();
  replicaThread.replicate();
  for (List<RemoteReplicaInfo> replicaInfos : replicasToReplicateList) {
    for (RemoteReplicaInfo replicaInfo : replicaInfos) {
      assertEquals("Unexpected re-enable replication time",
          currentTimeMs + replicationConfig.replicationSyncedReplicaBackoffDurationMs,
          replicaInfo.getReEnableReplicationTime());
    }
  }
  currentTimeMs = time.milliseconds();
  replicaThread.replicate();
  assertEquals("Replicas are in sync, replica thread should sleep by replication.thread.idle.sleep.duration.ms",
      currentTimeMs + replicationConfig.replicationReplicaThreadIdleSleepDurationMs, time.milliseconds());
  // 2. Add 3 messages to a partition on the remote host only and verify that replication for all replicas
  // remains disabled.
  PartitionId partitionId = clusterMap.getWritablePartitionIds(null).get(0);
  addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 3);
  int[] missingKeys = new int[replicasToReplicate.get(remoteHost.dataNodeId).size()];
  for (int i = 0; i < missingKeys.length; i++) {
    missingKeys[i] = replicasToReplicate.get(remoteHost.dataNodeId)
        .get(i)
        .getReplicaId()
        .getPartitionId()
        .isEqual(partitionId.toPathString()) ? 3 : 0;
  }
  currentTimeMs = time.milliseconds();
  replicaThread.replicate();
  assertEquals("Replication for all replicas should be disabled and the thread should sleep",
      currentTimeMs + replicationConfig.replicationReplicaThreadIdleSleepDurationMs, time.milliseconds());
  assertMissingKeys(missingKeys, batchSize, replicaThread, remoteHost, replicasToReplicate);
  // 3. Forward the time, run replicate and verify the replication.
  time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs);
  replicaThread.replicate();
  missingKeys = new int[replicasToReplicate.get(remoteHost.dataNodeId).size()];
  assertMissingKeys(missingKeys, batchSize, replicaThread, remoteHost, replicasToReplicate);
  // Since setting remoteReplicaInfo::setReEnableReplicationTime was moved inside ReplicaThread::exchangeMetadata,
  // and assertMissingKeys() above performs exchangeMetadata() for replicas that are up to date, each replica will
  // have its re-enable replication time pushed out by replicationSyncedReplicaBackoffDurationMs. Forward the time here.
  time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs);
  // 4. Add more put messages and verify that replication continues and is throttled appropriately.
  addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(localHost, remoteHost), 3);
  currentTimeMs = time.milliseconds();
  replicaThread.replicate();
  assertEquals("Replica thread should sleep exactly " + expectedThrottleDurationMs + " ms since the remote has a new token",
      currentTimeMs + expectedThrottleDurationMs, time.milliseconds());
  assertMissingKeys(missingKeys, batchSize, replicaThread, remoteHost, replicasToReplicate);
  // As above, assertMissingKeys() pushed out the re-enable replication time, so forward the time again.
  time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs);
  // Verify that throttling on the replica thread is disabled when the relevant configs are 0.
  Properties properties = new Properties();
  properties.setProperty("replication.intra.replica.thread.throttle.sleep.duration.ms", "0");
  properties.setProperty("replication.inter.replica.thread.throttle.sleep.duration.ms", "0");
  replicationConfig = new ReplicationConfig(new VerifiableProperties(properties));
  replicasAndThread =
      getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer,
          null, null);
  replicaThread = replicasAndThread.getSecond();
  currentTimeMs = time.milliseconds();
  replicaThread.replicate();
  assertEquals("Replica thread should not sleep when throttling is disabled and replicas are out of sync",
      currentTimeMs, time.milliseconds());
}
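To summarize the sleep behavior this test walks through, here is an illustrative decision sketch; the method and parameter names are assumptions, not the actual ReplicaThread code.

// Hedged sketch of when the replica thread sleeps after a replication cycle.
// Uses the same time and replicationConfig members as the test above.
void maybeSleepSketch(boolean allCaughtUp, boolean remoteTokenAdvanced, boolean sameDatacenter)
    throws InterruptedException {
  if (allCaughtUp) {
    // All replicas in sync: idle by the configured idle sleep duration.
    time.sleep(replicationConfig.replicationReplicaThreadIdleSleepDurationMs);
  } else if (remoteTokenAdvanced) {
    // Remote made progress: throttle by the intra- or inter-datacenter duration;
    // a configured value of 0 disables throttling entirely.
    long throttleMs = sameDatacenter
        ? replicationConfig.replicationIntraReplicaThreadThrottleSleepDurationMs
        : replicationConfig.replicationInterReplicaThreadThrottleSleepDurationMs;
    if (throttleMs > 0) {
      time.sleep(throttleMs);
    }
  }
}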
Use of com.github.ambry.config.VerifiableProperties in project ambry by linkedin.
The class ReplicationTest, method replicaFromStandbyToLeaderTest.
/**
 * Tests the state transition in the replication manager from STANDBY to LEADER.
 * Test setup: when creating partitions, make sure there is exactly one replica in LEADER state in each datacenter.
 * Test condition: when a partition on the current node moves from STANDBY to LEADER, verify that the in-memory map
 * storing partition to peer leader replicas is updated correctly.
 * @throws Exception
 */
@Test
public void replicaFromStandbyToLeaderTest() throws Exception {
  MockClusterMap clusterMap = new MockClusterMap();
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  MockHelixParticipant.metricRegistry = new MetricRegistry();
  MockHelixParticipant mockHelixParticipant = new MockHelixParticipant(clusterMapConfig);
  ReplicationConfig initialReplicationConfig = replicationConfig;
  properties.setProperty("replication.model.across.datacenters", "LEADER_BASED");
  replicationConfig = new ReplicationConfig(new VerifiableProperties(properties));
  Pair<StorageManager, ReplicationManager> managers =
      createStorageManagerAndReplicationManager(clusterMap, clusterMapConfig, mockHelixParticipant);
  StorageManager storageManager = managers.getFirst();
  MockReplicationManager replicationManager = (MockReplicationManager) managers.getSecond();
  List<ReplicaId> replicaIds = clusterMap.getReplicaIds(replicationManager.dataNodeId);
  for (ReplicaId replicaId : replicaIds) {
    MockReplicaId mockReplicaId = (MockReplicaId) replicaId;
    if (mockReplicaId.getReplicaState() == ReplicaState.LEADER) {
      PartitionId existingPartition = mockReplicaId.getPartitionId();
      mockHelixParticipant.onPartitionBecomeLeaderFromStandby(existingPartition.toPathString());
      Set<ReplicaId> peerLeaderReplicasInReplicationManager =
          replicationManager.leaderBasedReplicationAdmin.getLeaderPartitionToPeerLeaderReplicas()
              .get(existingPartition.toPathString());
      Set<ReplicaId> peerLeaderReplicasInClusterMap =
          new HashSet<>(existingPartition.getReplicaIdsByState(ReplicaState.LEADER, null));
      peerLeaderReplicasInClusterMap.remove(mockReplicaId);
      assertThat("Mismatch in the set of peer leader replicas stored by the replication manager and the cluster map",
          peerLeaderReplicasInReplicationManager, is(peerLeaderReplicasInClusterMap));
    }
  }
  storageManager.shutdown();
  replicationConfig = initialReplicationConfig;
}
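The map update verified here can be summarized with a short sketch that mirrors the test's own computation of peer leaders; the method name and map parameter are illustrative, not the actual LeaderBasedReplicationAdmin code.

// Hedged sketch of the STANDBY -> LEADER bookkeeping; not actual Ambry code.
void onBecomeLeaderSketch(PartitionId partition, ReplicaId localReplica,
    Map<String, Set<ReplicaId>> leaderPartitionToPeerLeaderReplicas) {
  // Peer leaders = all LEADER replicas of the partition, minus the local replica.
  Set<ReplicaId> peerLeaders = new HashSet<>(partition.getReplicaIdsByState(ReplicaState.LEADER, null));
  peerLeaders.remove(localReplica);
  leaderPartitionToPeerLeaderReplicas.put(partition.toPathString(), peerLeaders);
}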