Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
Class ReplicationTest, method replicaThreadLifeVersionLocalLessThanRemote_FinalState_NotDelete.
/**
* Tests the case where the lifeVersion in local is less than the lifeVersion in remote and the final state is not
* a delete; it is a Put, TtlUpdate or Undelete.
* @throws Exception
*/
@Test
public void replicaThreadLifeVersionLocalLessThanRemote_FinalState_NotDelete() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
storeKeyConverter.setConversionMap(conversionMap);
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
Map<PartitionId, List<StoreKey>> idsByPartition = new HashMap<>();
Map<PartitionId, StoreKey> idsToBeIgnoredByPartition = new HashMap<>();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
// For each partition, add 5 Puts with lifeVersion 1 to the remote host, each followed by a TtlUpdate at lifeVersion 1.
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), (short) 1, 5);
for (StoreKey id : ids) {
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), UPDATED_EXPIRY_TIME_MS, (short) 1);
}
// 1 Missing
StoreKey id = ids.get(0);
// 2 P0 -> [U1, T1]
id = ids.get(1);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
// 3 P0, T0 -> [U1]
id = ids.get(2);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Arrays.asList(localHost), UPDATED_EXPIRY_TIME_MS, (short) 0);
// 4 P0, T0, D0 -> [U1]
id = ids.get(3);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Arrays.asList(localHost), UPDATED_EXPIRY_TIME_MS, (short) 0);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, UPDATED_EXPIRY_TIME_MS);
// 5 P0, D0 -> [U1]
id = ids.get(4);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, UPDATED_EXPIRY_TIME_MS);
}
int batchSize = 100;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
ReplicaThread replicaThread = replicasAndThread.getSecond();
// There is one missing key
List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
for (int i = 0; i < response.size(); i++) {
assertEquals(1, response.get(i).missingStoreMessages.size());
remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
}
replicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos, response, false);
// Before the metadata exchange, each partition on the local host has 8 message infos. Metadata exchange plus fixMissingStoreKeys adds another 8.
for (Map.Entry<PartitionId, List<MessageInfo>> localInfoEntry : localHost.infosByPartition.entrySet()) {
assertEquals("MessageInfo number mismatch", 16, localInfoEntry.getValue().size());
}
for (Map.Entry<PartitionId, List<StoreKey>> idsEntry : idsByPartition.entrySet()) {
List<MessageInfo> remoteInfos = remoteHost.infosByPartition.get(idsEntry.getKey());
List<MessageInfo> localInfos = localHost.infosByPartition.get(idsEntry.getKey());
for (StoreKey id : idsEntry.getValue()) {
if (!idsToBeIgnoredByPartition.get(idsEntry.getKey()).equals(id)) {
MessageInfo localInfo = getMergedMessageInfo(id, localInfos);
MessageInfo remoteInfo = getMergedMessageInfo(id, remoteInfos);
assertTrue(localInfo.isDeleted());
assertTrue(remoteInfo.isDeleted());
assertTrue(localInfo.isTtlUpdated());
assertTrue(remoteInfo.isTtlUpdated());
assertEquals(localInfo.getLifeVersion(), remoteInfo.getLifeVersion());
}
}
}
}
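This test (and the _FinalState_Delete variant later on this page) summarizes a blob's final state through getMergedMessageInfo, which folds all of a key's MessageInfos into one view before asserting on isDeleted(), isTtlUpdated() and getLifeVersion(). The real helper lives in the test utilities and is not shown here; the sketch below is only an illustration of a plausible merge rule (highest lifeVersion wins, delete flag taken from records at that lifeVersion, TtlUpdate flag accumulated), and the holder class and method names are assumptions.
import com.github.ambry.store.MessageInfo;
import com.github.ambry.store.StoreKey;
import java.util.List;

/** Illustrative helper that folds a blob's MessageInfos into one final-state view. */
final class MergedBlobState {
  boolean deleted;
  boolean ttlUpdated;
  short lifeVersion = -1;

  static MergedBlobState forKey(StoreKey key, List<MessageInfo> infos) {
    MergedBlobState state = new MergedBlobState();
    for (MessageInfo info : infos) {
      if (!info.getStoreKey().equals(key)) {
        continue;
      }
      if (info.getLifeVersion() > state.lifeVersion) {
        // records at a higher lifeVersion supersede the delete flag of older records
        state.lifeVersion = info.getLifeVersion();
        state.deleted = info.isDeleted();
      } else if (info.getLifeVersion() == state.lifeVersion) {
        state.deleted |= info.isDeleted();
      }
      // a TtlUpdate, once applied, is assumed to survive lifeVersion bumps
      state.ttlUpdated |= info.isTtlUpdated();
    }
    return state;
  }
}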
Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
Class ReplicationTest, method addAndRemoveReplicaTest.
/**
* Tests dynamically adding/removing a replica in {@link ReplicationManager}.
* @throws Exception
*/
@Test
public void addAndRemoveReplicaTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
StoreConfig storeConfig = new StoreConfig(verifiableProperties);
DataNodeId dataNodeId = clusterMap.getDataNodeIds().get(0);
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
StorageManager storageManager = new StorageManager(storeConfig, new DiskManagerConfig(verifiableProperties), Utils.newScheduler(1, true), new MetricRegistry(), null, clusterMap, dataNodeId, null, null, new MockTime(), null, new InMemAccountService(false, false));
storageManager.start();
MockReplicationManager replicationManager = new MockReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, clusterMap, dataNodeId, storeKeyConverterFactory, null);
ReplicaId replicaToTest = clusterMap.getReplicaIds(dataNodeId).get(0);
// Attempting to add a replica that already exists should fail
assertFalse("Adding an existing replica should fail", replicationManager.addReplica(replicaToTest));
// Create a brand new replica that sits on one of the disks of the data node and add it to the replication manager
PartitionId newPartition = clusterMap.createNewPartition(clusterMap.getDataNodes());
for (ReplicaId replicaId : newPartition.getReplicaIds()) {
if (replicaId.getDataNodeId() == dataNodeId) {
replicaToTest = replicaId;
break;
}
}
// Before adding the replica, partitionToPartitionInfo and mountPathToPartitionInfos should not contain the new partition
assertFalse("partitionToPartitionInfo should not contain new partition", replicationManager.getPartitionToPartitionInfoMap().containsKey(newPartition));
for (PartitionInfo partitionInfo : replicationManager.getMountPathToPartitionInfosMap().get(replicaToTest.getMountPath())) {
assertNotSame("mountPathToPartitionInfos should not contain new partition", partitionInfo.getPartitionId(), newPartition);
}
// Add new replica to replication manager
assertTrue("Adding new replica to replication manager should succeed", replicationManager.addReplica(replicaToTest));
// After adding the replica, partitionToPartitionInfo and mountPathToPartitionInfos should contain the new partition
assertTrue("partitionToPartitionInfo should contain new partition", replicationManager.getPartitionToPartitionInfoMap().containsKey(newPartition));
Optional<PartitionInfo> newPartitionInfo = replicationManager.getMountPathToPartitionInfosMap().get(replicaToTest.getMountPath()).stream().filter(partitionInfo -> partitionInfo.getPartitionId() == newPartition).findAny();
assertTrue("mountPathToPartitionInfos should contain new partition info", newPartitionInfo.isPresent());
// Verify that all remoteReplicaInfos of the newly added replica have an assigned thread
for (RemoteReplicaInfo remoteReplicaInfo : newPartitionInfo.get().getRemoteReplicaInfos()) {
assertNotNull("The remote replica should be assigned to one replica thread", remoteReplicaInfo.getReplicaThread());
}
// Remove replica
assertTrue("Remove replica from replication manager should succeed", replicationManager.removeReplica(replicaToTest));
// Verify the replica is removed, so partitionToPartitionInfo and mountPathToPartitionInfos should not contain the new partition
assertFalse("partitionToPartitionInfo should not contain new partition", replicationManager.getPartitionToPartitionInfoMap().containsKey(newPartition));
for (PartitionInfo partitionInfo : replicationManager.getMountPathToPartitionInfosMap().get(replicaToTest.getMountPath())) {
assertNotSame("mountPathToPartitionInfos should not contain new partition", partitionInfo.getPartitionId(), newPartition);
}
// Verify that none of the remoteReplicaInfos has an assigned thread
for (RemoteReplicaInfo remoteReplicaInfo : newPartitionInfo.get().getRemoteReplicaInfos()) {
assertNull("The remote replica should be assigned to one replica thread", remoteReplicaInfo.getReplicaThread());
}
// Removing a replica that no longer exists should be a no-op
ReplicationManager mockManager = Mockito.spy(replicationManager);
assertFalse("Remove non-existent replica should return false", replicationManager.removeReplica(replicaToTest));
verify(mockManager, never()).removeRemoteReplicaInfoFromReplicaThread(anyList());
storageManager.shutdown();
}
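For readers using these APIs outside the test, the snippet below shows how a caller might react to the boolean results of addReplica/removeReplica, mirroring the assertions above. The method names come from the test itself; the surrounding error handling is an assumption for illustration, not Ambry's actual server bootstrap code.
import com.github.ambry.clustermap.ReplicaId;
import com.github.ambry.replication.ReplicationManager;

final class ReplicaLifecycleHelper {
  // addReplica returns false when the replica is already tracked by the replication manager
  static void attach(ReplicationManager replicationManager, ReplicaId replica) {
    if (!replicationManager.addReplica(replica)) {
      throw new IllegalStateException("Replica is already tracked: " + replica);
    }
  }

  // removeReplica returns false when the replica is not tracked; treating that as a no-op is an assumption
  static void detach(ReplicationManager replicationManager, ReplicaId replica) {
    if (!replicationManager.removeReplica(replica)) {
      System.out.println("Replica was not tracked, nothing to remove: " + replica);
    }
  }
}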
Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
Class ReplicationTest, method replicaThreadLifeVersionLocalLessThanRemote_FinalState_Delete.
/**
* Tests the case where the lifeVersion in local is less than the lifeVersion in remote and the final state from
* remote is a delete.
* @throws Exception
*/
@Test
public void replicaThreadLifeVersionLocalLessThanRemote_FinalState_Delete() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
storeKeyConverter.setConversionMap(conversionMap);
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
Map<PartitionId, List<StoreKey>> idsByPartition = new HashMap<>();
Map<PartitionId, StoreKey> idsToBeIgnoredByPartition = new HashMap<>();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
// Per partition: 1) blob missing from local, 2) blob deleted in local at a lower lifeVersion, 3) blob put in local (with or without TtlUpdate)
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), 1);
// Add a Put and a Delete to remote but nothing to local
StoreKey id = ids.get(0);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
idsToBeIgnoredByPartition.put(partitionId, id);
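// Blobs recorded in idsToBeIgnoredByPartition are skipped by the per-key verification loop at the end of this test.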
// Add a Put to both hosts, then a Delete to local at lifeVersion 0 and a Delete to remote at lifeVersion 1
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, EXPIRY_TIME_MS);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
// Add a Put to both hosts and a Delete at lifeVersion 1 to remote only
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
// Add a Put to both hosts, a TtlUpdate at lifeVersion 0 to local, and a Delete at lifeVersion 1 to remote
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), UPDATED_EXPIRY_TIME_MS, (short) 0);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
// Add a Put to both hosts, a Delete at lifeVersion 0 to local, then an Undelete at lifeVersion 1 and a Delete at lifeVersion 2 to remote.
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, EXPIRY_TIME_MS);
addUndeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 2, EXPIRY_TIME_MS);
ids.add(id);
idsByPartition.put(partitionId, ids);
}
int batchSize = 100;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
ReplicaThread replicaThread = replicasAndThread.getSecond();
// The final state of every blob is a delete, so there are no missing keys.
List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
for (int i = 0; i < response.size(); i++) {
assertEquals(0, response.get(i).missingStoreMessages.size());
remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
}
// Before the metadata exchange, each partition on the local host has 7 message infos. Metadata exchange adds another 4 (all deletes).
for (Map.Entry<PartitionId, List<MessageInfo>> localInfoEntry : localHost.infosByPartition.entrySet()) {
assertEquals("MessageInfo number mismatch", 11, localInfoEntry.getValue().size());
}
for (Map.Entry<PartitionId, List<StoreKey>> idsEntry : idsByPartition.entrySet()) {
List<MessageInfo> remoteInfos = remoteHost.infosByPartition.get(idsEntry.getKey());
List<MessageInfo> localInfos = localHost.infosByPartition.get(idsEntry.getKey());
for (StoreKey id : idsEntry.getValue()) {
if (!idsToBeIgnoredByPartition.get(idsEntry.getKey()).equals(id)) {
MessageInfo localInfo = getMergedMessageInfo(id, localInfos);
MessageInfo remoteInfo = getMergedMessageInfo(id, remoteInfos);
assertTrue(localInfo.isDeleted());
assertTrue(remoteInfo.isDeleted());
assertEquals(localInfo.getLifeVersion(), remoteInfo.getLifeVersion());
}
}
}
}
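The core rule these two lifeVersion tests exercise is that a record carrying a strictly higher lifeVersion on the remote replica supersedes whatever the local replica has. The snippet below is a minimal sketch of that comparison for the delete case; it is illustrative pseudologic under that assumption, not the actual ReplicaThread implementation, and the class and method names are made up.
import com.github.ambry.store.MessageInfo;

final class LifeVersionRules {
  /**
   * Decide whether a delete seen on the remote replica should be applied locally.
   * Illustrative only; the real decision in ReplicaThread also accounts for TtlUpdates and Undeletes.
   */
  static boolean shouldApplyRemoteDelete(MessageInfo remoteDelete, MessageInfo mergedLocalState) {
    if (mergedLocalState == null) {
      // blob does not exist locally; a final-state delete can be recorded without fetching the blob
      return true;
    }
    // a higher lifeVersion always wins; at the same lifeVersion a delete wins over a not-yet-deleted state
    return remoteDelete.getLifeVersion() > mergedLocalState.getLifeVersion()
        || (remoteDelete.getLifeVersion() == mergedLocalState.getLifeVersion() && !mergedLocalState.isDeleted());
  }
}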
Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
Class ReplicationTest, method limitMaxPartitionCountPerRequestTest.
/**
* Test that max partition count per request is honored in {@link ReplicaThread} if there are too many partitions to
* replicate from the remote node.
* @throws Exception
*/
@Test
public void limitMaxPartitionCountPerRequestTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
List<PartitionId> partitionIds = clusterMap.getAllPartitionIds(null);
for (PartitionId partitionId : partitionIds) {
// add 5 messages to each partition and place them on the remote host only
addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 5);
}
StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
MockStoreKeyConverterFactory mockStoreKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
mockStoreKeyConverterFactory.setReturnInputIfAbsent(true);
mockStoreKeyConverterFactory.setConversionMap(new HashMap<>());
// we set batchSize to 10 in order to get all messages from one partition within a single replication cycle
int batchSize = 10;
StoreKeyConverter storeKeyConverter = mockStoreKeyConverterFactory.getStoreKeyConverter();
Transformer transformer = new ValidatingTransformer(storeKeyFactory, storeKeyConverter);
// we set the max partition count per request to 5, which forces the thread to replicate the replicas in two cycles
// (note that there are 10 partitions to replicate, so they are handled in two batches)
ReplicationConfig initialReplicationConfig = replicationConfig;
properties.setProperty("replication.max.partition.count.per.request", String.valueOf(5));
replicationConfig = new ReplicationConfig(new VerifiableProperties(properties));
CountDownLatch replicationCompleted = new CountDownLatch(partitionIds.size());
AtomicReference<Exception> exception = new AtomicReference<>();
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, (store, messageInfos) -> {
try {
replicationCompleted.countDown();
// for each partition, replication should complete within a single cycle (one fetch should suffice), so
// we shut down the local store once the blobs are written. This avoids unnecessary metadata requests
// sent to the remote host.
store.shutdown();
} catch (Exception e) {
exception.set(e);
}
}, null);
ReplicaThread replicaThread = replicasAndThread.getSecond();
Thread thread = Utils.newThread(replicaThread, false);
thread.start();
assertTrue("Replication didn't complete within 10 secs", replicationCompleted.await(10, TimeUnit.SECONDS));
// verify the # of replicas per metadata request is limited to 5 (note that there are 10 replicas to replicate;
// they are split into 2 small batches and replicated in separate requests)
assertEquals("There should be 2 metadata requests and each has 5 replicas to replicate", Arrays.asList(5, 5), remoteHost.replicaCountPerRequestTracker);
// shutdown
replicaThread.shutdown();
if (exception.get() != null) {
throw exception.get();
}
replicationConfig = initialReplicationConfig;
}
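The test relies on replication.max.partition.count.per.request to cap how many replicas go into one metadata request. The chunking behavior it asserts (10 replicas, at most 5 per request, hence two requests of 5) can be pictured with the generic sketch below; this is not the actual ReplicaThread batching code, and the helper name is made up for illustration.
import java.util.ArrayList;
import java.util.List;

final class RequestBatching {
  /** Split the replicas assigned to a remote node into batches of at most maxPerRequest (illustrative). */
  static <T> List<List<T>> batch(List<T> replicas, int maxPerRequest) {
    List<List<T>> batches = new ArrayList<>();
    for (int start = 0; start < replicas.size(); start += maxPerRequest) {
      batches.add(new ArrayList<>(replicas.subList(start, Math.min(start + maxPerRequest, replicas.size()))));
    }
    return batches;
  }
}
With 10 replicas and maxPerRequest = 5 this yields two batches of 5, matching the replicaCountPerRequestTracker assertion above.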
Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
Class ReplicationTest, method replicaThreadSleepTest.
@Test
public void replicaThreadSleepTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
long expectedThrottleDurationMs = localHost.dataNodeId.getDatacenterName().equals(remoteHost.dataNodeId.getDatacenterName()) ? replicationConfig.replicationIntraReplicaThreadThrottleSleepDurationMs : replicationConfig.replicationInterReplicaThreadThrottleSleepDurationMs;
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
int batchSize = 4;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate = replicasAndThread.getFirst();
ReplicaThread replicaThread = replicasAndThread.getSecond();
// populate data: add 1 message to each partition on both hosts.
for (PartitionId partitionId : clusterMap.getAllPartitionIds(null)) {
addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(localHost, remoteHost), 1);
}
// tests to verify the replica thread's throttling and idling functions in the following steps:
// 1. all replicas are in sync; thread level sleep and replica quarantine are both enabled.
// 2. add put messages to some replica and verify that replication for the replicas remains disabled.
// 3. forward the time so replication for the replicas is re-enabled and check that replication resumes.
// 4. add more put messages to ensure replication happens continuously when needed and is throttled appropriately.
// 1. verify that the replica thread sleeps and replicas are temporarily disabled when all replicas are synced.
List<List<RemoteReplicaInfo>> replicasToReplicateList = new ArrayList<>(replicasToReplicate.values());
// replicate is called and time is moved forward to prepare the replicas for testing.
replicaThread.replicate();
time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs + 1);
long currentTimeMs = time.milliseconds();
replicaThread.replicate();
for (List<RemoteReplicaInfo> replicaInfos : replicasToReplicateList) {
for (RemoteReplicaInfo replicaInfo : replicaInfos) {
assertEquals("Unexpected re-enable replication time", currentTimeMs + replicationConfig.replicationSyncedReplicaBackoffDurationMs, replicaInfo.getReEnableReplicationTime());
}
}
currentTimeMs = time.milliseconds();
replicaThread.replicate();
assertEquals("Replicas are in sync, replica thread should sleep by replication.thread.idle.sleep.duration.ms", currentTimeMs + replicationConfig.replicationReplicaThreadIdleSleepDurationMs, time.milliseconds());
// 2. add 3 messages to a partition on the remote host only and verify that replication for all replicas is disabled.
PartitionId partitionId = clusterMap.getWritablePartitionIds(null).get(0);
addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 3);
int[] missingKeys = new int[replicasToReplicate.get(remoteHost.dataNodeId).size()];
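// expect 3 missing keys for the partition that just received the puts and 0 for every other replica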
for (int i = 0; i < missingKeys.length; i++) {
missingKeys[i] = replicasToReplicate.get(remoteHost.dataNodeId).get(i).getReplicaId().getPartitionId().isEqual(partitionId.toPathString()) ? 3 : 0;
}
currentTimeMs = time.milliseconds();
replicaThread.replicate();
assertEquals("Replication for all replicas should be disabled and the thread should sleep", currentTimeMs + replicationConfig.replicationReplicaThreadIdleSleepDurationMs, time.milliseconds());
assertMissingKeys(missingKeys, batchSize, replicaThread, remoteHost, replicasToReplicate);
// 3. forward the time and run replicate and verify the replication.
time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs);
replicaThread.replicate();
missingKeys = new int[replicasToReplicate.get(remoteHost.dataNodeId).size()];
assertMissingKeys(missingKeys, batchSize, replicaThread, remoteHost, replicasToReplicate);
// Since setting remoteReplicaInfo::setReEnableReplicationTime now happens inside replicaThread::exchangeMetadata, and
// assertMissingKeys() above runs exchangeMetadata() for replicas that are up to date, each replica has its
// re-enable replication time pushed forward by replicationSyncedReplicaBackoffDurationMs. Forward the time here.
time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs);
// 4. add more put messages and verify that replication continues and is throttled appropriately.
addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(localHost, remoteHost), 3);
currentTimeMs = time.milliseconds();
replicaThread.replicate();
assertEquals("Replica thread should sleep exactly " + expectedThrottleDurationMs + " since remote has new token", currentTimeMs + expectedThrottleDurationMs, time.milliseconds());
assertMissingKeys(missingKeys, batchSize, replicaThread, remoteHost, replicasToReplicate);
// Since setting remoteReplicaInfo::setReEnableReplicationTime now happens inside replicaThread::exchangeMetadata, and
// assertMissingKeys() above runs exchangeMetadata() for replicas that are up to date, each replica has its
// re-enable replication time pushed forward by replicationSyncedReplicaBackoffDurationMs. Forward the time here.
time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs);
// verify that throttling on the replica thread is disabled when relevant configs are 0.
Properties properties = new Properties();
properties.setProperty("replication.intra.replica.thread.throttle.sleep.duration.ms", "0");
properties.setProperty("replication.inter.replica.thread.throttle.sleep.duration.ms", "0");
replicationConfig = new ReplicationConfig(new VerifiableProperties(properties));
replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
replicaThread = replicasAndThread.getSecond();
currentTimeMs = time.milliseconds();
replicaThread.replicate();
assertEquals("Replica thread should not sleep when throttling is disabled and replicas are out of sync", currentTimeMs, time.milliseconds());
}
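The expected sleep asserted above comes from the throttle configs set through replication.intra.replica.thread.throttle.sleep.duration.ms and replication.inter.replica.thread.throttle.sleep.duration.ms. The small helper below simply restates the ternary from the top of the test as a method; it uses only the fields already referenced in the test, and the class and method names are illustrative.
import com.github.ambry.clustermap.DataNodeId;
import com.github.ambry.config.ReplicationConfig;

final class ThrottleDurations {
  /** Pick the throttle sleep that applies to a local/remote pair, mirroring the ternary used in the test. */
  static long expectedThrottleDurationMs(DataNodeId local, DataNodeId remote, ReplicationConfig config) {
    boolean sameDatacenter = local.getDatacenterName().equals(remote.getDatacenterName());
    return sameDatacenter ? config.replicationIntraReplicaThreadThrottleSleepDurationMs
        : config.replicationInterReplicaThreadThrottleSleepDurationMs;
  }
}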