use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
the class ReplicationTest method replicationLagMetricAndSyncUpTest.
/**
* Tests {@link ReplicationMetrics#getMaxLagForPartition(PartitionId)}
* @throws Exception
*/
@Test
public void replicationLagMetricAndSyncUpTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
AmbryReplicaSyncUpManager replicaSyncUpService = new AmbryReplicaSyncUpManager(clusterMapConfig);
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost1 = localAndRemoteHosts.getSecond();
// create another remoteHost2 that shares the special partition with localHost and remoteHost1
PartitionId specialPartitionId = clusterMap.getWritablePartitionIds(MockClusterMap.SPECIAL_PARTITION_CLASS).get(0);
MockHost remoteHost2 = new MockHost(specialPartitionId.getReplicaIds().get(2).getDataNodeId(), clusterMap);
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
int batchSize = 4;
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
// add batchSize + 1 messages to remoteHost1 so that two rounds of replication are needed.
addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost1), batchSize + 1);
}
// add batchSize - 1 messages to the remoteHost2 so that localHost can catch up during one cycle of replication
for (ReplicaId replicaId : clusterMap.getReplicaIds(remoteHost2.dataNodeId)) {
addPutMessagesToReplicasOfPartition(replicaId.getPartitionId(), Collections.singletonList(remoteHost2), batchSize - 1);
}
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread1 = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost1, storeKeyConverter, transformer, null, replicaSyncUpService);
Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate1 = replicasAndThread1.getFirst();
ReplicaThread replicaThread1 = replicasAndThread1.getSecond();
// mock Bootstrap-To-Standby transition in ReplicationManager: 1. update store current state; 2. initiate bootstrap
replicasToReplicate1.get(remoteHost1.dataNodeId).forEach(info -> info.getLocalStore().setCurrentState(ReplicaState.BOOTSTRAP));
clusterMap.getReplicaIds(localHost.dataNodeId).forEach(replicaSyncUpService::initiateBootstrap);
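// Note (assumption about AmbryReplicaSyncUpManager): initiateBootstrap registers each local replica with the sync-up
// service so that it can later mark bootstrap complete once replication catches up, which is what the STANDBY
// assertion at the end of this test verifies.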
List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread1.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost1, batchSize), replicasToReplicate1.get(remoteHost1.dataNodeId));
replicaThread1.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost1, batchSize), replicasToReplicate1.get(remoteHost1.dataNodeId), response, false);
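// After one round of batchSize messages, exactly one message per partition remains unreplicated from remoteHost1;
// the expected lag is the size of that remaining message.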
for (PartitionId partitionId : partitionIds) {
List<MessageInfo> allMessageInfos = localAndRemoteHosts.getSecond().infosByPartition.get(partitionId);
long expectedLag = allMessageInfos.subList(batchSize, allMessageInfos.size()).stream().mapToLong(MessageInfo::getSize).sum();
assertEquals("Replication lag doesn't match expected value", expectedLag, replicaThread1.getReplicationMetrics().getMaxLagForPartition(partitionId));
}
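// A second round of metadata exchange and fixing missing keys replicates the remaining message, so the lag drops to 0.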
response = replicaThread1.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost1, batchSize), replicasToReplicate1.get(remoteHost1.dataNodeId));
replicaThread1.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost1, batchSize), replicasToReplicate1.get(remoteHost1.dataNodeId), response, false);
for (PartitionId partitionId : partitionIds) {
assertEquals("Replication lag should equal to 0", 0, replicaThread1.getReplicationMetrics().getMaxLagForPartition(partitionId));
}
// replicate with remoteHost2 to ensure special replica has caught up with enough peers
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread2 = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost2, storeKeyConverter, transformer, null, replicaSyncUpService);
Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate2 = replicasAndThread2.getFirst();
ReplicaThread replicaThread2 = replicasAndThread2.getSecond();
// initiate bootstrap on replica of special partition
RemoteReplicaInfo specialReplicaInfo = replicasToReplicate2.get(remoteHost2.dataNodeId).stream().filter(info -> info.getReplicaId().getPartitionId() == specialPartitionId).findFirst().get();
specialReplicaInfo.getLocalStore().setCurrentState(ReplicaState.BOOTSTRAP);
replicaSyncUpService.initiateBootstrap(specialReplicaInfo.getLocalReplicaId());
response = replicaThread2.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost2, batchSize), replicasToReplicate2.get(remoteHost2.dataNodeId));
replicaThread2.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost2, batchSize), replicasToReplicate2.get(remoteHost2.dataNodeId), response, false);
// verify that the replica of the special partition has completed bootstrap and become standby
assertEquals("Store state is not expected", ReplicaState.STANDBY, specialReplicaInfo.getLocalStore().getCurrentState());
}
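The verifiableProperties field used to build ClusterMapConfig above is a test fixture defined elsewhere in ReplicationTest and not shown in this excerpt. A minimal sketch of how it might be constructed, assuming the standard ClusterMapConfig property keys and purely illustrative values (imports from java.util and com.github.ambry.config omitted, matching the excerpts above):
Properties props = new Properties();
props.setProperty("clustermap.cluster.name", "test-cluster"); // illustrative value
props.setProperty("clustermap.datacenter.name", "DC1"); // illustrative value
props.setProperty("clustermap.host.name", "localhost"); // illustrative value
VerifiableProperties verifiableProperties = new VerifiableProperties(props);
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);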
use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
the class ReplicationTest method replicationPauseTest.
/**
* Tests pausing replication for all and individual partitions. Also tests that replication pauses on a store that is
* not started and resumes when the store is restarted.
* @throws Exception
*/
@Test
public void replicationPauseTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
List<PartitionId> partitionIds = clusterMap.getAllPartitionIds(null);
for (PartitionId partitionId : partitionIds) {
// add 10 messages to the remote host only
addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 10);
}
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
StoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Transformer transformer = new ValidatingTransformer(storeKeyFactory, storeKeyConverter);
int batchSize = 4;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate = replicasAndThread.getFirst();
ReplicaThread replicaThread = replicasAndThread.getSecond();
Map<PartitionId, Integer> progressTracker = new HashMap<>();
PartitionId partitionToResumeFirst = clusterMap.getAllPartitionIds(null).get(0);
PartitionId partitionToShutdownLocally = clusterMap.getAllPartitionIds(null).get(1);
boolean allStopped = false;
boolean onlyOneResumed = false;
boolean allReenabled = false;
boolean shutdownStoreRestarted = false;
Set<PartitionId> expectedPaused = new HashSet<>();
assertEquals("There should be no disabled partitions", expectedPaused, replicaThread.getReplicationDisabledPartitions());
while (true) {
replicaThread.replicate();
boolean replicationDone = true;
for (RemoteReplicaInfo replicaInfo : replicasToReplicate.get(remoteHost.dataNodeId)) {
PartitionId id = replicaInfo.getReplicaId().getPartitionId();
MockFindToken token = (MockFindToken) replicaInfo.getToken();
int lastProgress = progressTracker.computeIfAbsent(id, id1 -> 0);
int currentProgress = token.getIndex();
boolean partDone = currentProgress + 1 == remoteHost.infosByPartition.get(id).size();
if (allStopped || (onlyOneResumed && !id.equals(partitionToResumeFirst)) || (allReenabled && !shutdownStoreRestarted && id.equals(partitionToShutdownLocally))) {
assertEquals("There should have been no progress", lastProgress, currentProgress);
} else if (!partDone) {
assertTrue("There has been no progress", currentProgress > lastProgress);
progressTracker.put(id, currentProgress);
}
replicationDone = replicationDone && partDone;
}
if (!allStopped && !onlyOneResumed && !allReenabled && !shutdownStoreRestarted) {
replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(null), false);
expectedPaused.addAll(clusterMap.getAllPartitionIds(null));
assertEquals("Disabled partitions sets do not match", expectedPaused, replicaThread.getReplicationDisabledPartitions());
allStopped = true;
} else if (!onlyOneResumed && !allReenabled && !shutdownStoreRestarted) {
// resume replication for first partition
replicaThread.controlReplicationForPartitions(Collections.singletonList(partitionIds.get(0)), true);
expectedPaused.remove(partitionIds.get(0));
assertEquals("Disabled partitions sets do not match", expectedPaused, replicaThread.getReplicationDisabledPartitions());
allStopped = false;
onlyOneResumed = true;
} else if (!allReenabled && !shutdownStoreRestarted) {
// re-enable replication for all partitions (including the first, which is already enabled)
replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(null), true);
// shutdown one local store to pause replication against that store
localHost.storesByPartition.get(partitionToShutdownLocally).shutdown();
onlyOneResumed = false;
allReenabled = true;
expectedPaused.clear();
assertEquals("Disabled partitions sets do not match", expectedPaused, replicaThread.getReplicationDisabledPartitions());
} else if (!shutdownStoreRestarted) {
localHost.storesByPartition.get(partitionToShutdownLocally).start();
shutdownStoreRestarted = true;
}
if (replicationDone) {
break;
}
}
Map<PartitionId, List<MessageInfo>> missingInfos = remoteHost.getMissingInfos(localHost.infosByPartition);
for (Map.Entry<PartitionId, List<MessageInfo>> entry : missingInfos.entrySet()) {
assertEquals("No infos should be missing", 0, entry.getValue().size());
}
Map<PartitionId, List<ByteBuffer>> missingBuffers = remoteHost.getMissingBuffers(localHost.buffersByPartition);
for (Map.Entry<PartitionId, List<ByteBuffer>> entry : missingBuffers.entrySet()) {
assertEquals("No buffers should be missing", 0, entry.getValue().size());
}
}
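For quick reference, the pause/resume surface exercised above, reusing replicaThread, clusterMap and partitionIds from the test (a condensed excerpt, not a standalone program):
replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(null), false); // pause replication for every partition
replicaThread.controlReplicationForPartitions(Collections.singletonList(partitionIds.get(0)), true); // resume a single partition
Set<PartitionId> stillPaused = replicaThread.getReplicationDisabledPartitions(); // all partitions except the resumed one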
use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
the class ReplicationTest method replicaThreadLifeVersionLocalLessThanRemote_FinalState_NotDelete.
/**
* Tests the case where the local lifeVersion is less than the remote lifeVersion and the final remote state is not
* Delete; it would be Put, TtlUpdate or Undelete.
* @throws Exception
*/
@Test
public void replicaThreadLifeVersionLocalLessThanRemote_FinalState_NotDelete() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
storeKeyConverter.setConversionMap(conversionMap);
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
Map<PartitionId, List<StoreKey>> idsByPartition = new HashMap<>();
Map<PartitionId, StoreKey> idsToBeIgnoredByPartition = new HashMap<>();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
// 5 P0, D0 -> [U1]
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), (short) 1, 5);
for (StoreKey id : ids) {
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), UPDATED_EXPIRY_TIME_MS, (short) 1);
}
// 1 Missing
StoreKey id = ids.get(0);
// 2 P0 -> [U1, T1]
id = ids.get(1);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
// 3 P0, T0 -> [U1]
id = ids.get(2);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Arrays.asList(localHost), UPDATED_EXPIRY_TIME_MS, (short) 0);
// 4 P0, T0, D0 -> [U1]
id = ids.get(3);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Arrays.asList(localHost), UPDATED_EXPIRY_TIME_MS, (short) 0);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, UPDATED_EXPIRY_TIME_MS);
// 5 P, D -> [U, T, D]
id = ids.get(4);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, UPDATED_EXPIRY_TIME_MS);
}
int batchSize = 100;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
ReplicaThread replicaThread = replicasAndThread.getSecond();
// Only the first put in each partition (ids.get(0)) was never written to localHost, so there is exactly one missing key per replica
List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
for (int i = 0; i < response.size(); i++) {
assertEquals(1, response.get(i).missingStoreMessages.size());
remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
}
replicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos, response, false);
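// Local content per partition at this point: 4 puts + 2 ttl updates + 2 deletes = 8 message infos.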
// Replication (exchange metadata + fix missing keys) adds another 8 message infos per partition, for 16 in total.
for (Map.Entry<PartitionId, List<MessageInfo>> localInfoEntry : localHost.infosByPartition.entrySet()) {
assertEquals("MessageInfo number mismatch", 16, localInfoEntry.getValue().size());
}
for (Map.Entry<PartitionId, List<StoreKey>> idsEntry : idsByPartition.entrySet()) {
List<MessageInfo> remoteInfos = remoteHost.infosByPartition.get(idsEntry.getKey());
List<MessageInfo> localInfos = localHost.infosByPartition.get(idsEntry.getKey());
for (StoreKey id : idsEntry.getValue()) {
if (!idsToBeIgnoredByPartition.get(idsEntry.getKey()).equals(id)) {
MessageInfo localInfo = getMergedMessageInfo(id, localInfos);
MessageInfo remoteInfo = getMergedMessageInfo(id, remoteInfos);
assertTrue(localInfo.isDeleted());
assertTrue(remoteInfo.isDeleted());
assertTrue(localInfo.isTtlUpdated());
assertTrue(remoteInfo.isTtlUpdated());
assertEquals(localInfo.getLifeVersion(), remoteInfo.getLifeVersion());
}
}
}
}
use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
the class ReplicationTest method addAndRemoveReplicaTest.
/**
* Tests dynamically adding/removing a replica in {@link ReplicationManager}
* @throws Exception
*/
@Test
public void addAndRemoveReplicaTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
StoreConfig storeConfig = new StoreConfig(verifiableProperties);
DataNodeId dataNodeId = clusterMap.getDataNodeIds().get(0);
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
StorageManager storageManager = new StorageManager(storeConfig, new DiskManagerConfig(verifiableProperties), Utils.newScheduler(1, true), new MetricRegistry(), null, clusterMap, dataNodeId, null, null, new MockTime(), null, new InMemAccountService(false, false));
storageManager.start();
MockReplicationManager replicationManager = new MockReplicationManager(replicationConfig, clusterMapConfig, storeConfig, storageManager, clusterMap, dataNodeId, storeKeyConverterFactory, null);
ReplicaId replicaToTest = clusterMap.getReplicaIds(dataNodeId).get(0);
// Attempting to add replica that already exists should fail
assertFalse("Adding an existing replica should fail", replicationManager.addReplica(replicaToTest));
// Create a brand new replica that sits on one of the disks of the data node, and add it to the replication manager
PartitionId newPartition = clusterMap.createNewPartition(clusterMap.getDataNodes());
for (ReplicaId replicaId : newPartition.getReplicaIds()) {
if (replicaId.getDataNodeId() == dataNodeId) {
replicaToTest = replicaId;
break;
}
}
// Before adding replica, partitionToPartitionInfo and mountPathToPartitionInfos should not contain new partition
assertFalse("partitionToPartitionInfo should not contain new partition", replicationManager.getPartitionToPartitionInfoMap().containsKey(newPartition));
for (PartitionInfo partitionInfo : replicationManager.getMountPathToPartitionInfosMap().get(replicaToTest.getMountPath())) {
assertNotSame("mountPathToPartitionInfos should not contain new partition", partitionInfo.getPartitionId(), newPartition);
}
// Add new replica to replication manager
assertTrue("Adding new replica to replication manager should succeed", replicationManager.addReplica(replicaToTest));
// After adding replica, partitionToPartitionInfo and mountPathToPartitionInfos should contain new partition
assertTrue("partitionToPartitionInfo should contain new partition", replicationManager.getPartitionToPartitionInfoMap().containsKey(newPartition));
Optional<PartitionInfo> newPartitionInfo = replicationManager.getMountPathToPartitionInfosMap().get(replicaToTest.getMountPath()).stream().filter(partitionInfo -> partitionInfo.getPartitionId() == newPartition).findAny();
assertTrue("mountPathToPartitionInfos should contain new partition info", newPartitionInfo.isPresent());
// Verify that all remoteReplicaInfos of the newly added replica have an assigned replica thread
for (RemoteReplicaInfo remoteReplicaInfo : newPartitionInfo.get().getRemoteReplicaInfos()) {
assertNotNull("The remote replica should be assigned to one replica thread", remoteReplicaInfo.getReplicaThread());
}
// Remove replica
assertTrue("Remove replica from replication manager should succeed", replicationManager.removeReplica(replicaToTest));
// Verify replica is removed, so partitionToPartitionInfo and mountPathToPartitionInfos should not contain new partition
assertFalse("partitionToPartitionInfo should not contain new partition", replicationManager.getPartitionToPartitionInfoMap().containsKey(newPartition));
for (PartitionInfo partitionInfo : replicationManager.getMountPathToPartitionInfosMap().get(replicaToTest.getMountPath())) {
assertNotSame("mountPathToPartitionInfos should not contain new partition", partitionInfo.getPartitionId(), newPartition);
}
// Verify that none of the remoteReplicaInfos has an assigned replica thread
for (RemoteReplicaInfo remoteReplicaInfo : newPartitionInfo.get().getRemoteReplicaInfos()) {
assertNull("The remote replica should not be assigned to any replica thread", remoteReplicaInfo.getReplicaThread());
}
// Removing the same replica again (it no longer exists) should be a no-op
ReplicationManager mockManager = Mockito.spy(replicationManager);
assertFalse("Remove non-existent replica should return false", replicationManager.removeReplica(replicaToTest));
verify(mockManager, never()).removeRemoteReplicaInfoFromReplicaThread(anyList());
storageManager.shutdown();
}
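The replicationConfig field referenced above is, like verifiableProperties, a fixture defined elsewhere in ReplicationTest. A minimal sketch, assuming it follows the usual Ambry pattern of building each config from the shared VerifiableProperties:
ReplicationConfig replicationConfig = new ReplicationConfig(verifiableProperties); // assumed construction, not shown in the excerpt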
use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.
the class ReplicationTest method replicaThreadLifeVersionLocalLessThanRemote_FinalState_Delete.
/**
* Tests the case where the local lifeVersion is less than the remote lifeVersion and the final state from remote
* is Delete.
* @throws Exception
*/
@Test
public void replicaThreadLifeVersionLocalLessThanRemote_FinalState_Delete() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
storeKeyConverter.setConversionMap(conversionMap);
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
Map<PartitionId, List<StoreKey>> idsByPartition = new HashMap<>();
Map<PartitionId, StoreKey> idsToBeIgnoredByPartition = new HashMap<>();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
// Cases per partition: (1) key missing locally, (2) local Delete with a lower lifeVersion, (3) local Put (with or without ttl update)
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), 1);
// Adding a Put and Delete to remote but nothing in local
StoreKey id = ids.get(0);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
idsToBeIgnoredByPartition.put(partitionId, id);
// Add a Put to both hosts, then a Delete to local with lifeVersion 0 and a Delete to remote with lifeVersion 1
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, EXPIRY_TIME_MS);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
// Adding one Put and Delete to remote and add the same put to local host
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
// Adding one Put and Delete to remote and add same Put and a TtlUpdate to local host
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), UPDATED_EXPIRY_TIME_MS, (short) 0);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
// Add a Put to both hosts, a Delete (lifeVersion 0) to local, then an Undelete (lifeVersion 1) and a Delete (lifeVersion 2) to remote.
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, EXPIRY_TIME_MS);
addUndeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 2, EXPIRY_TIME_MS);
ids.add(id);
idsByPartition.put(partitionId, ids);
}
int batchSize = 100;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
ReplicaThread replicaThread = replicasAndThread.getSecond();
// The final remote state for every key is a delete, so there are no missing keys.
List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
for (int i = 0; i < response.size(); i++) {
assertEquals(0, response.get(i).missingStoreMessages.size());
remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
}
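// Local content per partition at this point: 4 puts + 1 ttl update + 2 deletes = 7 message infos.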
// Exchanging metadata applies the 4 remote deletes locally (there are no missing keys to fix), bringing each partition to 11 message infos.
for (Map.Entry<PartitionId, List<MessageInfo>> localInfoEntry : localHost.infosByPartition.entrySet()) {
assertEquals("MessageInfo number mismatch", 11, localInfoEntry.getValue().size());
}
for (Map.Entry<PartitionId, List<StoreKey>> idsEntry : idsByPartition.entrySet()) {
List<MessageInfo> remoteInfos = remoteHost.infosByPartition.get(idsEntry.getKey());
List<MessageInfo> localInfos = localHost.infosByPartition.get(idsEntry.getKey());
for (StoreKey id : idsEntry.getValue()) {
if (!idsToBeIgnoredByPartition.get(idsEntry.getKey()).equals(id)) {
MessageInfo localInfo = getMergedMessageInfo(id, localInfos);
MessageInfo remoteInfo = getMergedMessageInfo(id, remoteInfos);
assertTrue(localInfo.isDeleted());
assertTrue(remoteInfo.isDeleted());
assertEquals(localInfo.getLifeVersion(), remoteInfo.getLifeVersion());
}
}
}
}