use of com.github.ambry.store.Transformer in project ambry by linkedin.
the class ReplicationTest method replicationLagMetricAndSyncUpTest.
/**
* Tests {@link ReplicationMetrics#getMaxLagForPartition(PartitionId)} and that replicas catch up (sync up) during bootstrap.
* @throws Exception
*/
@Test
public void replicationLagMetricAndSyncUpTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
AmbryReplicaSyncUpManager replicaSyncUpService = new AmbryReplicaSyncUpManager(clusterMapConfig);
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost1 = localAndRemoteHosts.getSecond();
// create another remoteHost2 that shares the special partition with localHost and remoteHost1
PartitionId specialPartitionId = clusterMap.getWritablePartitionIds(MockClusterMap.SPECIAL_PARTITION_CLASS).get(0);
MockHost remoteHost2 = new MockHost(specialPartitionId.getReplicaIds().get(2).getDataNodeId(), clusterMap);
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
int batchSize = 4;
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
// add batchSize + 1 messages to remoteHost1 so that two rounds of replication are needed.
addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost1), batchSize + 1);
}
// add batchSize - 1 messages to remoteHost2 so that localHost can catch up within one cycle of replication
for (ReplicaId replicaId : clusterMap.getReplicaIds(remoteHost2.dataNodeId)) {
addPutMessagesToReplicasOfPartition(replicaId.getPartitionId(), Collections.singletonList(remoteHost2), batchSize - 1);
}
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread1 = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost1, storeKeyConverter, transformer, null, replicaSyncUpService);
Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate1 = replicasAndThread1.getFirst();
ReplicaThread replicaThread1 = replicasAndThread1.getSecond();
// mock Bootstrap-To-Standby transition in ReplicationManager: 1. update store current state; 2. initiate bootstrap
replicasToReplicate1.get(remoteHost1.dataNodeId).forEach(info -> info.getLocalStore().setCurrentState(ReplicaState.BOOTSTRAP));
clusterMap.getReplicaIds(localHost.dataNodeId).forEach(replicaSyncUpService::initiateBootstrap);
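// first replication cycle: only batchSize of the batchSize + 1 remote messages can be fetched, so some lag remains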
List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread1.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost1, batchSize), replicasToReplicate1.get(remoteHost1.dataNodeId));
replicaThread1.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost1, batchSize), replicasToReplicate1.get(remoteHost1.dataNodeId), response, false);
for (PartitionId partitionId : partitionIds) {
List<MessageInfo> allMessageInfos = localAndRemoteHosts.getSecond().infosByPartition.get(partitionId);
long expectedLag = allMessageInfos.subList(batchSize, allMessageInfos.size()).stream().mapToLong(MessageInfo::getSize).sum();
assertEquals("Replication lag doesn't match expected value", expectedLag, replicaThread1.getReplicationMetrics().getMaxLagForPartition(partitionId));
}
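// second replication cycle: fetch the remaining message so the lag for every partition drops to 0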
response = replicaThread1.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost1, batchSize), replicasToReplicate1.get(remoteHost1.dataNodeId));
replicaThread1.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost1, batchSize), replicasToReplicate1.get(remoteHost1.dataNodeId), response, false);
for (PartitionId partitionId : partitionIds) {
assertEquals("Replication lag should equal to 0", 0, replicaThread1.getReplicationMetrics().getMaxLagForPartition(partitionId));
}
// replicate with remoteHost2 to ensure special replica has caught up with enough peers
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread2 = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost2, storeKeyConverter, transformer, null, replicaSyncUpService);
Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate2 = replicasAndThread2.getFirst();
ReplicaThread replicaThread2 = replicasAndThread2.getSecond();
// initiate bootstrap on replica of special partition
RemoteReplicaInfo specialReplicaInfo = replicasToReplicate2.get(remoteHost2.dataNodeId).stream().filter(info -> info.getReplicaId().getPartitionId() == specialPartitionId).findFirst().get();
specialReplicaInfo.getLocalStore().setCurrentState(ReplicaState.BOOTSTRAP);
replicaSyncUpService.initiateBootstrap(specialReplicaInfo.getLocalReplicaId());
response = replicaThread2.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost2, batchSize), replicasToReplicate2.get(remoteHost2.dataNodeId));
replicaThread2.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost2, batchSize), replicasToReplicate2.get(remoteHost2.dataNodeId), response, false);
// verify replica of special partition has completed bootstrap and becomes standby
assertEquals("Store state is not expected", ReplicaState.STANDBY, specialReplicaInfo.getLocalStore().getCurrentState());
}
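The expected lag asserted above is simply the total size, in bytes, of the messages that the first replication cycle could not fetch (everything beyond the first batchSize entries per partition). A minimal sketch of that computation, reusing only the MessageInfo#getSize accessor from the test; the helper name is illustrative:
private static long expectedLagAfterOneCycle(List<MessageInfo> allMessageInfos, int batchSize) {
  // everything beyond the first batchSize messages is still missing locally after one replication cycle
  return allMessageInfos.subList(batchSize, allMessageInfos.size()).stream().mapToLong(MessageInfo::getSize).sum();
}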
use of com.github.ambry.store.Transformer in project ambry by linkedin.
the class ReplicationTest method replicationPauseTest.
/**
* Tests pausing replication for all partitions and for individual partitions. Also tests that replication pauses on a
* store that is not started and resumes when the store is restarted.
* @throws Exception
*/
@Test
public void replicationPauseTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
List<PartitionId> partitionIds = clusterMap.getAllPartitionIds(null);
for (PartitionId partitionId : partitionIds) {
// add 10 messages to the remote host only
addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 10);
}
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
StoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Transformer transformer = new ValidatingTransformer(storeKeyFactory, storeKeyConverter);
int batchSize = 4;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate = replicasAndThread.getFirst();
ReplicaThread replicaThread = replicasAndThread.getSecond();
Map<PartitionId, Integer> progressTracker = new HashMap<>();
PartitionId partitionToResumeFirst = clusterMap.getAllPartitionIds(null).get(0);
PartitionId partitionToShutdownLocally = clusterMap.getAllPartitionIds(null).get(1);
boolean allStopped = false;
boolean onlyOneResumed = false;
boolean allReenabled = false;
boolean shutdownStoreRestarted = false;
Set<PartitionId> expectedPaused = new HashSet<>();
assertEquals("There should be no disabled partitions", expectedPaused, replicaThread.getReplicationDisabledPartitions());
while (true) {
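// each iteration runs one replication pass, checks per-partition progress, then advances the pause/resume scenario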
replicaThread.replicate();
boolean replicationDone = true;
for (RemoteReplicaInfo replicaInfo : replicasToReplicate.get(remoteHost.dataNodeId)) {
PartitionId id = replicaInfo.getReplicaId().getPartitionId();
MockFindToken token = (MockFindToken) replicaInfo.getToken();
int lastProgress = progressTracker.computeIfAbsent(id, id1 -> 0);
int currentProgress = token.getIndex();
boolean partDone = currentProgress + 1 == remoteHost.infosByPartition.get(id).size();
if (allStopped || (onlyOneResumed && !id.equals(partitionToResumeFirst)) || (allReenabled && !shutdownStoreRestarted && id.equals(partitionToShutdownLocally))) {
assertEquals("There should have been no progress", lastProgress, currentProgress);
} else if (!partDone) {
assertTrue("There has been no progress", currentProgress > lastProgress);
progressTracker.put(id, currentProgress);
}
replicationDone = replicationDone && partDone;
}
if (!allStopped && !onlyOneResumed && !allReenabled && !shutdownStoreRestarted) {
replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(null), false);
expectedPaused.addAll(clusterMap.getAllPartitionIds(null));
assertEquals("Disabled partitions sets do not match", expectedPaused, replicaThread.getReplicationDisabledPartitions());
allStopped = true;
} else if (!onlyOneResumed && !allReenabled && !shutdownStoreRestarted) {
// resume replication for first partition
replicaThread.controlReplicationForPartitions(Collections.singletonList(partitionIds.get(0)), true);
expectedPaused.remove(partitionIds.get(0));
assertEquals("Disabled partitions sets do not match", expectedPaused, replicaThread.getReplicationDisabledPartitions());
allStopped = false;
onlyOneResumed = true;
} else if (!allReenabled && !shutdownStoreRestarted) {
// re-enable replication for all partitions (the first one is already enabled)
replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(null), true);
// shutdown one local store to pause replication against that store
localHost.storesByPartition.get(partitionToShutdownLocally).shutdown();
onlyOneResumed = false;
allReenabled = true;
expectedPaused.clear();
assertEquals("Disabled partitions sets do not match", expectedPaused, replicaThread.getReplicationDisabledPartitions());
} else if (!shutdownStoreRestarted) {
localHost.storesByPartition.get(partitionToShutdownLocally).start();
shutdownStoreRestarted = true;
}
if (replicationDone) {
break;
}
}
Map<PartitionId, List<MessageInfo>> missingInfos = remoteHost.getMissingInfos(localHost.infosByPartition);
for (Map.Entry<PartitionId, List<MessageInfo>> entry : missingInfos.entrySet()) {
assertEquals("No infos should be missing", 0, entry.getValue().size());
}
Map<PartitionId, List<ByteBuffer>> missingBuffers = remoteHost.getMissingBuffers(localHost.buffersByPartition);
for (Map.Entry<PartitionId, List<ByteBuffer>> entry : missingBuffers.entrySet()) {
assertEquals("No buffers should be missing", 0, entry.getValue().size());
}
}
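The state machine above pauses everything, resumes one partition, then re-enables all. A compact hedged sketch of just that pause/resume API, using only the ReplicaThread methods exercised in the test (the helper name and the final size assertion are illustrative):
private void pauseAllThenResumeOne(ReplicaThread replicaThread, MockClusterMap clusterMap) {
  List<PartitionId> all = clusterMap.getAllPartitionIds(null);
  // disable replication for every partition
  replicaThread.controlReplicationForPartitions(all, false);
  // re-enable replication for the first partition only
  replicaThread.controlReplicationForPartitions(Collections.singletonList(all.get(0)), true);
  // every partition except the first should still be reported as disabled
  Set<PartitionId> stillDisabled = replicaThread.getReplicationDisabledPartitions();
  assertEquals(all.size() - 1, stillDisabled.size());
}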
use of com.github.ambry.store.Transformer in project ambry by linkedin.
the class ReplicationTest method replicaThreadLifeVersionLocalLessThanRemote_FinalState_NotDelete.
/**
* Tests the case when the lifeVersion in local is less than the lifeVersion in remote and the final state is not a
* delete; the final state would be a Put, TtlUpdate or Undelete.
* @throws Exception
*/
@Test
public void replicaThreadLifeVersionLocalLessThanRemote_FinalState_NotDelete() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
storeKeyConverter.setConversionMap(conversionMap);
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
Map<PartitionId, List<StoreKey>> idsByPartition = new HashMap<>();
Map<PartitionId, StoreKey> idsToBeIgnoredByPartition = new HashMap<>();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
// 5 P0, D0 -> [U1]
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), (short) 1, 5);
for (StoreKey id : ids) {
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), UPDATED_EXPIRY_TIME_MS, (short) 1);
}
// 1 Missing
StoreKey id = ids.get(0);
// 2 P0 -> [U1, T1]
id = ids.get(1);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
// 3 P0, T0 -> [U1]
id = ids.get(2);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Arrays.asList(localHost), UPDATED_EXPIRY_TIME_MS, (short) 0);
// 4 P0, T0, D0 -> [U1]
id = ids.get(3);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Arrays.asList(localHost), UPDATED_EXPIRY_TIME_MS, (short) 0);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, UPDATED_EXPIRY_TIME_MS);
// 5 P, D -> [U, T, D]
id = ids.get(4);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, UPDATED_EXPIRY_TIME_MS);
}
int batchSize = 100;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
ReplicaThread replicaThread = replicasAndThread.getSecond();
// There is one missing key
List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
for (int i = 0; i < response.size(); i++) {
assertEquals(1, response.get(i).missingStoreMessages.size());
remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
}
replicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos, response, false);
// Before exchange metadata, the number of message infos in the local host is 8. Replicating from the remote host adds another 8.
for (Map.Entry<PartitionId, List<MessageInfo>> localInfoEntry : localHost.infosByPartition.entrySet()) {
assertEquals("MessageInfo number mismatch", 16, localInfoEntry.getValue().size());
}
for (Map.Entry<PartitionId, List<StoreKey>> idsEntry : idsByPartition.entrySet()) {
List<MessageInfo> remoteInfos = remoteHost.infosByPartition.get(idsEntry.getKey());
List<MessageInfo> localInfos = localHost.infosByPartition.get(idsEntry.getKey());
for (StoreKey id : idsEntry.getValue()) {
if (!idsToBeIgnoredByPartition.get(idsEntry.getKey()).equals(id)) {
MessageInfo localInfo = getMergedMessageInfo(id, localInfos);
MessageInfo remoteInfo = getMergedMessageInfo(id, remoteInfos);
assertTrue(localInfo.isDeleted());
assertTrue(remoteInfo.isDeleted());
assertTrue(localInfo.isTtlUpdated());
assertTrue(remoteInfo.isTtlUpdated());
assertEquals(localInfo.getLifeVersion(), remoteInfo.getLifeVersion());
}
}
}
}
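The verification loop above boils down to a merge-and-compare per key. A hedged sketch of that check, assuming only the getMergedMessageInfo helper and the MessageInfo accessors already used in this test (the method name is illustrative):
private void assertLocalCaughtUpWithRemote(StoreKey id, List<MessageInfo> localInfos, List<MessageInfo> remoteInfos) {
  // merge all records for the key on each host into its latest logical state
  MessageInfo localInfo = getMergedMessageInfo(id, localInfos);
  MessageInfo remoteInfo = getMergedMessageInfo(id, remoteInfos);
  // after replication both sides should agree on the flags and the lifeVersion
  assertEquals("Delete flag mismatch", remoteInfo.isDeleted(), localInfo.isDeleted());
  assertEquals("TtlUpdate flag mismatch", remoteInfo.isTtlUpdated(), localInfo.isTtlUpdated());
  assertEquals("LifeVersion mismatch", remoteInfo.getLifeVersion(), localInfo.getLifeVersion());
}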
use of com.github.ambry.store.Transformer in project ambry by linkedin.
the class ReplicationTest method replicaThreadLifeVersionLocalLessThanRemote_FinalState_Delete.
/**
* Tests the case when the lifeVersion in local is less than the lifeVersion in remote and the final state from remote
* is a delete.
* @throws Exception
*/
@Test
public void replicaThreadLifeVersionLocalLessThanRemote_FinalState_Delete() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
storeKeyConverter.setConversionMap(conversionMap);
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
Map<PartitionId, List<StoreKey>> idsByPartition = new HashMap<>();
Map<PartitionId, StoreKey> idsToBeIgnoredByPartition = new HashMap<>();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
// Cases per partition: 1. missing; 2. Delete; 3. Put (with or without TTL update)
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), 1);
// Add a Put and a Delete to remote, but nothing to local
StoreKey id = ids.get(0);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
idsToBeIgnoredByPartition.put(partitionId, id);
// Add a Put to both hosts, then a Delete to both, but with a lower lifeVersion on local
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, EXPIRY_TIME_MS);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
// Add a Put and a Delete to remote, and only the same Put to local
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
// Add a Put and a Delete to remote; add the same Put and a TtlUpdate to local
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), UPDATED_EXPIRY_TIME_MS, (short) 0);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
// Add a Put to both hosts and a Delete to local; then add an Undelete and a final Delete to remote at higher lifeVersions.
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, EXPIRY_TIME_MS);
addUndeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 2, EXPIRY_TIME_MS);
ids.add(id);
idsByPartition.put(partitionId, ids);
}
int batchSize = 100;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
ReplicaThread replicaThread = replicasAndThread.getSecond();
// It's all deletes; there is no missing key.
List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
for (int i = 0; i < response.size(); i++) {
assertEquals(0, response.get(i).missingStoreMessages.size());
remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
}
// Before exchange metadata, the number of message infos in the local host is 7. Exchange metadata would add another 4 (all deletes).
for (Map.Entry<PartitionId, List<MessageInfo>> localInfoEntry : localHost.infosByPartition.entrySet()) {
assertEquals("MessageInfo number mismatch", 11, localInfoEntry.getValue().size());
}
for (Map.Entry<PartitionId, List<StoreKey>> idsEntry : idsByPartition.entrySet()) {
List<MessageInfo> remoteInfos = remoteHost.infosByPartition.get(idsEntry.getKey());
List<MessageInfo> localInfos = localHost.infosByPartition.get(idsEntry.getKey());
for (StoreKey id : idsEntry.getValue()) {
if (!idsToBeIgnoredByPartition.get(idsEntry.getKey()).equals(id)) {
MessageInfo localInfo = getMergedMessageInfo(id, localInfos);
MessageInfo remoteInfo = getMergedMessageInfo(id, remoteInfos);
assertTrue(localInfo.isDeleted());
assertTrue(remoteInfo.isDeleted());
assertEquals(localInfo.getLifeVersion(), remoteInfo.getLifeVersion());
}
}
}
}
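Both lifeVersion tests drive the same two-step round against the remote host: exchange metadata to find what is missing, then fix the missing keys. A hedged sketch of that round using only calls that appear in the tests (the wrapper name is illustrative):
private List<ReplicaThread.ExchangeMetadataResponse> replicateOnce(ReplicaThread replicaThread, MockHost remoteHost, List<RemoteReplicaInfo> remoteReplicaInfos, int batchSize) throws Exception {
  // step 1: ask the remote replicas which store keys the local stores are missing
  List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
  // step 2: fetch the missing blobs from the remote host and write them to the local stores
  replicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos, response, false);
  return response;
}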
use of com.github.ambry.store.Transformer in project ambry by linkedin.
the class ReplicationTest method limitMaxPartitionCountPerRequestTest.
/**
* Test that max partition count per request is honored in {@link ReplicaThread} if there are too many partitions to
* replicate from the remote node.
* @throws Exception
*/
@Test
public void limitMaxPartitionCountPerRequestTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
List<PartitionId> partitionIds = clusterMap.getAllPartitionIds(null);
for (PartitionId partitionId : partitionIds) {
// add 5 messages to each partition and place them on the remote host only
addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 5);
}
StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
MockStoreKeyConverterFactory mockStoreKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
mockStoreKeyConverterFactory.setReturnInputIfAbsent(true);
mockStoreKeyConverterFactory.setConversionMap(new HashMap<>());
// we set batchSize to 10 in order to get all messages from one partition within a single replication cycle
int batchSize = 10;
StoreKeyConverter storeKeyConverter = mockStoreKeyConverterFactory.getStoreKeyConverter();
Transformer transformer = new ValidatingTransformer(storeKeyFactory, storeKeyConverter);
// we set max partition count per request to 5, which forces the thread to replicate the replicas in two cycles (note
// that the number of partitions to replicate is 10, so they will be replicated in two batches)
ReplicationConfig initialReplicationConfig = replicationConfig;
properties.setProperty("replication.max.partition.count.per.request", String.valueOf(5));
replicationConfig = new ReplicationConfig(new VerifiableProperties(properties));
CountDownLatch replicationCompleted = new CountDownLatch(partitionIds.size());
AtomicReference<Exception> exception = new AtomicReference<>();
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, (store, messageInfos) -> {
try {
replicationCompleted.countDown();
// for each partition, replication should complete within a single cycle (one fetch should suffice), so
// we shut down the local store once the blobs are written. This avoids unnecessary metadata requests being
// sent to the remote host.
store.shutdown();
} catch (Exception e) {
exception.set(e);
}
}, null);
ReplicaThread replicaThread = replicasAndThread.getSecond();
Thread thread = Utils.newThread(replicaThread, false);
thread.start();
assertTrue("Replication didn't complete within 10 secs", replicationCompleted.await(10, TimeUnit.SECONDS));
// verify the # of replicas per metadata request is limited to 5 (note that there are 10 replicas to replicate; they
// are split into 2 small batches and replicated in separate requests)
assertEquals("There should be 2 metadata requests and each has 5 replicas to replicate", Arrays.asList(5, 5), remoteHost.replicaCountPerRequestTracker);
// shutdown
replicaThread.shutdown();
if (exception.get() != null) {
throw exception.get();
}
replicationConfig = initialReplicationConfig;
}
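The two-request split asserted above follows directly from the cap: with 10 replicas to replicate and at most 5 partitions per metadata request, two requests are needed. A tiny illustrative calculation (the variable names are not from the test):
int replicasToReplicate = 10;
int maxPartitionCountPerRequest = 5; // replication.max.partition.count.per.request
// ceiling division: (10 + 5 - 1) / 5 = 2 metadata requests, each carrying 5 replicas
int expectedRequests = (replicasToReplicate + maxPartitionCountPerRequest - 1) / maxPartitionCountPerRequest;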