use of com.github.ambry.store.StoreKeyConverter in project ambry by linkedin.
the class ReplicationTest method replicationAllPauseTest.
/**
* Tests pausing all partitions and makes sure that the replica thread pauses. Also tests that it resumes when one
* eligible partition is re-enabled and that replication completes successfully.
* @throws Exception
*/
@Test
public void replicationAllPauseTest() throws Exception {
  MockClusterMap clusterMap = new MockClusterMap();
  Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
  MockHost localHost = localAndRemoteHosts.getFirst();
  MockHost remoteHost = localAndRemoteHosts.getSecond();
  List<PartitionId> partitionIds = clusterMap.getAllPartitionIds(null);
  for (PartitionId partitionId : partitionIds) {
    // add 10 messages to each partition and place them on the remote host only
    addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 10);
  }
  StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
  MockStoreKeyConverterFactory mockStoreKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  mockStoreKeyConverterFactory.setReturnInputIfAbsent(true);
  mockStoreKeyConverterFactory.setConversionMap(new HashMap<>());
  int batchSize = 4;
  StoreKeyConverter storeKeyConverter = mockStoreKeyConverterFactory.getStoreKeyConverter();
  Transformer transformer = new ValidatingTransformer(storeKeyFactory, storeKeyConverter);
  CountDownLatch readyToPause = new CountDownLatch(1);
  CountDownLatch readyToProceed = new CountDownLatch(1);
  AtomicReference<CountDownLatch> reachedLimitLatch = new AtomicReference<>(new CountDownLatch(1));
  AtomicReference<Exception> exception = new AtomicReference<>();
  Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread =
      getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter,
          transformer, (store, messageInfos) -> {
            try {
              readyToPause.countDown();
              readyToProceed.await();
              if (store.messageInfos.size() == remoteHost.infosByPartition.get(store.id).size()) {
                reachedLimitLatch.get().countDown();
              }
            } catch (Exception e) {
              exception.set(e);
            }
          }, null);
  ReplicaThread replicaThread = replicasAndThread.getSecond();
  Thread thread = Utils.newThread(replicaThread, false);
  thread.start();
  assertEquals("There should be no disabled partitions", 0, replicaThread.getReplicationDisabledPartitions().size());
  // wait until the replica thread is ready to be paused
  readyToPause.await(10, TimeUnit.SECONDS);
  replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(null), false);
  Set<PartitionId> expectedPaused = new HashSet<>(clusterMap.getAllPartitionIds(null));
  assertEquals("Disabled partitions sets do not match", expectedPaused,
      replicaThread.getReplicationDisabledPartitions());
  // signal the replica thread to move forward
  readyToProceed.countDown();
  // wait for the thread to go into the waiting state
  assertTrue("Replica thread did not go into waiting state",
      TestUtils.waitUntilExpectedState(thread, Thread.State.WAITING, 10000));
  // unpause one partition
  replicaThread.controlReplicationForPartitions(Collections.singletonList(partitionIds.get(0)), true);
  expectedPaused.remove(partitionIds.get(0));
  assertEquals("Disabled partitions sets do not match", expectedPaused,
      replicaThread.getReplicationDisabledPartitions());
  // wait for that partition to catch up
  reachedLimitLatch.get().await(10, TimeUnit.SECONDS);
  // reset the limit latch for the remaining partitions
  reachedLimitLatch.set(new CountDownLatch(partitionIds.size() - 1));
  // unpause all partitions
  replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(null), true);
  assertEquals("There should be no disabled partitions", 0, replicaThread.getReplicationDisabledPartitions().size());
  // wait until all partitions catch up
  reachedLimitLatch.get().await(10, TimeUnit.SECONDS);
  // shutdown
  replicaThread.shutdown();
  if (exception.get() != null) {
    throw exception.get();
  }
  Map<PartitionId, List<MessageInfo>> missingInfos = remoteHost.getMissingInfos(localHost.infosByPartition);
  for (Map.Entry<PartitionId, List<MessageInfo>> entry : missingInfos.entrySet()) {
    assertEquals("No infos should be missing", 0, entry.getValue().size());
  }
  Map<PartitionId, List<ByteBuffer>> missingBuffers = remoteHost.getMissingBuffers(localHost.buffersByPartition);
  for (Map.Entry<PartitionId, List<ByteBuffer>> entry : missingBuffers.entrySet()) {
    assertEquals("No buffers should be missing", 0, entry.getValue().size());
  }
}
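The pause/resume coordination above hinges on a latch handshake between the test driver and the hook that the replica thread invokes before writing messages: the worker announces it has reached the interception point, the driver mutates shared state while the worker is blocked, then releases it and waits for completion. A minimal standalone sketch of the same pattern, assuming nothing from Ambry (all names here are hypothetical):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class LatchHandshakeSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch readyToPause = new CountDownLatch(1);   // worker -> driver: "I reached the hook"
    CountDownLatch readyToProceed = new CountDownLatch(1); // driver -> worker: "state change applied, go on"
    AtomicReference<CountDownLatch> reachedLimit = new AtomicReference<>(new CountDownLatch(1));

    Thread worker = new Thread(() -> {
      try {
        readyToPause.countDown();       // announce arrival at the interception point
        readyToProceed.await();         // block until the driver has applied its state change
        // ... do the unit of work here (in the test: write replicated messages to the store) ...
        reachedLimit.get().countDown(); // report completion
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });
    worker.start();

    // driver side
    readyToPause.await(10, TimeUnit.SECONDS);
    // ... mutate shared state here (in the test: controlReplicationForPartitions(..., false)) ...
    readyToProceed.countDown();
    reachedLimit.get().await(10, TimeUnit.SECONDS);
    worker.join();
  }
}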
use of com.github.ambry.store.StoreKeyConverter in project ambry by linkedin.
the class MockHost method getMissingInfos.
/**
 * Gets the message infos that are present in this host but missing in {@code other}.
 * @param other the map of {@link PartitionId} to the list of {@link MessageInfo} to check against.
 * @param storeKeyConverter the {@link StoreKeyConverter} used to convert this host's keys before comparison.
 *                          If {@code null}, keys are compared without conversion.
 * @return the message infos that are present in this host but missing in {@code other}.
 * @throws Exception
 */
Map<PartitionId, List<MessageInfo>> getMissingInfos(Map<PartitionId, List<MessageInfo>> other,
    StoreKeyConverter storeKeyConverter) throws Exception {
  Map<PartitionId, List<MessageInfo>> missingInfos = new HashMap<>();
  for (Map.Entry<PartitionId, List<MessageInfo>> entry : infosByPartition.entrySet()) {
    PartitionId partitionId = entry.getKey();
    for (MessageInfo messageInfo : entry.getValue()) {
      boolean found = false;
      StoreKey convertedKey;
      if (storeKeyConverter == null) {
        convertedKey = messageInfo.getStoreKey();
      } else {
        Map<StoreKey, StoreKey> map = storeKeyConverter.convert(Collections.singletonList(messageInfo.getStoreKey()));
        convertedKey = map.get(messageInfo.getStoreKey());
        if (convertedKey == null) {
          continue;
        }
      }
      for (MessageInfo otherInfo : other.get(partitionId)) {
        if (convertedKey.equals(otherInfo.getStoreKey()) && messageInfo.isDeleted() == otherInfo.isDeleted()) {
          found = true;
          break;
        }
      }
      if (!found) {
        missingInfos.computeIfAbsent(partitionId, partitionId1 -> new ArrayList<>()).add(messageInfo);
      }
    }
  }
  return missingInfos;
}
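A usage sketch: reusing the MockStoreKeyConverterFactory setup from the first snippet to build a converter and assert that, after replication, nothing on the remote host is missing locally. The localHost/remoteHost variables and the assertion are assumed context, not taken verbatim from Ambry's tests:

MockStoreKeyConverterFactory factory = new MockStoreKeyConverterFactory(null, null);
factory.setReturnInputIfAbsent(true);      // unmapped keys convert to themselves
factory.setConversionMap(new HashMap<>()); // no explicit key remappings
StoreKeyConverter converter = factory.getStoreKeyConverter();
Map<PartitionId, List<MessageInfo>> missing =
    remoteHost.getMissingInfos(localHost.infosByPartition, converter);
for (List<MessageInfo> infos : missing.values()) {
  assertEquals("No infos should be missing", 0, infos.size());
}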
use of com.github.ambry.store.StoreKeyConverter in project ambry by linkedin.
the class ReplicationTestHelper method replicateAndVerify.
/**
 * Replicate between local and remote hosts and verify that the results on the local host are as expected.
 * 1. The remote host has different versions of blobIds and the local host is empty;
 * 2. The remote and local hosts have different conversion maps (StoreKeyConverter).
 * @param testSetup the {@link ReplicationTestSetup} used to provide test environment info.
 * @param expectedStr the string representing the expected sequence of PUT and DELETE messages on the local host,
 *                    e.g. "OP ND", where O/N denote the old/new key and P/D denote a PUT/DELETE message.
 * @throws Exception
 */
protected void replicateAndVerify(ReplicationTestSetup testSetup, String expectedStr) throws Exception {
  PartitionId partitionId = testSetup.partitionIds.get(0);
  List<RemoteReplicaInfo> singleReplicaList = testSetup.replicasToReplicate.get(testSetup.remoteHost.dataNodeId)
      .stream()
      .filter(e -> e.getReplicaId().getPartitionId() == partitionId)
      .collect(Collectors.toList());
  // Do the replica metadata exchange.
  List<ReplicaThread.ExchangeMetadataResponse> responses = testSetup.replicaThread.exchangeMetadata(
      new MockConnectionPool.MockConnection(testSetup.remoteHost, 10, testSetup.remoteConversionMap),
      singleReplicaList);
  // Do the GET request to fix missing keys.
  testSetup.replicaThread.fixMissingStoreKeys(
      new MockConnectionPool.MockConnection(testSetup.remoteHost, 10, testSetup.remoteConversionMap),
      singleReplicaList, responses, false);
  // Verify the sequence of messages on the local host.
  String[] expectedResults = expectedStr.equals("") ? new String[0] : expectedStr.split("\\s");
  int size = testSetup.localHost.infosByPartition.get(partitionId).size();
  assertEquals("Mismatch in number of messages on local host after replication", expectedResults.length, size);
  for (int i = 0; i < size; ++i) {
    String blobIdStr = testSetup.localHost.infosByPartition.get(partitionId).get(i).getStoreKey().toString();
    boolean isDeleteMessage = testSetup.localHost.infosByPartition.get(partitionId).get(i).isDeleted();
    switch (expectedResults[i]) {
      case "OP":
        assertEquals("Mismatch in blobId on local host after replication", testSetup.oldKey.toString(), blobIdStr);
        assertFalse("Mismatch in message type on local host after replication", isDeleteMessage);
        break;
      case "OD":
        assertEquals("Mismatch in blobId on local host after replication", testSetup.oldKey.toString(), blobIdStr);
        assertTrue("Mismatch in message type on local host after replication", isDeleteMessage);
        break;
      case "NP":
        assertEquals("Mismatch in blobId on local host after replication", testSetup.newKey.toString(), blobIdStr);
        assertFalse("Mismatch in message type on local host after replication", isDeleteMessage);
        break;
      case "ND":
        assertEquals("Mismatch in blobId on local host after replication", testSetup.newKey.toString(), blobIdStr);
        assertTrue("Mismatch in message type on local host after replication", isDeleteMessage);
        break;
    }
  }
}
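The expectedStr tokens encode key identity and message type: O/N selects testSetup.oldKey or testSetup.newKey, and P/D marks a PUT or DELETE. A hypothetical call site, given an already-prepared testSetup (the setup itself is assumed, not Ambry's actual API):

// Expect a PUT of the old key followed by a DELETE of the new key on the local host.
replicateAndVerify(testSetup, "OP ND");
// An empty string asserts that replication produced no messages on the local host.
replicateAndVerify(testSetup, "");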