Use of com.github.ambry.store.Transformer in project ambry by linkedin.
The class ReplicationTest, method replicaThreadSleepTest.
@Test
public void replicaThreadSleepTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
long expectedThrottleDurationMs = localHost.dataNodeId.getDatacenterName().equals(remoteHost.dataNodeId.getDatacenterName()) ? replicationConfig.replicationIntraReplicaThreadThrottleSleepDurationMs : replicationConfig.replicationInterReplicaThreadThrottleSleepDurationMs;
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
int batchSize = 4;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate = replicasAndThread.getFirst();
ReplicaThread replicaThread = replicasAndThread.getSecond();
// populate data: add 1 message per partition to both hosts.
for (PartitionId partitionId : clusterMap.getAllPartitionIds(null)) {
addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(localHost, remoteHost), 1);
}
// tests to verify that replica thread throttling and idling function correctly, in the following steps:
// 1. all replicas are in sync; thread-level sleep and replica quarantine are both enabled.
// 2. add put messages to some replica and verify that replication for all replicas remains disabled.
// 3. forward the time so that replication for the replicas is re-enabled and check that replication resumes.
// 4. add more put messages to ensure replication happens continuously when needed and is throttled appropriately.
// 1. verify that the replica thread sleeps and replicas are temporarily disabled when all replicas are synced.
List<List<RemoteReplicaInfo>> replicasToReplicateList = new ArrayList<>(replicasToReplicate.values());
// replicate() is called and time is moved forward to prepare the replicas for testing.
replicaThread.replicate();
time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs + 1);
long currentTimeMs = time.milliseconds();
replicaThread.replicate();
for (List<RemoteReplicaInfo> replicaInfos : replicasToReplicateList) {
for (RemoteReplicaInfo replicaInfo : replicaInfos) {
assertEquals("Unexpected re-enable replication time", currentTimeMs + replicationConfig.replicationSyncedReplicaBackoffDurationMs, replicaInfo.getReEnableReplicationTime());
}
}
currentTimeMs = time.milliseconds();
replicaThread.replicate();
assertEquals("Replicas are in sync, replica thread should sleep by replication.thread.idle.sleep.duration.ms", currentTimeMs + replicationConfig.replicationReplicaThreadIdleSleepDurationMs, time.milliseconds());
// 2. add 3 messages to one partition on the remote host only and verify that replication for all replicas is disabled.
PartitionId partitionId = clusterMap.getWritablePartitionIds(null).get(0);
addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 3);
int[] missingKeys = new int[replicasToReplicate.get(remoteHost.dataNodeId).size()];
for (int i = 0; i < missingKeys.length; i++) {
missingKeys[i] = replicasToReplicate.get(remoteHost.dataNodeId).get(i).getReplicaId().getPartitionId().isEqual(partitionId.toPathString()) ? 3 : 0;
}
currentTimeMs = time.milliseconds();
replicaThread.replicate();
assertEquals("Replication for all replicas should be disabled and the thread should sleep", currentTimeMs + replicationConfig.replicationReplicaThreadIdleSleepDurationMs, time.milliseconds());
assertMissingKeys(missingKeys, batchSize, replicaThread, remoteHost, replicasToReplicate);
// 3. forward the time, run replicate() and verify that replication resumes.
time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs);
replicaThread.replicate();
missingKeys = new int[replicasToReplicate.get(remoteHost.dataNodeId).size()];
assertMissingKeys(missingKeys, batchSize, replicaThread, remoteHost, replicasToReplicate);
// The setting of remoteReplicaInfo::setReEnableReplicationTime has moved inside replicaThread::exchangeMetadata, and
// the assertMissingKeys() call above runs exchangeMetadata() for up-to-date replicas, so each replica now has its
// re-enable replication time pushed out by replicationSyncedReplicaBackoffDurationMs. Forward the time here.
time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs);
// 4. add more put messages and verify that replication continues and is throttled appropriately.
addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(localHost, remoteHost), 3);
currentTimeMs = time.milliseconds();
replicaThread.replicate();
assertEquals("Replica thread should sleep exactly " + expectedThrottleDurationMs + " since remote has new token", currentTimeMs + expectedThrottleDurationMs, time.milliseconds());
assertMissingKeys(missingKeys, batchSize, replicaThread, remoteHost, replicasToReplicate);
// The setting of remoteReplicaInfo::setReEnableReplicationTime has moved inside replicaThread::exchangeMetadata, and
// the assertMissingKeys() call above runs exchangeMetadata() for up-to-date replicas, so each replica now has its
// re-enable replication time pushed out by replicationSyncedReplicaBackoffDurationMs. Forward the time here.
time.sleep(replicationConfig.replicationSyncedReplicaBackoffDurationMs);
// verify that throttling on the replica thread is disabled when the relevant configs are 0.
Properties properties = new Properties();
properties.setProperty("replication.intra.replica.thread.throttle.sleep.duration.ms", "0");
properties.setProperty("replication.inter.replica.thread.throttle.sleep.duration.ms", "0");
replicationConfig = new ReplicationConfig(new VerifiableProperties(properties));
replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
replicaThread = replicasAndThread.getSecond();
currentTimeMs = time.milliseconds();
replicaThread.replicate();
assertEquals("Replica thread should not sleep when throttling is disabled and replicas are out of sync", currentTimeMs, time.milliseconds());
}
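The expectedThrottleDurationMs computed at the top of this test hinges on whether the two hosts share a datacenter. A minimal sketch of that selection, reusing the config fields the test reads (the helper itself is hypothetical, not ReplicaThread's actual code):

// Hypothetical helper: choose the throttle sleep duration by replica locality.
// The two config fields are the same ones the test reads above.
static long expectedThrottleDurationMs(DataNodeId local, DataNodeId remote, ReplicationConfig config) {
  boolean intraDatacenter = local.getDatacenterName().equals(remote.getDatacenterName());
  return intraDatacenter ? config.replicationIntraReplicaThreadThrottleSleepDurationMs
      : config.replicationInterReplicaThreadThrottleSleepDurationMs;
}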
Use of com.github.ambry.store.Transformer in project ambry by linkedin.
The class CloudBlobStoreTest, method testPutWithTtl.
/**
* Test PUT (with TTL) and TtlUpdate record replication.
* Replication may happen after the PUT and after the TtlUpdate, or after the TtlUpdate only.
* The PUT may already be expired, or its expiration time may be < the upload threshold or >= the upload threshold.
* @throws Exception
*/
@Test
public void testPutWithTtl() throws Exception {
// Set up remote host
MockClusterMap clusterMap = new MockClusterMap();
MockHost remoteHost = getLocalAndRemoteHosts(clusterMap).getSecond();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
PartitionId partitionId = partitionIds.get(0);
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
Map<DataNodeId, MockHost> hosts = new HashMap<>();
hosts.put(remoteHost.dataNodeId, remoteHost);
MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, 4);
// Generate BlobIds for the following PUTs.
short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
boolean toEncrypt = TestUtils.RANDOM.nextBoolean();
List<BlobId> blobIdList = new ArrayList<>();
for (int i = 0; i < 6; i++) {
blobIdList.add(new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, partitionId, toEncrypt, BlobId.BlobDataType.DATACHUNK));
}
// Set up VCR
Properties props = new Properties();
setBasicProperties(props);
props.setProperty("clustermap.port", "12300");
props.setProperty("vcr.ssl.port", "12345");
ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
CloudDataNode cloudDataNode = new CloudDataNode(cloudConfig, clusterMapConfig);
LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIdList, clusterMap);
CloudReplica cloudReplica = new CloudReplica(partitionId, cloudDataNode);
CloudBlobStore cloudBlobStore = new CloudBlobStore(new VerifiableProperties(props), partitionId, latchBasedInMemoryCloudDestination, clusterMap, new VcrMetrics(new MetricRegistry()));
cloudBlobStore.start();
// Create ReplicaThread and add RemoteReplicaInfo to it.
ReplicationMetrics replicationMetrics = new ReplicationMetrics(new MetricRegistry(), Collections.emptyList());
ReplicaThread replicaThread = new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap, new AtomicInteger(0), cloudDataNode, connectionPool, replicationConfig, replicationMetrics, null, storeKeyConverter, transformer, clusterMap.getMetricRegistry(), false, cloudDataNode.getDatacenterName(), new ResponseHandler(clusterMap), new MockTime(), null, null, null);
for (ReplicaId replica : partitionId.getReplicaIds()) {
if (replica.getDataNodeId() == remoteHost.dataNodeId) {
RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(replica, cloudReplica, cloudBlobStore, new MockFindToken(0, 0), Long.MAX_VALUE, SystemTime.getInstance(), new Port(remoteHost.dataNodeId.getPort(), PortType.PLAINTEXT));
replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
break;
}
}
long referenceTime = System.currentTimeMillis();
// Case 1: Put already expired. Replication happens after Put and after TtlUpdate.
// Upload to Cloud only after replicating ttlUpdate.
BlobId id = blobIdList.get(0);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
replicaThread.replicate();
assertFalse("Blob should not exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 2: Put already expired. Replication happens after TtlUpdate.
// Upload to Cloud only after replicating ttlUpdate.
id = blobIdList.get(1);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 3: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
// Upload to Cloud only after replicating ttlUpdate.
id = blobIdList.get(2);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
replicaThread.replicate();
if (isVcr) {
assertFalse("Blob should not exist (vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
} else {
assertTrue("Blob should exist (not vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
}
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 4: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
// Upload to Cloud only after replicating ttlUpdate.
id = blobIdList.get(3);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 5: Put TTL greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
// Upload to Cloud after Put and update ttl after TtlUpdate.
id = blobIdList.get(4);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 6: Put TTL greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
// Upload to Cloud after TtlUpdate.
id = blobIdList.get(5);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Verify expiration time of all blobs.
Map<String, CloudBlobMetadata> map = latchBasedInMemoryCloudDestination.getBlobMetadata(blobIdList);
for (BlobId blobId : blobIdList) {
assertEquals("Blob ttl should be infinite now.", Utils.Infinite_Time, map.get(blobId.toString()).getExpirationTime());
}
}
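All six cases reduce to one gate: on the VCR path, a put is uploaded to the cloud only if the blob is permanent or its remaining TTL is at least cloudConfig.vcrMinTtlDays; otherwise the upload waits for the TtlUpdate. A hedged sketch of that decision (the helper is illustrative; the real check lives inside CloudBlobStore):

// Hypothetical gate mirroring the behavior the six cases assert on.
static boolean uploadPutToCloud(long expirationTimeMs, long nowMs, CloudConfig cloudConfig, boolean isVcr) {
  if (!isVcr) {
    return true; // a non-VCR store accepts the put regardless of its TTL (see case 3)
  }
  if (expirationTimeMs == Utils.Infinite_Time) {
    return true; // permanent blob, e.g. after a TtlUpdate has been replicated
  }
  // cases 3/4 (TTL just under the threshold) skip the upload; cases 5/6 (TTL at the threshold) take it
  return expirationTimeMs - nowMs >= TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays);
}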
Use of com.github.ambry.store.Transformer in project ambry by linkedin.
The class ReplicationTest, method replicaThreadTestConverter.
/**
* Tests replication between a local and a remote server that have different
* blob IDs for the same blobs (via StoreKeyConverter).
* @throws Exception
*/
@Test
public void replicaThreadTestConverter() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
MockHost expectedLocalHost = new MockHost(localHost.dataNodeId, clusterMap);
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
Map<PartitionId, List<StoreKey>> idsToBeIgnoredByPartition = new HashMap<>();
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
int batchSize = 4;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate = replicasAndThread.getFirst();
ReplicaThread replicaThread = replicasAndThread.getSecond();
/*
 STORE KEY CONVERTER MAPPING
 Key     Value
 B0      B0'
 B1      B1'
 B2      null

 BEFORE
 Local   Remote
 B0'     B0
         B1
         B2

 AFTER
 Local   Remote
 B0'     B0
 B1'     B1
         B2

 B0 converts to B0' and B1 to B1' for the local host, so local already has B0/B0',
 B1 is transferred as B1', and B2 converts to null, making it invalid for local,
 so it does not count as missing.
 Missing Keys: 1
*/
Map<PartitionId, List<BlobId>> partitionIdToDeleteBlobId = new HashMap<>();
Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
Map<PartitionId, BlobId> expectedPartitionIdToDeleteBlobId = new HashMap<>();
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
List<BlobId> deleteBlobIds = new ArrayList<>();
partitionIdToDeleteBlobId.put(partitionId, deleteBlobIds);
BlobId b0 = generateRandomBlobId(partitionId);
deleteBlobIds.add(b0);
BlobId b0p = generateRandomBlobId(partitionId);
expectedPartitionIdToDeleteBlobId.put(partitionId, b0p);
BlobId b1 = generateRandomBlobId(partitionId);
BlobId b1p = generateRandomBlobId(partitionId);
BlobId b2 = generateRandomBlobId(partitionId);
deleteBlobIds.add(b2);
conversionMap.put(b0, b0p);
conversionMap.put(b1, b1p);
conversionMap.put(b2, null);
// Run the current conversion map through the converter so that BlobIdTransformer can
// create b1p in expectedLocalHost
storeKeyConverter.setConversionMap(conversionMap);
storeKeyConverter.convert(conversionMap.keySet());
addPutMessagesToReplicasOfPartition(Arrays.asList(b0p), Arrays.asList(localHost));
addPutMessagesToReplicasOfPartition(Arrays.asList(b0, b1, b2), Arrays.asList(remoteHost));
addPutMessagesToReplicasOfPartition(Arrays.asList(b0p, b1), Arrays.asList(null, transformer), Arrays.asList(expectedLocalHost));
// Check that expected local host contains the correct blob ids
Set<BlobId> expectedLocalHostBlobIds = new HashSet<>();
expectedLocalHostBlobIds.add(b0p);
expectedLocalHostBlobIds.add(b1p);
for (MessageInfo messageInfo : expectedLocalHost.infosByPartition.get(partitionId)) {
assertTrue("Remove should never fail", expectedLocalHostBlobIds.remove(messageInfo.getStoreKey()));
}
assertTrue("expectedLocalHostBlobIds should now be empty", expectedLocalHostBlobIds.isEmpty());
}
storeKeyConverter.setConversionMap(conversionMap);
int expectedIndex = assertMissingKeysAndFixMissingStoreKeys(0, 2, batchSize, 1, replicaThread, remoteHost, replicasToReplicate);
// Check that there are no missing buffers between expectedLocalHost and localHost
Map<PartitionId, List<ByteBuffer>> missingBuffers = expectedLocalHost.getMissingBuffers(localHost.buffersByPartition);
assertTrue(missingBuffers.isEmpty());
missingBuffers = localHost.getMissingBuffers(expectedLocalHost.buffersByPartition);
assertTrue(missingBuffers.isEmpty());
// delete the blobs
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
List<BlobId> deleteBlobIds = partitionIdToDeleteBlobId.get(partitionId);
for (BlobId deleteBlobId : deleteBlobIds) {
addDeleteMessagesToReplicasOfPartition(partitionId, deleteBlobId, Arrays.asList(remoteHost));
}
addDeleteMessagesToReplicasOfPartition(partitionId, expectedPartitionIdToDeleteBlobId.get(partitionId), Arrays.asList(expectedLocalHost));
}
expectedIndex = assertMissingKeysAndFixMissingStoreKeys(expectedIndex, 2, batchSize, 0, replicaThread, remoteHost, replicasToReplicate);
// Check that there are no missing buffers between expectedLocalHost and localHost
missingBuffers = expectedLocalHost.getMissingBuffers(localHost.buffersByPartition);
assertTrue(missingBuffers.isEmpty());
missingBuffers = localHost.getMissingBuffers(expectedLocalHost.buffersByPartition);
assertTrue(missingBuffers.isEmpty());
// 3 unconverted puts + 2 unconverted deletes = 5 expected missing buffers
verifyNoMoreMissingKeysAndExpectedMissingBufferCount(remoteHost, localHost, replicaThread, replicasToReplicate, idsToBeIgnoredByPartition, storeKeyConverter, expectedIndex, expectedIndex, 5);
}
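The behavior above is driven entirely by the MockStoreKeyConverter mapping: keys that convert to null are invalid locally and are never counted as missing. A compact sketch of wiring such a mapping, using hypothetical b0/b0p/b1/b1p/b2 ids in the shape of the comment's table:

// Hypothetical ids standing in for one partition's B0/B0'/B1/B1'/B2.
Map<StoreKey, StoreKey> mapping = new HashMap<>();
mapping.put(b0, b0p);   // B0 -> B0': local already holds B0'
mapping.put(b1, b1p);   // B1 -> B1': replicated to local as B1'
mapping.put(b2, null);  // B2 -> null: invalid for local, not counted as missing
storeKeyConverter.setConversionMap(mapping);
// convert() runs (and caches) the conversions, as the test does before adding messages
Map<StoreKey, StoreKey> converted = storeKeyConverter.convert(mapping.keySet());
assertEquals(b0p, converted.get(b0));
assertNull("B2 should convert to null", converted.get(b2));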
Use of com.github.ambry.store.Transformer in project ambry by linkedin.
The class ReplicationTest, method replicaThreadLifeVersionLocalLessThanRemote_MissingPuts.
/**
* Tests the case where the local store is missing put records with lifeVersion greater than 0.
*/
@Test
public void replicaThreadLifeVersionLocalLessThanRemote_MissingPuts() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
storeKeyConverter.setConversionMap(conversionMap);
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
Map<PartitionId, List<StoreKey>> idsToBeIgnoredByPartition = new HashMap<>();
Map<PartitionId, List<StoreKey>> idsToBeTtlUpdatedByPartition = new HashMap<>();
short lifeVersion = 1;
for (int i = 0; i < partitionIds.size(); i++) {
List<StoreKey> toBeIgnored = new ArrayList<>();
List<StoreKey> toBeTtlUpdated = new ArrayList<>();
PartitionId partitionId = partitionIds.get(i);
// Adding 1 put to remoteHost at lifeVersion 0
List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 1);
// Adding 1 put to remoteHost at lifeVersion 1
ids.addAll(addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), lifeVersion, 1));
// Adding one put to remoteHost at lifeVersion 1, which would be ttl updated later at lifeVersion 1
StoreKey toTtlUpdateId = addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), lifeVersion, 1).get(0);
ids.add(toTtlUpdateId);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, toTtlUpdateId, Collections.singletonList(remoteHost), UPDATED_EXPIRY_TIME_MS, lifeVersion);
toBeTtlUpdated.add(toTtlUpdateId);
// Adding one put to remoteHost at lifeVersion 0, which would be ttl updated later at lifeVersion 1
toTtlUpdateId = addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 1).get(0);
ids.add(toTtlUpdateId);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, toTtlUpdateId, Collections.singletonList(remoteHost), UPDATED_EXPIRY_TIME_MS, lifeVersion);
toBeTtlUpdated.add(toTtlUpdateId);
// Adding one put to remoteHost, which would be deleted later
StoreKey toDeleteId = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), lifeVersion, 1).get(0);
ids.add(toDeleteId);
addDeleteMessagesToReplicasOfPartition(partitionId, toDeleteId, Collections.singletonList(remoteHost), lifeVersion, EXPIRY_TIME_MS);
toBeIgnored.add(toDeleteId);
// Adding one put to remoteHost, which would be ttl updated and deleted later
StoreKey toDeleteAndTtlUpdateId = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), lifeVersion, 1).get(0);
ids.add(toDeleteAndTtlUpdateId);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, toDeleteAndTtlUpdateId, Collections.singletonList(remoteHost), UPDATED_EXPIRY_TIME_MS, lifeVersion);
toBeTtlUpdated.add(toDeleteAndTtlUpdateId);
addDeleteMessagesToReplicasOfPartition(partitionId, toDeleteAndTtlUpdateId, Collections.singletonList(remoteHost), lifeVersion, UPDATED_EXPIRY_TIME_MS);
toBeIgnored.add(toDeleteAndTtlUpdateId);
// Adding one put to remoteHost at lifeVersion 0, delete it and then add undelete at lifeVersion 1
StoreKey deleteAndUndeleteId = addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 1).get(0);
ids.add(deleteAndUndeleteId);
addDeleteMessagesToReplicasOfPartition(partitionId, deleteAndUndeleteId, Collections.singletonList(remoteHost), (short) 0, EXPIRY_TIME_MS);
addUndeleteMessagesToReplicasOfPartition(partitionId, deleteAndUndeleteId, Collections.singletonList(remoteHost), lifeVersion);
idsToBeIgnoredByPartition.put(partitionId, toBeIgnored);
idsToBeTtlUpdatedByPartition.put(partitionId, toBeTtlUpdated);
// Adding one put to both remote and local host.
ids.addAll(addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(localHost, remoteHost), lifeVersion, 1));
}
int batchSize = 100;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
ReplicaThread replicaThread = replicasAndThread.getSecond();
int missingKeyCount = 5;
List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
for (int i = 0; i < response.size(); i++) {
assertEquals(missingKeyCount, response.get(i).missingStoreMessages.size());
remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
}
replicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos, response, false);
for (int i = 0; i < response.size(); i++) {
assertEquals("Token should have been set correctly in fixMissingStoreKeys()", response.get(i).remoteToken, remoteReplicaInfos.get(i).getToken());
}
// Don't compare buffers here; the put buffers might differ since the lifeVersion may have changed.
for (Map.Entry<PartitionId, List<MessageInfo>> localInfoEntry : localHost.infosByPartition.entrySet()) {
assertEquals("MessageInfo number mismatch", 8, localInfoEntry.getValue().size());
}
checkBlobMessagesAreEqualInLocalAndRemoteHosts(localHost, remoteHost, idsToBeIgnoredByPartition, idsToBeTtlUpdatedByPartition);
}
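Unlike the earlier tests, this one drives the replication cycle by hand rather than through replicate(). The pattern, as used above, is exchangeMetadata() to discover missing keys and new tokens, then fixMissingStoreKeys() to fetch and apply them:

// One manual replication cycle, mirroring the calls made in the test above.
List<ReplicaThread.ExchangeMetadataResponse> responses = replicaThread.exchangeMetadata(
    new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
replicaThread.fixMissingStoreKeys(
    new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos, responses, false);
for (int i = 0; i < responses.size(); i++) {
  // after the fix phase, each local token should have advanced to the remote token
  assertEquals(responses.get(i).remoteToken, remoteReplicaInfos.get(i).getToken());
}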
Use of com.github.ambry.store.Transformer in project ambry by linkedin.
The class ReplicationTest, method replicationAllPauseTest.
/**
* Tests pausing all partitions and makes sure that the replica thread pauses. Also tests that it resumes when one
* eligible partition is re-enabled and that replication completes successfully.
* @throws Exception
*/
@Test
public void replicationAllPauseTest() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
List<PartitionId> partitionIds = clusterMap.getAllPartitionIds(null);
for (PartitionId partitionId : partitionIds) {
// add 10 messages to each partition, placed on the remote host only
addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 10);
}
StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
MockStoreKeyConverterFactory mockStoreKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
mockStoreKeyConverterFactory.setReturnInputIfAbsent(true);
mockStoreKeyConverterFactory.setConversionMap(new HashMap<>());
int batchSize = 4;
StoreKeyConverter storeKeyConverter = mockStoreKeyConverterFactory.getStoreKeyConverter();
Transformer transformer = new ValidatingTransformer(storeKeyFactory, storeKeyConverter);
CountDownLatch readyToPause = new CountDownLatch(1);
CountDownLatch readyToProceed = new CountDownLatch(1);
AtomicReference<CountDownLatch> reachedLimitLatch = new AtomicReference<>(new CountDownLatch(1));
AtomicReference<Exception> exception = new AtomicReference<>();
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, (store, messageInfos) -> {
try {
readyToPause.countDown();
readyToProceed.await();
if (store.messageInfos.size() == remoteHost.infosByPartition.get(store.id).size()) {
reachedLimitLatch.get().countDown();
}
} catch (Exception e) {
exception.set(e);
}
}, null);
ReplicaThread replicaThread = replicasAndThread.getSecond();
Thread thread = Utils.newThread(replicaThread, false);
thread.start();
assertEquals("There should be no disabled partitions", 0, replicaThread.getReplicationDisabledPartitions().size());
// wait to pause replication
readyToPause.await(10, TimeUnit.SECONDS);
replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(null), false);
Set<PartitionId> expectedPaused = new HashSet<>(clusterMap.getAllPartitionIds(null));
assertEquals("Disabled partitions sets do not match", expectedPaused, replicaThread.getReplicationDisabledPartitions());
// signal the replica thread to move forward
readyToProceed.countDown();
// wait for the thread to go into waiting state
assertTrue("Replica thread did not go into waiting state", TestUtils.waitUntilExpectedState(thread, Thread.State.WAITING, 10000));
// unpause one partition
replicaThread.controlReplicationForPartitions(Collections.singletonList(partitionIds.get(0)), true);
expectedPaused.remove(partitionIds.get(0));
assertEquals("Disabled partitions sets do not match", expectedPaused, replicaThread.getReplicationDisabledPartitions());
// wait for it to catch up
reachedLimitLatch.get().await(10, TimeUnit.SECONDS);
// reset limit
reachedLimitLatch.set(new CountDownLatch(partitionIds.size() - 1));
// unpause all partitions
replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(null), true);
assertEquals("There should be no disabled partitions", 0, replicaThread.getReplicationDisabledPartitions().size());
// wait until all catch up
reachedLimitLatch.get().await(10, TimeUnit.SECONDS);
// shutdown
replicaThread.shutdown();
if (exception.get() != null) {
throw exception.get();
}
Map<PartitionId, List<MessageInfo>> missingInfos = remoteHost.getMissingInfos(localHost.infosByPartition);
for (Map.Entry<PartitionId, List<MessageInfo>> entry : missingInfos.entrySet()) {
assertEquals("No infos should be missing", 0, entry.getValue().size());
}
Map<PartitionId, List<ByteBuffer>> missingBuffers = remoteHost.getMissingBuffers(localHost.buffersByPartition);
for (Map.Entry<PartitionId, List<ByteBuffer>> entry : missingBuffers.entrySet()) {
assertEquals("No buffers should be missing", 0, entry.getValue().size());
}
}
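The pause/resume machinery this test exercises is a single call, controlReplicationForPartitions(partitions, enabled). A compact sketch of the pattern, assuming the same replicaThread, clusterMap and partitionIds as above:

// pause replication everywhere, then re-enable a single partition
replicaThread.controlReplicationForPartitions(clusterMap.getAllPartitionIds(null), false);
replicaThread.controlReplicationForPartitions(Collections.singletonList(partitionIds.get(0)), true);
// the disabled set should now contain every partition except the re-enabled one
Set<PartitionId> stillPaused = replicaThread.getReplicationDisabledPartitions();
assertFalse("Re-enabled partition should not be paused", stillPaused.contains(partitionIds.get(0)));
assertEquals("All other partitions should remain paused", partitionIds.size() - 1, stillPaused.size());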