Use of com.github.ambry.store.MessageInfo in project ambry by LinkedIn.
The class BlobIdTransformer, method warmup.
@Override
public void warmup(List<MessageInfo> messageInfos) throws Exception {
  List<StoreKey> storeKeys = new ArrayList<>();
  for (MessageInfo messageInfo : messageInfos) {
    if (!messageInfo.isExpired() && !messageInfo.isDeleted()) {
      storeKeys.add(messageInfo.getStoreKey());
    }
  }
  storeKeyConverter.dropCache();
  storeKeyConverter.convert(storeKeys);
}
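For context, a caller is expected to invoke warmup before transforming a batch, so that the single bulk convert call populates the converter cache that later per-message lookups hit. Below is a minimal sketch of that pattern, not code from the Ambry source: fetchReplicatedMessageInfos, readMessages, and writeToStore are hypothetical helpers standing in for whatever supplies and consumes the messages.

Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
List<MessageInfo> messageInfos = fetchReplicatedMessageInfos(); // hypothetical source of MessageInfo
transformer.warmup(messageInfos); // one bulk convert caches mappings for all live (non-expired, non-deleted) keys
for (Message message : readMessages()) { // hypothetical source of Messages
  TransformationOutput output = transformer.transform(message);
  if (output.getException() == null) {
    writeToStore(output.getMsg()); // hypothetical sink
  }
}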
Use of com.github.ambry.store.MessageInfo in project ambry by LinkedIn.
The class BlobIdTransformer, method newMessage.
/**
 * Creates a new Message from the old Message input stream, replacing the old
 * store key and account/container IDs with the new store key and its
 * account/container IDs.
 * @param inputStream the input stream of the Message
 * @param newKey the new StoreKey
 * @param oldMessageInfo the {@link MessageInfo} of the message being transformed
 * @return the new Message
 * @throws Exception if the stream cannot be parsed or the keys cannot be converted
 */
private Message newMessage(InputStream inputStream, StoreKey newKey, MessageInfo oldMessageInfo) throws Exception {
  MessageHeader_Format headerFormat = getMessageHeader(inputStream);
  storeKeyFactory.getStoreKey(new DataInputStream(inputStream));
  BlobId newBlobId = (BlobId) newKey;
  if (headerFormat.isPutRecord()) {
    if (headerFormat.hasLifeVersion() && headerFormat.getLifeVersion() != oldMessageInfo.getLifeVersion()) {
      // The original Put buffer might have lifeVersion as 0, but the message info might have a higher lifeVersion.
      logger.trace("LifeVersion in stream: {} failed to match lifeVersion from Index: {} for key {}",
          headerFormat.getLifeVersion(), oldMessageInfo.getLifeVersion(), oldMessageInfo.getStoreKey());
    }
    ByteBuffer blobEncryptionKey = null;
    if (headerFormat.hasEncryptionKeyRecord()) {
      blobEncryptionKey = deserializeBlobEncryptionKey(inputStream);
    }
    BlobProperties oldProperties = deserializeBlobProperties(inputStream);
    ByteBuffer userMetaData = deserializeUserMetadata(inputStream);
    BlobData blobData = deserializeBlob(inputStream);
    ByteBuf blobDataBytes = blobData.content();
    long blobPropertiesSize = oldProperties.getBlobSize();
    // for metadata blobs, the content is rewritten below with the transformed data chunk IDs
    if (blobData.getBlobType().equals(BlobType.MetadataBlob)) {
      ByteBuffer serializedMetadataContent = blobDataBytes.nioBuffer();
      CompositeBlobInfo compositeBlobInfo =
          MetadataContentSerDe.deserializeMetadataContentRecord(serializedMetadataContent, storeKeyFactory);
      Map<StoreKey, StoreKey> convertedKeys = storeKeyConverter.convert(compositeBlobInfo.getKeys());
      List<StoreKey> newKeys = new ArrayList<>();
      boolean isOldMetadataKeyDifferentFromNew = !oldMessageInfo.getStoreKey().getID().equals(newKey.getID());
      short metadataAccountId = newBlobId.getAccountId();
      short metadataContainerId = newBlobId.getContainerId();
      for (StoreKey oldDataChunkKey : compositeBlobInfo.getKeys()) {
        StoreKey newDataChunkKey = convertedKeys.get(oldDataChunkKey);
        if (newDataChunkKey == null) {
          throw new IllegalStateException("Found metadata chunk with a deprecated data chunk. "
              + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID()
              + " New MetadataID: " + newKey.getID()
              + " Old Datachunk ID: " + oldDataChunkKey.getID());
        }
        if (isOldMetadataKeyDifferentFromNew && newDataChunkKey.getID().equals(oldDataChunkKey.getID())) {
          throw new IllegalStateException("Found changed metadata chunk with an unchanged data chunk"
              + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID()
              + " New MetadataID: " + newKey.getID()
              + " Old Datachunk ID: " + oldDataChunkKey.getID());
        }
        if (!isOldMetadataKeyDifferentFromNew && !newDataChunkKey.getID().equals(oldDataChunkKey.getID())) {
          throw new IllegalStateException("Found unchanged metadata chunk with a changed data chunk"
              + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID()
              + " New MetadataID: " + newKey.getID()
              + " Old Datachunk ID: " + oldDataChunkKey.getID()
              + " New Datachunk ID: " + newDataChunkKey.getID());
        }
        BlobId newDataChunkBlobId = (BlobId) newDataChunkKey;
        if (newDataChunkBlobId.getAccountId() != metadataAccountId
            || newDataChunkBlobId.getContainerId() != metadataContainerId) {
          throw new IllegalStateException("Found changed metadata chunk with a datachunk with a different account/container"
              + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID()
              + " New MetadataID: " + newKey.getID()
              + " Old Datachunk ID: " + oldDataChunkKey.getID()
              + " New Datachunk ID: " + newDataChunkBlobId.getID()
              + " Metadata AccountId: " + metadataAccountId
              + " Metadata ContainerId: " + metadataContainerId
              + " Datachunk AccountId: " + newDataChunkBlobId.getAccountId()
              + " Datachunk ContainerId: " + newDataChunkBlobId.getContainerId());
        }
        newKeys.add(newDataChunkKey);
      }
      ByteBuffer metadataContent;
      if (compositeBlobInfo.getMetadataContentVersion() == Metadata_Content_Version_V2) {
        metadataContent = MetadataContentSerDe.serializeMetadataContentV2(compositeBlobInfo.getChunkSize(),
            compositeBlobInfo.getTotalSize(), newKeys);
      } else if (compositeBlobInfo.getMetadataContentVersion() == Metadata_Content_Version_V3) {
        List<Pair<StoreKey, Long>> keyAndSizeList = new ArrayList<>();
        List<CompositeBlobInfo.ChunkMetadata> chunkMetadataList = compositeBlobInfo.getChunkMetadataList();
        for (int i = 0; i < newKeys.size(); i++) {
          keyAndSizeList.add(new Pair<>(newKeys.get(i), chunkMetadataList.get(i).getSize()));
        }
        metadataContent = MetadataContentSerDe.serializeMetadataContentV3(compositeBlobInfo.getTotalSize(),
            keyAndSizeList);
      } else {
        throw new IllegalStateException("Unexpected metadata content version from composite blob: "
            + compositeBlobInfo.getMetadataContentVersion());
      }
      blobPropertiesSize = compositeBlobInfo.getTotalSize();
      metadataContent.flip();
      blobDataBytes.release();
      blobDataBytes = Unpooled.wrappedBuffer(metadataContent);
      blobData = new BlobData(blobData.getBlobType(), metadataContent.remaining(), blobDataBytes);
    }
    BlobProperties newProperties =
        new BlobProperties(blobPropertiesSize, oldProperties.getServiceId(), oldProperties.getOwnerId(),
            oldProperties.getContentType(), oldProperties.isPrivate(), oldProperties.getTimeToLiveInSeconds(),
            oldProperties.getCreationTimeInMs(), newBlobId.getAccountId(), newBlobId.getContainerId(),
            oldProperties.isEncrypted(), oldProperties.getExternalAssetTag(), oldProperties.getContentEncoding(),
            oldProperties.getFilename());
    // BlobIdTransformer only exists on ambry-server, and replication between servers relies on the blocking channel,
    // which still uses java ByteBuffer, so there is no need to consider releasing anything here.
    // TODO: when netty ByteBuf is adopted for the blocking channel on ambry-server, remember to release this ByteBuf.
    PutMessageFormatInputStream putMessageFormatInputStream =
        new PutMessageFormatInputStream(newKey, blobEncryptionKey, newProperties, userMetaData,
            new ByteBufInputStream(blobDataBytes, true), blobData.getSize(), blobData.getBlobType(),
            oldMessageInfo.getLifeVersion());
    // Reuse the original CRC if present in the oldMessageInfo. This is important to ensure that messages that are
    // received via replication are sent to the store with proper CRCs (which the store needs to detect duplicate
    // messages). As an additional guard, here the original CRC is only reused if the key's ID in string form is the
    // same after conversion.
    Long originalCrc = oldMessageInfo.getStoreKey().getID().equals(newKey.getID()) ? oldMessageInfo.getCrc() : null;
    MessageInfo info = new MessageInfo.Builder(newKey, putMessageFormatInputStream.getSize(),
        newProperties.getAccountId(), newProperties.getContainerId(), oldMessageInfo.getOperationTimeMs())
        .isTtlUpdated(oldMessageInfo.isTtlUpdated())
        .expirationTimeInMs(oldMessageInfo.getExpirationTimeInMs())
        .crc(originalCrc)
        .lifeVersion(oldMessageInfo.getLifeVersion())
        .build();
    return new Message(info, putMessageFormatInputStream);
  } else {
    throw new IllegalArgumentException("Only 'put' records are valid");
  }
}
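The CRC-reuse guard near the end of newMessage is worth isolating: the original CRC is carried forward only when conversion leaves the key's string ID unchanged, since a changed ID implies re-serialized content whose old CRC no longer applies. A hypothetical one-line helper (not in the Ambry source) capturing that rule:

// Hypothetical helper expressing the CRC-reuse rule from newMessage.
static Long crcToReuse(MessageInfo oldInfo, StoreKey newKey) {
  // reuse the CRC only if the key's ID in string form is the same after conversion
  return oldInfo.getStoreKey().getID().equals(newKey.getID()) ? oldInfo.getCrc() : null;
}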
Use of com.github.ambry.store.MessageInfo in project ambry by LinkedIn.
The class MockHost, method getRemoteReplicaInfos.
/**
* Gets the list of {@link RemoteReplicaInfo} from this host to the given {@code remoteHost}
* @param remoteHost the host whose replica info is required.
* @param listener the {@link ReplicationTest.StoreEventListener} to use.
* @return the list of {@link RemoteReplicaInfo} from this host to the given {@code remoteHost}
*/
List<RemoteReplicaInfo> getRemoteReplicaInfos(MockHost remoteHost, ReplicationTest.StoreEventListener listener) {
  List<? extends ReplicaId> replicaIds = clusterMap.getReplicaIds(dataNodeId);
  List<RemoteReplicaInfo> remoteReplicaInfos = new ArrayList<>();
  for (ReplicaId replicaId : replicaIds) {
    for (ReplicaId peerReplicaId : replicaId.getPeerReplicaIds()) {
      if (peerReplicaId.getDataNodeId().equals(remoteHost.dataNodeId)) {
        PartitionId partitionId = replicaId.getPartitionId();
        InMemoryStore store = storesByPartition.computeIfAbsent(partitionId,
            partitionId1 -> new InMemoryStore(partitionId,
                infosByPartition.computeIfAbsent(partitionId1,
                    (Function<PartitionId, List<MessageInfo>>) partitionId2 -> new ArrayList<>()),
                buffersByPartition.computeIfAbsent(partitionId1,
                    (Function<PartitionId, List<ByteBuffer>>) partitionId22 -> new ArrayList<>()),
                listener));
        store.start();
        RemoteReplicaInfo remoteReplicaInfo =
            new RemoteReplicaInfo(peerReplicaId, replicaId, store, new MockFindToken(0, 0), Long.MAX_VALUE,
                SystemTime.getInstance(), new Port(peerReplicaId.getDataNodeId().getPort(), PortType.PLAINTEXT));
        remoteReplicaInfos.add(remoteReplicaInfo);
      }
    }
  }
  return remoteReplicaInfos;
}
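Typical usage, mirroring the test that follows: wire the returned infos into a ReplicaThread so it replicates from the remote host. A minimal sketch assuming an already-constructed replicaThread:

List<RemoteReplicaInfo> remoteReplicaInfos = localHost.getRemoteReplicaInfos(remoteHost, null);
for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfos) {
  replicaThread.addRemoteReplicaInfo(remoteReplicaInfo); // replicaThread construction is shown in the test below
}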
Use of com.github.ambry.store.MessageInfo in project ambry by LinkedIn.
The class ReplicationTest, method blockDeprecatedContainerReplicationTest.
/**
 * Tests that deprecated containers are blocked during replication.
*/
@Test
public void blockDeprecatedContainerReplicationTest() throws Exception {
  Properties properties = new Properties();
  properties.setProperty("replication.container.deletion.enabled", "true");
  replicationConfig = new ReplicationConfig(new VerifiableProperties(properties));
  MockClusterMap clusterMap = new MockClusterMap();
  Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
  MockHost localHost = localAndRemoteHosts.getFirst();
  MockHost remoteHost = localAndRemoteHosts.getSecond();
  MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  storeKeyConverterFactory.setConversionMap(new HashMap<>());
  storeKeyConverterFactory.setReturnInputIfAbsent(true);
  MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter =
      storeKeyConverterFactory.getStoreKeyConverter();
  Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
  storeKeyConverter.setConversionMap(conversionMap);
  List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
  for (int i = 0; i < partitionIds.size(); i++) {
    PartitionId partitionId = partitionIds.get(i);
    BlobId b0 = generateRandomBlobId(partitionId);
    conversionMap.put(b0, b0);
    BlobId b1 = generateRandomBlobId(partitionId);
    conversionMap.put(b1, b1);
    // register the identity conversions with the converter
    storeKeyConverter.setConversionMap(conversionMap);
    storeKeyConverter.convert(conversionMap.keySet());
    // add 2 messages to the remote host only
    addPutMessagesToReplicasOfPartition(Arrays.asList(b0, b1), Collections.singletonList(remoteHost));
  }
  StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
  Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
  int batchSize = 4;
  ReplicationMetrics replicationMetrics =
      new ReplicationMetrics(new MetricRegistry(), clusterMap.getReplicaIds(localHost.dataNodeId));
  replicationMetrics.populateSingleColoMetrics(remoteHost.dataNodeId.getDatacenterName());
  List<RemoteReplicaInfo> remoteReplicaInfoList = localHost.getRemoteReplicaInfos(remoteHost, null);
  Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate =
      Collections.singletonMap(remoteHost.dataNodeId, remoteReplicaInfoList);
  storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
  Map<DataNodeId, MockHost> hosts = new HashMap<>();
  hosts.put(remoteHost.dataNodeId, remoteHost);
  MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, batchSize);
  Predicate<MessageInfo> skipPredicate = new ReplicationSkipPredicate(accountService, replicationConfig);
  ReplicaThread replicaThread =
      new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap,
          new AtomicInteger(0), localHost.dataNodeId, connectionPool, replicationConfig, replicationMetrics, null,
          storeKeyConverter, transformer, clusterMap.getMetricRegistry(), false,
          localHost.dataNodeId.getDatacenterName(), new ResponseHandler(clusterMap), time, null, skipPredicate);
  for (RemoteReplicaInfo remoteReplicaInfo : remoteReplicaInfoList) {
    replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
  }
  List<RemoteReplicaInfo> remoteReplicaInfos = replicasToReplicate.get(remoteHost.dataNodeId);
  DataNodeId remoteNode = remoteReplicaInfos.get(0).getReplicaId().getDataNodeId();
  ReplicaMetadataResponse response = replicaThread.getReplicaMetadataResponse(remoteReplicaInfos,
      new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteNode);
  // case 1: DELETE_IN_PROGRESS container with retention time qualified.
  for (int i = 0; i < 2; i++) {
    RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
    new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
    for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
      short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
      short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
      Container container = Mockito.mock(Container.class);
      Account account = Mockito.mock(Account.class);
      Mockito.when(account.getContainerById(containerId)).thenReturn(container);
      Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
      Mockito.when(container.getDeleteTriggerTime()).thenReturn(System.currentTimeMillis()
          - TimeUnit.DAYS.toMillis(replicationConfig.replicationContainerDeletionRetentionDays + 1));
      Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.DELETE_IN_PROGRESS);
    }
    Set<MessageInfo> remoteMissingStoreKeys =
        replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
    assertEquals("All DELETE_IN_PROGRESS blobs qualified with retention time should be skipped during replication", 0,
        remoteMissingStoreKeys.size());
    Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
    replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo,
        remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
  }
  // case 2: DELETE_IN_PROGRESS container with retention time not qualified.
  for (int i = 2; i < 4; i++) {
    RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
    new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
    for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
      short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
      short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
      Container container = Mockito.mock(Container.class);
      Account account = Mockito.mock(Account.class);
      Mockito.when(account.getContainerById(containerId)).thenReturn(container);
      Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
      Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.DELETE_IN_PROGRESS);
      Mockito.when(container.getDeleteTriggerTime()).thenReturn(System.currentTimeMillis());
    }
    Set<MessageInfo> remoteMissingStoreKeys =
        replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
    assertEquals("All DELETE_IN_PROGRESS blobs not qualified with retention time should not be skipped during replication",
        2, remoteMissingStoreKeys.size());
    Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
    replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo,
        remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
  }
  // case 3: INACTIVE container.
  for (int i = 4; i < 6; i++) {
    RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
    new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
    for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
      short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
      short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
      Container container = Mockito.mock(Container.class);
      Account account = Mockito.mock(Account.class);
      Mockito.when(account.getContainerById(containerId)).thenReturn(container);
      Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
      Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.INACTIVE);
    }
    Set<MessageInfo> remoteMissingStoreKeys =
        replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
    assertEquals("All INACTIVE blobs should be skipped during replication", 0, remoteMissingStoreKeys.size());
    Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
    replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo,
        remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
  }
  // case 4: ACTIVE container.
  for (int i = 6; i < 8; i++) {
    RemoteReplicaInfo remoteReplicaInfo = remoteReplicaInfos.get(i);
    ReplicaMetadataResponseInfo replicaMetadataResponseInfo = response.getReplicaMetadataResponseInfoList().get(i);
    new ResponseHandler(clusterMap).onEvent(remoteReplicaInfo.getReplicaId(), replicaMetadataResponseInfo.getError());
    for (int j = 0; j < replicaMetadataResponseInfo.getMessageInfoList().size(); j++) {
      short accountId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getAccountId();
      short containerId = replicaMetadataResponseInfo.getMessageInfoList().get(j).getContainerId();
      Container container = Mockito.mock(Container.class);
      Account account = Mockito.mock(Account.class);
      Mockito.when(account.getContainerById(containerId)).thenReturn(container);
      Mockito.when(accountService.getAccountById(accountId)).thenReturn(account);
      Mockito.when(container.getStatus()).thenReturn(Container.ContainerStatus.ACTIVE);
    }
    Set<MessageInfo> remoteMissingStoreKeys =
        replicaThread.getMissingStoreMessages(replicaMetadataResponseInfo, remoteNode, remoteReplicaInfo);
    assertEquals("All non-deprecated blobs should not be skipped during replication", 2,
        remoteMissingStoreKeys.size());
    Map<StoreKey, StoreKey> remoteKeyToLocalKeyMap = replicaThread.batchConvertReplicaMetadataResponseKeys(response);
    replicaThread.processReplicaMetadataResponse(remoteMissingStoreKeys, replicaMetadataResponseInfo,
        remoteReplicaInfo, remoteNode, remoteKeyToLocalKeyMap);
  }
}
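The four cases above exercise one rule: a blob is skipped when its container is INACTIVE, or is DELETE_IN_PROGRESS and its delete-trigger time is older than the configured retention window; ACTIVE containers, and DELETE_IN_PROGRESS containers still inside the window, replicate normally. The following is a hedged sketch of that rule as the test asserts it, not the actual ReplicationSkipPredicate source:

// Sketch of the skip rule the four cases verify (assumption: mirrors the asserted behavior, not the real predicate's code).
static boolean shouldSkip(Container container, long nowMs, long retentionDays) {
  switch (container.getStatus()) {
    case INACTIVE:
      return true; // case 3: always skipped
    case DELETE_IN_PROGRESS: {
      long retentionMs = TimeUnit.DAYS.toMillis(retentionDays);
      return container.getDeleteTriggerTime() + retentionMs < nowMs; // case 1 skipped, case 2 replicated
    }
    default:
      return false; // case 4: ACTIVE blobs always replicate
  }
}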
Use of com.github.ambry.store.MessageInfo in project ambry by LinkedIn.
The class ReplicationTest, method replicaFromStandbyToInactiveTest.
/**
 * Tests the STANDBY -> INACTIVE transition on an existing replica (both success and failure cases).
*/
@Test
public void replicaFromStandbyToInactiveTest() throws Exception {
  MockClusterMap clusterMap = new MockClusterMap();
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  MockHelixParticipant.metricRegistry = new MetricRegistry();
  MockHelixParticipant mockHelixParticipant = new MockHelixParticipant(clusterMapConfig);
  Pair<StorageManager, ReplicationManager> managers =
      createStorageManagerAndReplicationManager(clusterMap, clusterMapConfig, mockHelixParticipant);
  StorageManager storageManager = managers.getFirst();
  MockReplicationManager replicationManager = (MockReplicationManager) managers.getSecond();
  // get an existing partition to test both success and failure cases
  PartitionId existingPartition = replicationManager.partitionToPartitionInfo.keySet().iterator().next();
  storageManager.shutdownBlobStore(existingPartition);
  try {
    mockHelixParticipant.onPartitionBecomeInactiveFromStandby(existingPartition.toPathString());
    fail("should fail because store is not started");
  } catch (StateTransitionException e) {
    assertEquals("Error code doesn't match", StoreNotStarted, e.getErrorCode());
  }
  // restart the store and trigger the Standby-To-Inactive transition again
  storageManager.startBlobStore(existingPartition);
  // write a blob with size = 100 into the local store (end offset of last PUT = 100 + 18 = 118)
  Store localStore = storageManager.getStore(existingPartition);
  MockId id = new MockId(TestUtils.getRandomString(10), Utils.getRandomShort(TestUtils.RANDOM),
      Utils.getRandomShort(TestUtils.RANDOM));
  long crc = (new Random()).nextLong();
  long blobSize = 100;
  MessageInfo info = new MessageInfo(id, blobSize, false, false, Utils.Infinite_Time, crc, id.getAccountId(),
      id.getContainerId(), Utils.Infinite_Time);
  List<MessageInfo> infos = new ArrayList<>();
  List<ByteBuffer> buffers = new ArrayList<>();
  ByteBuffer buffer = ByteBuffer.wrap(TestUtils.getRandomBytes((int) blobSize));
  infos.add(info);
  buffers.add(buffer);
  localStore.put(new MockMessageWriteSet(infos, buffers));
  ReplicaId localReplica = storageManager.getReplica(existingPartition.toPathString());
  // override the partition state change listener in ReplicationManager to help thread manipulation
  mockHelixParticipant.registerPartitionStateChangeListener(StateModelListenerType.ReplicationManagerListener,
      replicationManager.replicationListener);
  CountDownLatch participantLatch = new CountDownLatch(1);
  replicationManager.listenerExecutionLatch = new CountDownLatch(1);
  // create a new thread and trigger the STANDBY -> INACTIVE transition
  Utils.newThread(() -> {
    mockHelixParticipant.onPartitionBecomeInactiveFromStandby(existingPartition.toPathString());
    participantLatch.countDown();
  }, false).start();
  assertTrue("Partition state change listener didn't get called within 1 sec",
      replicationManager.listenerExecutionLatch.await(1, TimeUnit.SECONDS));
  assertEquals("Local store state should be INACTIVE", ReplicaState.INACTIVE,
      storageManager.getStore(existingPartition).getCurrentState());
  List<RemoteReplicaInfo> remoteReplicaInfos =
      replicationManager.partitionToPartitionInfo.get(existingPartition).getRemoteReplicaInfos();
  ReplicaId peerReplica1 = remoteReplicaInfos.get(0).getReplicaId();
  assertFalse("Sync up should not complete because not enough replicas have caught up",
      mockHelixParticipant.getReplicaSyncUpManager()
          .updateReplicaLagAndCheckSyncStatus(localReplica, peerReplica1, 10L, ReplicaState.INACTIVE));
  // pick another remote replica for the second lag update
  ReplicaId peerReplica2 = remoteReplicaInfos.get(1).getReplicaId();
  // make the first peer replica catch up with the last PUT in the local store
  replicationManager.updateTotalBytesReadByRemoteReplica(existingPartition,
      peerReplica1.getDataNodeId().getHostname(), peerReplica1.getReplicaPath(), 118);
  assertFalse("Sync up shouldn't complete because only one replica has caught up with the local replica",
      mockHelixParticipant.getReplicaSyncUpManager().isSyncUpComplete(localReplica));
  // make the second peer replica catch up with the last PUT in the local store
  replicationManager.updateTotalBytesReadByRemoteReplica(existingPartition,
      peerReplica2.getDataNodeId().getHostname(), peerReplica2.getReplicaPath(), 118);
  assertTrue("Standby-To-Inactive transition didn't complete within 1 sec",
      participantLatch.await(1, TimeUnit.SECONDS));
  // we purposely update the lag against the local replica to verify that it is no longer in ReplicaSyncUpManager:
  // deactivation is complete, so the local replica should have been removed from the "replicaToLagInfos" map.
  assertFalse("Sync up should complete (2 replicas have caught up), hence updated should be false",
      mockHelixParticipant.getReplicaSyncUpManager()
          .updateReplicaLagAndCheckSyncStatus(localReplica, peerReplica2, 0L, ReplicaState.INACTIVE));
  storageManager.shutdown();
}
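The lag updates above encode the deactivation handshake: the local replica's last PUT ends at offset 118 (100 bytes of blob content plus 18 bytes of record overhead, per the comment in the test), and sync-up completes only once two peers report totalBytesRead at or beyond that offset. A hypothetical condensation of that rule follows; ReplicaSyncUpManager's real bookkeeping is more involved:

// Hypothetical sketch: sync-up completes once enough peers have caught up to the local end offset.
static boolean isSyncUpComplete(long localEndOffset, List<Long> peerBytesRead, int requiredPeers) {
  long caughtUp = peerBytesRead.stream().filter(bytes -> bytes >= localEndOffset).count();
  return caughtUp >= requiredPeers; // in the test: localEndOffset = 118, requiredPeers = 2
}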