use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class ReplicationTest method deletionAfterMetadataExchangeTest.
/**
* Test the case where a blob gets deleted after a replication metadata exchange completes and identifies the blob as
* a candidate. The subsequent GetRequest should succeed because replication makes an Include_All call, and
* fixMissingStoreKeys() should succeed without exceptions. The blob should not be put locally.
*/
@Test
public void deletionAfterMetadataExchangeTest() throws Exception {
int batchSize = 400;
ReplicationTestSetup testSetup = new ReplicationTestSetup(batchSize);
short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
List<PartitionId> partitionIds = testSetup.partitionIds;
MockHost remoteHost = testSetup.remoteHost;
MockHost localHost = testSetup.localHost;
Map<PartitionId, Set<StoreKey>> idsToExpectByPartition = new HashMap<>();
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
// add 5 messages to remote host only.
Set<StoreKey> expectedIds = new HashSet<>(addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 5));
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
boolean toEncrypt = TestUtils.RANDOM.nextBoolean();
// add an expired message to the remote host only
StoreKey id = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, partitionId, toEncrypt, BlobId.BlobDataType.DATACHUNK);
PutMsgInfoAndBuffer msgInfoAndBuffer = createPutMessage(id, accountId, containerId, toEncrypt);
remoteHost.addMessage(partitionId, new MessageInfo(id, msgInfoAndBuffer.byteBuffer.remaining(), 1, accountId, containerId, msgInfoAndBuffer.messageInfo.getOperationTimeMs()), msgInfoAndBuffer.byteBuffer);
// add 3 messages to the remote host only
expectedIds.addAll(addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 3));
// delete the very first blob in the remote host only (and delete it from expected list)
Iterator<StoreKey> iter = expectedIds.iterator();
addDeleteMessagesToReplicasOfPartition(partitionId, iter.next(), Collections.singletonList(remoteHost));
iter.remove();
// PUT and DELETE a blob in the remote host only
id = addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), 1).get(0);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost));
idsToExpectByPartition.put(partitionId, expectedIds);
}
// Do the replica metadata exchange.
List<ReplicaThread.ExchangeMetadataResponse> responses = testSetup.replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), testSetup.replicasToReplicate.get(remoteHost.dataNodeId));
Assert.assertEquals("Actual keys in Exchange Metadata Response different from expected", idsToExpectByPartition.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()), responses.stream().map(ReplicaThread.ExchangeMetadataResponse::getMissingStoreKeys).flatMap(Collection::stream).collect(Collectors.toSet()));
// Now delete a blob in the remote host only (for every partition) and remove it from the expected set.
// Even though these keys were identified as missing during the metadata exchange, they should not be
// put locally, and this cycle of replication must still be successful.
for (PartitionId partitionId : partitionIds) {
Iterator<StoreKey> iter = idsToExpectByPartition.get(partitionId).iterator();
iter.next();
StoreKey keyToDelete = iter.next();
addDeleteMessagesToReplicasOfPartition(partitionId, keyToDelete, Collections.singletonList(remoteHost));
iter.remove();
}
testSetup.replicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost, batchSize), testSetup.replicasToReplicate.get(remoteHost.dataNodeId), responses, false);
Assert.assertEquals(idsToExpectByPartition.keySet(), localHost.infosByPartition.keySet());
Assert.assertEquals("Actual keys in Exchange Metadata Response different from expected", idsToExpectByPartition.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()), localHost.infosByPartition.values().stream().flatMap(Collection::stream).map(MessageInfo::getStoreKey).collect(Collectors.toSet()));
}
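The Include_All behavior referenced in the javadoc corresponds to the GetOption that replication sets on its GetRequest. The following is a minimal sketch of such a request; the correlationId, client id string, and missingKeys collection are illustrative placeholders rather than values from the test above.
// Illustrative only: fetch missing keys with GetOption.Include_All so that blobs deleted or
// expired after the metadata exchange can still be served instead of failing the request.
List<PartitionRequestInfo> partitionRequestInfoList = Collections.singletonList(new PartitionRequestInfo(partitionId, new ArrayList<>(missingKeys)));
GetRequest getRequest = new GetRequest(correlationId, "replication-fetch", MessageFormatFlags.All, partitionRequestInfoList, GetOption.Include_All);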
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class ReplicationTest method replicaThreadLifeVersionLocalLessThanRemote_FinalState_NotDelete.
/**
* Tests the case where the lifeVersion in local is less than the lifeVersion in remote and the final state is not
* a delete; the final operation can be a Put, TtlUpdate or Undelete.
* @throws Exception
*/
@Test
public void replicaThreadLifeVersionLocalLessThanRemote_FinalState_NotDelete() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
storeKeyConverter.setConversionMap(conversionMap);
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
Map<PartitionId, List<StoreKey>> idsByPartition = new HashMap<>();
Map<PartitionId, StoreKey> idsToBeIgnoredByPartition = new HashMap<>();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
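// Notation used in the case comments below (inferred from the helper calls): P = Put, T = TtlUpdate,
// D = Delete, U = Undelete, and the digit is the lifeVersion. For example, "P0, T0 -> [U1]" means the
// local host starts with a Put and a TtlUpdate at lifeVersion 0, and replication is expected to add
// an Undelete at lifeVersion 1.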
// Remote host gets 5 puts at lifeVersion 1 (P1), each followed by a ttl update (T1).
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), (short) 1, 5);
for (StoreKey id : ids) {
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), UPDATED_EXPIRY_TIME_MS, (short) 1);
}
// 1 Missing
StoreKey id = ids.get(0);
// 2 P0 -> [U1, T1]
id = ids.get(1);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
// 3 P0, T0 -> [U1]
id = ids.get(2);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Arrays.asList(localHost), UPDATED_EXPIRY_TIME_MS, (short) 0);
// 4 P0, T0, D0 -> [U1]
id = ids.get(3);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Arrays.asList(localHost), UPDATED_EXPIRY_TIME_MS, (short) 0);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, UPDATED_EXPIRY_TIME_MS);
// 5 P0, D0 -> [U1, T1]
id = ids.get(4);
addPutMessagesToReplicasOfPartition(Collections.singletonList(id), Collections.singletonList(localHost));
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, UPDATED_EXPIRY_TIME_MS);
// Track the ids so that the verification loop below actually runs; without this,
// idsByPartition stays empty and the per-blob checks are never executed.
idsByPartition.put(partitionId, ids);
}
int batchSize = 100;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
ReplicaThread replicaThread = replicasAndThread.getSecond();
// There is one missing key
List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
for (int i = 0; i < response.size(); i++) {
assertEquals(1, response.get(i).missingStoreMessages.size());
remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
}
replicaThread.fixMissingStoreKeys(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos, response, false);
// Before replication, the local host has 8 message infos per partition; the metadata exchange plus fixMissingStoreKeys adds another 8.
for (Map.Entry<PartitionId, List<MessageInfo>> localInfoEntry : localHost.infosByPartition.entrySet()) {
assertEquals("MessageInfo number mismatch", 16, localInfoEntry.getValue().size());
}
for (Map.Entry<PartitionId, List<StoreKey>> idsEntry : idsByPartition.entrySet()) {
List<MessageInfo> remoteInfos = remoteHost.infosByPartition.get(idsEntry.getKey());
List<MessageInfo> localInfos = localHost.infosByPartition.get(idsEntry.getKey());
for (StoreKey id : idsEntry.getValue()) {
// Null-safe check: no ids are ignored in this test, so the map lookup can return null.
if (!id.equals(idsToBeIgnoredByPartition.get(idsEntry.getKey()))) {
MessageInfo localInfo = getMergedMessageInfo(id, localInfos);
MessageInfo remoteInfo = getMergedMessageInfo(id, remoteInfos);
// The final state is not a delete, so the blob should be alive on both hosts.
assertFalse(localInfo.isDeleted());
assertFalse(remoteInfo.isDeleted());
assertTrue(localInfo.isTtlUpdated());
assertTrue(remoteInfo.isTtlUpdated());
assertEquals(localInfo.getLifeVersion(), remoteInfo.getLifeVersion());
}
}
}
}
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class ReplicationTest method replicaThreadLifeVersionLocalLessThanRemote_FinalState_Delete.
/**
* Tests the case where the lifeVersion in local is less than the lifeVersion in remote and the final state from
* the remote is a delete.
* @throws Exception
*/
@Test
public void replicaThreadLifeVersionLocalLessThanRemote_FinalState_Delete() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
storeKeyConverter.setConversionMap(conversionMap);
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
Map<PartitionId, List<StoreKey>> idsByPartition = new HashMap<>();
Map<PartitionId, StoreKey> idsToBeIgnoredByPartition = new HashMap<>();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
// Same P/T/D/U + lifeVersion notation as above. Cases: 1 missing from local; 2 Delete in both hosts but with a lower lifeVersion in local; 3-5 Put in local (with or without a ttl update or delete).
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), 1);
// Add a Put and a Delete to the remote host, nothing to the local host.
StoreKey id = ids.get(0);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
idsToBeIgnoredByPartition.put(partitionId, id);
// Add a Put to both hosts, then a Delete to local at lifeVersion 0 and a Delete to remote at lifeVersion 1.
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, EXPIRY_TIME_MS);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
// Add a Put to both hosts and a Delete to the remote host only.
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
// Add a Put to both hosts, a TtlUpdate to the local host, and a Delete to the remote host.
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), UPDATED_EXPIRY_TIME_MS, (short) 0);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1, EXPIRY_TIME_MS);
// Add a Put to both hosts, a Delete(0) to the local host, then an Undelete(1) followed by a Delete(2) to the remote host.
id = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost, localHost), 1).get(0);
ids.add(id);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(localHost), (short) 0, EXPIRY_TIME_MS);
addUndeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 1);
addDeleteMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), (short) 2, EXPIRY_TIME_MS);
idsByPartition.put(partitionId, ids);
}
int batchSize = 100;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
ReplicaThread replicaThread = replicasAndThread.getSecond();
// The remote changes are all deletes, so there is no missing key.
List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
for (int i = 0; i < response.size(); i++) {
assertEquals(0, response.get(i).missingStoreMessages.size());
remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
}
// Before the metadata exchange, the local host has 7 message infos per partition; the exchange adds another 4 (all deletes).
for (Map.Entry<PartitionId, List<MessageInfo>> localInfoEntry : localHost.infosByPartition.entrySet()) {
assertEquals("MessageInfo number mismatch", 11, localInfoEntry.getValue().size());
}
for (Map.Entry<PartitionId, List<StoreKey>> idsEntry : idsByPartition.entrySet()) {
List<MessageInfo> remoteInfos = remoteHost.infosByPartition.get(idsEntry.getKey());
List<MessageInfo> localInfos = localHost.infosByPartition.get(idsEntry.getKey());
for (StoreKey id : idsEntry.getValue()) {
if (!idsToBeIgnoredByPartition.get(idsEntry.getKey()).equals(id)) {
MessageInfo localInfo = getMergedMessageInfo(id, localInfos);
MessageInfo remoteInfo = getMergedMessageInfo(id, remoteInfos);
assertTrue(localInfo.isDeleted());
assertTrue(remoteInfo.isDeleted());
assertEquals(localInfo.getLifeVersion(), remoteInfo.getLifeVersion());
}
}
}
}
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class ReplicationTestHelper method addPutMessagesToReplicasOfPartition.
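/**
 * Adds {@code count} put messages with the given {@code lifeVersion} to each of the given hosts and
 * returns the generated store keys. Every other generated blob is marked for encryption.
 */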
public static List<StoreKey> addPutMessagesToReplicasOfPartition(PartitionId partitionId, List<MockHost> hosts, short lifeVersion, int count) throws MessageFormatException, IOException {
List<StoreKey> ids = new ArrayList<>();
for (int i = 0; i < count; i++) {
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
boolean toEncrypt = i % 2 == 0;
BlobId id = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, partitionId, toEncrypt, BlobId.BlobDataType.DATACHUNK);
ids.add(id);
PutMsgInfoAndBuffer msgInfoAndBuffer = createPutMessage(id, id.getAccountId(), id.getContainerId(), toEncrypt, lifeVersion);
for (MockHost host : hosts) {
host.addMessage(partitionId, msgInfoAndBuffer.messageInfo, msgInfoAndBuffer.byteBuffer.duplicate());
}
}
return ids;
}
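For reference, a typical call from the tests above seeds a remote host and then updates each returned key; this sketch mirrors the usage in replicaThreadLifeVersionLocalLessThanRemote_FinalState_NotDelete.
// Seed the remote host with 5 blobs at lifeVersion 1, then ttl update each of them.
List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Collections.singletonList(remoteHost), (short) 1, 5);
for (StoreKey id : ids) {
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), UPDATED_EXPIRY_TIME_MS, (short) 1);
}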
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class GetBlobOperation method onChunkOperationComplete.
/**
* Do all that needs to be done (cleanup, notification, etc.) on chunk completion and mark the state of the chunk
* appropriately.
* @param chunk the chunk that has completed.
*/
private void onChunkOperationComplete(GetChunk chunk) {
if (chunk.getChunkException() != null) {
// if operation callback was already called, then this exception will have to be notified as part of the
// read callback.
setOperationException(chunk.getChunkException());
}
Exception e = getOperationException();
if (chunk == firstChunk) {
if (operationCallbackInvoked.compareAndSet(false, true)) {
if (options.getChunkIdsOnly) {
// If this is an operation just to get the chunk ids, then these ids will be returned as part of the
// result callback and no more chunks will be fetched, so mark the operation as complete to let the
// GetManager remove this operation.
setOperationCompleted();
List<StoreKey> chunkIds = e == null && compositeBlobInfo != null ? compositeBlobInfo.getKeys() : null;
operationResult = new GetBlobResultInternal(null, chunkIds);
} else {
// Complete the operation from the caller's perspective, so that the caller can start reading from the
// channel if there is no exception. The operation will not be marked as complete internally as subsequent
// chunk retrievals and channel writes will need to happen and for that, this operation needs the GetManager to
// poll it periodically. If any exception is encountered while processing subsequent chunks, those will be
// notified during the channel read.
long timeElapsed = time.milliseconds() - submissionTimeMs;
if (isEncrypted) {
routerMetrics.getEncryptedBlobOperationLatencyMs.update(timeElapsed);
} else {
routerMetrics.getBlobOperationLatencyMs.update(timeElapsed);
}
if (e == null) {
if (blobInfo != null) {
// For a segmented blob, the chunks that are not required have already been filtered out by now, so the first chunk is the segment we need.
fixBlobSizeIfRequired(blobInfo.getBlobProperties().getBlobSize(), options.getBlobOptions.hasBlobSegmentIdx() ? firstChunk.getChunkMetadataList().get(0).getSize() : totalSize);
}
if (options.getBlobOptions.getOperationType() != GetBlobOptions.OperationType.BlobInfo) {
blobDataChannel = new BlobDataReadableStreamChannel();
} else {
setOperationCompleted();
}
operationResult = new GetBlobResultInternal(new GetBlobResult(blobInfo, blobDataChannel), null);
} else {
blobDataChannel = null;
operationResult = null;
routerMetrics.onGetBlobError(e, options, isEncrypted);
}
}
NonBlockingRouter.completeOperation(null, getOperationCallback, operationResult, e);
}
}
chunk.postCompletionCleanup();
if (blobDataChannel != null) {
blobDataChannel.maybeWriteToChannel();
}
}
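The OperationType check above decides whether a BlobDataReadableStreamChannel gets created at all. A caller that needs only metadata can ask for BlobInfo; the following is a minimal sketch, assuming the GetBlobOptionsBuilder and the Router#getBlob(String, GetBlobOptions, Callback) overload from ambry's router API, with blobIdStr as a placeholder blob id.
// Request only the BlobInfo: no data channel is created, and the operation is marked
// complete as soon as the first chunk resolves.
GetBlobOptions options = new GetBlobOptionsBuilder().operationType(GetBlobOptions.OperationType.BlobInfo).build();
router.getBlob(blobIdStr, options, (result, exception) -> {
if (exception == null) {
System.out.println("Blob size: " + result.getBlobInfo().getBlobProperties().getBlobSize());
}
});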