use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class ReplicationTestHelper method lifeVersionLocalGreaterThanRemote_Delete.
/**
 * Helper function to test the case where the local lifeVersion is greater than the remote lifeVersion.
 * @param localTtlUpdated whether a ttl update record is added on the local replica.
 * @param remoteTtlUpdated whether a ttl update record is added on the remote replica.
 * @throws Exception
 */
protected void lifeVersionLocalGreaterThanRemote_Delete(boolean localTtlUpdated, boolean remoteTtlUpdated) throws Exception {
  MockClusterMap clusterMap = new MockClusterMap();
  Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
  MockHost localHost = localAndRemoteHosts.getFirst();
  MockHost remoteHost = localAndRemoteHosts.getSecond();
  MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
  storeKeyConverterFactory.setConversionMap(new HashMap<>());
  storeKeyConverterFactory.setReturnInputIfAbsent(true);
  MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
  List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
  for (int i = 0; i < partitionIds.size(); i++) {
    PartitionId partitionId = partitionIds.get(i);
    // add 1 message to the remote host with lifeVersion 0 and add it to the local host with lifeVersion 1.
    StoreKey toDeleteId = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(remoteHost), 1).get(0);
    if (remoteTtlUpdated) {
      addTtlUpdateMessagesToReplicasOfPartition(partitionId, toDeleteId, Arrays.asList(remoteHost), UPDATED_EXPIRY_TIME_MS);
    }
    addDeleteMessagesToReplicasOfPartition(partitionId, toDeleteId, Collections.singletonList(remoteHost));
    BlobId blobId = (BlobId) toDeleteId;
    short accountId = blobId.getAccountId();
    short containerId = blobId.getContainerId();
    short lifeVersion = 1;
    // the first put message has encryption turned on
    boolean toEncrypt = true;
    // create a put message with lifeVersion bigger than 0
    PutMsgInfoAndBuffer msgInfoAndBuffer = createPutMessage(toDeleteId, accountId, containerId, toEncrypt, lifeVersion);
    localHost.addMessage(partitionId,
        new MessageInfo(toDeleteId, msgInfoAndBuffer.byteBuffer.remaining(), false, false, false, Utils.Infinite_Time,
            null, accountId, containerId, msgInfoAndBuffer.messageInfo.getOperationTimeMs(), lifeVersion),
        msgInfoAndBuffer.byteBuffer);
    if (localTtlUpdated) {
      addTtlUpdateMessagesToReplicasOfPartition(partitionId, toDeleteId, Collections.singletonList(localHost), EXPIRY_TIME_MS, lifeVersion);
    }
    // ensure that the first key is not deleted in the local host
    assertNull(toDeleteId + " should not be deleted in the local host",
        getMessageInfo(toDeleteId, localHost.infosByPartition.get(partitionId), true, false, false));
  }
  int batchSize = 4;
  Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread =
      getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, null, null, null);
  List<RemoteReplicaInfo> remoteReplicaInfos = replicasAndThread.getFirst().get(remoteHost.dataNodeId);
  ReplicaThread replicaThread = replicasAndThread.getSecond();
  // Do the replica metadata exchange.
  List<ReplicaThread.ExchangeMetadataResponse> response =
      replicaThread.exchangeMetadata(new MockConnectionPool.MockConnection(remoteHost, batchSize), remoteReplicaInfos);
  assertEquals("Response should contain a response for each replica", remoteReplicaInfos.size(), response.size());
  for (int i = 0; i < response.size(); i++) {
    // we don't have any missing keys here.
    assertEquals(0, response.get(i).missingStoreMessages.size());
    remoteReplicaInfos.get(i).setToken(response.get(i).remoteToken);
    PartitionId partitionId = partitionIds.get(i);
    StoreKey key = localHost.infosByPartition.get(partitionId).get(0).getStoreKey();
    assertNull(key + " should not be deleted in the local host",
        getMessageInfo(key, localHost.infosByPartition.get(partitionId), true, false, false));
    if (!localTtlUpdated) {
      assertNull(key + " should not be ttlUpdated in the local host",
          getMessageInfo(key, localHost.infosByPartition.get(partitionId), false, false, true));
    }
  }
}
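For context, a caller would exercise this helper across the four ttl-update combinations. A minimal sketch, assuming JUnit 4 (as the assertion style above suggests) and a hypothetical test name not taken from the Ambry source:
@Test
public void replicaThreadLifeVersionLocalGreaterThanRemoteDeleteTest() throws Exception {
  // each call builds its own MockClusterMap, so the four combinations are independent
  lifeVersionLocalGreaterThanRemote_Delete(false, false);
  lifeVersionLocalGreaterThanRemote_Delete(false, true);
  lifeVersionLocalGreaterThanRemote_Delete(true, false);
  lifeVersionLocalGreaterThanRemote_Delete(true, true);
}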
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class ReplicationTestHelper method verifyNoMoreMissingKeysAndExpectedMissingBufferCount.
/**
 * Verifies that there are no more missing keys between the local and remote host and that
 * the expected number of buffers are missing between the remote and local host.
 * @param remoteHost the remote {@link MockHost}.
 * @param localHost the local {@link MockHost}.
 * @param replicaThread the {@link ReplicaThread} driving replication.
 * @param replicasToReplicate the {@link RemoteReplicaInfo}s to replicate, keyed by data node.
 * @param idsToBeIgnoredByPartition the {@link StoreKey}s that replication should ignore, keyed by partition.
 * @param storeKeyConverter the {@link StoreKeyConverter} to use for key conversion.
 * @param expectedIndex the expected remote token index for even-numbered replicas.
 * @param expectedIndexOdd the expected remote token index for odd-numbered replicas.
 * @param expectedMissingBuffers the expected number of missing buffers per partition.
 * @throws Exception
 */
protected void verifyNoMoreMissingKeysAndExpectedMissingBufferCount(MockHost remoteHost, MockHost localHost,
    ReplicaThread replicaThread, Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate,
    Map<PartitionId, List<StoreKey>> idsToBeIgnoredByPartition, StoreKeyConverter storeKeyConverter, int expectedIndex,
    int expectedIndexOdd, int expectedMissingBuffers) throws Exception {
  // no more missing keys
  List<ReplicaThread.ExchangeMetadataResponse> response = replicaThread.exchangeMetadata(
      new MockConnectionPool.MockConnection(remoteHost, 4), replicasToReplicate.get(remoteHost.dataNodeId));
  assertEquals("Response should contain a response for each replica",
      replicasToReplicate.get(remoteHost.dataNodeId).size(), response.size());
  for (int i = 0; i < response.size(); i++) {
    assertEquals(0, response.get(i).missingStoreMessages.size());
    assertEquals(i % 2 == 0 ? expectedIndex : expectedIndexOdd, ((MockFindToken) response.get(i).remoteToken).getIndex());
  }
  Map<PartitionId, List<MessageInfo>> missingInfos = remoteHost.getMissingInfos(localHost.infosByPartition, storeKeyConverter);
  for (Map.Entry<PartitionId, List<MessageInfo>> entry : missingInfos.entrySet()) {
    // test that the first key has been marked deleted
    List<MessageInfo> messageInfos = localHost.infosByPartition.get(entry.getKey());
    StoreKey deletedId = messageInfos.get(0).getStoreKey();
    assertNotNull(deletedId + " should have been deleted", getMessageInfo(deletedId, messageInfos, true, false, false));
    Map<StoreKey, Boolean> ignoreState = new HashMap<>();
    for (StoreKey toBeIgnored : idsToBeIgnoredByPartition.get(entry.getKey())) {
      ignoreState.put(toBeIgnored, false);
    }
    for (MessageInfo messageInfo : entry.getValue()) {
      StoreKey id = messageInfo.getStoreKey();
      if (!id.equals(deletedId)) {
        assertTrue("Message should be eligible to be ignored: " + id, ignoreState.containsKey(id));
        ignoreState.put(id, true);
      }
    }
    for (Map.Entry<StoreKey, Boolean> stateInfo : ignoreState.entrySet()) {
      assertTrue(stateInfo.getKey() + " should have been ignored", stateInfo.getValue());
    }
  }
  Map<PartitionId, List<ByteBuffer>> missingBuffers = remoteHost.getMissingBuffers(localHost.buffersByPartition);
  for (Map.Entry<PartitionId, List<ByteBuffer>> entry : missingBuffers.entrySet()) {
    assertEquals(expectedMissingBuffers, entry.getValue().size());
  }
}
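A hypothetical call site for this verifier; the token indices and buffer count below are illustrative only, since the correct values depend on how many messages the surrounding test wrote per partition:
// illustrative literals: even replicas expected at token index 4, odd at 5, no buffers missing
verifyNoMoreMissingKeysAndExpectedMissingBufferCount(remoteHost, localHost, replicaThread, replicasToReplicate,
    idsToBeIgnoredByPartition, storeKeyConverter, 4, 5, 0);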
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class InMemoryStore method findEntriesSince.
@Override
public FindInfo findEntriesSince(FindToken token, long maxSizeOfEntries, String hostname, String remoteReplicaPath)
    throws StoreException {
  // unused function
  MockFindToken mockToken = (MockFindToken) token;
  List<MessageInfo> entriesToReturn = new ArrayList<>();
  long currentSizeOfEntriesInBytes = 0;
  int index = mockToken.getIndex();
  Set<StoreKey> processedKeys = new HashSet<>();
  while (currentSizeOfEntriesInBytes < maxSizeOfEntries && index < messageInfos.size()) {
    StoreKey key = messageInfos.get(index).getStoreKey();
    if (processedKeys.add(key)) {
      entriesToReturn.add(getMergedMessageInfo(key, messageInfos));
    }
    // still count the size of the message at this index (e.g. the original put), even though
    // the merged message info is what gets returned.
    currentSizeOfEntriesInBytes += messageInfos.get(index).getSize();
    index++;
  }
  int startIndex = mockToken.getIndex();
  int totalSizeRead = 0;
  for (int i = 0; i < startIndex; i++) {
    totalSizeRead += messageInfos.get(i).getSize();
  }
  totalSizeRead += currentSizeOfEntriesInBytes;
  return new FindInfo(entriesToReturn, new MockFindToken(mockToken.getIndex() + entriesToReturn.size(), totalSizeRead));
}
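A minimal paging sketch over this method, assuming an InMemoryStore instance named store and that FindInfo exposes getMessageEntries() and getFindToken() as in Ambry's store API; it loops until a call returns no further entries:
FindToken token = new MockFindToken(0, 0);
List<MessageInfo> seen = new ArrayList<>();
while (true) {
  FindInfo findInfo = store.findEntriesSince(token, 1024 * 1024, null, null);
  if (findInfo.getMessageEntries().isEmpty()) {
    break; // the token no longer advances once the end of messageInfos is reached
  }
  seen.addAll(findInfo.getMessageEntries());
  token = findInfo.getFindToken();
}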
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class InMemoryStore method undelete.
@Override
public short undelete(MessageInfo info) throws StoreException {
  StoreKey key = info.getStoreKey();
  MessageInfo deleteInfo = getMessageInfo(key, messageInfos, true, false, false);
  if (info.getLifeVersion() == -1 && deleteInfo == null) {
    throw new StoreException("Key " + key + " not deleted yet", StoreErrorCodes.ID_Not_Deleted);
  }
  short lifeVersion = info.getLifeVersion();
  MessageInfo latestInfo = deleteInfo;
  if (info.getLifeVersion() == MessageInfo.LIFE_VERSION_FROM_FRONTEND) {
    if (deleteInfo == null) {
      throw new StoreException("Id " + key + " requires first value to be a put and last value to be a delete",
          StoreErrorCodes.ID_Not_Deleted);
    }
    lifeVersion = (short) (deleteInfo.getLifeVersion() + 1);
  } else {
    if (deleteInfo == null) {
      latestInfo = getMergedMessageInfo(key, messageInfos);
    }
  }
  try {
    MessageFormatInputStream stream =
        new UndeleteMessageFormatInputStream(key, info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), lifeVersion);
    // update the info to reflect the size of the undelete record stream
    info = new MessageInfo(key, stream.getSize(), false, latestInfo.isTtlUpdated(), true, latestInfo.getExpirationTimeInMs(),
        null, info.getAccountId(), info.getContainerId(), info.getOperationTimeMs(), lifeVersion);
    MessageFormatWriteSet writeSet = new MessageFormatWriteSet(stream, Collections.singletonList(info), false);
    writeSet.writeTo(log);
    messageInfos.add(info);
    return lifeVersion;
  } catch (Exception e) {
    throw new StoreException("Unknown error while trying to undelete blobs from store", e, StoreErrorCodes.Unknown_Error);
  }
}
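A hedged sketch of the frontend-driven path: with MessageInfo.LIFE_VERSION_FROM_FRONTEND the store derives the new lifeVersion from the latest delete record, so undeleting a key whose delete was written at lifeVersion 0 should return 1. The store, id, accountId and containerId variables are assumed to exist in the surrounding test; the constructor mirrors the one used above:
MessageInfo undeleteRequest = new MessageInfo(id, 0, false, false, true, Utils.Infinite_Time, null, accountId,
    containerId, System.currentTimeMillis(), MessageInfo.LIFE_VERSION_FROM_FRONTEND);
// expected: deleteInfo.getLifeVersion() + 1, i.e. 1 when the delete carried lifeVersion 0
assertEquals(1, store.undelete(undeleteRequest));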
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class GetBlobOperationTest method testBlobSizeReplacement.
/**
 * A past issue with replication logic resulted in the blob size listed in the blob properties reflecting the size
 * of a chunk's content buffer instead of the plaintext size of the entire blob. This issue affects composite blobs
 * and simple encrypted blobs. This test verifies the router's ability to replace the incorrect blob size field in
 * the blob properties with the inferred correct size.
 * @throws Exception
 */
@Test
public void testBlobSizeReplacement() throws Exception {
  userMetadata = new byte[10];
  random.nextBytes(userMetadata);
  options = new GetBlobOptionsInternal(
      new GetBlobOptionsBuilder().operationType(GetBlobOptions.OperationType.BlobInfo).build(), false,
      routerMetrics.ageAtGet);
  // test the simple blob case
  blobSize = maxChunkSize;
  putContent = new byte[blobSize];
  random.nextBytes(putContent);
  blobProperties = new BlobProperties(blobSize + 20, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time,
      Utils.getRandomShort(random), Utils.getRandomShort(random), testEncryption, null, null, null);
  ByteBuf putContentBuf = PooledByteBufAllocator.DEFAULT.heapBuffer(blobSize);
  putContentBuf.writeBytes(putContent);
  doDirectPut(BlobType.DataBlob, putContentBuf.retainedDuplicate());
  putContentBuf.release();
  Counter sizeMismatchCounter = (testEncryption ? routerMetrics.simpleEncryptedBlobSizeMismatchCount
      : routerMetrics.simpleUnencryptedBlobSizeMismatchCount);
  long startCount = sizeMismatchCounter.getCount();
  getAndAssertSuccess();
  long endCount = sizeMismatchCounter.getCount();
  Assert.assertEquals("Wrong number of blob size mismatches", 1, endCount - startCount);
  // test the composite blob case
  int numChunks = 3;
  blobSize = maxChunkSize;
  List<StoreKey> storeKeys = new ArrayList<>(numChunks);
  for (int i = 0; i < numChunks; i++) {
    doPut();
    storeKeys.add(blobId);
  }
  blobSize = maxChunkSize * numChunks;
  ByteBuffer metadataContent = MetadataContentSerDe.serializeMetadataContentV2(maxChunkSize, blobSize, storeKeys);
  metadataContent.flip();
  blobProperties = new BlobProperties(blobSize - 20, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time,
      Utils.getRandomShort(random), Utils.getRandomShort(random), testEncryption, null, null, null);
  ByteBuf metadataContentBuf = PooledByteBufAllocator.DEFAULT.heapBuffer(metadataContent.remaining());
  metadataContentBuf.writeBytes(metadataContent.duplicate());
  doDirectPut(BlobType.MetadataBlob, metadataContentBuf.retainedDuplicate());
  metadataContentBuf.release();
  startCount = routerMetrics.compositeBlobSizeMismatchCount.getCount();
  getAndAssertSuccess();
  endCount = routerMetrics.compositeBlobSizeMismatchCount.getCount();
  Assert.assertEquals("Wrong number of blob size mismatches", 1, endCount - startCount);
}
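The capture-run-compare pattern around the mismatch counters repeats twice above; a small hypothetical helper could distill it. The ThrowingRunnable interface is defined here for illustration and is not part of the Ambry test code:
interface ThrowingRunnable {
  void run() throws Exception;
}

private void assertCounterDelta(Counter counter, long expectedDelta, ThrowingRunnable op) throws Exception {
  long before = counter.getCount();
  op.run();
  Assert.assertEquals("Wrong number of blob size mismatches", expectedDelta, counter.getCount() - before);
}

// usage: assertCounterDelta(sizeMismatchCounter, 1, this::getAndAssertSuccess);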