use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class AmbryServerRequestsTest method listOfOriginalStoreKeysGetTest.
/**
 * Tests that blobIds can be converted as expected and work correctly with GetRequest.
 * If all blobIds can be converted correctly, no error is expected.
 * If any blobId can't be converted correctly, Blob_Not_Found is expected.
 * @throws InterruptedException
 * @throws IOException
 */
@Test
public void listOfOriginalStoreKeysGetTest() throws InterruptedException, IOException {
  int numIds = 10;
  PartitionId partitionId = clusterMap.getAllPartitionIds(null).get(0);
  List<BlobId> blobIds = new ArrayList<>();
  for (int i = 0; i < numIds; i++) {
    BlobId originalBlobId = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE,
        ClusterMap.UNKNOWN_DATACENTER_ID, Utils.getRandomShort(TestUtils.RANDOM),
        Utils.getRandomShort(TestUtils.RANDOM), partitionId, false, BlobId.BlobDataType.DATACHUNK);
    BlobId convertedBlobId = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.CRAFTED,
        ClusterMap.UNKNOWN_DATACENTER_ID, originalBlobId.getAccountId(), originalBlobId.getContainerId(),
        partitionId, false, BlobId.BlobDataType.DATACHUNK);
    conversionMap.put(originalBlobId, convertedBlobId);
    validKeysInStore.add(convertedBlobId);
    blobIds.add(originalBlobId);
  }
  sendAndVerifyGetOriginalStoreKeys(blobIds, ServerErrorCode.No_Error);
  // test with duplicates
  List<BlobId> blobIdsWithDups = new ArrayList<>(blobIds);
  // add the same blob ids
  blobIdsWithDups.addAll(blobIds);
  // add converted ids
  conversionMap.values().forEach(id -> blobIdsWithDups.add((BlobId) id));
  sendAndVerifyGetOriginalStoreKeys(blobIdsWithDups, ServerErrorCode.No_Error);
  // the store must not have received duplicates
  assertEquals("Size is not as expected", blobIds.size(), MockStorageManager.idsReceived.size());
  for (int i = 0; i < blobIds.size(); i++) {
    BlobId key = blobIds.get(i);
    StoreKey converted = conversionMap.get(key);
    assertEquals(key + "/" + converted + " was not received at the store", converted,
        MockStorageManager.idsReceived.get(i));
  }
  // Check a valid key mapped to null
  BlobId originalBlobId = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE,
      ClusterMap.UNKNOWN_DATACENTER_ID, Utils.getRandomShort(TestUtils.RANDOM),
      Utils.getRandomShort(TestUtils.RANDOM), partitionId, false, BlobId.BlobDataType.DATACHUNK);
  blobIds.add(originalBlobId);
  conversionMap.put(originalBlobId, null);
  validKeysInStore.add(originalBlobId);
  sendAndVerifyGetOriginalStoreKeys(blobIds, ServerErrorCode.No_Error);
  // Check an invalid key mapped to null
  originalBlobId = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE,
      ClusterMap.UNKNOWN_DATACENTER_ID, Utils.getRandomShort(TestUtils.RANDOM),
      Utils.getRandomShort(TestUtils.RANDOM), partitionId, false, BlobId.BlobDataType.DATACHUNK);
  blobIds.add(originalBlobId);
  conversionMap.put(originalBlobId, null);
  sendAndVerifyGetOriginalStoreKeys(blobIds, ServerErrorCode.Blob_Not_Found);
  // Check that a converter exception surfaces as Unknown_Error
  storeKeyConverterFactory.setException(new Exception("StoreKeyConverter Mock Exception"));
  sendAndVerifyGetOriginalStoreKeys(blobIds, ServerErrorCode.Unknown_Error);
}
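The test above relies on the server converting the requested ids and de-duplicating them before they reach the store. A minimal, self-contained sketch of that behavior, using plain Java with illustrative string ids rather than the Ambry API:

import java.util.*;

class KeyConversionSketch {
  static List<String> keysSeenByStore(List<String> requestedIds, Map<String, String> conversionMap) {
    Set<String> seen = new LinkedHashSet<>(); // preserves first-seen order, drops duplicates
    for (String id : requestedIds) {
      // An id absent from the map stands for itself; a null mapping would mean
      // "no conversion exists", which the server reports as Blob_Not_Found.
      String converted = conversionMap.getOrDefault(id, id);
      if (converted != null) {
        seen.add(converted);
      }
    }
    return new ArrayList<>(seen);
  }

  public static void main(String[] args) {
    Map<String, String> conversion = Map.of("nativeId1", "craftedId1", "nativeId2", "craftedId2");
    // Duplicates and already-converted ids collapse to a single store lookup each.
    List<String> requested = List.of("nativeId1", "nativeId2", "nativeId1", "craftedId2");
    System.out.println(keysSeenByStore(requested, conversion)); // [craftedId1, craftedId2]
  }
}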
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class CloudBlobStore method get.
@Override
public StoreInfo get(List<? extends StoreKey> ids, EnumSet<StoreGetOptions> storeGetOptions) throws StoreException {
  checkStarted();
  checkStoreKeyDuplicates(ids);
  List<CloudMessageReadSet.BlobReadInfo> blobReadInfos = new ArrayList<>(ids.size());
  List<MessageInfo> messageInfos = new ArrayList<>(ids.size());
  try {
    List<BlobId> blobIdList = ids.stream().map(key -> (BlobId) key).collect(Collectors.toList());
    Map<String, CloudBlobMetadata> cloudBlobMetadataListMap =
        requestAgent.doWithRetries(() -> cloudDestination.getBlobMetadata(blobIdList), "GetBlobMetadata",
            partitionId.toPathString());
    // Throw StoreException with ID_Not_Found if cloudBlobMetadataListMap is smaller than expected.
    if (cloudBlobMetadataListMap.size() < blobIdList.size()) {
      // Note: the map is keyed by the blob id string, so lookups must use blobId.getID().
      Set<BlobId> missingBlobs = blobIdList.stream()
          .filter(blobId -> !cloudBlobMetadataListMap.containsKey(blobId.getID()))
          .collect(Collectors.toSet());
      throw new StoreException("Some of the keys were missing in the cloud metadata store: " + missingBlobs,
          StoreErrorCodes.ID_Not_Found);
    }
    long currentTimeStamp = System.currentTimeMillis();
    // Validate cloud metadata; may throw StoreException with ID_Deleted, TTL_Expired or Authorization_Failure.
    validateCloudMetadata(cloudBlobMetadataListMap, storeGetOptions, currentTimeStamp, ids);
    for (BlobId blobId : blobIdList) {
      CloudBlobMetadata blobMetadata = cloudBlobMetadataListMap.get(blobId.getID());
      // TODO: need to add ttlUpdated to CloudBlobMetadata so we can use it here.
      // For now, set ttlUpdated = true for all permanent blobs, so the correct ttl
      // is applied by GetOperation.
      boolean ttlUpdated = blobMetadata.getExpirationTime() == Utils.Infinite_Time;
      boolean deleted = blobMetadata.getDeletionTime() != Utils.Infinite_Time;
      MessageInfo messageInfo =
          new MessageInfo(blobId, blobMetadata.getSize(), deleted, ttlUpdated, blobMetadata.isUndeleted(),
              blobMetadata.getExpirationTime(), null, (short) blobMetadata.getAccountId(),
              (short) blobMetadata.getContainerId(), getOperationTime(blobMetadata), blobMetadata.getLifeVersion());
      messageInfos.add(messageInfo);
      blobReadInfos.add(new CloudMessageReadSet.BlobReadInfo(blobMetadata, blobId));
    }
  } catch (CloudStorageException e) {
    if (e.getCause() instanceof StoreException) {
      throw (StoreException) e.getCause();
    } else {
      throw new StoreException(e, StoreErrorCodes.IOError);
    }
  }
  CloudMessageReadSet messageReadSet = new CloudMessageReadSet(blobReadInfos, this);
  return new StoreInfo(messageReadSet, messageInfos);
}
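The deleted and ttlUpdated flags above hinge on Utils.Infinite_Time acting as a "never" sentinel for the deletion and expiration timestamps. A small stand-alone illustration of that mapping; the sentinel value -1 is an assumption here, mirroring what Ambry's Utils.Infinite_Time is expected to be:

class MetadataFlagsSketch {
  static final long INFINITE_TIME = -1; // assumed sentinel, standing in for Utils.Infinite_Time

  static boolean isDeleted(long deletionTimeMs) {
    // Any real deletion timestamp (anything other than the sentinel) marks the blob deleted.
    return deletionTimeMs != INFINITE_TIME;
  }

  static boolean treatAsTtlUpdated(long expirationTimeMs) {
    // Stand-in for the TODO above: permanent blobs (no expiration) are reported as
    // ttl-updated so GetOperation applies the correct ttl.
    return expirationTimeMs == INFINITE_TIME;
  }

  public static void main(String[] args) {
    System.out.println(isDeleted(INFINITE_TIME));          // false: never deleted
    System.out.println(isDeleted(1_700_000_000_000L));     // true: has a deletion time
    System.out.println(treatAsTtlUpdated(INFINITE_TIME));  // true: permanent blob
  }
}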
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class ReplicaThread method getMissingStoreKeys.
/**
 * Gets the missing store keys by comparing the messages from the remote node against the local store.
 * @param replicaMetadataResponseInfo The response that contains the messages from the remote node.
 * @param remoteNode The remote node from which replication needs to happen.
 * @param remoteReplicaInfo The remote replica that contains information about the remote replica id.
 * @return The set of store keys that are missing from the local store.
 * @throws StoreException
 */
private Set<StoreKey> getMissingStoreKeys(ReplicaMetadataResponseInfo replicaMetadataResponseInfo,
    DataNodeId remoteNode, RemoteReplicaInfo remoteReplicaInfo) throws StoreException {
  long startTime = SystemTime.getInstance().milliseconds();
  List<MessageInfo> messageInfoList = replicaMetadataResponseInfo.getMessageInfoList();
  List<StoreKey> storeKeysToCheck = new ArrayList<>(messageInfoList.size());
  for (MessageInfo messageInfo : messageInfoList) {
    storeKeysToCheck.add(messageInfo.getStoreKey());
    logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key from remote: {}", remoteNode, threadName,
        remoteReplicaInfo.getReplicaId(), messageInfo.getStoreKey());
  }
  Set<StoreKey> missingStoreKeys = remoteReplicaInfo.getLocalStore().findMissingKeys(storeKeysToCheck);
  for (StoreKey storeKey : missingStoreKeys) {
    logger.trace("Remote node: {} Thread name: {} Remote replica: {} Key missing id: {}", remoteNode, threadName,
        remoteReplicaInfo.getReplicaId(), storeKey);
  }
  replicationMetrics.updateCheckMissingKeysTime(SystemTime.getInstance().milliseconds() - startTime,
      replicatingFromRemoteColo, datacenterName);
  return missingStoreKeys;
}
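The heavy lifting is delegated to Store.findMissingKeys, whose contract amounts to a set difference: of the keys the remote replica advertised, return those absent from the local index. A minimal model with illustrative string keys:

import java.util.*;

class FindMissingKeysSketch {
  static Set<String> findMissingKeys(Set<String> localIndexKeys, List<String> remoteKeys) {
    Set<String> missing = new LinkedHashSet<>(remoteKeys); // keep remote order, drop duplicates
    missing.removeAll(localIndexKeys);                     // keep what the remote has and local lacks
    return missing;
  }

  public static void main(String[] args) {
    Set<String> local = Set.of("blobA", "blobB");
    List<String> remote = List.of("blobA", "blobC", "blobD");
    System.out.println(findMissingKeys(local, remote)); // [blobC, blobD] -> to be replicated
  }
}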
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class ReplicationTest method eliminateDuplicates.
/**
 * We can have duplicate entries in the message entries since updates can happen to the same key. For example,
 * an insert of a key followed by a delete creates two entries in the journal or the index, and a single findInfo
 * call could read both. The findInfo should return information that is as clean as possible. This method removes
 * the older duplicates in the list, keeping only the most recent entry for each key.
 * @param messageEntries The message entry list where duplicates need to be removed.
 */
private static void eliminateDuplicates(List<MessageInfo> messageEntries) {
  Set<StoreKey> setToFindDuplicate = new HashSet<>();
  // Iterate from the end so the most recent entry for each key is kept and earlier ones are removed.
  ListIterator<MessageInfo> messageEntriesIterator = messageEntries.listIterator(messageEntries.size());
  while (messageEntriesIterator.hasPrevious()) {
    MessageInfo messageInfo = messageEntriesIterator.previous();
    if (setToFindDuplicate.contains(messageInfo.getStoreKey())) {
      messageEntriesIterator.remove();
    } else {
      setToFindDuplicate.add(messageInfo.getStoreKey());
    }
  }
}
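To see the effect concretely, here is a hypothetical stand-in for MessageInfo (just a key plus an operation label) run through the same backward-iteration logic: a PUT followed by a DELETE of the same key collapses to the DELETE alone.

import java.util.*;

class EliminateDuplicatesSketch {
  record Entry(String key, String op) {}

  static void eliminateDuplicates(List<Entry> entries) {
    Set<String> seen = new HashSet<>();
    ListIterator<Entry> it = entries.listIterator(entries.size());
    while (it.hasPrevious()) {    // walk newest -> oldest
      Entry e = it.previous();
      if (!seen.add(e.key())) {   // key already kept by a newer entry
        it.remove();              // drop the older occurrence
      }
    }
  }

  public static void main(String[] args) {
    List<Entry> entries = new ArrayList<>(List.of(
        new Entry("blob1", "PUT"), new Entry("blob2", "PUT"), new Entry("blob1", "DELETE")));
    eliminateDuplicates(entries);
    System.out.println(entries); // [Entry[key=blob2, op=PUT], Entry[key=blob1, op=DELETE]]
  }
}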
use of com.github.ambry.store.StoreKey in project ambry by linkedin.
the class MockReadableStreamChannel method verifyCompositeBlob.
/**
 * Verify a composite blob's content, userMetadata and per-chunk notifications.
 * @param properties {@link BlobProperties} of the blob
 * @param originalPutContent original put content
 * @param originalUserMetadata original user-metadata
 * @param dataBlobIds {@link List} of {@link StoreKey}s of the composite blob in context
 * @param request {@link com.github.ambry.protocol.PutRequest.ReceivedPutRequest} to fetch info from
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}
 * @throws Exception
 */
private void verifyCompositeBlob(BlobProperties properties, byte[] originalPutContent, byte[] originalUserMetadata,
    List<StoreKey> dataBlobIds, PutRequest.ReceivedPutRequest request,
    HashMap<String, ByteBuffer> serializedRequests) throws Exception {
  StoreKey lastKey = dataBlobIds.get(dataBlobIds.size() - 1);
  byte[] content = new byte[(int) request.getBlobProperties().getBlobSize()];
  AtomicInteger offset = new AtomicInteger(0);
  for (StoreKey key : dataBlobIds) {
    PutRequest.ReceivedPutRequest dataBlobPutRequest = deserializePutRequest(serializedRequests.get(key.getID()));
    AtomicInteger dataBlobLength = new AtomicInteger((int) dataBlobPutRequest.getBlobSize());
    InputStream dataBlobStream = dataBlobPutRequest.getBlobStream();
    if (!properties.isEncrypted()) {
      Utils.readBytesFromStream(dataBlobStream, content, offset.get(), dataBlobLength.get());
      Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata,
          dataBlobPutRequest.getUsermetadata().array());
    } else {
      byte[] dataBlobContent = Utils.readBytesFromStream(dataBlobStream, dataBlobLength.get());
      // run() is called directly rather than starting a new thread via start(), because exceptions or
      // assertion failures on a non-main thread would not fail the test.
      new DecryptJob(dataBlobPutRequest.getBlobId(), dataBlobPutRequest.getBlobEncryptionKey().duplicate(),
          ByteBuffer.wrap(dataBlobContent), dataBlobPutRequest.getUsermetadata().duplicate(), cryptoService, kms,
          new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
            Assert.assertNull("Exception should not be thrown", exception);
            Assert.assertEquals("BlobId mismatch", dataBlobPutRequest.getBlobId(), result.getBlobId());
            Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata,
                result.getDecryptedUserMetadata().array());
            dataBlobLength.set(result.getDecryptedBlobContent().remaining());
            result.getDecryptedBlobContent().get(content, offset.get(), dataBlobLength.get());
          }).run();
    }
    if (key != lastKey) {
      Assert.assertEquals("all chunks except the last should be fully filled", chunkSize, dataBlobLength.get());
    } else {
      Assert.assertEquals("last chunk should be of non-zero length and equal to the length of the remaining bytes",
          (originalPutContent.length - 1) % chunkSize + 1, dataBlobLength.get());
    }
    offset.addAndGet(dataBlobLength.get());
    Assert.assertEquals("dataBlobStream should have no more data", -1, dataBlobStream.read());
    notificationSystem.verifyNotification(key.getID(), NotificationBlobType.DataChunk,
        dataBlobPutRequest.getBlobProperties());
  }
  Assert.assertArrayEquals("Input blob and written blob should be the same", originalPutContent, content);
}
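The last-chunk assertion uses the expression (originalPutContent.length - 1) % chunkSize + 1, which, unlike a plain modulo, maps an exact multiple of chunkSize to a full chunk instead of zero. A tiny check:

class LastChunkSizeSketch {
  static int lastChunkSize(int totalLength, int chunkSize) {
    // Shift by one before the modulo so a length that divides evenly yields chunkSize, not 0.
    return (totalLength - 1) % chunkSize + 1;
  }

  public static void main(String[] args) {
    System.out.println(lastChunkSize(10, 4)); // 2  (chunks: 4 + 4 + 2)
    System.out.println(lastChunkSize(8, 4));  // 4  (chunks: 4 + 4, last chunk full)
    System.out.println(lastChunkSize(3, 4));  // 3  (single partial chunk)
  }
}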