Use of com.github.ambry.cloud.CloudBlobMetadata in the project ambry by LinkedIn.
Class AzureCloudDestination, method getBlobMetadata.
@Override
public Map<String, CloudBlobMetadata> getBlobMetadata(List<BlobId> blobIds) throws CloudStorageException {
  Objects.requireNonNull(blobIds, "blobIds cannot be null");
  if (blobIds.isEmpty()) {
    return Collections.emptyMap();
  }
  // Single-blob lookups outside the VCR path go straight to blob storage,
  // skipping the Cosmos query entirely.
  if (!isVcr && blobIds.size() == 1) {
    CloudBlobMetadata metadata = azureBlobDataAccessor.getBlobMetadata(blobIds.get(0));
    if (metadata == null) {
      return Collections.emptyMap();
    }
    return Collections.singletonMap(metadata.getId(), metadata);
  }
  // CosmosDB has a query size limit of 256k chars, so split the id list into
  // batches of at most queryBatchSize and query each batch separately.
  Map<String, CloudBlobMetadata> metadataById = new HashMap<>();
  for (List<BlobId> batch : Utils.partitionList(blobIds, queryBatchSize)) {
    for (CloudBlobMetadata metadata : getBlobMetadataChunked(batch)) {
      // First occurrence wins, matching the previous merge behavior for duplicate ids.
      metadataById.putIfAbsent(metadata.getId(), metadata);
    }
  }
  return metadataById;
}
Use of com.github.ambry.cloud.CloudBlobMetadata in the project ambry by LinkedIn.
Class AzureBlobLayoutStrategyTest, method testDefaultStrategy.
/**
 * Verify that the default layout strategy is partition-based: data blobs and
 * token files all land in the cluster-partition container.
 */
@Test
public void testDefaultStrategy() {
  AzureCloudConfig config = new AzureCloudConfig(new VerifiableProperties(configProps));
  AzureBlobLayoutStrategy layoutStrategy = new AzureBlobLayoutStrategy(clusterName, config);

  // Expected container is "<cluster>-<partition>" (e.g. main-666); expected blob
  // name is the last four chars of the blob id, a dash, then the full id.
  String expectedContainer = clusterName + "-" + partitionPath;
  String idString = blobId.getID();
  String expectedBlobName = idString.substring(idString.length() - 4) + "-" + idString;

  // Layout derived from the BlobId itself.
  checkLayout(layoutStrategy.getDataBlobLayout(blobId), expectedContainer, expectedBlobName);

  // Layout derived from CloudBlobMetadata must agree.
  CloudBlobMetadata metadata = new CloudBlobMetadata(blobId, 0, 0, 0, null);
  checkLayout(layoutStrategy.getDataBlobLayout(metadata), expectedContainer, expectedBlobName);

  // Token files share the same container as the data blobs.
  checkLayout(layoutStrategy.getTokenBlobLayout(partitionPath, tokenFileName), expectedContainer, tokenFileName);
}
Use of com.github.ambry.cloud.CloudBlobMetadata in the project ambry by LinkedIn.
Class AzureBlobLayoutStrategyTest, method testContainerStrategy.
/**
 * Verify the "container" layout strategy: data blobs go in a container named
 * after the Ambry account/container pair, while token files get their own container.
 */
@Test
public void testContainerStrategy() {
  configProps.setProperty(AzureCloudConfig.AZURE_BLOB_CONTAINER_STRATEGY, "container");
  AzureCloudConfig config = new AzureCloudConfig(new VerifiableProperties(configProps));
  AzureBlobLayoutStrategy layoutStrategy = new AzureBlobLayoutStrategy(clusterName, config);

  // Expected container is "<cluster>-<accountId>-<containerId>" (e.g. main-101-5).
  String expectedContainer =
      String.format("%s-%d-%d", clusterName, AzureTestUtils.accountId, AzureTestUtils.containerId);
  String idString = blobId.getID();
  String expectedBlobName = idString.substring(idString.length() - 4) + "-" + idString;

  // Layout derived from the BlobId itself.
  checkLayout(layoutStrategy.getDataBlobLayout(blobId), expectedContainer, expectedBlobName);

  // Layout derived from CloudBlobMetadata must agree.
  CloudBlobMetadata metadata = new CloudBlobMetadata(blobId, 0, 0, 0, null);
  checkLayout(layoutStrategy.getDataBlobLayout(metadata), expectedContainer, expectedBlobName);

  // Token files go in a dedicated container named "<cluster>-<tokenfile>",
  // with the blob named "<partition>/<tokenfile>".
  expectedContainer = clusterName + "-" + tokenFileName.toLowerCase();
  expectedBlobName = partitionPath + "/" + tokenFileName;
  checkLayout(layoutStrategy.getTokenBlobLayout(partitionPath, tokenFileName), expectedContainer, expectedBlobName);
}
Use of com.github.ambry.cloud.CloudBlobMetadata in the project ambry by LinkedIn.
Class CloudBlobStore, method putBlob.
/**
 * Upload the blob to the cloud destination.
 * @param messageInfo the {@link MessageInfo} containing blob metadata.
 * @param messageBuf the bytes to be uploaded.
 * @param size the number of bytes to upload.
 * @throws CloudStorageException if the upload failed, or if required encryption of the blob failed.
 * @throws IOException if reading the blob id fails while checking router encryption.
 * @throws StoreException if a blob with the same key already exists in the store.
 */
private void putBlob(MessageInfo messageInfo, ByteBuffer messageBuf, long size)
    throws CloudStorageException, IOException, StoreException {
  if (shouldUpload(messageInfo)) {
    BlobId blobId = (BlobId) messageInfo.getStoreKey();
    boolean isRouterEncrypted = isRouterEncrypted(blobId);
    EncryptionOrigin encryptionOrigin = isRouterEncrypted ? EncryptionOrigin.ROUTER : EncryptionOrigin.NONE;
    boolean encryptThisBlob = requireEncryption && !isRouterEncrypted;
    boolean uploaded;
    if (encryptThisBlob) {
      // Need to encrypt the buffer before upload
      long encryptedSize = -1;
      Timer.Context encryptionTimer = vcrMetrics.blobEncryptionTime.time();
      try {
        messageBuf = cryptoAgent.encrypt(messageBuf);
        encryptedSize = messageBuf.remaining();
      } catch (GeneralSecurityException ex) {
        vcrMetrics.blobEncryptionErrorCount.inc();
        // FIX: previously this exception was swallowed and the UNENCRYPTED buffer was
        // uploaded even though requireEncryption was set. Fail the put instead of
        // silently storing plaintext.
        throw new CloudStorageException("Encryption failed for blob " + blobId.getID(), ex);
      } finally {
        encryptionTimer.stop();
      }
      vcrMetrics.blobEncryptionCount.inc();
      CloudBlobMetadata blobMetadata =
          new CloudBlobMetadata(blobId, messageInfo.getOperationTimeMs(), messageInfo.getExpirationTimeInMs(),
              messageInfo.getSize(), EncryptionOrigin.VCR, cryptoAgent.getEncryptionContext(),
              cryptoAgentFactory.getClass().getName(), encryptedSize, messageInfo.getLifeVersion());
      // Upload the encrypted length, which may differ from the original size.
      uploaded = uploadWithRetries(blobId, messageBuf, encryptedSize, blobMetadata);
    } else {
      // PutRequest lifeVersion from the frontend is -1 and should be stored as 0
      // (0 is the starting life version for any data). Puts coming from replication
      // or recovery carry a valid lifeVersion and should use it as-is.
      short lifeVersion =
          messageInfo.hasLifeVersion(messageInfo.getLifeVersion()) ? messageInfo.getLifeVersion() : (short) 0;
      CloudBlobMetadata blobMetadata =
          new CloudBlobMetadata(blobId, messageInfo.getOperationTimeMs(), messageInfo.getExpirationTimeInMs(),
              messageInfo.getSize(), encryptionOrigin, lifeVersion);
      uploaded = uploadWithRetries(blobId, messageBuf, size, blobMetadata);
    }
    addToCache(blobId.getID(), (short) 0, BlobState.CREATED);
    if (!uploaded && !isVcr) {
      // The destination reported the blob already existed, so surface that to the caller.
      throw new StoreException(String.format("Another blob with same key %s exists in store", blobId.getID()),
          StoreErrorCodes.Already_Exist);
    }
  } else {
    vcrMetrics.blobUploadSkippedCount.inc();
    // A blob is skipped by the VCR only if it is deleted or expiring within
    // {@link CloudConfig#vcrMinTtlDays}; anything else skipped means it already exists.
    if (isVcr && !isExpiringSoon(messageInfo) && !messageInfo.isDeleted()) {
      throw new StoreException(
          String.format("Another blob with same key %s exists in store", messageInfo.getStoreKey().getID()),
          StoreErrorCodes.Already_Exist);
    }
  }
}
Use of com.github.ambry.cloud.CloudBlobMetadata in the project ambry by LinkedIn.
Class AzureBlobDataAccessor, method purgeBlobs.
/**
 * Permanently delete the specified blobs in Azure storage.
 * Blobs that are already gone (404/410) are treated as successfully purged.
 * @param blobMetadataList the list of {@link CloudBlobMetadata} referencing the blobs to purge.
 * @return list of {@link CloudBlobMetadata} referencing the blobs successfully purged.
 * @throws BlobStorageException if a delete fails with any status other than NOT_FOUND or GONE.
 * @throws RuntimeException if the request times out before a response is received.
 */
public List<CloudBlobMetadata> purgeBlobs(List<CloudBlobMetadata> blobMetadataList) throws BlobStorageException {
  List<CloudBlobMetadata> purged = new ArrayList<>();
  // Delete in batches of at most purgeBatchSize blobs per storage request.
  for (List<CloudBlobMetadata> batch : Utils.partitionList(blobMetadataList, purgeBatchSize)) {
    List<Response<Void>> responses = storageClient.deleteBatch(batch, batchTimeout);
    int index = 0;
    for (Response<Void> response : responses) {
      CloudBlobMetadata metadata = batch.get(index++);
      try {
        // Note: Response.getStatusCode() throws on any error status.
        response.getStatusCode();
      } catch (BlobStorageException bex) {
        int status = bex.getStatusCode();
        // A blob that is already gone counts as purged; anything else is fatal.
        if (status != HttpURLConnection.HTTP_NOT_FOUND && status != HttpURLConnection.HTTP_GONE) {
          logger.error("Deleting blob {} got status {}", metadata.getId(), status);
          throw bex;
        }
      }
      purged.add(metadata);
    }
  }
  return purged;
}
Aggregations