Use of com.microsoft.azure.cosmosdb.DocumentClientException in project ambry by LinkedIn.
Class CosmosDataAccessor, method deleteMetadata.
/**
 * Delete the blob metadata document in the CosmosDB collection, if it exists.
 * @param blobMetadata the blob metadata document to delete.
 * @return {@code true} if the record was deleted, {@code false} if it was not found.
 * @throws DocumentClientException if the operation failed.
 */
boolean deleteMetadata(CloudBlobMetadata blobMetadata) throws DocumentClientException {
String blobId = blobMetadata.getId();
String documentLink = getDocumentLink(blobId);
RequestOptions requestOptions = getRequestOptions(blobMetadata.getPartitionId());
try {
// Note: not timing here since bulk deletions are timed.
executeCosmosAction(() -> asyncDocumentClient.deleteDocument(documentLink, requestOptions).toBlocking().single(), null);
return true;
} catch (DocumentClientException dex) {
if (dex.getStatusCode() != HttpURLConnection.HTTP_NOT_FOUND) {
throw dex;
}
// Can happen on retry
logger.debug("Could not find metadata for blob {} to delete", blobId);
return false;
}
}
Use of com.microsoft.azure.cosmosdb.DocumentClientException in project ambry by LinkedIn.
Class CosmosDataAccessor, method getContainerBlobs.
/**
 * Get the list of blobs in the specified partition that belong to the specified container.
 * @param partitionPath the partition to query.
 * @param accountId account id of the container.
 * @param containerId container id of the container.
 * @param queryLimit max number of blobs to return.
 * @return a List of {@link CloudBlobMetadata} referencing the blobs belonging to the deprecated containers.
 * @throws DocumentClientException in case of any error.
 */
List<CloudBlobMetadata> getContainerBlobs(String partitionPath, short accountId, short containerId, int queryLimit) throws DocumentClientException {
SqlQuerySpec querySpec = new SqlQuerySpec(CONTAINER_BLOBS_QUERY, new SqlParameterCollection(new SqlParameter(LIMIT_PARAM, queryLimit), new SqlParameter(CONTAINER_ID_PARAM, containerId), new SqlParameter(ACCOUNT_ID_PARAM, accountId)));
FeedOptions feedOptions = new FeedOptions();
feedOptions.setMaxItemCount(queryLimit);
feedOptions.setResponseContinuationTokenLimitInKb(continuationTokenLimitKb);
feedOptions.setPartitionKey(new PartitionKey(partitionPath));
try {
Iterator<FeedResponse<Document>> iterator = executeCosmosQuery(partitionPath, querySpec, feedOptions, azureMetrics.deletedContainerBlobsQueryTime).getIterator();
List<CloudBlobMetadata> containerBlobsList = new ArrayList<>();
double requestCharge = 0.0;
while (iterator.hasNext()) {
FeedResponse<Document> response = iterator.next();
requestCharge += response.getRequestCharge();
// Iterate the result page directly; no need for an explicit Iterator.
response.getResults().forEach(doc -> containerBlobsList.add(createMetadataFromDocument(doc)));
}
// Log expensive queries so RU consumption hot spots can be investigated.
if (requestCharge >= requestChargeThreshold) {
logger.info("Deleted container blobs query partition {} containerId {} accountId {} request charge {} for {} records", partitionPath, containerId, accountId, requestCharge, containerBlobsList.size());
}
return containerBlobsList;
} catch (RuntimeException rex) {
// The async client wraps DocumentClientException in a RuntimeException; unwrap so callers
// see the declared checked exception. (Log message fixed: it previously said "Dead blobs
// query", a copy-paste from the dead-blobs query method.)
if (rex.getCause() instanceof DocumentClientException) {
logger.warn("Container blobs query {} partition {} got {}", querySpec.getQueryText(), partitionPath, ((DocumentClientException) rex.getCause()).getStatusCode());
throw (DocumentClientException) rex.getCause();
}
throw rex;
}
}
Use of com.microsoft.azure.cosmosdb.DocumentClientException in project ambry by LinkedIn.
Class CosmosDataAccessor, method bulkDeleteMetadata.
/**
 * Delete the blob metadata documents from CosmosDB using the BulkDelete stored procedure.
 * All documents in the list are expected to share the partition of the first entry.
 * @param blobMetadataList the list of blob metadata documents to delete.
 * @return the number of documents deleted.
 * @throws DocumentClientException if the operation failed.
 */
private int bulkDeleteMetadata(List<CloudBlobMetadata> blobMetadataList) throws DocumentClientException {
// Guard against an empty batch: nothing to delete, and get(0) below would throw.
if (blobMetadataList.isEmpty()) {
return 0;
}
String partitionPath = blobMetadataList.get(0).getPartitionId();
RequestOptions options = getRequestOptions(partitionPath);
// stored proc link provided in config. Test for it at startup and use if available.
String quotedBlobIds = blobMetadataList.stream().map(metadata -> '"' + metadata.getId() + '"').collect(Collectors.joining(","));
String query = String.format(BULK_DELETE_QUERY, quotedBlobIds);
String sprocLink = cosmosCollectionLink + BULK_DELETE_SPROC;
boolean more = true;
int deleteCount = 0;
double requestCharge = 0;
try {
// The sproc deletes in batches and reports via PROPERTY_CONTINUATION whether more
// matching documents remain; loop until it signals completion.
while (more) {
StoredProcedureResponse response = asyncDocumentClient.executeStoredProcedure(sprocLink, options, new String[] { query }).toBlocking().single();
requestCharge += response.getRequestCharge();
Document responseDoc = response.getResponseAsDocument();
more = responseDoc.getBoolean(PROPERTY_CONTINUATION);
deleteCount += responseDoc.getInt(PROPERTY_DELETED);
}
// Log expensive deletes so RU consumption hot spots can be investigated.
if (requestCharge >= requestChargeThreshold) {
logger.info("Bulk delete partition {} request charge {} for {} records", partitionPath, requestCharge, deleteCount);
}
return deleteCount;
} catch (RuntimeException rex) {
// The async client wraps DocumentClientException in a RuntimeException; unwrap so callers
// see the declared checked exception.
if (rex.getCause() instanceof DocumentClientException) {
throw (DocumentClientException) rex.getCause();
} else {
throw rex;
}
}
}
Use of com.microsoft.azure.cosmosdb.DocumentClientException in project ambry by LinkedIn.
Class CosmosDataAccessor, method updateMetadata.
/**
 * Update the blob metadata document in the CosmosDB collection.
 * Performs a read-modify-replace: the existing document is fetched, the changed fields are
 * applied, and the replace is guarded by an ETag access condition to detect concurrent updates.
 * @param blobId the {@link BlobId} for which metadata is replaced.
 * @param updateFields Map of field names and new values to update.
 * @return the {@link ResourceResponse} returned by the operation, if successful.
 * Returns {@code null} if the field already has the specified value.
 * @throws DocumentClientException if the record was not found or if the operation failed
 * (including status PRECONDITION_FAILED when a concurrent update changed the ETag).
 */
ResourceResponse<Document> updateMetadata(BlobId blobId, Map<String, String> updateFields) throws DocumentClientException {
// Read the existing record
String docLink = getDocumentLink(blobId.getID());
RequestOptions options = getRequestOptions(blobId.getPartition().toPathString());
ResourceResponse<Document> readResponse = executeCosmosAction(() -> asyncDocumentClient.readDocument(docLink, options).toBlocking().single(), azureMetrics.documentReadTime);
Document doc = readResponse.getResource();
// Update only if value has changed: keep only the entries whose (string-valued) new value
// differs from what the document currently holds, so unchanged fields cost no write.
Map<String, String> fieldsToUpdate = updateFields.entrySet().stream().filter(map -> !String.valueOf(updateFields.get(map.getKey())).equals(doc.get(map.getKey()))).collect(Collectors.toMap(Map.Entry::getKey, map -> String.valueOf(map.getValue())));
if (fieldsToUpdate.size() == 0) {
// Nothing changed; skip the replace entirely and signal that with a null response.
logger.debug("No change in value for {} in blob {}", updateFields.keySet(), blobId.getID());
return null;
}
// For testing conflict handling: lets tests inject a concurrent update between read and replace.
if (updateCallback != null) {
try {
updateCallback.call();
} catch (Exception ex) {
logger.error("Error in update callback", ex);
}
}
// Perform the update
fieldsToUpdate.forEach((key, value) -> doc.set(key, value));
// Set condition to ensure we don't clobber a concurrent update: the replace succeeds only if
// the document's ETag still matches the one we read above.
AccessCondition accessCondition = new AccessCondition();
accessCondition.setCondition(doc.getETag());
options.setAccessCondition(accessCondition);
try {
return executeCosmosAction(() -> asyncDocumentClient.replaceDocument(doc, options).toBlocking().single(), azureMetrics.documentUpdateTime);
} catch (DocumentClientException e) {
// PRECONDITION_FAILED means the ETag no longer matched, i.e. a concurrent update won the race.
if (e.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED) {
azureMetrics.blobUpdateConflictCount.inc();
}
throw e;
}
}
Use of com.microsoft.azure.cosmosdb.DocumentClientException in project ambry by LinkedIn.
Class CosmosDataAccessor, method queryChangeFeed.
/**
 * Query the Cosmos change feed to get the next set of {@code CloudBlobMetadata} objects in the
 * specified {@code partitionPath} after {@code requestContinuationToken}, capped by the specified
 * {@code maxFeedSize} representing the max number of items to be queried from the change feed.
 * @param requestContinuationToken Continuation token after which change feed is requested.
 * @param maxFeedSize max item count to be requested in the feed query.
 * @param changeFeed {@link CloudBlobMetadata} {@code List} to be populated with the next set of entries returned by change feed query.
 * @param partitionPath partition for which the change feed is requested.
 * @param timer the {@link Timer} to use to record query time (excluding waiting).
 * @return next continuation token.
 * @throws DocumentClientException if the change feed query fails.
 */
public String queryChangeFeed(String requestContinuationToken, int maxFeedSize, List<CloudBlobMetadata> changeFeed, String partitionPath, Timer timer) throws DocumentClientException {
azureMetrics.changeFeedQueryCount.inc();
ChangeFeedOptions options = new ChangeFeedOptions();
options.setPartitionKey(new PartitionKey(partitionPath));
options.setMaxItemCount(maxFeedSize);
if (Utils.isNullOrEmpty(requestContinuationToken)) {
// No token supplied: read the feed from the very beginning.
options.setStartFromBeginning(true);
} else {
options.setRequestContinuation(requestContinuationToken);
}
try {
FeedResponse<Document> response = executeCosmosChangeFeedQuery(options, timer);
for (Document document : response.getResults()) {
changeFeed.add(createMetadataFromDocument(document));
}
return response.getResponseContinuation();
} catch (RuntimeException rex) {
azureMetrics.changeFeedQueryFailureCount.inc();
// The async client wraps DocumentClientException in a RuntimeException; unwrap it.
if (rex.getCause() instanceof DocumentClientException) {
throw (DocumentClientException) rex.getCause();
}
throw rex;
} catch (Exception ex) {
azureMetrics.changeFeedQueryFailureCount.inc();
throw ex;
}
}
Aggregations