Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.
The class AzureIntegrationTest, method testFindEntriesSince.
/**
 * Test findEntriesSince with the specified cloud token factory.
 * @param replicationCloudTokenFactory the factory to use.
 * @throws Exception on error
 */
private void testFindEntriesSince(String replicationCloudTokenFactory) throws Exception {
  logger.info("Testing findEntriesSince with {}", replicationCloudTokenFactory);
  testProperties.setProperty(ReplicationConfig.REPLICATION_CLOUD_TOKEN_FACTORY, replicationCloudTokenFactory);
  VerifiableProperties verifiableProperties = new VerifiableProperties(testProperties);
  ReplicationConfig replicationConfig = new ReplicationConfig(verifiableProperties);
  FindTokenFactory findTokenFactory = new FindTokenHelper(null, replicationConfig).getFindTokenFactoryFromReplicaType(ReplicaType.CLOUD_BACKED);
  azureDest = (AzureCloudDestination) new AzureCloudDestinationFactory(verifiableProperties, new MetricRegistry(), clusterMap).getCloudDestination();
  cleanup();
  PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
  String partitionPath = String.valueOf(testPartition);
  // Upload some blobs with different upload times
  int blobCount = 90;
  int chunkSize = 1000;
  int maxTotalSize = 20000;
  int expectedNumQueries = (blobCount * chunkSize) / maxTotalSize + 1;
  long now = System.currentTimeMillis();
  long startTime = now - TimeUnit.DAYS.toMillis(7);
  for (int j = 0; j < blobCount; j++) {
    BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
    InputStream inputStream = getBlobInputStream(chunkSize);
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, startTime, Utils.Infinite_Time, chunkSize, CloudBlobMetadata.EncryptionOrigin.VCR, vcrKmsContext, cryptoAgentFactory, chunkSize, (short) 0);
    cloudBlobMetadata.setUploadTime(startTime + j * 1000);
    assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, chunkSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest));
  }
  FindToken findToken = findTokenFactory.getNewFindToken();
  // Call findEntriesSince in a loop until no new entries are returned
  FindResult findResult;
  int numQueries = 0;
  int totalBlobsReturned = 0;
  do {
    findResult = findEntriesSinceWithRetry(partitionPath, findToken, maxTotalSize);
    findToken = findResult.getUpdatedFindToken();
    if (!findResult.getMetadataList().isEmpty()) {
      numQueries++;
    }
    totalBlobsReturned += findResult.getMetadataList().size();
  } while (!noMoreFindSinceEntries(findResult, findToken));
  assertEquals("Wrong number of queries", expectedNumQueries, numQueries);
  assertEquals("Wrong number of blobs", blobCount, totalBlobsReturned);
  assertEquals("Wrong byte count", blobCount * chunkSize, findToken.getBytesRead());
  cleanup();
}
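The loop above keeps calling findEntriesSince until the token stops advancing, and the test expects ceil(blobCount * chunkSize / maxTotalSize) = ceil(90000 / 20000) = 5 size-capped queries. The sketch below illustrates that size-capped paging pattern in isolation; PagedSource and countQueries are hypothetical names, not the Ambry CloudDestination API.

// A minimal sketch of the size-capped paging loop, assuming a hypothetical PagedSource
// interface in place of the real CloudDestination.findEntriesSince API.
import java.util.ArrayList;
import java.util.List;

interface PagedSource {
  // Returns the sizes of the next batch of entries, capped at maxTotalSize bytes (hypothetical).
  List<Integer> nextPage(int offset, int maxTotalSize);
}

class PagingSketch {
  static int countQueries(PagedSource source, int maxTotalSize) {
    int offset = 0;
    int queries = 0;
    while (true) {
      List<Integer> page = source.nextPage(offset, maxTotalSize);
      if (page.isEmpty()) {
        break; // no new entries: the test's loop stops the same way via noMoreFindSinceEntries
      }
      queries++;
      offset += page.size();
    }
    return queries;
  }

  public static void main(String[] args) {
    // 90 blobs of 1000 bytes each, capped at 20000 bytes per query: 20 + 20 + 20 + 20 + 10 blobs = 5 queries.
    int blobCount = 90, chunkSize = 1000, maxTotalSize = 20000;
    PagedSource source = (offset, cap) -> {
      List<Integer> page = new ArrayList<>();
      for (int i = offset, bytes = 0; i < blobCount && bytes + chunkSize <= cap; i++, bytes += chunkSize) {
        page.add(chunkSize);
      }
      return page;
    };
    System.out.println(countQueries(source, maxTotalSize)); // prints 5
  }
}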
Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.
The class AzureIntegrationTest, method testNormalFlow.
/**
 * Test normal operations.
 * @throws Exception on error
 */
@Test
public void testNormalFlow() throws Exception {
  PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
  BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
  byte[] uploadData = TestUtils.getRandomBytes(blobSize);
  InputStream inputStream = new ByteArrayInputStream(uploadData);
  long now = System.currentTimeMillis();
  CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, now, now + 60000, blobSize, CloudBlobMetadata.EncryptionOrigin.VCR, vcrKmsContext, cryptoAgentFactory, blobSize, (short) 0);
  // attempt undelete before uploading blob
  try {
    undeleteBlobWithRetry(blobId, (short) 1);
    fail("Undelete of a non existent blob should fail.");
  } catch (CloudStorageException cex) {
    assertEquals(cex.getStatusCode(), HttpConstants.StatusCodes.NOTFOUND);
  }
  assertTrue("Expected upload to return true", AzureTestUtils.uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest));
  // Get blob should return the same data
  verifyDownloadMatches(blobId, uploadData);
  // Try to upload same blob again
  assertFalse("Expected duplicate upload to return false", AzureTestUtils.uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, new ByteArrayInputStream(uploadData), cloudRequestAgent, azureDest));
  // ttl update
  long expirationTime = Utils.Infinite_Time;
  try {
    updateBlobExpirationWithRetry(blobId, expirationTime);
  } catch (Exception ex) {
    fail("Expected update to be successful");
  }
  CloudBlobMetadata metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
  assertEquals(expirationTime, metadata.getExpirationTime());
  // delete blob
  long deletionTime = now + 10000;
  // TODO add a test case here to verify life version after delete.
  assertTrue("Expected deletion to return true", cloudRequestAgent.doWithRetries(() -> azureDest.deleteBlob(blobId, deletionTime, (short) 0, dummyCloudUpdateValidator), "DeleteBlob", partitionId.toPathString()));
  metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
  assertEquals(deletionTime, metadata.getDeletionTime());
  // undelete blob
  assertEquals(undeleteBlobWithRetry(blobId, (short) 1), 1);
  metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
  assertEquals(metadata.getDeletionTime(), Utils.Infinite_Time);
  assertEquals(metadata.getLifeVersion(), 1);
  // undelete with a higher life version updates life version.
  assertEquals(undeleteBlobWithRetry(blobId, (short) 2), 2);
  metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
  assertEquals(metadata.getDeletionTime(), Utils.Infinite_Time);
  assertEquals(metadata.getLifeVersion(), 2);
  // delete after undelete.
  long newDeletionTime = now + 20000;
  // TODO add a test case here to verify life version after delete.
  assertTrue("Expected deletion to return true", cloudRequestAgent.doWithRetries(() -> azureDest.deleteBlob(blobId, newDeletionTime, (short) 3, dummyCloudUpdateValidator), "DeleteBlob", partitionId.toPathString()));
  metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
  assertEquals(newDeletionTime, metadata.getDeletionTime());
  // delete changes life version.
  assertEquals(metadata.getLifeVersion(), 3);
  // compact partition
  azureDest.compactPartition(partitionId.toPathString());
  assertTrue("Expected empty set after purge", getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).isEmpty());
  // Get blob should fail after purge
  try {
    verifyDownloadMatches(blobId, uploadData);
    fail("download blob should fail after data is purged");
  } catch (CloudStorageException csex) {
    // Expected: the blob and its data were purged by compactPartition.
  }
}
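testNormalFlow walks a blob through its full lifecycle: upload, duplicate upload rejected, TTL update, delete, undelete (which bumps the life version), delete again, and finally compaction, which purges the record entirely. The sketch below is a hypothetical simplification of how the relevant metadata fields move at each step; the class name and INFINITE_TIME constant are illustrative, not the real CloudBlobMetadata API.

// Hypothetical simplification of the lifecycle state tracked per blob; assumes the
// "never expires / not deleted" sentinel is -1, standing in for Utils.Infinite_Time.
class BlobLifecycleSketch {
  static final long INFINITE_TIME = -1; // assumption: stand-in for Utils.Infinite_Time

  long expirationTime;               // set at upload (e.g. now + 60000 in the test)
  long deletionTime = INFINITE_TIME; // no deletion recorded yet
  short lifeVersion = 0;

  void updateTtl() {
    expirationTime = INFINITE_TIME;  // TTL update makes the blob permanent
  }

  void delete(long deletionTimeMs, short newLifeVersion) {
    deletionTime = deletionTimeMs;   // delete stamps the deletion time
    lifeVersion = newLifeVersion;    // and records the life version of the delete
  }

  void undelete(short newLifeVersion) {
    deletionTime = INFINITE_TIME;    // undelete clears the deletion time
    lifeVersion = newLifeVersion;    // and moves to the higher life version
  }
}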
Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.
The class CosmosUpdateTimeBasedReplicationFeed, method getNextEntriesAndUpdatedToken.
@Override
public FindResult getNextEntriesAndUpdatedToken(FindToken curfindToken, long maxTotalSizeOfEntries, String partitionPath) throws DocumentClientException {
  Timer.Context operationTimer = azureMetrics.replicationFeedQueryTime.time();
  try {
    CosmosUpdateTimeFindToken findToken = (CosmosUpdateTimeFindToken) curfindToken;
    SqlQuerySpec entriesSinceQuery = new SqlQuerySpec(ENTRIES_SINCE_QUERY_TEMPLATE, new SqlParameterCollection(new SqlParameter(LIMIT_PARAM, queryBatchSize), new SqlParameter(TIME_SINCE_PARAM, findToken.getLastUpdateTime())));
    List<CloudBlobMetadata> queryResults = cosmosDataAccessor.queryMetadata(partitionPath, entriesSinceQuery, azureMetrics.findSinceQueryTime);
    if (queryResults.isEmpty()) {
      return new FindResult(new ArrayList<>(), findToken);
    }
    // Skip blobs at the token's last update time that were already returned by a previous query
    if (queryResults.get(0).getLastUpdateTime() == findToken.getLastUpdateTime()) {
      filterOutLastReadBlobs(queryResults, findToken.getLastUpdateTimeReadBlobIds(), findToken.getLastUpdateTime());
    }
    // Cap the returned entries so their cumulative size stays within maxTotalSizeOfEntries
    List<CloudBlobMetadata> cappedResults = CloudBlobMetadata.capMetadataListBySize(queryResults, maxTotalSizeOfEntries);
    return new FindResult(cappedResults, CosmosUpdateTimeFindToken.getUpdatedToken(findToken, cappedResults));
  } finally {
    operationTimer.stop();
  }
}
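Two details carry the paging contract here: results at the token's own last update time are filtered against the blob IDs already handed out, so a caller never sees the same blob twice, and the remaining list is capped so its cumulative size stays within maxTotalSizeOfEntries. A minimal sketch of that capping step follows, assuming entries are kept until the budget would be exceeded; the real CloudBlobMetadata.capMetadataListBySize may treat edge cases (such as a single oversized entry) differently.

// A sketch of size-capping under the stated assumption; not the actual
// CloudBlobMetadata.capMetadataListBySize implementation.
import java.util.ArrayList;
import java.util.List;
import java.util.function.ToLongFunction;

class CapBySizeSketch {
  static <T> List<T> capBySize(List<T> entries, ToLongFunction<T> sizeOf, long maxTotalSize) {
    List<T> capped = new ArrayList<>();
    long total = 0;
    for (T entry : entries) {
      long size = sizeOf.applyAsLong(entry);
      if (!capped.isEmpty() && total + size > maxTotalSize) {
        break; // budget exhausted; later entries are picked up by the next query
      }
      capped.add(entry);
      total += size;
    }
    return capped;
  }
}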
Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.
The class AzureStorageCompactorTest, method testCompactionCheckpoints.
/**
 * Test compaction checkpoint behavior.
 */
@Test
public void testCompactionCheckpoints() throws Exception {
  AzureStorageCompactor compactorSpy = spy(azureStorageCompactor);
  doReturn(true).when(compactorSpy).updateCompactionProgress(anyString(), anyString(), anyLong());
  String fieldName = CloudBlobMetadata.FIELD_DELETION_TIME;
  long startTime = testTime - TimeUnit.DAYS.toMillis(numBlobsPerQuery);
  long endTime = testTime;
  // When dead blobs query returns results, progress gets updated to last record's dead time
  List<Document> docList = new ArrayList<>();
  long lastDeadTime = 0;
  for (int j = 0; j < numBlobsPerQuery; j++) {
    BlobId blobId = generateBlobId();
    CloudBlobMetadata inputMetadata = new CloudBlobMetadata(blobId, testTime, Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
    lastDeadTime = startTime + TimeUnit.HOURS.toMillis(j);
    inputMetadata.setDeletionTime(lastDeadTime);
    blobMetadataList.add(inputMetadata);
    docList.add(AzureTestUtils.createDocumentFromCloudBlobMetadata(inputMetadata));
  }
  Observable<FeedResponse<Document>> mockResponse = mock(Observable.class);
  mockObservableForQuery(docList, mockResponse);
  when(mockumentClient.queryDocuments(anyString(), any(SqlQuerySpec.class), any(FeedOptions.class))).thenReturn(mockResponse);
  compactorSpy.compactPartition(partitionPath, fieldName, startTime, endTime);
  verify(compactorSpy, atLeastOnce()).updateCompactionProgress(eq(partitionPath), eq(fieldName), eq(lastDeadTime));
  verify(compactorSpy, never()).updateCompactionProgress(eq(partitionPath), eq(fieldName), eq(endTime));
  // When the dead-blobs query returns no results, progress gets updated to the query end time
  mockResponse = getMockedObservableForQueryWithNoResults();
  when(mockumentClient.queryDocuments(anyString(), any(SqlQuerySpec.class), any(FeedOptions.class))).thenReturn(mockResponse);
  compactorSpy.compactPartition(partitionPath, fieldName, startTime, endTime);
  verify(compactorSpy).updateCompactionProgress(eq(partitionPath), eq(fieldName), eq(endTime));
}
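The two verify calls encode the checkpoint rule under test: when the dead-blobs query returns records, compaction progress advances to the dead time of the last record returned, and when it returns nothing, progress jumps straight to the query end time. A small sketch of that rule follows, with an illustrative helper name rather than the AzureStorageCompactor API.

// Hypothetical helper capturing the checkpoint rule the test verifies;
// not the actual AzureStorageCompactor implementation.
import java.util.List;

class CompactionCheckpointSketch {
  static long nextCheckpoint(List<Long> deadTimesReturned, long queryEndTime) {
    if (deadTimesReturned.isEmpty()) {
      return queryEndTime; // nothing dead in this window: skip to the end of the window
    }
    // otherwise resume from the dead time of the last record returned
    return deadTimesReturned.get(deadTimesReturned.size() - 1);
  }
}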
Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.
The class CosmosDataAccessorTest, method testQueryNormal.
/**
 * Test query metadata.
 */
@Test
public void testQueryNormal() throws Exception {
  Observable<FeedResponse<Document>> mockResponse = mock(Observable.class);
  List<Document> docList = Collections.singletonList(AzureTestUtils.createDocumentFromCloudBlobMetadata(blobMetadata));
  mockObservableForQuery(docList, mockResponse);
  when(mockumentClient.queryDocuments(anyString(), any(SqlQuerySpec.class), any(FeedOptions.class))).thenReturn(mockResponse);
  List<CloudBlobMetadata> metadataList = doQueryMetadata();
  assertEquals("Expected single entry", 1, metadataList.size());
  CloudBlobMetadata outputMetadata = metadataList.get(0);
  assertEquals("Returned metadata does not match original", blobMetadata, outputMetadata);
  assertEquals(1, azureMetrics.documentQueryCount.getCount());
  assertEquals(1, azureMetrics.missingKeysQueryTime.getCount());
}
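The assertion on outputMetadata relies on CloudBlobMetadata surviving a round trip through a Cosmos Document and on a field-based equals(). The sketch below shows that round-trip idea with purely hypothetical names (Meta, toDocument, fromDocument), using a plain map in place of a Document.

// Hypothetical round-trip sketch in the spirit of the assertion above; all names here
// are illustrative, and a generic map stands in for the Cosmos Document.
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

class RoundTripSketch {
  static final class Meta {
    final String id;
    final long size;
    Meta(String id, long size) { this.id = id; this.size = size; }
    @Override
    public boolean equals(Object o) {
      return o instanceof Meta && ((Meta) o).id.equals(id) && ((Meta) o).size == size;
    }
    @Override
    public int hashCode() { return Objects.hash(id, size); }
  }

  static Map<String, Object> toDocument(Meta m) {
    Map<String, Object> doc = new HashMap<>();
    doc.put("id", m.id);
    doc.put("size", m.size);
    return doc;
  }

  static Meta fromDocument(Map<String, Object> doc) {
    return new Meta((String) doc.get("id"), (Long) doc.get("size"));
  }

  public static void main(String[] args) {
    Meta original = new Meta("blob-1", 1024L);
    Meta restored = fromDocument(toDocument(original));
    if (!original.equals(restored)) {
      throw new AssertionError("round trip changed the metadata"); // the test asserts the same idea with assertEquals
    }
  }
}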