use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by linkedin.
the class AzureIntegrationTest method testCompaction.
/**
 * Test blob compaction.
 * @throws Exception on error
 */
@Test
public void testCompaction() throws Exception {
  cleanup();
  int bucketCount = 20;
  PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
  // Upload blobs in various lifecycle states
  long now = System.currentTimeMillis();
  long creationTime = now - TimeUnit.DAYS.toMillis(7);
  for (int j = 0; j < bucketCount; j++) {
    Thread.sleep(20);
    logger.info("Uploading bucket {}", j);
    // Active blob
    BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId,
        false, BlobDataType.DATACHUNK);
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize,
        CloudBlobMetadata.EncryptionOrigin.NONE);
    InputStream inputStream = getBlobInputStream(blobSize);
    assertTrue("Expected upload to return true",
        uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest));
    // Blob deleted before retention cutoff (should match)
    long timeOfDeath = now - TimeUnit.DAYS.toMillis(retentionPeriodDays + 1);
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false,
        BlobDataType.DATACHUNK);
    cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize,
        CloudBlobMetadata.EncryptionOrigin.NONE);
    cloudBlobMetadata.setDeletionTime(timeOfDeath);
    assertTrue("Expected upload to return true",
        uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
    // Blob expired before retention cutoff (should match)
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false,
        BlobDataType.DATACHUNK);
    cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, timeOfDeath, blobSize,
        CloudBlobMetadata.EncryptionOrigin.NONE);
    assertTrue("Expected upload to return true",
        uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
    // Blob deleted after retention cutoff (should not match)
    timeOfDeath = now - TimeUnit.HOURS.toMillis(1);
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false,
        BlobDataType.DATACHUNK);
    cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize,
        CloudBlobMetadata.EncryptionOrigin.NONE);
    cloudBlobMetadata.setDeletionTime(timeOfDeath);
    assertTrue("Expected upload to return true",
        uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
    // Blob expired after retention cutoff (should not match)
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false,
        BlobDataType.DATACHUNK);
    cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, timeOfDeath, blobSize,
        CloudBlobMetadata.EncryptionOrigin.NONE);
    assertTrue("Expected upload to return true",
        uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
  }
  // Run compaction: the getDeadBlobs query should match 2 * bucketCount blobs
  String partitionPath = String.valueOf(testPartition);
  int compactedCount = azureDest.compactPartition(partitionPath);
  assertEquals("Unexpected count compacted", 2 * bucketCount, compactedCount);
  cleanup();
}
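Of the five blobs uploaded per bucket, only the two whose time of death falls before the retention cutoff (deleted or expired more than retentionPeriodDays ago) are expected to match, which is why the test asserts 2 * bucketCount. Below is a minimal, self-contained sketch of that cutoff predicate, written as a hypothetical helper rather than the actual AzureStorageCompactor logic, and assuming permanent lifetimes are encoded as -1 (the value of Utils.Infinite_Time):

import java.util.concurrent.TimeUnit;

public class RetentionCutoffSketch {
  // Assumption: ambry encodes a permanent ttl (Utils.Infinite_Time) as -1.
  static final long INFINITE_TIME = -1;

  /** A blob is dead only if its time of death is set and precedes the retention cutoff. */
  static boolean isCompactable(long timeOfDeath, long retentionCutoff) {
    return timeOfDeath != INFINITE_TIME && timeOfDeath < retentionCutoff;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    long retentionCutoff = now - TimeUnit.DAYS.toMillis(7);
    System.out.println(isCompactable(now - TimeUnit.DAYS.toMillis(8), retentionCutoff));  // true: before cutoff
    System.out.println(isCompactable(now - TimeUnit.HOURS.toMillis(1), retentionCutoff)); // false: after cutoff
    System.out.println(isCompactable(INFINITE_TIME, retentionCutoff));                    // false: active blob
  }
}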
use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by linkedin.
the class CosmosUpdateTimeBasedReplicationFeed method filterOutLastReadBlobs.
/**
 * Filter out {@link CloudBlobMetadata} objects from the lastUpdateTime-ordered {@code cloudBlobMetadataList}
 * whose lastUpdateTime equals {@code lastUpdateTime} and whose id is in {@code lastReadBlobIds}.
 * @param cloudBlobMetadataList list of {@link CloudBlobMetadata} objects to filter.
 * @param lastReadBlobIds set of blobIds that need to be filtered out.
 * @param lastUpdateTime lastUpdateTime of the blobIds to filter out.
 */
private void filterOutLastReadBlobs(List<CloudBlobMetadata> cloudBlobMetadataList, Set<String> lastReadBlobIds,
    long lastUpdateTime) {
  ListIterator<CloudBlobMetadata> iterator = cloudBlobMetadataList.listIterator();
  int numRemovedBlobs = 0;
  while (iterator.hasNext()) {
    CloudBlobMetadata cloudBlobMetadata = iterator.next();
    // The list is ordered by lastUpdateTime, so stop once past the target timestamp
    // or once every candidate id has been removed.
    if (numRemovedBlobs == lastReadBlobIds.size() || cloudBlobMetadata.getLastUpdateTime() > lastUpdateTime) {
      break;
    }
    if (lastReadBlobIds.contains(cloudBlobMetadata.getId())) {
      iterator.remove();
      numRemovedBlobs++;
    }
  }
}
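The early break is only safe because the caller passes a list sorted by lastUpdateTime. Here is a standalone demonstration of the same exit conditions, using a plain record in place of CloudBlobMetadata (all names in this sketch are illustrative, not ambry API):

import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;
import java.util.Set;

public class FilterSketch {
  record Blob(String id, long lastUpdateTime) {}

  static void filterOutLastReadBlobs(List<Blob> blobs, Set<String> lastReadIds, long lastUpdateTime) {
    ListIterator<Blob> iterator = blobs.listIterator();
    int numRemoved = 0;
    while (iterator.hasNext()) {
      Blob blob = iterator.next();
      // Stop once every candidate id is removed or once past the target timestamp.
      if (numRemoved == lastReadIds.size() || blob.lastUpdateTime() > lastUpdateTime) {
        break;
      }
      if (lastReadIds.contains(blob.id())) {
        iterator.remove();
        numRemoved++;
      }
    }
  }

  public static void main(String[] args) {
    List<Blob> blobs = new ArrayList<>(List.of(new Blob("a", 100), new Blob("b", 100), new Blob("c", 200)));
    filterOutLastReadBlobs(blobs, Set.of("a"), 100);
    System.out.println(blobs); // [Blob[id=b, lastUpdateTime=100], Blob[id=c, lastUpdateTime=200]]
  }
}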
use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by linkedin.
the class AzureTestUtils method createUnencryptedPermanentBlobs.
/**
 * Utility method to create a specified number of unencrypted blobs with permanent ttl and the given properties.
 * @param numBlobs number of blobs to create.
 * @param dataCenterId datacenter id.
 * @param accountId account id.
 * @param containerId container id.
 * @param partitionId {@link PartitionId} of the partition in which blobs will be created.
 * @param blobSize size of blobs.
 * @param cloudRequestAgent {@link CloudRequestAgent} object.
 * @param azureDest {@link AzureCloudDestination} object.
 * @param creationTime blob creation time.
 * @return a {@link Map} from the created blobs' {@link BlobId}s to their data.
 * @throws CloudStorageException in case of any exception while uploading a blob.
 */
static Map<BlobId, byte[]> createUnencryptedPermanentBlobs(int numBlobs, byte dataCenterId, short accountId,
    short containerId, PartitionId partitionId, int blobSize, CloudRequestAgent cloudRequestAgent,
    AzureCloudDestination azureDest, long creationTime) throws CloudStorageException {
  Map<BlobId, byte[]> blobIdtoDataMap = new HashMap<>();
  for (int j = 0; j < numBlobs; j++) {
    BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId,
        false, BlobDataType.DATACHUNK);
    byte[] randomBytes = TestUtils.getRandomBytes(blobSize);
    blobIdtoDataMap.put(blobId, randomBytes);
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize,
        CloudBlobMetadata.EncryptionOrigin.NONE);
    assertTrue("Expected upload to return true",
        uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, new ByteArrayInputStream(randomBytes),
            cloudRequestAgent, azureDest));
  }
  return blobIdtoDataMap;
}
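A hypothetical call site, assuming the enclosing test already holds the fields used throughout this page (dataCenterId, accountId, containerId, partitionId, cloudRequestAgent, azureDest); the blob count and size below are arbitrary:

// Upload ten 1 KiB permanent blobs and sanity-check the returned map.
Map<BlobId, byte[]> blobIdToDataMap = AzureTestUtils.createUnencryptedPermanentBlobs(10, dataCenterId, accountId,
    containerId, partitionId, 1024, cloudRequestAgent, azureDest, System.currentTimeMillis());
assertEquals("Expected one map entry per uploaded blob", 10, blobIdToDataMap.size());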
use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by linkedin.
the class AzureStorageCompactorTest method buildCompactor.
private void buildCompactor(Properties configProps) throws Exception {
  CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(configProps));
  VcrMetrics vcrMetrics = new VcrMetrics(new MetricRegistry());
  azureBlobDataAccessor = new AzureBlobDataAccessor(mockServiceClient, mockBlobBatchClient, clusterName, azureMetrics,
      new AzureCloudConfig(new VerifiableProperties(configProps)));
  cosmosDataAccessor = new CosmosDataAccessor(mockumentClient, collectionLink, cosmosDeletedContainerCollectionLink,
      vcrMetrics, azureMetrics);
  azureStorageCompactor = new AzureStorageCompactor(azureBlobDataAccessor, cosmosDataAccessor, cloudConfig, vcrMetrics,
      azureMetrics);
  // Mocks for getDeadBlobs query
  List<Document> docList = new ArrayList<>();
  for (int j = 0; j < numBlobsPerQuery; j++) {
    BlobId blobId = generateBlobId();
    CloudBlobMetadata inputMetadata = new CloudBlobMetadata(blobId, testTime, Utils.Infinite_Time, blobSize,
        CloudBlobMetadata.EncryptionOrigin.NONE);
    blobMetadataList.add(inputMetadata);
    docList.add(AzureTestUtils.createDocumentFromCloudBlobMetadata(inputMetadata));
  }
  Observable<FeedResponse<Document>> mockResponse = mock(Observable.class);
  mockObservableForQuery(docList, mockResponse);
  when(mockumentClient.queryDocuments(anyString(), any(SqlQuerySpec.class), any(FeedOptions.class)))
      .thenReturn(mockResponse);
  // Mocks for purge
  BlobBatch mockBatch = mock(BlobBatch.class);
  when(mockBlobBatchClient.getBlobBatch()).thenReturn(mockBatch);
  Response<Void> okResponse = mock(Response.class);
  when(okResponse.getStatusCode()).thenReturn(202);
  when(mockBatch.deleteBlob(anyString(), anyString())).thenReturn(okResponse);
  Observable<StoredProcedureResponse> mockBulkDeleteResponse = getMockBulkDeleteResponse(1);
  when(mockumentClient.executeStoredProcedure(anyString(), any(RequestOptions.class), any()))
      .thenReturn(mockBulkDeleteResponse);
  String checkpointJson = objectMapper.writeValueAsString(AzureStorageCompactor.emptyCheckpoints);
  mockCheckpointDownload(true, checkpointJson);
}
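With these mocks in place, a test can drive a full compaction pass against purely in-memory fakes. A hedged sketch follows (compactPartition mirrors the integration test earlier on this page; partitionPath and the assertion are assumptions, not the exact ambry test code):

buildCompactor(configProps);
// The mocked query feeds numBlobsPerQuery dead blobs, and the mocked batch
// reports every delete as successful (202), so compaction should purge them all.
int purgedCount = azureStorageCompactor.compactPartition(partitionPath);
assertTrue("Expected the mocked dead blobs to be purged", purgedCount > 0);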
use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by linkedin.
the class CosmosDataAccessorTest method setup.
@Before
public void setup() {
  mockumentClient = mock(AsyncDocumentClient.class);
  byte dataCenterId = 66;
  short accountId = 101;
  short containerId = 5;
  PartitionId partitionId = new MockPartitionId();
  blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false,
      BlobDataType.DATACHUNK);
  blobMetadata = new CloudBlobMetadata(blobId, System.currentTimeMillis(), Utils.Infinite_Time, blobSize,
      CloudBlobMetadata.EncryptionOrigin.NONE);
  azureMetrics = new AzureMetrics(new MetricRegistry());
  VcrMetrics vcrMetrics = new VcrMetrics(new MetricRegistry());
  cosmosAccessor = new CosmosDataAccessor(mockumentClient, "ambry/metadata", "ambry/deletedContainer", vcrMetrics,
      azureMetrics);
}
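A minimal follow-on test that only touches the objects built in setup(), shown here to illustrate what the fixture provides (the assertions assume CloudBlobMetadata's standard getters):

@Test
public void testMetadataFields() {
  // No Cosmos calls involved; just verify the metadata captured the id, size, and permanent ttl.
  assertEquals(blobId.getID(), blobMetadata.getId());
  assertEquals(blobSize, blobMetadata.getSize());
  assertEquals(Utils.Infinite_Time, blobMetadata.getExpirationTime());
}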