Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.
The class AzureCloudDestinationTest, method setup.
@Before
public void setup() throws Exception {
  long partition = 666;
  PartitionId partitionId = new MockPartitionId(partition, MockClusterMap.DEFAULT_PARTITION_CLASS);
  blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false,
      BlobDataType.DATACHUNK);
  CloudBlobMetadata blobMetadata =
      new CloudBlobMetadata(blobId, 0, Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
  // Mock the Azure Blob Storage clients.
  mockServiceClient = mock(BlobServiceClient.class);
  mockBlobBatchClient = mock(BlobBatchClient.class);
  mockBlockBlobClient = AzureBlobDataAccessorTest.setupMockBlobClient(mockServiceClient);
  mockBlobExistence(false);
  // Mock the Cosmos client so every document operation returns the canned metadata.
  mockumentClient = mock(AsyncDocumentClient.class);
  Observable<ResourceResponse<Document>> mockResponse = getMockedObservableForSingleResource(blobMetadata);
  when(mockumentClient.readDocument(anyString(), any(RequestOptions.class))).thenReturn(mockResponse);
  when(mockumentClient.upsertDocument(anyString(), any(Object.class), any(RequestOptions.class),
      anyBoolean())).thenReturn(mockResponse);
  when(mockumentClient.replaceDocument(any(Document.class), any(RequestOptions.class))).thenReturn(mockResponse);
  when(mockumentClient.deleteDocument(anyString(), any(RequestOptions.class))).thenReturn(mockResponse);
  // Configure the Azure storage and Cosmos properties used by the destination.
  configProps.setProperty(AzureCloudConfig.AZURE_STORAGE_CONNECTION_STRING, storageConnection);
  configProps.setProperty(AzureCloudConfig.COSMOS_ENDPOINT, "http://ambry.beyond-the-cosmos.com:443");
  configProps.setProperty(AzureCloudConfig.COSMOS_COLLECTION_LINK, "ambry/metadata");
  configProps.setProperty(AzureCloudConfig.COSMOS_DELETED_CONTAINER_COLLECTION_LINK, "ambry/deletedContainer");
  configProps.setProperty(AzureCloudConfig.COSMOS_KEY, "cosmos-key");
  configProps.setProperty("clustermap.cluster.name", "main");
  configProps.setProperty("clustermap.datacenter.name", "uswest");
  configProps.setProperty("clustermap.host.name", "localhost");
  configProps.setProperty(AzureCloudConfig.AZURE_STORAGE_AUTHORITY,
      "https://login.microsoftonline.com/test-account/");
  configProps.setProperty(AzureCloudConfig.AZURE_STORAGE_CLIENTID, "client-id");
  configProps.setProperty(AzureCloudConfig.AZURE_STORAGE_SECRET, "client-secret");
  configProps.setProperty(AzureCloudConfig.AZURE_STORAGE_ENDPOINT, "https://azure_storage.blob.core.windows.net");
  configProps.setProperty(AzureCloudConfig.AZURE_STORAGE_CLIENT_CLASS,
      "com.github.ambry.cloud.azure.ConnectionStringBasedStorageClient");
  vcrMetrics = new VcrMetrics(new MetricRegistry());
  azureMetrics = new AzureMetrics(new MetricRegistry());
  clusterMap = mock(ClusterMap.class);
  azureDest =
      new AzureCloudDestination(mockServiceClient, mockBlobBatchClient, mockumentClient, "foo", "bar", clusterName,
          azureMetrics, defaultAzureReplicationFeedType, clusterMap, false, configProps);
}
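The mockBlobExistence helper invoked above is not part of this snippet. A minimal sketch of what it presumably does, assuming it only stubs the exists() call on the mocked block blob client (the real test may stub additional calls):

// Hypothetical reconstruction of the helper referenced above, not the verbatim
// Ambry code: stub the mocked client's exists() call so the destination sees
// the blob as present or absent.
private void mockBlobExistence(boolean exists) {
  when(mockBlockBlobClient.exists()).thenReturn(exists);
}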
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.
The class AzureIntegrationTest, method testConcurrentUpdates.
/**
* Test that concurrent updates fail when the precondition does not match.
* We don't test retries here since CloudBlobStoreTest covers that.
*/
@Test
public void testConcurrentUpdates() throws Exception {
  PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
  BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false,
      BlobDataType.DATACHUNK);
  InputStream inputStream = getBlobInputStream(blobSize);
  long now = System.currentTimeMillis();
  CloudBlobMetadata cloudBlobMetadata =
      new CloudBlobMetadata(blobId, now, now + 60000, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
  uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest);
  // Use a different instance to simulate a concurrent update in a separate session.
  AzureCloudDestination concurrentUpdater = getAzureDestination(verifiableProperties);
  String fieldName = CloudBlobMetadata.FIELD_UPLOAD_TIME;
  long newUploadTime = now++;
  // Case 1: concurrent modification to blob metadata.
  azureDest.getAzureBlobDataAccessor()
      .setUpdateCallback(() -> concurrentUpdater.getAzureBlobDataAccessor()
          .updateBlobMetadata(blobId, Collections.singletonMap(fieldName, newUploadTime),
              dummyCloudUpdateValidator));
  try {
    azureDest.updateBlobExpiration(blobId, ++now, dummyCloudUpdateValidator);
    fail("Expected 412 error");
  } catch (CloudStorageException csex) {
    // TODO: check that the nested exception is BlobStorageException with status code 412
    assertEquals("Expected update conflict", 1, azureDest.getAzureMetrics().blobUpdateConflictCount.getCount());
  }
  // Case 2: concurrent modification to the Cosmos record.
  azureDest.getCosmosDataAccessor()
      .setUpdateCallback(() -> concurrentUpdater.getCosmosDataAccessor()
          .updateMetadata(blobId, Collections.singletonMap(fieldName, Long.toString(newUploadTime))));
  try {
    azureDest.updateBlobExpiration(blobId, ++now, dummyCloudUpdateValidator);
    fail("Expected 412 error");
  } catch (CloudStorageException csex) {
    assertEquals("Expected update conflict", 2, azureDest.getAzureMetrics().blobUpdateConflictCount.getCount());
  }
  // With no concurrent updater, the update should succeed and add no new conflicts.
  azureDest.getCosmosDataAccessor().setUpdateCallback(null);
  try {
    azureDest.updateBlobExpiration(blobId, ++now, dummyCloudUpdateValidator);
  } catch (Exception ex) {
    fail("Expected update to succeed.");
  }
  assertEquals("Expected no new update conflict", 2, azureDest.getAzureMetrics().blobUpdateConflictCount.getCount());
}
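What this test exercises is optimistic concurrency: the accessor reads the blob's current ETag, the injected callback performs a competing write (which changes the ETag), and the subsequent conditional write fails with 412 Precondition Failed. A minimal sketch of that read/hook/conditional-write pattern against the Azure SDK; the updateCallback field and the surrounding class are assumptions for illustration, not Ambry's exact code:

import com.azure.core.util.Context;
import com.azure.storage.blob.BlobClient;
import com.azure.storage.blob.models.BlobProperties;
import com.azure.storage.blob.models.BlobRequestConditions;
import java.util.Map;

class ConditionalUpdater {
  private final BlobClient blobClient;
  private Runnable updateCallback; // test hook, installed via setUpdateCallback()

  ConditionalUpdater(BlobClient blobClient) {
    this.blobClient = blobClient;
  }

  void setUpdateCallback(Runnable callback) {
    this.updateCallback = callback;
  }

  void updateMetadata(Map<String, String> newMetadata) {
    // 1. Read the current ETag.
    BlobProperties props = blobClient.getProperties();
    // 2. Test hook: a concurrent writer can sneak in here and change the ETag.
    if (updateCallback != null) {
      updateCallback.run();
    }
    // 3. Conditional write: throws BlobStorageException (status 412) if the ETag moved.
    BlobRequestConditions conditions = new BlobRequestConditions().setIfMatch(props.getETag());
    blobClient.setMetadataWithResponse(newMetadata, conditions, null, Context.NONE);
  }
}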
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.
The class AzureIntegrationTest, method testRepairInconsistency.
/**
* Test that ABS/Cosmos inconsistencies get fixed on update.
*/
@Test
public void testRepairInconsistency() throws Exception {
  // Upload a blob
  PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
  BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false,
      BlobDataType.DATACHUNK);
  InputStream inputStream = getBlobInputStream(blobSize);
  long now = System.currentTimeMillis();
  CloudBlobMetadata cloudBlobMetadata =
      new CloudBlobMetadata(blobId, now, now + 60000, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
  uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest);
  // Remove blob record from Cosmos to create inconsistency
  azureDest.getCosmosDataAccessor().deleteMetadata(cloudBlobMetadata);
  // Now update the blob and see if it gets fixed
  azureDest.updateBlobExpiration(blobId, Utils.Infinite_Time, dummyCloudUpdateValidator);
  List<CloudBlobMetadata> resultList = azureDest.getCosmosDataAccessor()
      .queryMetadata(partitionId.toPathString(), "SELECT * FROM c WHERE c.id = '" + blobId.getID() + "'",
          azureDest.getAzureMetrics().missingKeysQueryTime);
  assertEquals("Expected record to exist", 1, resultList.size());
}
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.
The class AzureIntegrationTest, method testRepairAfterIncompleteCompaction.
/**
 * Test that an incomplete compaction gets fixed on update.
 */
@Test
public void testRepairAfterIncompleteCompaction() throws Exception {
  // Upload a blob
  PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
  BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false,
      BlobDataType.DATACHUNK);
  InputStream inputStream = getBlobInputStream(blobSize);
  long now = System.currentTimeMillis();
  CloudBlobMetadata cloudBlobMetadata =
      new CloudBlobMetadata(blobId, now, -1, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
  uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest);
  // Mark it deleted in the past
  long deletionTime = now - TimeUnit.DAYS.toMillis(7);
  assertTrue("Expected delete to return true",
      azureDest.deleteBlob(blobId, deletionTime, (short) 0, dummyCloudUpdateValidator));
  // Simulate incomplete compaction by purging it from ABS only
  azureDest.getAzureBlobDataAccessor().purgeBlobs(Collections.singletonList(cloudBlobMetadata));
  // Try to delete again (to trigger recovery), verify removed from Cosmos
  try {
    azureDest.deleteBlob(blobId, deletionTime, (short) 0, dummyCloudUpdateValidator);
  } catch (CloudStorageException cex) {
    assertEquals("Unexpected error code", HttpConstants.StatusCodes.NOTFOUND, cex.getStatusCode());
  }
  assertNull("Expected record to be purged from Cosmos", azureDest.getCosmosDataAccessor().getMetadataOrNull(blobId));
}
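This is the inverse repair of the previous test: the delete finds the blob already gone from ABS (404), so the destination finishes the interrupted compaction by purging the dangling Cosmos document. A sketch of that path, reusing the illustrative stand-in accessors from the previous sketch:

// Hypothetical sketch: a delete that finds the blob already purged from ABS
// finishes the interrupted compaction by removing the dangling Cosmos record.
boolean deleteWithRepair(BlobId blobId, long deletionTime) throws NotFoundException {
  try {
    return blobAccessor.markDeleted(blobId, deletionTime);
  } catch (NotFoundException e) {
    // Blob already gone from ABS; remove the dangling Cosmos record too.
    CloudBlobMetadata dangling = cosmosAccessor.getMetadataOrNull(blobId);
    if (dangling != null) {
      cosmosAccessor.deleteMetadata(dangling);
    }
    throw e; // surface the not-found to the caller, as the test expects
  }
}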
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.
The class AzureIntegrationTest, method testCompaction.
/**
* Test blob compaction.
* @throws Exception on error
*/
@Test
public void testCompaction() throws Exception {
  cleanup();
  int bucketCount = 20;
  PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
  // Upload blobs in various lifecycle states
  long now = System.currentTimeMillis();
  long creationTime = now - TimeUnit.DAYS.toMillis(7);
  for (int j = 0; j < bucketCount; j++) {
    Thread.sleep(20);
    logger.info("Uploading bucket {}", j);
    // Active blob (should not match)
    BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId,
        false, BlobDataType.DATACHUNK);
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize,
        CloudBlobMetadata.EncryptionOrigin.NONE);
    InputStream inputStream = getBlobInputStream(blobSize);
    assertTrue("Expected upload to return true",
        uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest));
    // Blob deleted before retention cutoff (should match)
    long timeOfDeath = now - TimeUnit.DAYS.toMillis(retentionPeriodDays + 1);
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false,
        BlobDataType.DATACHUNK);
    cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize,
        CloudBlobMetadata.EncryptionOrigin.NONE);
    cloudBlobMetadata.setDeletionTime(timeOfDeath);
    assertTrue("Expected upload to return true",
        uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent,
            azureDest));
    // Blob expired before retention cutoff (should match)
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false,
        BlobDataType.DATACHUNK);
    cloudBlobMetadata =
        new CloudBlobMetadata(blobId, creationTime, timeOfDeath, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
    assertTrue("Expected upload to return true",
        uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent,
            azureDest));
    // Blob deleted after retention cutoff (should not match)
    timeOfDeath = now - TimeUnit.HOURS.toMillis(1);
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false,
        BlobDataType.DATACHUNK);
    cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize,
        CloudBlobMetadata.EncryptionOrigin.NONE);
    cloudBlobMetadata.setDeletionTime(timeOfDeath);
    assertTrue("Expected upload to return true",
        uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent,
            azureDest));
    // Blob expired after retention cutoff (should not match)
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false,
        BlobDataType.DATACHUNK);
    cloudBlobMetadata =
        new CloudBlobMetadata(blobId, creationTime, timeOfDeath, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
    assertTrue("Expected upload to return true",
        uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent,
            azureDest));
  }
  // Run compaction; the dead-blobs query should match 2 * bucketCount blobs
  // (the deleted-before-cutoff and expired-before-cutoff blob in each bucket).
  String partitionPath = String.valueOf(testPartition);
  int compactedCount = azureDest.compactPartition(partitionPath);
  assertEquals("Unexpected count compacted", 2 * bucketCount, compactedCount);
  cleanup();
}
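The selection rule the final assertion relies on: a blob is compactible only if its deletion or expiration time falls before now minus the retention period, which is why exactly two of the five blobs uploaded per bucket qualify. A sketch of the cutoff arithmetic and a plausible shape for the dead-blobs Cosmos query; the query text is illustrative, and Ambry's actual query may be parameterized differently:

import java.util.concurrent.TimeUnit;

// Illustrative cutoff computation and query shape, not Ambry's verbatim query.
long retentionPeriodDays = 7; // assumed value for illustration
long retentionCutoff = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(retentionPeriodDays);
// A blob "dies" when it is deleted or expires; it becomes compactible once its
// time of death precedes the cutoff. A value of -1 means "never", so the lower
// bound of 1 excludes live blobs.
String deadBlobsQuery =
    "SELECT * FROM c WHERE (c.deletionTime BETWEEN 1 AND " + retentionCutoff + ")"
        + " OR (c.expirationTime BETWEEN 1 AND " + retentionCutoff + ")";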