
Example 6 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.

From the class AzureCloudDestinationTest, the setup method:

@Before
public void setup() throws Exception {
    long partition = 666;
    PartitionId partitionId = new MockPartitionId(partition, MockClusterMap.DEFAULT_PARTITION_CLASS);
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
    CloudBlobMetadata blobMetadata = new CloudBlobMetadata(blobId, 0, Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
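    // Mock the Azure Blob Storage service, batch, and block blob clients.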
    mockServiceClient = mock(BlobServiceClient.class);
    mockBlobBatchClient = mock(BlobBatchClient.class);
    mockBlockBlobClient = AzureBlobDataAccessorTest.setupMockBlobClient(mockServiceClient);
    mockBlobExistence(false);
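    // Mock the Cosmos AsyncDocumentClient and stub its document CRUD calls to return the metadata above.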
    mockumentClient = mock(AsyncDocumentClient.class);
    Observable<ResourceResponse<Document>> mockResponse = getMockedObservableForSingleResource(blobMetadata);
    when(mockumentClient.readDocument(anyString(), any(RequestOptions.class))).thenReturn(mockResponse);
    when(mockumentClient.upsertDocument(anyString(), any(Object.class), any(RequestOptions.class), anyBoolean())).thenReturn(mockResponse);
    when(mockumentClient.replaceDocument(any(Document.class), any(RequestOptions.class))).thenReturn(mockResponse);
    when(mockumentClient.deleteDocument(anyString(), any(RequestOptions.class))).thenReturn(mockResponse);
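    // Configuration properties for Azure Storage, Cosmos, and the cluster map.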
    configProps.setProperty(AzureCloudConfig.AZURE_STORAGE_CONNECTION_STRING, storageConnection);
    configProps.setProperty(AzureCloudConfig.COSMOS_ENDPOINT, "http://ambry.beyond-the-cosmos.com:443");
    configProps.setProperty(AzureCloudConfig.COSMOS_COLLECTION_LINK, "ambry/metadata");
    configProps.setProperty(AzureCloudConfig.COSMOS_DELETED_CONTAINER_COLLECTION_LINK, "ambry/deletedContainer");
    configProps.setProperty(AzureCloudConfig.COSMOS_KEY, "cosmos-key");
    configProps.setProperty("clustermap.cluster.name", "main");
    configProps.setProperty("clustermap.datacenter.name", "uswest");
    configProps.setProperty("clustermap.host.name", "localhost");
    configProps.setProperty(AzureCloudConfig.AZURE_STORAGE_AUTHORITY, "https://login.microsoftonline.com/test-account/");
    configProps.setProperty(AzureCloudConfig.AZURE_STORAGE_CLIENTID, "client-id");
    configProps.setProperty(AzureCloudConfig.AZURE_STORAGE_SECRET, "client-secret");
    configProps.setProperty(AzureCloudConfig.AZURE_STORAGE_ENDPOINT, "https://azure_storage.blob.core.windows.net");
    configProps.setProperty(AzureCloudConfig.AZURE_STORAGE_CLIENT_CLASS, "com.github.ambry.cloud.azure.ConnectionStringBasedStorageClient");
    vcrMetrics = new VcrMetrics(new MetricRegistry());
    azureMetrics = new AzureMetrics(new MetricRegistry());
    clusterMap = mock(ClusterMap.class);
    azureDest = new AzureCloudDestination(mockServiceClient, mockBlobBatchClient, mockumentClient, "foo", "bar", clusterName, azureMetrics, defaultAzureReplicationFeedType, clusterMap, false, configProps);
}
Also used : ClusterMap(com.github.ambry.clustermap.ClusterMap) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) VcrMetrics(com.github.ambry.cloud.VcrMetrics) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) RequestOptions(com.microsoft.azure.cosmosdb.RequestOptions) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) MetricRegistry(com.codahale.metrics.MetricRegistry) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) Document(com.microsoft.azure.cosmosdb.Document) ResourceResponse(com.microsoft.azure.cosmosdb.ResourceResponse) BlobServiceClient(com.azure.storage.blob.BlobServiceClient) BlobBatchClient(com.azure.storage.blob.batch.BlobBatchClient) BlobId(com.github.ambry.commons.BlobId) AsyncDocumentClient(com.microsoft.azure.cosmosdb.rx.AsyncDocumentClient) Before(org.junit.Before)
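
The call to getMockedObservableForSingleResource(blobMetadata) above refers to a helper defined elsewhere in the test class. A minimal sketch of what such a helper might look like, assuming rx.Observable (RxJava 1, which AsyncDocumentClient uses), Jackson's ObjectMapper for serializing the metadata, and Mockito for the ResourceResponse mock:

// Hypothetical sketch: wrap the metadata in a mocked ResourceResponse<Document> and
// return it as a single-element Observable, matching what readDocument/upsertDocument return.
private Observable<ResourceResponse<Document>> getMockedObservableForSingleResource(CloudBlobMetadata metadata) throws Exception {
    ResourceResponse<Document> mockResponse = mock(ResourceResponse.class);
    Document document = new Document(new ObjectMapper().writeValueAsString(metadata));
    when(mockResponse.getResource()).thenReturn(document);
    return Observable.just(mockResponse);
}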

Example 7 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.

From the class AzureIntegrationTest, the testConcurrentUpdates method:

/**
 * Test that concurrent updates fail when the precondition does not match.
 * We don't test retries here since CloudBlobStoreTest covers that.
 */
@Test
public void testConcurrentUpdates() throws Exception {
    PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
    BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
    InputStream inputStream = getBlobInputStream(blobSize);
    long now = System.currentTimeMillis();
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, now, now + 60000, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
    uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest);
    // Different instance to simulate concurrent update in separate session.
    AzureCloudDestination concurrentUpdater = getAzureDestination(verifiableProperties);
    String fieldName = CloudBlobMetadata.FIELD_UPLOAD_TIME;
    long newUploadTime = now++;
    // Case 1: concurrent modification to blob metadata.
    azureDest.getAzureBlobDataAccessor().setUpdateCallback(() -> concurrentUpdater.getAzureBlobDataAccessor().updateBlobMetadata(blobId, Collections.singletonMap(fieldName, newUploadTime), dummyCloudUpdateValidator));
    try {
        azureDest.updateBlobExpiration(blobId, ++now, dummyCloudUpdateValidator);
        fail("Expected 412 error");
    } catch (CloudStorageException csex) {
        // TODO: check nested exception is BlobStorageException with status code 412
        assertEquals("Expected update conflict", 1, azureDest.getAzureMetrics().blobUpdateConflictCount.getCount());
    }
    // Case 2: concurrent modification to Cosmos record.
    azureDest.getCosmosDataAccessor().setUpdateCallback(() -> concurrentUpdater.getCosmosDataAccessor().updateMetadata(blobId, Collections.singletonMap(fieldName, Long.toString(newUploadTime))));
    try {
        azureDest.updateBlobExpiration(blobId, ++now, dummyCloudUpdateValidator);
        fail("Expected 412 error");
    } catch (CloudStorageException csex) {
        assertEquals("Expected update conflict", 2, azureDest.getAzureMetrics().blobUpdateConflictCount.getCount());
    }
    azureDest.getCosmosDataAccessor().setUpdateCallback(null);
    try {
        azureDest.updateBlobExpiration(blobId, ++now, dummyCloudUpdateValidator);
    } catch (Exception ex) {
        fail("Expected update to succeed.");
    }
    assertEquals("Expected no new update conflict", 2, azureDest.getAzureMetrics().blobUpdateConflictCount.getCount());
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) ByteArrayInputStream(java.io.ByteArrayInputStream) InputStream(java.io.InputStream) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) CloudStorageException(com.github.ambry.cloud.CloudStorageException) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) CloudStorageException(com.github.ambry.cloud.CloudStorageException) IOException(java.io.IOException) Test(org.junit.Test)
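
The setUpdateCallback(...) hook used in both cases is what lets the test squeeze a competing write in between the read and the conditional (ETag-guarded) update. Below is a minimal sketch of how such a hook could be wired inside a data accessor; the names and structure are hypothetical, not the actual AzureBlobDataAccessor/CosmosDataAccessor implementation:

// Hypothetical sketch of a test-only callback hook inside a data accessor.
private Callable<?> updateCallback = null;

void setUpdateCallback(Callable<?> callback) {
    this.updateCallback = callback;
}

private void maybeRunUpdateCallback() throws Exception {
    if (updateCallback != null) {
        // Test hook: let a concurrent updater touch the record between the read and the
        // conditional write, so the write below fails with a 412 precondition error.
        updateCallback.call();
    }
}

The accessor would invoke maybeRunUpdateCallback() right after reading the current record and just before issuing the conditional write.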

Example 8 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.

From the class AzureIntegrationTest, the testRepairInconsistency method:

/**
 * Test that ABS/Cosmos inconsistencies get fixed on update.
 */
@Test
public void testRepairInconsistency() throws Exception {
    // Upload a blob
    PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
    BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
    InputStream inputStream = getBlobInputStream(blobSize);
    long now = System.currentTimeMillis();
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, now, now + 60000, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
    uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest);
    // Remove blob record from Cosmos to create inconsistency
    azureDest.getCosmosDataAccessor().deleteMetadata(cloudBlobMetadata);
    // Now update the blob and see if it gets fixed
    azureDest.updateBlobExpiration(blobId, Utils.Infinite_Time, dummyCloudUpdateValidator);
    List<CloudBlobMetadata> resultList = azureDest.getCosmosDataAccessor().queryMetadata(partitionId.toPathString(), "SELECT * FROM c WHERE c.id = '" + blobId.getID() + "'", azureDest.getAzureMetrics().missingKeysQueryTime);
    assertEquals("Expected record to exist", 1, resultList.size());
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) ByteArrayInputStream(java.io.ByteArrayInputStream) InputStream(java.io.InputStream) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) Test(org.junit.Test)
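
The lookup above interpolates the blob id directly into the query string, which queryMetadata accepts as a raw SQL string. For reference, the Cosmos SDK (com.microsoft.azure.cosmosdb) also supports parameterized queries; a sketch of the same lookup issued directly against an AsyncDocumentClient, assuming the client and collection link are in scope:

// Hypothetical sketch: the same id lookup expressed as a parameterized Cosmos query.
SqlQuerySpec querySpec = new SqlQuerySpec("SELECT * FROM c WHERE c.id = @blobId",
    new SqlParameterCollection(new SqlParameter("@blobId", blobId.getID())));
FeedOptions feedOptions = new FeedOptions();
feedOptions.setPartitionKey(new PartitionKey(partitionId.toPathString()));
Observable<FeedResponse<Document>> results =
    asyncDocumentClient.queryDocuments(collectionLink, querySpec, feedOptions);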

Example 9 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.

From the class AzureIntegrationTest, the testRepairAfterIncompleteCompaction method:

/**
 * Test that an incomplete compaction gets fixed on update.
 */
@Test
public void testRepairAfterIncompleteCompaction() throws Exception {
    // Upload a blob
    PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
    BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
    InputStream inputStream = getBlobInputStream(blobSize);
    long now = System.currentTimeMillis();
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, now, -1, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
    uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest);
    // Mark it deleted in the past
    long deletionTime = now - TimeUnit.DAYS.toMillis(7);
    assertTrue("Expected delete to return true", azureDest.deleteBlob(blobId, deletionTime, (short) 0, dummyCloudUpdateValidator));
    // Simulate incomplete compaction by purging it from ABS only
    azureDest.getAzureBlobDataAccessor().purgeBlobs(Collections.singletonList(cloudBlobMetadata));
    // Try to delete again (to trigger recovery), verify removed from Cosmos
    try {
        azureDest.deleteBlob(blobId, deletionTime, (short) 0, dummyCloudUpdateValidator);
    } catch (CloudStorageException cex) {
        assertEquals("Unexpected error code", HttpConstants.StatusCodes.NOTFOUND, cex.getStatusCode());
    }
    assertNull("Expected record to be purged from Cosmos", azureDest.getCosmosDataAccessor().getMetadataOrNull(blobId));
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) ByteArrayInputStream(java.io.ByteArrayInputStream) InputStream(java.io.InputStream) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) CloudStorageException(com.github.ambry.cloud.CloudStorageException) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) Test(org.junit.Test)
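
For context, a complete compaction purge removes the blob from both stores, and skipping the Cosmos half is exactly the inconsistency this test simulates. A rough sketch of the full sequence, using the two accessors already shown in this section (the helper name is hypothetical):

// Hypothetical sketch: a full purge touches Azure Blob Storage first, then Cosmos.
// Doing only the first step leaves the dangling Cosmos record this test recovers from.
private void purgeCompletely(CloudBlobMetadata metadata) throws Exception {
    azureDest.getAzureBlobDataAccessor().purgeBlobs(Collections.singletonList(metadata));
    azureDest.getCosmosDataAccessor().deleteMetadata(metadata);
}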

Example 10 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin.

From the class AzureIntegrationTest, the testCompaction method:

/**
 * Test blob compaction.
 * @throws Exception on error
 */
@Test
public void testCompaction() throws Exception {
    cleanup();
    int bucketCount = 20;
    PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
    // Upload blobs in various lifecycle states
    long now = System.currentTimeMillis();
    long creationTime = now - TimeUnit.DAYS.toMillis(7);
    for (int j = 0; j < bucketCount; j++) {
        Thread.sleep(20);
        logger.info("Uploading bucket {}", j);
        // Active blob
        BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
        CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
        InputStream inputStream = getBlobInputStream(blobSize);
        assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest));
        // Blob deleted before retention cutoff (should match)
        long timeOfDeath = now - TimeUnit.DAYS.toMillis(retentionPeriodDays + 1);
        blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
        cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
        cloudBlobMetadata.setDeletionTime(timeOfDeath);
        assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
        // Blob expired before retention cutoff (should match)
        blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
        cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, timeOfDeath, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
        assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
        // Blob deleted after retention cutoff
        timeOfDeath = now - TimeUnit.HOURS.toMillis(1);
        blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
        cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
        cloudBlobMetadata.setDeletionTime(timeOfDeath);
        assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
        // Blob expired after retention cutoff
        blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
        cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, timeOfDeath, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
        assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
    }
    // Compact the partition; the blobs deleted or expired before the retention cutoff (2 per bucket) should be purged.
    String partitionPath = String.valueOf(testPartition);
    int compactedCount = azureDest.compactPartition(partitionPath);
    assertEquals("Unexpected count compacted", 2 * bucketCount, compactedCount);
    cleanup();
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) ByteArrayInputStream(java.io.ByteArrayInputStream) InputStream(java.io.InputStream) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) Test(org.junit.Test)
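
The five blobs uploaded per bucket differ only in how their deletion or expiration time relates to the retention cutoff, and compaction purges the two per bucket that fall before it, which is why compactedCount is expected to be 2 * bucketCount. A hypothetical predicate capturing that rule (getDeletionTime/getExpirationTime are assumed accessors on CloudBlobMetadata):

// Hypothetical sketch: a blob is compactable once it has been deleted or expired
// for longer than the retention period.
static boolean isCompactionCandidate(CloudBlobMetadata metadata, long retentionPeriodDays, long now) {
    long retentionCutoff = now - TimeUnit.DAYS.toMillis(retentionPeriodDays);
    long deadTime = metadata.getDeletionTime() != Utils.Infinite_Time
        ? metadata.getDeletionTime()
        : metadata.getExpirationTime();
    return deadTime != Utils.Infinite_Time && deadTime < retentionCutoff;
}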

Aggregations

MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 66 usages
Test (org.junit.Test): 51 usages
PartitionId (com.github.ambry.clustermap.PartitionId): 33 usages
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 31 usages
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 26 usages
ArrayList (java.util.ArrayList): 26 usages
ReplicaId (com.github.ambry.clustermap.ReplicaId): 25 usages
BlobId (com.github.ambry.commons.BlobId): 23 usages
Port (com.github.ambry.network.Port): 20 usages
MockReplicaId (com.github.ambry.clustermap.MockReplicaId): 17 usages
MetricRegistry (com.codahale.metrics.MetricRegistry): 11 usages
CloudBlobMetadata (com.github.ambry.cloud.CloudBlobMetadata): 10 usages
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 9 usages
StorageManager (com.github.ambry.store.StorageManager): 9 usages
DataNodeId (com.github.ambry.clustermap.DataNodeId): 8 usages
BlobStoreTest (com.github.ambry.store.BlobStoreTest): 8 usages
Store (com.github.ambry.store.Store): 7 usages
ByteArrayInputStream (java.io.ByteArrayInputStream): 7 usages
Properties (java.util.Properties): 7 usages
NettyByteBufDataInputStream (com.github.ambry.utils.NettyByteBufDataInputStream): 6 usages