Example 36 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin, from the class AzureIntegrationTest, method testFindEntriesSince.

/**
 * Test findEntriesSince with the specified cloud token factory.
 * @param replicationCloudTokenFactory the factory to use.
 * @throws Exception on error
 */
private void testFindEntriesSince(String replicationCloudTokenFactory) throws Exception {
    logger.info("Testing findEntriesSince with {}", replicationCloudTokenFactory);
    testProperties.setProperty(ReplicationConfig.REPLICATION_CLOUD_TOKEN_FACTORY, replicationCloudTokenFactory);
    VerifiableProperties verifiableProperties = new VerifiableProperties(testProperties);
    ReplicationConfig replicationConfig = new ReplicationConfig(verifiableProperties);
    FindTokenFactory findTokenFactory = new FindTokenHelper(null, replicationConfig).getFindTokenFactoryFromReplicaType(ReplicaType.CLOUD_BACKED);
    azureDest = (AzureCloudDestination) new AzureCloudDestinationFactory(verifiableProperties, new MetricRegistry(), clusterMap).getCloudDestination();
    cleanup();
    PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
    String partitionPath = String.valueOf(testPartition);
    // Upload some blobs with different upload times
    int blobCount = 90;
    int chunkSize = 1000;
    int maxTotalSize = 20000;
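    // 90 blobs * 1000 bytes = 90,000 bytes of blob data; with at most 20,000 bytes
    // returned per query, the integer-division formula below expects 5 queries.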
    int expectedNumQueries = (blobCount * chunkSize) / maxTotalSize + 1;
    long now = System.currentTimeMillis();
    long startTime = now - TimeUnit.DAYS.toMillis(7);
    for (int j = 0; j < blobCount; j++) {
        BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
        InputStream inputStream = getBlobInputStream(chunkSize);
        CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, startTime, Utils.Infinite_Time, chunkSize, CloudBlobMetadata.EncryptionOrigin.VCR, vcrKmsContext, cryptoAgentFactory, chunkSize, (short) 0);
        cloudBlobMetadata.setUploadTime(startTime + j * 1000);
        assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, chunkSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest));
    }
    FindToken findToken = findTokenFactory.getNewFindToken();
    // Call findEntriesSince in a loop until no new entries are returned
    FindResult findResult;
    int numQueries = 0;
    int totalBlobsReturned = 0;
    do {
        findResult = findEntriesSinceWithRetry(partitionPath, findToken, maxTotalSize);
        findToken = findResult.getUpdatedFindToken();
        if (!findResult.getMetadataList().isEmpty()) {
            numQueries++;
        }
        totalBlobsReturned += findResult.getMetadataList().size();
    } while (!noMoreFindSinceEntries(findResult, findToken));
    assertEquals("Wrong number of queries", expectedNumQueries, numQueries);
    assertEquals("Wrong number of blobs", blobCount, totalBlobsReturned);
    assertEquals("Wrong byte count", blobCount * chunkSize, findToken.getBytesRead());
    cleanup();
}
Also used : ReplicationConfig(com.github.ambry.config.ReplicationConfig) VerifiableProperties(com.github.ambry.config.VerifiableProperties) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) FindTokenHelper(com.github.ambry.replication.FindTokenHelper) ByteArrayInputStream(java.io.ByteArrayInputStream) InputStream(java.io.InputStream) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) MetricRegistry(com.codahale.metrics.MetricRegistry) PartitionId(com.github.ambry.clustermap.PartitionId) FindTokenFactory(com.github.ambry.replication.FindTokenFactory) FindToken(com.github.ambry.replication.FindToken) BlobId(com.github.ambry.commons.BlobId) FindResult(com.github.ambry.cloud.FindResult)
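
The paging loop above exits through a noMoreFindSinceEntries helper that is not shown in this excerpt. A minimal sketch of what such a check could look like, as a hedged illustration rather than the project's actual helper, assuming the final page is simply the one that returns no metadata:

private boolean noMoreFindSinceEntries(FindResult findResult, FindToken updatedFindToken) {
    // Assumed termination rule: stop once a query returns an empty metadata page.
    // The real helper may also compare the updated token with the previous one.
    return findResult.getMetadataList().isEmpty();
}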

Example 37 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin, from the class AzureIntegrationTest, method testNormalFlow.

/**
 * Test normal operations.
 * @throws Exception on error
 */
@Test
public void testNormalFlow() throws Exception {
    PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
    BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
    byte[] uploadData = TestUtils.getRandomBytes(blobSize);
    InputStream inputStream = new ByteArrayInputStream(uploadData);
    long now = System.currentTimeMillis();
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, now, now + 60000, blobSize, CloudBlobMetadata.EncryptionOrigin.VCR, vcrKmsContext, cryptoAgentFactory, blobSize, (short) 0);
    // attempt undelete before uploading blob
    try {
        undeleteBlobWithRetry(blobId, (short) 1);
        fail("Undelete of a non existent blob should fail.");
    } catch (CloudStorageException cex) {
        assertEquals(cex.getStatusCode(), HttpConstants.StatusCodes.NOTFOUND);
    }
    assertTrue("Expected upload to return true", AzureTestUtils.uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest));
    // Get blob should return the same data
    verifyDownloadMatches(blobId, uploadData);
    // Try to upload same blob again
    assertFalse("Expected duplicate upload to return false", AzureTestUtils.uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, new ByteArrayInputStream(uploadData), cloudRequestAgent, azureDest));
    // ttl update
    long expirationTime = Utils.Infinite_Time;
    try {
        updateBlobExpirationWithRetry(blobId, expirationTime);
    } catch (Exception ex) {
        fail("Expected update to be successful");
    }
    CloudBlobMetadata metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
    assertEquals(expirationTime, metadata.getExpirationTime());
    // delete blob
    long deletionTime = now + 10000;
    // TODO add a test case here to verify life version after delete.
    assertTrue("Expected deletion to return true", cloudRequestAgent.doWithRetries(() -> azureDest.deleteBlob(blobId, deletionTime, (short) 0, dummyCloudUpdateValidator), "DeleteBlob", partitionId.toPathString()));
    metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
    assertEquals(deletionTime, metadata.getDeletionTime());
    // undelete blob
    assertEquals(undeleteBlobWithRetry(blobId, (short) 1), 1);
    metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
    assertEquals(metadata.getDeletionTime(), Utils.Infinite_Time);
    assertEquals(metadata.getLifeVersion(), 1);
    // undelete with a higher life version updates life version.
    assertEquals(undeleteBlobWithRetry(blobId, (short) 2), 2);
    metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
    assertEquals(metadata.getDeletionTime(), Utils.Infinite_Time);
    assertEquals(metadata.getLifeVersion(), 2);
    // delete after undelete.
    long newDeletionTime = now + 20000;
    // TODO add a test case here to verify life version after delete.
    assertTrue("Expected deletion to return true", cloudRequestAgent.doWithRetries(() -> azureDest.deleteBlob(blobId, newDeletionTime, (short) 3, dummyCloudUpdateValidator), "DeleteBlob", partitionId.toPathString()));
    metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
    assertEquals(newDeletionTime, metadata.getDeletionTime());
    // delete changes life version.
    assertEquals(metadata.getLifeVersion(), 3);
    // compact partition
    azureDest.compactPartition(partitionId.toPathString());
    assertTrue("Expected empty set after purge", getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).isEmpty());
    // Get blob should fail after purge
    try {
        verifyDownloadMatches(blobId, uploadData);
        fail("download blob should fail after data is purged");
    } catch (CloudStorageException csex) {
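        // expected: the blob data was purged by compactPartition above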
    }
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) ByteArrayInputStream(java.io.ByteArrayInputStream) InputStream(java.io.InputStream) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) CloudStorageException(com.github.ambry.cloud.CloudStorageException) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) IOException(java.io.IOException) Test(org.junit.Test)
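
The upload, TTL-update, undelete, and metadata-fetch calls above go through *WithRetry helpers that are not shown here. A hedged sketch of the shape such a helper might take, routing the destination call through CloudRequestAgent.doWithRetries just as the deleteBlob calls in this test do directly (the helper name, parameters, and signature are assumptions, not the actual AzureTestUtils code):

static boolean uploadBlobWithRetry(BlobId blobId, long blobSize, CloudBlobMetadata cloudBlobMetadata,
    InputStream inputStream, CloudRequestAgent cloudRequestAgent, CloudDestination dest) throws Exception {
    // Route the upload through the agent so transient cloud errors are retried
    // according to the agent's configured retry policy.
    return cloudRequestAgent.doWithRetries(
        () -> dest.uploadBlob(blobId, blobSize, cloudBlobMetadata, inputStream), "UploadBlob",
        blobId.getPartition().toPathString());
}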

Example 38 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin, from the class VcrRequestsTest, method validateRequestTest.

/**
 * Test for {@code VcrRequests#validateRequest}
 */
@Test
public void validateRequestTest() {
    // test for a null PartitionId
    Assert.assertEquals(vcrRequests.validateRequest(null, null, false), ServerErrorCode.Bad_Request);
    Assert.assertEquals(vcrRequests.validateRequest(null, null, true), ServerErrorCode.Bad_Request);
    // test for an unavailable PartitionId
    MockPartitionId unavailablePartitionId = new MockPartitionId();
    Assert.assertEquals(vcrRequests.validateRequest(unavailablePartitionId, null, false), ServerErrorCode.No_Error);
    Assert.assertEquals(vcrRequests.validateRequest(unavailablePartitionId, null, true), ServerErrorCode.No_Error);
    // test for an available PartitionId
    Assert.assertEquals(vcrRequests.validateRequest(availablePartitionId, null, false), ServerErrorCode.No_Error);
    Assert.assertEquals(vcrRequests.validateRequest(availablePartitionId, null, true), ServerErrorCode.No_Error);
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) Test(org.junit.Test)
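
The assertions encode the behavior expected of a VCR node: only a null partition is rejected, and any MockPartitionId passes validation whether or not it is locally available, because a VCR backs partitions in cloud storage rather than in local stores. A hedged sketch of an override with that behavior (the exact signature and checks in VcrRequests may differ):

ServerErrorCode validateRequest(PartitionId partition, RequestOrResponseType requestType,
    boolean skipPartitionAndDiskAvailableCheck) {
    // Assumed for illustration: no local-store availability check on a VCR node.
    return partition == null ? ServerErrorCode.Bad_Request : ServerErrorCode.No_Error;
}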

Example 39 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin, from the class CloudStorageManagerTest, method controlCompactionForBlobStoreTest.

/**
 * Test {@code CloudStorageManager#controlCompactionForBlobStore}
 */
@Test
public void controlCompactionForBlobStoreTest() throws IOException {
    CloudStorageManager cloudStorageManager = createNewCloudStorageManager();
    try {
        cloudStorageManager.controlCompactionForBlobStore(new MockPartitionId(), true);
        Assert.fail("CloudStorageManager controlCompactionForBlobStore should throw unimplemented exception");
    } catch (UnsupportedOperationException e) {
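        // expected: compaction control is not supported for a cloud-backed store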
    }
    try {
        cloudStorageManager.controlCompactionForBlobStore(new MockPartitionId(), false);
        Assert.fail("CloudStorageManager controlCompactionForBlobStore should throw unimplemented exception");
    } catch (UnsupportedOperationException e) {
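        // expected: compaction control is not supported for a cloud-backed store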
    }
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) Test(org.junit.Test)
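
The test relies on CloudStorageManager rejecting compaction control outright. A minimal sketch of that stub pattern, an assumed shape rather than the project's exact code:

public boolean controlCompactionForBlobStore(PartitionId id, boolean enabledStatus) {
    // Compaction of cloud-backed partitions is driven by the cloud destination,
    // not the store manager, so the operation is rejected.
    throw new UnsupportedOperationException("Method not supported");
}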

Example 40 with MockPartitionId

Use of com.github.ambry.clustermap.MockPartitionId in project ambry by linkedin, from the class CloudBlobMetadataTest, method setup.

@Before
public void setup() throws Exception {
    PartitionId partitionId = new MockPartitionId(partition, MockClusterMap.DEFAULT_PARTITION_CLASS);
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) Before(org.junit.Before)
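
The setup only builds the test BlobId; a check in this class would then typically wrap it in a CloudBlobMetadata. A hedged sketch (the shorter NONE-origin constructor, placeholder values, and the getId check are assumptions for illustration):

CloudBlobMetadata metadata = new CloudBlobMetadata(blobId, System.currentTimeMillis(), Utils.Infinite_Time,
    1024, CloudBlobMetadata.EncryptionOrigin.NONE);
assertEquals("Metadata id should match the blob id", blobId.getID(), metadata.getId());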

Aggregations

MockPartitionId (com.github.ambry.clustermap.MockPartitionId) 66 uses
Test (org.junit.Test) 51 uses
PartitionId (com.github.ambry.clustermap.PartitionId) 33 uses
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId) 31 uses
MockClusterMap (com.github.ambry.clustermap.MockClusterMap) 26 uses
ArrayList (java.util.ArrayList) 26 uses
ReplicaId (com.github.ambry.clustermap.ReplicaId) 25 uses
BlobId (com.github.ambry.commons.BlobId) 23 uses
Port (com.github.ambry.network.Port) 20 uses
MockReplicaId (com.github.ambry.clustermap.MockReplicaId) 17 uses
MetricRegistry (com.codahale.metrics.MetricRegistry) 11 uses
CloudBlobMetadata (com.github.ambry.cloud.CloudBlobMetadata) 10 uses
VerifiableProperties (com.github.ambry.config.VerifiableProperties) 9 uses
StorageManager (com.github.ambry.store.StorageManager) 9 uses
DataNodeId (com.github.ambry.clustermap.DataNodeId) 8 uses
BlobStoreTest (com.github.ambry.store.BlobStoreTest) 8 uses
Store (com.github.ambry.store.Store) 7 uses
ByteArrayInputStream (java.io.ByteArrayInputStream) 7 uses
Properties (java.util.Properties) 7 uses
NettyByteBufDataInputStream (com.github.ambry.utils.NettyByteBufDataInputStream) 6 uses