Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
From the class AzureIntegrationTest, method testFindEntriesSince.
/**
* Test findEntriesSince with specified cloud token factory.
* @param replicationCloudTokenFactory the factory to use.
* @throws Exception on error
*/
private void testFindEntriesSince(String replicationCloudTokenFactory) throws Exception {
logger.info("Testing findEntriesSince with {}", replicationCloudTokenFactory);
testProperties.setProperty(ReplicationConfig.REPLICATION_CLOUD_TOKEN_FACTORY, replicationCloudTokenFactory);
VerifiableProperties verifiableProperties = new VerifiableProperties(testProperties);
ReplicationConfig replicationConfig = new ReplicationConfig(verifiableProperties);
FindTokenFactory findTokenFactory = new FindTokenHelper(null, replicationConfig).getFindTokenFactoryFromReplicaType(ReplicaType.CLOUD_BACKED);
azureDest = (AzureCloudDestination) new AzureCloudDestinationFactory(verifiableProperties, new MetricRegistry(), clusterMap).getCloudDestination();
cleanup();
PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
String partitionPath = String.valueOf(testPartition);
// Upload some blobs with different upload times
int blobCount = 90;
int chunkSize = 1000;
int maxTotalSize = 20000;
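// 90 blobs x 1,000 bytes = 90,000 bytes in total; with at most 20,000 bytes returned per query, the test expects 90,000 / 20,000 + 1 = 5 non-empty queries.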
int expectedNumQueries = (blobCount * chunkSize) / maxTotalSize + 1;
long now = System.currentTimeMillis();
long startTime = now - TimeUnit.DAYS.toMillis(7);
for (int j = 0; j < blobCount; j++) {
  BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
  InputStream inputStream = getBlobInputStream(chunkSize);
  CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, startTime, Utils.Infinite_Time, chunkSize, CloudBlobMetadata.EncryptionOrigin.VCR, vcrKmsContext, cryptoAgentFactory, chunkSize, (short) 0);
  cloudBlobMetadata.setUploadTime(startTime + j * 1000);
  assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, chunkSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest));
}
FindToken findToken = findTokenFactory.getNewFindToken();
// Call findEntriesSince in a loop until no new entries are returned
FindResult findResult;
int numQueries = 0;
int totalBlobsReturned = 0;
do {
  findResult = findEntriesSinceWithRetry(partitionPath, findToken, maxTotalSize);
  findToken = findResult.getUpdatedFindToken();
  if (!findResult.getMetadataList().isEmpty()) {
    numQueries++;
  }
  totalBlobsReturned += findResult.getMetadataList().size();
} while (!noMoreFindSinceEntries(findResult, findToken));
assertEquals("Wrong number of queries", expectedNumQueries, numQueries);
assertEquals("Wrong number of blobs", blobCount, totalBlobsReturned);
assertEquals("Wrong byte count", blobCount * chunkSize, findToken.getBytesRead());
cleanup();
}
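The loop above always issues one final, empty query before terminating, which is why numQueries is only incremented for non-empty results. The noMoreFindSinceEntries helper is not shown on this page; a minimal sketch of what such a termination check could look like (a hypothetical helper for illustration, not Ambry's actual implementation):

// Hypothetical termination check, for illustration only: treat an empty
// result as the signal that the query (or change feed) has been drained.
private boolean noMoreFindSinceEntries(FindResult findResult, FindToken updatedToken) {
  return findResult.getMetadataList().isEmpty();
}

The real helper may also need to inspect the token produced by the configured replicationCloudTokenFactory, since change-feed tokens and update-time tokens advance differently.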
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
From the class AzureIntegrationTest, method testNormalFlow.
/**
* Test normal operations.
* @throws Exception on error
*/
@Test
public void testNormalFlow() throws Exception {
PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
byte[] uploadData = TestUtils.getRandomBytes(blobSize);
InputStream inputStream = new ByteArrayInputStream(uploadData);
long now = System.currentTimeMillis();
CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, now, now + 60000, blobSize, CloudBlobMetadata.EncryptionOrigin.VCR, vcrKmsContext, cryptoAgentFactory, blobSize, (short) 0);
// attempt undelete before uploading blob
try {
  undeleteBlobWithRetry(blobId, (short) 1);
  fail("Undelete of a non existent blob should fail.");
} catch (CloudStorageException cex) {
  assertEquals(cex.getStatusCode(), HttpConstants.StatusCodes.NOTFOUND);
}
assertTrue("Expected upload to return true", AzureTestUtils.uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest));
// Get blob should return the same data
verifyDownloadMatches(blobId, uploadData);
// Try to upload same blob again
assertFalse("Expected duplicate upload to return false", AzureTestUtils.uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, new ByteArrayInputStream(uploadData), cloudRequestAgent, azureDest));
// ttl update
long expirationTime = Utils.Infinite_Time;
try {
  updateBlobExpirationWithRetry(blobId, expirationTime);
} catch (Exception ex) {
  fail("Expected update to be successful");
}
CloudBlobMetadata metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
assertEquals(expirationTime, metadata.getExpirationTime());
// delete blob
long deletionTime = now + 10000;
// TODO add a test case here to verify life version after delete.
assertTrue("Expected deletion to return true", cloudRequestAgent.doWithRetries(() -> azureDest.deleteBlob(blobId, deletionTime, (short) 0, dummyCloudUpdateValidator), "DeleteBlob", partitionId.toPathString()));
metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
assertEquals(deletionTime, metadata.getDeletionTime());
// undelete blob
assertEquals(undeleteBlobWithRetry(blobId, (short) 1), 1);
metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
assertEquals(metadata.getDeletionTime(), Utils.Infinite_Time);
assertEquals(metadata.getLifeVersion(), 1);
// undelete with a higher life version updates life version.
assertEquals(undeleteBlobWithRetry(blobId, (short) 2), 2);
metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
assertEquals(metadata.getDeletionTime(), Utils.Infinite_Time);
assertEquals(metadata.getLifeVersion(), 2);
// delete after undelete.
long newDeletionTime = now + 20000;
// TODO add a test case here to verify life version after delete.
assertTrue("Expected deletion to return true", cloudRequestAgent.doWithRetries(() -> azureDest.deleteBlob(blobId, newDeletionTime, (short) 3, dummyCloudUpdateValidator), "DeleteBlob", partitionId.toPathString()));
metadata = getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).get(blobId.getID());
assertEquals(newDeletionTime, metadata.getDeletionTime());
// delete changes life version.
assertEquals(metadata.getLifeVersion(), 3);
// compact partition
azureDest.compactPartition(partitionId.toPathString());
assertTrue("Expected empty set after purge", getBlobMetadataWithRetry(Collections.singletonList(blobId), partitionId.toPathString(), cloudRequestAgent, azureDest).isEmpty());
// Get blob should fail after purge
try {
  verifyDownloadMatches(blobId, uploadData);
  fail("download blob should fail after data is purged");
} catch (CloudStorageException csex) {
  // expected: the blob no longer exists after compaction purged it
}
}
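Most helpers in this test (uploadBlobWithRetry, undeleteBlobWithRetry, getBlobMetadataWithRetry) presumably follow the same pattern the delete calls above use directly: wrap the CloudDestination call in cloudRequestAgent.doWithRetries so transient Azure failures are retried. A rough sketch of that pattern, assuming AzureTestUtils simply delegates this way (the actual helper signatures may differ):

// Assumed sketch of the retry-wrapper pattern, not copied from AzureTestUtils:
// run the destination call inside CloudRequestAgent.doWithRetries so that
// transient cloud errors are retried before the assertion sees a result.
static boolean uploadBlobWithRetry(BlobId blobId, long size, CloudBlobMetadata metadata, InputStream data,
    CloudRequestAgent agent, CloudDestination dest) throws Exception {
  return agent.doWithRetries(() -> dest.uploadBlob(blobId, size, metadata, data), "UploadBlob",
      blobId.getPartition().toPathString());
}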
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
From the class VcrRequestsTest, method validateRequestTest.
/**
* Test for {@code VcrRequests#validateRequest}
*/
@Test
public void validateRequestTest() {
// test for null PartitionId
Assert.assertEquals(vcrRequests.validateRequest(null, null, false), ServerErrorCode.Bad_Request);
Assert.assertEquals(vcrRequests.validateRequest(null, null, true), ServerErrorCode.Bad_Request);
// test for an unavailable PartitionId
MockPartitionId unavailablePartitionId = new MockPartitionId();
Assert.assertEquals(vcrRequests.validateRequest(unavailablePartitionId, null, false), ServerErrorCode.No_Error);
Assert.assertEquals(vcrRequests.validateRequest(unavailablePartitionId, null, true), ServerErrorCode.No_Error);
// test for an available PartitionId
Assert.assertEquals(vcrRequests.validateRequest(availablePartitionId, null, false), ServerErrorCode.No_Error);
Assert.assertEquals(vcrRequests.validateRequest(availablePartitionId, null, true), ServerErrorCode.No_Error);
}
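The assertions show that only a null partition is rejected: a VCR node has no local blob stores to check, so both the "unavailable" and the "available" MockPartitionId validate successfully. A hedged sketch of an override with that behavior (the signature shape and the second parameter are assumptions, not the real VcrRequests code):

// Illustrative override only: a VCR node serves cloud-backed partitions, so no
// local partition/disk availability check is performed here.
@Override
protected ServerErrorCode validateRequest(PartitionId partition, RequestOrResponseType requestType,
    boolean skipPartitionAvailableCheck) {
  return partition == null ? ServerErrorCode.Bad_Request : ServerErrorCode.No_Error;
}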
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
From the class CloudStorageManagerTest, method controlCompactionForBlobStoreTest.
/**
* Test {@code CloudStorageManager#controlCompactionForBlobStore}
*/
@Test
public void controlCompactionForBlobStoreTest() throws IOException {
CloudStorageManager cloudStorageManager = createNewCloudStorageManager();
try {
  cloudStorageManager.controlCompactionForBlobStore(new MockPartitionId(), true);
  Assert.fail("CloudStorageManager controlCompactionForBlobStore should throw unimplemented exception");
} catch (UnsupportedOperationException e) {
  // expected: compaction control is not supported for cloud-backed stores
}
try {
  cloudStorageManager.controlCompactionForBlobStore(new MockPartitionId(), false);
  Assert.fail("CloudStorageManager controlCompactionForBlobStore should throw unimplemented exception");
} catch (UnsupportedOperationException e) {
  // expected: compaction control is not supported for cloud-backed stores
}
}
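The test pins down the contract that compaction control is simply unsupported on the cloud path. A minimal sketch of a method satisfying that contract (an assumption for illustration; the real signature in CloudStorageManager may differ):

// Assumed sketch: the cloud storage manager has no local blob stores whose
// compaction could be enabled or disabled, so the call is rejected outright.
public boolean controlCompactionForBlobStore(PartitionId id, boolean enabled) {
  throw new UnsupportedOperationException("Method not supported");
}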
Use of com.github.ambry.clustermap.MockPartitionId in project ambry by LinkedIn.
From the class CloudBlobMetadataTest, method setup.
@Before
public void setup() throws Exception {
PartitionId partitionId = new MockPartitionId(partition, MockClusterMap.DEFAULT_PARTITION_CLASS);
blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
}
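The @Before method only creates the blobId fixture; a test method would then build CloudBlobMetadata instances around it. A small sketch using the same long constructor seen in the integration tests above (the time, size, and encryption-context values are placeholders, not this test's actual fixtures):

// Placeholder values for illustration; the real test methods supply their own fixtures.
long creationTime = System.currentTimeMillis();
CloudBlobMetadata metadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, 1024,
    CloudBlobMetadata.EncryptionOrigin.VCR, "vcrKmsContext", "cryptoAgentFactory", 1024, (short) 0);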