Use of com.github.ambry.cloud.CloudBlobMetadata in the ambry project by LinkedIn.
From the class CosmosDataAccessor, method createMetadataFromDocument.
/**
 * Build a {@link CloudBlobMetadata} instance from a Cosmos {@link Document}.
 * @param document the {@link Document} to convert.
 * @return the populated {@link CloudBlobMetadata}.
 */
private CloudBlobMetadata createMetadataFromDocument(Document document) {
  CloudBlobMetadata metadata = document.toObject(CloudBlobMetadata.class);
  // The last-update time lives in a Cosmos-specific column, so it is not picked up
  // by toObject() and must be copied over explicitly.
  metadata.setLastUpdateTime(document.getLong(COSMOS_LAST_UPDATED_COLUMN));
  return metadata;
}
Use of com.github.ambry.cloud.CloudBlobMetadata in the ambry project by LinkedIn.
From the class CloudBlobMetadataTest, method testExpiration.
/**
 * Verify serde behavior for a blob that carries a TTL (expiration time set).
 */
@Test
public void testExpiration() throws Exception {
  CloudBlobMetadata ttlBlobMetadata = new CloudBlobMetadata(blobId, now, futureTime, 1024, EncryptionOrigin.NONE);
  ttlBlobMetadata.setExpirationTime(futureTime);
  // With a TTL set, the expiration field joins the always-serialized group
  // and leaves the rarely-serialized group.
  String[] expected = ArrayUtils.addAll(FIELDS_ALWAYS_SET, FIELD_EXPIRATION_TIME);
  String[] unexpected = ArrayUtils.removeElement(FIELDS_RARELY_SET, FIELD_EXPIRATION_TIME);
  verifySerde(ttlBlobMetadata, expected, unexpected);
}
Use of com.github.ambry.cloud.CloudBlobMetadata in the ambry project by LinkedIn.
From the class CloudBlobMetadataTest, method verifySerde.
/**
 * Check that exactly the expected fields are serialized, and that a JSON
 * serialize/deserialize round trip yields an equal record.
 * @param blobMetadata the {@link CloudBlobMetadata} under test.
 * @param expectedFields field names that must appear in the serialized form.
 * @param unexpectedFields field names that must not appear in the serialized form.
 * @throws JsonProcessingException if Jackson serialization fails.
 */
private void verifySerde(CloudBlobMetadata blobMetadata, String[] expectedFields, String[] unexpectedFields) throws JsonProcessingException {
  Map<String, String> serializedProperties = blobMetadata.toMap();
  for (String expected : expectedFields) {
    assertTrue("Expected field " + expected, serializedProperties.containsKey(expected));
  }
  for (String unexpected : unexpectedFields) {
    assertFalse("Unexpected field " + unexpected, serializedProperties.containsKey(unexpected));
  }
  // Round-trip through JSON and confirm the deserialized record equals the original.
  String json = mapperObj.writeValueAsString(blobMetadata);
  CloudBlobMetadata roundTripped = mapperObj.readValue(json, CloudBlobMetadata.class);
  assertEquals("Expected equality", blobMetadata, roundTripped);
}
Use of com.github.ambry.cloud.CloudBlobMetadata in the ambry project by LinkedIn.
From the class CloudBlobMetadataTest, method testPermanent.
/**
 * Verify serde behavior for a permanent blob (no expiration time).
 */
@Test
public void testPermanent() throws Exception {
  // Expiration of -1 marks the blob as permanent.
  CloudBlobMetadata permanentBlobMetadata = new CloudBlobMetadata(blobId, now, -1, 1024, EncryptionOrigin.NONE);
  // Without an expiration, only the always-set fields should be serialized.
  verifySerde(permanentBlobMetadata, FIELDS_ALWAYS_SET, FIELDS_RARELY_SET);
}
Use of com.github.ambry.cloud.CloudBlobMetadata in the ambry project by LinkedIn.
From the class CloudAndStoreReplicationTest, method basicCloudRecoveryTest.
/**
 * Test replication from vcr to server nodes, and from server to server nodes. Creates one vcr node
 * and two server nodes. Uploads data to the vcr node and verifies that it has been replicated,
 * then uploads data to one of the server nodes and verifies that it has been replicated.
 * @throws Exception If an exception happens.
 */
@Test
public void basicCloudRecoveryTest() throws Exception {
  // Upload a set of blobs directly to the cloud destination, recording each blob's size.
  Map<BlobId, Integer> blobIdToSizeMap = new HashMap<>();
  for (BlobId blobId : cloudBlobIds) {
    int size = Utils.getRandomShort(TestUtils.RANDOM);
    PutMessageFormatInputStream putStream =
        ServerTestUtil.getPutMessageInputStreamForBlob(blobId, size, blobIdToSizeMap, accountId, containerId);
    CloudBlobMetadata metadata = new CloudBlobMetadata(blobId, System.currentTimeMillis(), Utils.Infinite_Time,
        putStream.getSize(), CloudBlobMetadata.EncryptionOrigin.NONE);
    latchBasedInMemoryCloudDestination.uploadBlob(blobId, putStream.getSize(), metadata, putStream);
  }
  // Upload a second batch of blobs to one of the server nodes.
  sendBlobToDataNode(partitionLeaderRecoveryNode, Utils.getRandomShort(TestUtils.RANDOM), blobIdToSizeMap);
  // Block until the servers have attempted to download every cloud blob.
  assertTrue("Did not recover all blobs in 1 minute",
      latchBasedInMemoryCloudDestination.awaitDownload(1, TimeUnit.MINUTES));
  // NOTE(review): fixed sleep to let server-to-server replication finish; a polling
  // check would make this test less timing-sensitive — confirm before changing.
  Thread.sleep(10000);
  // Verify cloud-to-store and store-to-store replication by fetching every blob from the servers.
  testGetOnServerNodes(blobIdToSizeMap);
}
Aggregations