Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by linkedin: class CosmosDataAccessorTest, method testQueryChangeFeedNormal.
/**
 * Test change feed query.
 */
@Test
public void testQueryChangeFeedNormal() throws Exception {
  Observable<FeedResponse<Document>> mockResponse = mock(Observable.class);
  List<Document> docList =
      Collections.singletonList(AzureTestUtils.createDocumentFromCloudBlobMetadata(blobMetadata));
  mockObservableForChangeFeedQuery(docList, mockResponse);
  when(mockumentClient.queryDocumentChangeFeed(anyString(), any(ChangeFeedOptions.class))).thenReturn(mockResponse);
  // Test with a non-null request continuation token.
  List<CloudBlobMetadata> metadataList = doQueryChangeFeed("test");
  assertEquals("Expected single entry", 1, metadataList.size());
  CloudBlobMetadata outputMetadata = metadataList.get(0);
  assertEquals("Returned metadata does not match original", blobMetadata, outputMetadata);
  assertEquals(1, azureMetrics.changeFeedQueryCount.getCount());
  assertEquals(0, azureMetrics.changeFeedQueryFailureCount.getCount());
  // Test with a null continuation token.
  metadataList = doQueryChangeFeed(null);
  assertEquals("Expected single entry", 1, metadataList.size());
  outputMetadata = metadataList.get(0);
  assertEquals("Returned metadata does not match original", blobMetadata, outputMetadata);
  assertEquals(2, azureMetrics.changeFeedQueryCount.getCount());
  assertEquals(0, azureMetrics.changeFeedQueryFailureCount.getCount());
  // Test that an exception thrown by queryDocumentChangeFeed is surfaced and counted.
  when(mockumentClient.queryDocumentChangeFeed(anyString(), any(ChangeFeedOptions.class))).thenThrow(
      new RuntimeException("mock exception", new DocumentClientException(404)));
  try {
    doQueryChangeFeed(null);
    fail("Expected DocumentClientException");
  } catch (DocumentClientException e) {
    // Expected.
  }
  assertEquals(3, azureMetrics.changeFeedQueryCount.getCount());
  assertEquals(1, azureMetrics.changeFeedQueryFailureCount.getCount());
}
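Note: the helpers mockObservableForChangeFeedQuery and doQueryChangeFeed are defined elsewhere in CosmosDataAccessorTest and are not part of this excerpt. A minimal sketch of what doQueryChangeFeed might look like, assuming the accessor exposes a queryChangeFeed method that appends results to a caller-supplied list (the method name, argument order, and partition path below are assumptions, not taken from this listing):

// Hypothetical sketch; the real helper in CosmosDataAccessorTest may differ.
private List<CloudBlobMetadata> doQueryChangeFeed(String continuationToken) throws Exception {
  List<CloudBlobMetadata> changeFeed = new ArrayList<>();
  // Assumed accessor API: run the change feed query and collect results into changeFeed.
  cosmosAccessor.queryChangeFeed(continuationToken, 1000, changeFeed, partitionPath,
      azureMetrics.changeFeedQueryTime);
  return changeFeed;
}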
Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by linkedin: class ServerTestUtil, method endToEndCloudBackupTest.
/**
 * Tests that blobs put to a data node can be backed up by {@link com.github.ambry.cloud.VcrReplicationManager}.
 * @param cluster the {@link MockCluster} of data nodes.
 * @param zkConnectString the ZK endpoint used to establish the VCR cluster.
 * @param vcrClusterName the name of the VCR cluster.
 * @param dataNode the data node where blobs are originally put.
 * @param clientSSLConfig the {@link SSLConfig}.
 * @param clientSSLSocketFactory the {@link SSLSocketFactory}.
 * @param notificationSystem the {@link MockNotificationSystem} used to track blob events in the {@link MockCluster}.
 * @param vcrSSLProps SSL-related properties for the VCR. Can be {@code null}.
 * @param doTtlUpdate send a ttlUpdate request for each blob if {@code true}.
 */
static void endToEndCloudBackupTest(MockCluster cluster, String zkConnectString, String vcrClusterName,
    DataNodeId dataNode, SSLConfig clientSSLConfig, SSLSocketFactory clientSSLSocketFactory,
    MockNotificationSystem notificationSystem, Properties vcrSSLProps, boolean doTtlUpdate) throws Exception {
  int blobBackupCount = 10;
  int blobSize = 100;
  int userMetaDataSize = 100;
  ClusterAgentsFactory clusterAgentsFactory = cluster.getClusterAgentsFactory();
  // Send blobs to the data node.
  byte[] userMetadata = new byte[userMetaDataSize];
  byte[] data = new byte[blobSize];
  short accountId = Utils.getRandomShort(TestUtils.RANDOM);
  short containerId = Utils.getRandomShort(TestUtils.RANDOM);
  long ttl = doTtlUpdate ? TimeUnit.DAYS.toMillis(1) : Utils.Infinite_Time;
  BlobProperties properties =
      new BlobProperties(blobSize, "serviceid1", null, null, false, ttl, cluster.time.milliseconds(), accountId,
          containerId, false, null, null, null);
  TestUtils.RANDOM.nextBytes(userMetadata);
  TestUtils.RANDOM.nextBytes(data);
  Port port;
  if (clientSSLConfig == null) {
    port = new Port(dataNode.getPort(), PortType.PLAINTEXT);
  } else {
    port = new Port(dataNode.getSSLPort(), PortType.SSL);
  }
  ConnectedChannel channel =
      getBlockingChannelBasedOnPortType(port, "localhost", clientSSLSocketFactory, clientSSLConfig);
  channel.connect();
  CountDownLatch latch = new CountDownLatch(1);
  DirectSender runnable =
      new DirectSender(cluster, channel, blobBackupCount, data, userMetadata, properties, null, latch);
  Thread threadToRun = new Thread(runnable);
  threadToRun.start();
  assertTrue("Did not put all blobs in 2 minutes", latch.await(2, TimeUnit.MINUTES));
  // TODO: remove this temp fix after fixing the race condition in MockCluster/MockNotificationSystem.
  Thread.sleep(3000);
  List<BlobId> blobIds = runnable.getBlobIds();
  for (BlobId blobId : blobIds) {
    notificationSystem.awaitBlobCreations(blobId.getID());
    if (doTtlUpdate) {
      updateBlobTtl(channel, blobId, cluster.time.milliseconds());
    }
  }
  HelixControllerManager helixControllerManager =
      VcrTestUtil.populateZkInfoAndStartController(zkConnectString, vcrClusterName, cluster.getClusterMap());
  // Start the VCR and CloudBackupManager.
  Properties props =
      VcrTestUtil.createVcrProperties(dataNode.getDatacenterName(), vcrClusterName, zkConnectString, 12310, 12410,
          12510, vcrSSLProps);
  LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination =
      new LatchBasedInMemoryCloudDestination(blobIds, clusterAgentsFactory.getClusterMap());
  CloudDestinationFactory cloudDestinationFactory =
      new LatchBasedInMemoryCloudDestinationFactory(latchBasedInMemoryCloudDestination);
  VcrServer vcrServer =
      VcrTestUtil.createVcrServer(new VerifiableProperties(props), clusterAgentsFactory, notificationSystem,
          cloudDestinationFactory);
  vcrServer.startup();
  // Wait for the backup to complete.
  assertTrue("Did not backup all blobs in 2 minutes",
      latchBasedInMemoryCloudDestination.awaitUpload(2, TimeUnit.MINUTES));
  Map<String, CloudBlobMetadata> cloudBlobMetadataMap = latchBasedInMemoryCloudDestination.getBlobMetadata(blobIds);
  for (BlobId blobId : blobIds) {
    CloudBlobMetadata cloudBlobMetadata = cloudBlobMetadataMap.get(blobId.toString());
    assertNotNull("cloudBlobMetadata should not be null", cloudBlobMetadata);
    assertEquals("AccountId mismatch", accountId, cloudBlobMetadata.getAccountId());
    assertEquals("ContainerId mismatch", containerId, cloudBlobMetadata.getContainerId());
    // If ttlUpdate was requested, the blobs were made permanent above, so expiration is infinite either way.
    assertEquals("Expiration time mismatch", Utils.Infinite_Time, cloudBlobMetadata.getExpirationTime());
    // TODO: verify other metadata and blob data.
  }
  vcrServer.shutdown();
  helixControllerManager.syncStop();
}
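A hypothetical invocation, to show how the parameters fit together (the cluster, data node, and notification system would come from the surrounding test fixture; the cluster name literal below is illustrative only):

// Illustrative call: null SSL arguments select the PLAINTEXT port path above.
endToEndCloudBackupTest(cluster, zkConnectString, "vcrCluster", dataNode,
    null /* clientSSLConfig */, null /* clientSSLSocketFactory */,
    notificationSystem, null /* vcrSSLProps */, true /* doTtlUpdate */);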
Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by linkedin: class VcrRecoveryTest, method cloudRecoveryTestForLargeBlob.
/**
 * Test recovery from one VCR node to one disk-based data node for large blobs.
 * Creates a VCR node and a disk-based data node, uploads data to the VCR node, and verifies that it has been downloaded.
 * @throws Exception If an exception happens.
 */
@Test
public void cloudRecoveryTestForLargeBlob() throws Exception {
  // Create blobs and upload them to the cloud destination.
  int userMetaDataSize = 100;
  byte[] userMetadata = new byte[userMetaDataSize];
  TestUtils.RANDOM.nextBytes(userMetadata);
  Map<BlobId, Integer> blobIdToSizeMap = new HashMap<>();
  // Ambry currently supports a max blob size of 4 MB.
  int blobSize = FOUR_MB_SZ;
  for (BlobId blobId : blobIds) {
    PutMessageFormatInputStream putMessageFormatInputStream =
        ServerTestUtil.getPutMessageInputStreamForBlob(blobId, blobSize, blobIdToSizeMap, accountId, containerId);
    long time = System.currentTimeMillis();
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, time, Utils.Infinite_Time,
        putMessageFormatInputStream.getSize(), CloudBlobMetadata.EncryptionOrigin.NONE);
    latchBasedInMemoryCloudDestination.uploadBlob(blobId, putMessageFormatInputStream.getSize(), cloudBlobMetadata,
        putMessageFormatInputStream);
  }
  // Wait for the download attempt.
  assertTrue("Did not recover all blobs in 1 minute",
      latchBasedInMemoryCloudDestination.awaitDownload(1, TimeUnit.MINUTES));
  // Wait for replication to complete.
  Thread.sleep(10000);
  // Test recovery by sending get requests to the recovery node.
  testGetOnRecoveryNode(blobIdToSizeMap);
}
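The upload loop above is repeated almost verbatim in basicCloudRecoveryTest below. One way to factor it out (a hypothetical helper, not present in VcrRecoveryTest; the name is illustrative) is:

// Hypothetical refactoring of the shared upload loop, using the same test
// fields (accountId, containerId, latchBasedInMemoryCloudDestination) as above.
private void uploadBlobsToCloud(Collection<BlobId> ids, int blobSize,
    Map<BlobId, Integer> blobIdToSizeMap) throws Exception {
  for (BlobId blobId : ids) {
    PutMessageFormatInputStream stream =
        ServerTestUtil.getPutMessageInputStreamForBlob(blobId, blobSize, blobIdToSizeMap, accountId, containerId);
    CloudBlobMetadata metadata = new CloudBlobMetadata(blobId, System.currentTimeMillis(),
        Utils.Infinite_Time, stream.getSize(), CloudBlobMetadata.EncryptionOrigin.NONE);
    latchBasedInMemoryCloudDestination.uploadBlob(blobId, stream.getSize(), metadata, stream);
  }
}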
Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by linkedin: class VcrRecoveryTest, method basicCloudRecoveryTest.
/**
 * Test recovery from one VCR node to one disk-based data node.
 * Creates a VCR node and a disk-based data node, uploads data to the VCR node, and verifies that it has been downloaded.
 * @throws Exception If an exception happens.
 */
@Test
public void basicCloudRecoveryTest() throws Exception {
  // Create blobs and upload them to the cloud destination.
  int userMetaDataSize = 100;
  byte[] userMetadata = new byte[userMetaDataSize];
  TestUtils.RANDOM.nextBytes(userMetadata);
  Map<BlobId, Integer> blobIdToSizeMap = new HashMap<>();
  for (BlobId blobId : blobIds) {
    // Each blob gets a random (small) size.
    int blobSize = Utils.getRandomShort(TestUtils.RANDOM);
    PutMessageFormatInputStream putMessageFormatInputStream =
        ServerTestUtil.getPutMessageInputStreamForBlob(blobId, blobSize, blobIdToSizeMap, accountId, containerId);
    long time = System.currentTimeMillis();
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, time, Utils.Infinite_Time,
        putMessageFormatInputStream.getSize(), CloudBlobMetadata.EncryptionOrigin.NONE);
    latchBasedInMemoryCloudDestination.uploadBlob(blobId, putMessageFormatInputStream.getSize(), cloudBlobMetadata,
        putMessageFormatInputStream);
  }
  // Wait for the download attempt.
  assertTrue("Did not recover all blobs in 1 minute",
      latchBasedInMemoryCloudDestination.awaitDownload(1, TimeUnit.MINUTES));
  // Wait for replication to complete.
  Thread.sleep(10000);
  // Test recovery by sending get requests to the recovery node.
  testGetOnRecoveryNode(blobIdToSizeMap);
}
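Both recovery tests above wait for replication with a fixed Thread.sleep(10000), which is slow when replication finishes early and flaky when it finishes late. A generic polling helper (a sketch, not part of the Ambry test utilities) is one alternative, assuming the caller can express "replication done" as a predicate:

// Hypothetical polling utility: check a condition repeatedly instead of
// sleeping a fixed interval.
static void awaitCondition(java.util.function.Supplier<Boolean> condition, long timeoutMs, long pollMs)
    throws InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (System.currentTimeMillis() < deadline) {
    if (condition.get()) {
      return;
    }
    Thread.sleep(pollMs);
  }
  throw new AssertionError("Condition not met within " + timeoutMs + " ms");
}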
Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by linkedin: class CloudAndStoreReplicationTest, method cloudRecoveryTestForLargeBlob.
/**
 * Test replication from the VCR to server nodes, and from server to server nodes, for large blobs.
 * Creates one VCR node and two server nodes.
 * Uploads data to the VCR node and verifies that it has been replicated.
 * Uploads data to one of the server nodes and verifies that it has been replicated.
 * @throws Exception If an exception happens.
 */
@Test
public void cloudRecoveryTestForLargeBlob() throws Exception {
  // Create blobs and upload them to the cloud destination.
  Map<BlobId, Integer> blobIdToSizeMap = new HashMap<>();
  // Ambry currently supports a max blob size of 4 MB.
  int blobSize = FOUR_MB_SZ;
  for (BlobId blobId : cloudBlobIds) {
    PutMessageFormatInputStream putMessageFormatInputStream =
        ServerTestUtil.getPutMessageInputStreamForBlob(blobId, blobSize, blobIdToSizeMap, accountId, containerId);
    long time = System.currentTimeMillis();
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, time, Utils.Infinite_Time,
        putMessageFormatInputStream.getSize(), CloudBlobMetadata.EncryptionOrigin.NONE);
    latchBasedInMemoryCloudDestination.uploadBlob(blobId, putMessageFormatInputStream.getSize(), cloudBlobMetadata,
        putMessageFormatInputStream);
  }
  // Create blobs and upload them to one of the server nodes.
  sendBlobToDataNode(partitionLeaderRecoveryNode, blobSize, blobIdToSizeMap);
  // Wait for the download attempt.
  assertTrue("Did not recover all blobs in 1 minute",
      latchBasedInMemoryCloudDestination.awaitDownload(1, TimeUnit.MINUTES));
  // Wait for replication to complete.
  Thread.sleep(10000);
  // Test cloud-to-store and store-to-store replication by sending get requests to the server nodes.
  testGetOnServerNodes(blobIdToSizeMap);
}
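The awaitUpload and awaitDownload calls used throughout these tests suggest that LatchBasedInMemoryCloudDestination tracks a set of expected blobs and counts each one down as it is uploaded or downloaded. A conceptual sketch of that pattern (an assumption about the implementation, shown only to explain the waiting semantics; class and method names are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Conceptual sketch, not Ambry's actual class: one latch per direction,
// counted down as each expected blob operation completes.
class LatchBasedDestinationSketch {
  private final CountDownLatch downloadLatch;

  LatchBasedDestinationSketch(int expectedDownloads) {
    this.downloadLatch = new CountDownLatch(expectedDownloads);
  }

  // Called whenever a tracked blob is served to a replicating node.
  void onBlobDownloaded() {
    downloadLatch.countDown();
  }

  // Returns true if all expected downloads happened within the timeout.
  boolean awaitDownload(long timeout, TimeUnit unit) throws InterruptedException {
    return downloadLatch.await(timeout, unit);
  }
}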