Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.
Class ServerTestUtil, method endToEndCloudBackupTest.
/**
 * Tests that blobs put to a data node can be backed up by {@link com.github.ambry.cloud.VcrReplicationManager}.
 * @param cluster the {@link MockCluster} of data nodes.
 * @param zkConnectString the ZK endpoint used to establish the VCR cluster.
 * @param vcrClusterName the name of the VCR cluster.
 * @param dataNode the data node where blobs are originally put.
 * @param clientSSLConfig the {@link SSLConfig}.
 * @param clientSSLSocketFactory the {@link SSLSocketFactory}.
 * @param notificationSystem the {@link MockNotificationSystem} used to track blob events in the {@link MockCluster}.
 * @param vcrSSLProps SSL-related properties for the VCR. Can be {@code null}.
 * @param doTtlUpdate issue a ttlUpdate request for each blob if {@code true}.
 */
static void endToEndCloudBackupTest(MockCluster cluster, String zkConnectString, String vcrClusterName,
    DataNodeId dataNode, SSLConfig clientSSLConfig, SSLSocketFactory clientSSLSocketFactory,
    MockNotificationSystem notificationSystem, Properties vcrSSLProps, boolean doTtlUpdate) throws Exception {
  int blobBackupCount = 10;
  int blobSize = 100;
  int userMetaDataSize = 100;
  ClusterAgentsFactory clusterAgentsFactory = cluster.getClusterAgentsFactory();
  // Send blobs to the data node.
  byte[] userMetadata = new byte[userMetaDataSize];
  byte[] data = new byte[blobSize];
  short accountId = Utils.getRandomShort(TestUtils.RANDOM);
  short containerId = Utils.getRandomShort(TestUtils.RANDOM);
  long ttl = doTtlUpdate ? TimeUnit.DAYS.toMillis(1) : Utils.Infinite_Time;
  BlobProperties properties = new BlobProperties(blobSize, "serviceid1", null, null, false, ttl,
      cluster.time.milliseconds(), accountId, containerId, false, null, null, null);
  TestUtils.RANDOM.nextBytes(userMetadata);
  TestUtils.RANDOM.nextBytes(data);
  Port port;
  if (clientSSLConfig == null) {
    port = new Port(dataNode.getPort(), PortType.PLAINTEXT);
  } else {
    port = new Port(dataNode.getSSLPort(), PortType.SSL);
  }
  ConnectedChannel channel = getBlockingChannelBasedOnPortType(port, "localhost", clientSSLSocketFactory, clientSSLConfig);
  channel.connect();
  CountDownLatch latch = new CountDownLatch(1);
  DirectSender runnable = new DirectSender(cluster, channel, blobBackupCount, data, userMetadata, properties, null, latch);
  Thread threadToRun = new Thread(runnable);
  threadToRun.start();
  assertTrue("Did not put all blobs in 2 minutes", latch.await(2, TimeUnit.MINUTES));
  // TODO: remove this temp fix after fixing race condition in MockCluster/MockNotificationSystem
  Thread.sleep(3000);
  List<BlobId> blobIds = runnable.getBlobIds();
  for (BlobId blobId : blobIds) {
    notificationSystem.awaitBlobCreations(blobId.getID());
    if (doTtlUpdate) {
      updateBlobTtl(channel, blobId, cluster.time.milliseconds());
    }
  }
  HelixControllerManager helixControllerManager =
      VcrTestUtil.populateZkInfoAndStartController(zkConnectString, vcrClusterName, cluster.getClusterMap());
  // Start the VCR and CloudBackupManager.
  Properties props = VcrTestUtil.createVcrProperties(dataNode.getDatacenterName(), vcrClusterName, zkConnectString,
      12310, 12410, 12510, vcrSSLProps);
  LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination =
      new LatchBasedInMemoryCloudDestination(blobIds, clusterAgentsFactory.getClusterMap());
  CloudDestinationFactory cloudDestinationFactory =
      new LatchBasedInMemoryCloudDestinationFactory(latchBasedInMemoryCloudDestination);
  VcrServer vcrServer = VcrTestUtil.createVcrServer(new VerifiableProperties(props), clusterAgentsFactory,
      notificationSystem, cloudDestinationFactory);
  vcrServer.startup();
  // Wait for the backup to complete.
  assertTrue("Did not backup all blobs in 2 minutes", latchBasedInMemoryCloudDestination.awaitUpload(2, TimeUnit.MINUTES));
  Map<String, CloudBlobMetadata> cloudBlobMetadataMap = latchBasedInMemoryCloudDestination.getBlobMetadata(blobIds);
  for (BlobId blobId : blobIds) {
    CloudBlobMetadata cloudBlobMetadata = cloudBlobMetadataMap.get(blobId.toString());
    assertNotNull("cloudBlobMetadata should not be null", cloudBlobMetadata);
    assertEquals("AccountId mismatch", accountId, cloudBlobMetadata.getAccountId());
    assertEquals("ContainerId mismatch", containerId, cloudBlobMetadata.getContainerId());
    assertEquals("Expiration time mismatch", Utils.Infinite_Time, cloudBlobMetadata.getExpirationTime());
    // TODO: verify other metadata and blob data
  }
  vcrServer.shutdown();
  helixControllerManager.syncStop();
}
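For reference, a server integration test might drive this helper as follows. This is a minimal sketch, not Ambry's actual test code: the fixture fields (cluster, zkConnectString, vcrClusterName, notificationSystem) are assumed to be initialized in the test's setup, and passing null for the SSL config and socket factory selects the data node's PLAINTEXT port, per the port-selection logic shown above.

@Test
public void vcrBackupPlaintextTest() throws Exception {
  // Hypothetical fixture fields assumed initialized in @Before setup; names are illustrative.
  DataNodeId dataNode = cluster.getClusterMap().getDataNodeIds().get(0);
  // null SSLConfig/SSLSocketFactory makes the helper use the PLAINTEXT port;
  // null vcrSSLProps runs the VCR without SSL; true exercises the ttlUpdate path.
  ServerTestUtil.endToEndCloudBackupTest(cluster, zkConnectString, vcrClusterName, dataNode, null, null,
      notificationSystem, null, true);
}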
Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.
Class VcrRecoveryTest, method cloudRecoveryTestForLargeBlob.
/**
 * Tests recovery of large blobs from one VCR node to one disk-based data node.
 * Creates a VCR node and a disk-based data node, uploads blobs to the cloud destination,
 * and verifies that the data node downloads them.
 * @throws Exception if an exception happens.
 */
@Test
public void cloudRecoveryTestForLargeBlob() throws Exception {
  // Create blobs and upload them to the cloud destination.
  int userMetaDataSize = 100;
  byte[] userMetadata = new byte[userMetaDataSize];
  TestUtils.RANDOM.nextBytes(userMetadata);
  Map<BlobId, Integer> blobIdToSizeMap = new HashMap<>();
  // Ambry currently supports a maximum blob chunk size of 4 MB.
  int blobSize = FOUR_MB_SZ;
  for (BlobId blobId : blobIds) {
    PutMessageFormatInputStream putMessageFormatInputStream =
        ServerTestUtil.getPutMessageInputStreamForBlob(blobId, blobSize, blobIdToSizeMap, accountId, containerId);
    long time = System.currentTimeMillis();
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, time, Utils.Infinite_Time,
        putMessageFormatInputStream.getSize(), CloudBlobMetadata.EncryptionOrigin.NONE);
    latchBasedInMemoryCloudDestination.uploadBlob(blobId, putMessageFormatInputStream.getSize(), cloudBlobMetadata,
        putMessageFormatInputStream);
  }
  // Wait for the download attempt.
  assertTrue("Did not recover all blobs in 1 minute", latchBasedInMemoryCloudDestination.awaitDownload(1, TimeUnit.MINUTES));
  // Wait for replication to complete.
  Thread.sleep(10000);
  // Test recovery by sending a get request to the recovery node.
  testGetOnRecoveryNode(blobIdToSizeMap);
}
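ServerTestUtil.getPutMessageInputStreamForBlob is referenced above but not included in this listing. The sketch below is a hypothetical reconstruction of such a helper, built on PutMessageFormatInputStream's public constructor; the service id, the empty user metadata, and the recording of the size into the map are assumptions, not the confirmed implementation.

// Hypothetical reconstruction; argument values are illustrative.
static PutMessageFormatInputStream getPutMessageInputStreamForBlob(BlobId blobId, int blobSize,
    Map<BlobId, Integer> blobIdToSizeMap, short accountId, short containerId) throws Exception {
  byte[] blobData = TestUtils.getRandomBytes(blobSize);
  // Minimal BlobProperties: size, service id, account and container, not encrypted.
  BlobProperties blobProperties = new BlobProperties(blobSize, "test-serviceid", accountId, containerId, false);
  blobIdToSizeMap.put(blobId, blobSize);
  // Serialize a PUT message: headers + properties + (empty) user metadata + blob data.
  return new PutMessageFormatInputStream(blobId, null, blobProperties, ByteBuffer.wrap(new byte[0]),
      new ByteArrayInputStream(blobData), blobSize, BlobType.DataBlob);
}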
Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.
Class VcrRecoveryTest, method basicCloudRecoveryTest.
/**
 * Tests recovery from one VCR node to one disk-based data node.
 * Creates a VCR node and a disk-based data node, uploads blobs to the cloud destination,
 * and verifies that the data node downloads them.
 * @throws Exception if an exception happens.
 */
@Test
public void basicCloudRecoveryTest() throws Exception {
  // Create blobs and upload them to the cloud destination.
  int userMetaDataSize = 100;
  byte[] userMetadata = new byte[userMetaDataSize];
  TestUtils.RANDOM.nextBytes(userMetadata);
  Map<BlobId, Integer> blobIdToSizeMap = new HashMap<>();
  for (BlobId blobId : blobIds) {
    int blobSize = Utils.getRandomShort(TestUtils.RANDOM);
    PutMessageFormatInputStream putMessageFormatInputStream =
        ServerTestUtil.getPutMessageInputStreamForBlob(blobId, blobSize, blobIdToSizeMap, accountId, containerId);
    long time = System.currentTimeMillis();
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, time, Utils.Infinite_Time,
        putMessageFormatInputStream.getSize(), CloudBlobMetadata.EncryptionOrigin.NONE);
    latchBasedInMemoryCloudDestination.uploadBlob(blobId, putMessageFormatInputStream.getSize(), cloudBlobMetadata,
        putMessageFormatInputStream);
  }
  // Wait for the download attempt.
  assertTrue("Did not recover all blobs in 1 minute", latchBasedInMemoryCloudDestination.awaitDownload(1, TimeUnit.MINUTES));
  // Wait for replication to complete.
  Thread.sleep(10000);
  // Test recovery by sending a get request to the recovery node.
  testGetOnRecoveryNode(blobIdToSizeMap);
}
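testGetOnRecoveryNode is likewise not shown in this listing. Below is a hedged sketch of the kind of verification it performs, assuming a ConnectedChannel to the recovery node and following the GetRequest/GetResponse pattern used elsewhere in Ambry's server tests; the correlation id, client id, and GetOption chosen here are assumptions, and channel and clusterMap are presumed fixture fields.

// Sketch only: verify each recovered blob is fetchable and has the expected size.
for (Map.Entry<BlobId, Integer> entry : blobIdToSizeMap.entrySet()) {
  PartitionRequestInfo partitionRequestInfo =
      new PartitionRequestInfo(entry.getKey().getPartition(), Collections.singletonList(entry.getKey()));
  GetRequest getRequest = new GetRequest(1, "recovery-check", MessageFormatFlags.Blob,
      Collections.singletonList(partitionRequestInfo), GetOption.Include_All);
  channel.send(getRequest);
  GetResponse getResponse =
      GetResponse.readFrom(new DataInputStream(channel.receive().getInputStream()), clusterMap);
  // The blob record in the response stream should carry the originally uploaded size.
  BlobData blobData = MessageFormatRecord.deserializeBlob(getResponse.getInputStream());
  assertEquals("Blob size mismatch on recovery node", (long) entry.getValue(), blobData.getSize());
}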
Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.
Class AzureContainerCompactorIntegrationTest, method testCompactAssignedDeprecatedContainers.
@Test
public void testCompactAssignedDeprecatedContainers() throws CloudStorageException, DocumentClientException {
  // Create a deprecated container.
  Set<Container> containers = generateContainers(1);
  cloudDestination.deprecateContainers(containers);
  verifyCosmosData(containers);
  verifyCheckpoint(containers);
  Container testContainer = containers.iterator().next();
  // Create blobs in the deprecated container and test partition.
  int numBlobs = 100;
  PartitionId partitionId = new MockPartitionId(testPartitionId, MockClusterMap.DEFAULT_PARTITION_CLASS);
  long creationTime = System.currentTimeMillis();
  Map<BlobId, byte[]> blobIdtoDataMap = createUnencryptedPermanentBlobs(numBlobs, dataCenterId,
      testContainer.getParentAccountId(), testContainer.getId(), partitionId, blobSize, cloudRequestAgent,
      cloudDestination, creationTime);
  // Assert that the blobs exist.
  Map<String, CloudBlobMetadata> metadataMap = getBlobMetadataWithRetry(new ArrayList<>(blobIdtoDataMap.keySet()),
      partitionId.toPathString(), cloudRequestAgent, cloudDestination);
  assertEquals("Unexpected size of returned metadata map", numBlobs, metadataMap.size());
  // Compact the deprecated container.
  cloudDestination.getContainerCompactor().compactAssignedDeprecatedContainers(Collections.singletonList(partitionId));
  // Assert that the deprecated container's blobs no longer exist.
  assertTrue("Expected empty set after container compaction",
      getBlobMetadataWithRetry(new ArrayList<>(blobIdtoDataMap.keySet()), partitionId.toPathString(),
          cloudRequestAgent, cloudDestination).isEmpty());
  cleanup();
}
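generateContainers is a private helper of this test and is not shown. A minimal sketch of how a deprecated container could be built with the account API's ContainerBuilder follows; the id, name, and description are illustrative, and DELETE_IN_PROGRESS is assumed here to be the status that marks a container for compaction.

// Illustrative only: ids and names are made up.
short parentAccountId = (short) 1001;
short containerId = (short) 8;
Container deprecatedContainer = new ContainerBuilder(containerId, "deprecated-container",
    Container.ContainerStatus.DELETE_IN_PROGRESS, "container awaiting compaction", parentAccountId).build();
Set<Container> containers = Collections.singleton(deprecatedContainer);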
Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.
Class AzureTestUtils, method createUnencryptedPermanentBlobs.
/**
 * Utility method to create the specified number of unencrypted blobs with permanent TTL and the specified properties.
 * @param numBlobs number of blobs to create.
 * @param dataCenterId datacenter id.
 * @param accountId account id.
 * @param containerId container id.
 * @param partitionId {@link PartitionId} of the partition in which blobs will be created.
 * @param blobSize size of blobs.
 * @param cloudRequestAgent {@link CloudRequestAgent} object.
 * @param azureDest {@link AzureCloudDestination} object.
 * @param creationTime blob creation time.
 * @return a {@link Map} from each created blob's {@link BlobId} to its data.
 * @throws CloudStorageException in case of any exception while uploading a blob.
 */
static Map<BlobId, byte[]> createUnencryptedPermanentBlobs(int numBlobs, byte dataCenterId, short accountId,
    short containerId, PartitionId partitionId, int blobSize, CloudRequestAgent cloudRequestAgent,
    AzureCloudDestination azureDest, long creationTime) throws CloudStorageException {
  Map<BlobId, byte[]> blobIdtoDataMap = new HashMap<>();
  for (int j = 0; j < numBlobs; j++) {
    BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId,
        false, BlobDataType.DATACHUNK);
    byte[] randomBytes = TestUtils.getRandomBytes(blobSize);
    blobIdtoDataMap.put(blobId, randomBytes);
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize,
        CloudBlobMetadata.EncryptionOrigin.NONE);
    assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata,
        new ByteArrayInputStream(randomBytes), cloudRequestAgent, azureDest));
  }
  return blobIdtoDataMap;
}
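A caller can use the returned map to verify that uploaded content round-trips intact. Below is a small sketch using CloudDestination.downloadBlob; the assertion structure is illustrative rather than taken from the Ambry tests, and it assumes the destination stores the raw uploaded bytes unmodified.

for (Map.Entry<BlobId, byte[]> entry : blobIdtoDataMap.entrySet()) {
  ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
  // downloadBlob streams the stored bytes for the given blob id into the output stream.
  azureDest.downloadBlob(entry.getKey(), outputStream);
  assertArrayEquals("Downloaded bytes should match the uploaded bytes", entry.getValue(), outputStream.toByteArray());
}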