Example 1 with CloudBlobMetadata

Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.

From the class ServerTestUtil, method endToEndCloudBackupTest.

/**
 * Tests that blobs put to a data node can be backed up by {@link com.github.ambry.cloud.VcrReplicationManager}.
 * @param cluster the {@link MockCluster} of data nodes.
 * @param zkConnectString the ZK endpoint used to establish the VCR cluster.
 * @param vcrClusterName the name of the VCR cluster.
 * @param dataNode the datanode where blobs are originally put.
 * @param clientSSLConfig the {@link SSLConfig}.
 * @param clientSSLSocketFactory the {@link SSLSocketFactory}.
 * @param notificationSystem the {@link MockNotificationSystem} used to track blob events in the {@link MockCluster}.
 * @param vcrSSLProps SSL related properties for VCR. Can be {@code null}.
 * @param doTtlUpdate send a ttlUpdate request for each blob if {@code true}.
 */
static void endToEndCloudBackupTest(MockCluster cluster, String zkConnectString, String vcrClusterName, DataNodeId dataNode, SSLConfig clientSSLConfig, SSLSocketFactory clientSSLSocketFactory, MockNotificationSystem notificationSystem, Properties vcrSSLProps, boolean doTtlUpdate) throws Exception {
    int blobBackupCount = 10;
    int blobSize = 100;
    int userMetaDataSize = 100;
    ClusterAgentsFactory clusterAgentsFactory = cluster.getClusterAgentsFactory();
    // Send blobs to DataNode
    byte[] userMetadata = new byte[userMetaDataSize];
    byte[] data = new byte[blobSize];
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    long ttl = doTtlUpdate ? TimeUnit.DAYS.toMillis(1) : Utils.Infinite_Time;
    BlobProperties properties = new BlobProperties(blobSize, "serviceid1", null, null, false, ttl, cluster.time.milliseconds(), accountId, containerId, false, null, null, null);
    TestUtils.RANDOM.nextBytes(userMetadata);
    TestUtils.RANDOM.nextBytes(data);
    Port port;
    if (clientSSLConfig == null) {
        port = new Port(dataNode.getPort(), PortType.PLAINTEXT);
    } else {
        port = new Port(dataNode.getSSLPort(), PortType.SSL);
    }
    ConnectedChannel channel = getBlockingChannelBasedOnPortType(port, "localhost", clientSSLSocketFactory, clientSSLConfig);
    channel.connect();
    CountDownLatch latch = new CountDownLatch(1);
    DirectSender runnable = new DirectSender(cluster, channel, blobBackupCount, data, userMetadata, properties, null, latch);
    Thread threadToRun = new Thread(runnable);
    threadToRun.start();
    assertTrue("Did not put all blobs in 2 minutes", latch.await(2, TimeUnit.MINUTES));
    // TODO: remove this temp fix after fixing race condition in MockCluster/MockNotificationSystem
    Thread.sleep(3000);
    List<BlobId> blobIds = runnable.getBlobIds();
    for (BlobId blobId : blobIds) {
        notificationSystem.awaitBlobCreations(blobId.getID());
        if (doTtlUpdate) {
            updateBlobTtl(channel, blobId, cluster.time.milliseconds());
        }
    }
    HelixControllerManager helixControllerManager = VcrTestUtil.populateZkInfoAndStartController(zkConnectString, vcrClusterName, cluster.getClusterMap());
    // Start the VCR and CloudBackupManager
    Properties props = VcrTestUtil.createVcrProperties(dataNode.getDatacenterName(), vcrClusterName, zkConnectString, 12310, 12410, 12510, vcrSSLProps);
    LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIds, clusterAgentsFactory.getClusterMap());
    CloudDestinationFactory cloudDestinationFactory = new LatchBasedInMemoryCloudDestinationFactory(latchBasedInMemoryCloudDestination);
    VcrServer vcrServer = VcrTestUtil.createVcrServer(new VerifiableProperties(props), clusterAgentsFactory, notificationSystem, cloudDestinationFactory);
    vcrServer.startup();
    // Wait for the backup to complete
    assertTrue("Did not backup all blobs in 2 minutes", latchBasedInMemoryCloudDestination.awaitUpload(2, TimeUnit.MINUTES));
    Map<String, CloudBlobMetadata> cloudBlobMetadataMap = latchBasedInMemoryCloudDestination.getBlobMetadata(blobIds);
    for (BlobId blobId : blobIds) {
        CloudBlobMetadata cloudBlobMetadata = cloudBlobMetadataMap.get(blobId.toString());
        assertNotNull("cloudBlobMetadata should not be null", cloudBlobMetadata);
        assertEquals("AccountId mismatch", accountId, cloudBlobMetadata.getAccountId());
        assertEquals("ContainerId mismatch", containerId, cloudBlobMetadata.getContainerId());
        assertEquals("Expiration time mismatch", Utils.Infinite_Time, cloudBlobMetadata.getExpirationTime());
    // TODO: verify other metadata and blob data
    }
    vcrServer.shutdown();
    helixControllerManager.syncStop();
}
Also used: VerifiableProperties(com.github.ambry.config.VerifiableProperties) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) Port(com.github.ambry.network.Port) HelixControllerManager(com.github.ambry.utils.HelixControllerManager) ConnectedChannel(com.github.ambry.network.ConnectedChannel) LatchBasedInMemoryCloudDestinationFactory(com.github.ambry.cloud.LatchBasedInMemoryCloudDestinationFactory) CloudDestinationFactory(com.github.ambry.cloud.CloudDestinationFactory) CountDownLatch(java.util.concurrent.CountDownLatch) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) LatchBasedInMemoryCloudDestination(com.github.ambry.cloud.LatchBasedInMemoryCloudDestination) VcrServer(com.github.ambry.cloud.VcrServer) ClusterAgentsFactory(com.github.ambry.clustermap.ClusterAgentsFactory) BlobId(com.github.ambry.commons.BlobId)
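
For orientation, here is a minimal sketch of how an integration test might invoke this helper over a plaintext port with TTL updates enabled. It is a hedged illustration, not a call site from ambry itself: the cluster, zkConnectString, and notificationSystem fixtures are assumed to exist already, and the VCR cluster name is made up.

// Sketch only: cluster, zkConnectString, and notificationSystem come from the test fixture.
DataNodeId dataNode = cluster.getClusterMap().getDataNodeIds().get(0);
// Plaintext (no client SSL config or socket factory), no VCR SSL properties, TTL updates on.
ServerTestUtil.endToEndCloudBackupTest(cluster, zkConnectString, "vcrTestCluster", dataNode, null, null, notificationSystem, null, true);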

Example 2 with CloudBlobMetadata

Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.

From the class VcrRecoveryTest, method cloudRecoveryTestForLargeBlob.

/**
 * Tests recovery of large blobs from one VCR node to one disk-based data node.
 * Creates a VCR node and a disk-based data node, uploads data to the VCR node, and verifies that it has been downloaded.
 * @throws Exception if an exception occurs.
 */
@Test
public void cloudRecoveryTestForLargeBlob() throws Exception {
    // Create blobs and upload to cloud destination.
    int userMetaDataSize = 100;
    byte[] userMetadata = new byte[userMetaDataSize];
    TestUtils.RANDOM.nextBytes(userMetadata);
    Map<BlobId, Integer> blobIdToSizeMap = new HashMap<>();
    // Currently ambry supports a maximum blob size of 4 MB.
    int blobSize = FOUR_MB_SZ;
    for (BlobId blobId : blobIds) {
        PutMessageFormatInputStream putMessageFormatInputStream = ServerTestUtil.getPutMessageInputStreamForBlob(blobId, blobSize, blobIdToSizeMap, accountId, containerId);
        long time = System.currentTimeMillis();
        CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, time, Utils.Infinite_Time, putMessageFormatInputStream.getSize(), CloudBlobMetadata.EncryptionOrigin.NONE);
        latchBasedInMemoryCloudDestination.uploadBlob(blobId, putMessageFormatInputStream.getSize(), cloudBlobMetadata, putMessageFormatInputStream);
    }
    // Waiting for download attempt
    assertTrue("Did not recover all blobs in 1 minute", latchBasedInMemoryCloudDestination.awaitDownload(1, TimeUnit.MINUTES));
    // Waiting for replication to complete
    Thread.sleep(10000);
    // Test recovery by sending get request to recovery node
    testGetOnRecoveryNode(blobIdToSizeMap);
}
Also used: AtomicInteger(java.util.concurrent.atomic.AtomicInteger) HashMap(java.util.HashMap) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) PutMessageFormatInputStream(com.github.ambry.messageformat.PutMessageFormatInputStream) BlobId(com.github.ambry.commons.BlobId) Test(org.junit.Test)

Example 3 with CloudBlobMetadata

Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.

From the class VcrRecoveryTest, method basicCloudRecoveryTest.

/**
 * Tests recovery from one VCR node to one disk-based data node.
 * Creates a VCR node and a disk-based data node, uploads data to the VCR node, and verifies that it has been downloaded.
 * @throws Exception if an exception occurs.
 */
@Test
public void basicCloudRecoveryTest() throws Exception {
    // Create blobs and upload to cloud destination.
    int userMetaDataSize = 100;
    byte[] userMetadata = new byte[userMetaDataSize];
    TestUtils.RANDOM.nextBytes(userMetadata);
    Map<BlobId, Integer> blobIdToSizeMap = new HashMap<>();
    for (BlobId blobId : blobIds) {
        int blobSize = Utils.getRandomShort(TestUtils.RANDOM);
        PutMessageFormatInputStream putMessageFormatInputStream = ServerTestUtil.getPutMessageInputStreamForBlob(blobId, blobSize, blobIdToSizeMap, accountId, containerId);
        long time = System.currentTimeMillis();
        CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, time, Utils.Infinite_Time, putMessageFormatInputStream.getSize(), CloudBlobMetadata.EncryptionOrigin.NONE);
        latchBasedInMemoryCloudDestination.uploadBlob(blobId, putMessageFormatInputStream.getSize(), cloudBlobMetadata, putMessageFormatInputStream);
    }
    // Waiting for download attempt
    assertTrue("Did not recover all blobs in 1 minute", latchBasedInMemoryCloudDestination.awaitDownload(1, TimeUnit.MINUTES));
    // Waiting for replication to complete
    Thread.sleep(10000);
    // Test recovery by sending get request to recovery node
    testGetOnRecoveryNode(blobIdToSizeMap);
}
Also used: AtomicInteger(java.util.concurrent.atomic.AtomicInteger) HashMap(java.util.HashMap) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) PutMessageFormatInputStream(com.github.ambry.messageformat.PutMessageFormatInputStream) BlobId(com.github.ambry.commons.BlobId) Test(org.junit.Test)
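
Both recovery tests build metadata with the same five-argument constructor. The following standalone sketch condenses that pattern, using only the constructor and the getters already exercised in Example 1; blobId and blobSize are assumed to come from the surrounding test.

// Metadata for a permanent, unencrypted blob; account and container ids travel inside the BlobId.
CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, System.currentTimeMillis(), Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
short accountId = cloudBlobMetadata.getAccountId();
short containerId = cloudBlobMetadata.getContainerId();
// Utils.Infinite_Time marks the blob as never expiring.
long expirationTime = cloudBlobMetadata.getExpirationTime();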

Example 4 with CloudBlobMetadata

Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.

From the class AzureContainerCompactorIntegrationTest, method testCompactAssignedDeprecatedContainers.

@Test
public void testCompactAssignedDeprecatedContainers() throws CloudStorageException, DocumentClientException {
    // Create a deprecated container.
    Set<Container> containers = generateContainers(1);
    cloudDestination.deprecateContainers(containers);
    verifyCosmosData(containers);
    verifyCheckpoint(containers);
    Container testContainer = containers.iterator().next();
    // Create blobs in the deprecated container and test partition.
    int numBlobs = 100;
    PartitionId partitionId = new MockPartitionId(testPartitionId, MockClusterMap.DEFAULT_PARTITION_CLASS);
    long creationTime = System.currentTimeMillis();
    Map<BlobId, byte[]> blobIdtoDataMap = createUnencryptedPermanentBlobs(numBlobs, dataCenterId, testContainer.getParentAccountId(), testContainer.getId(), partitionId, blobSize, cloudRequestAgent, cloudDestination, creationTime);
    // Assert that blobs exist.
    Map<String, CloudBlobMetadata> metadataMap = getBlobMetadataWithRetry(new ArrayList<>(blobIdtoDataMap.keySet()), partitionId.toPathString(), cloudRequestAgent, cloudDestination);
    assertEquals("Unexpected size of returned metadata map", numBlobs, metadataMap.size());
    // Compact the deprecated container.
    cloudDestination.getContainerCompactor().compactAssignedDeprecatedContainers(Collections.singletonList(partitionId));
    // Assert that deprecated container's blobs don't exist anymore.
    assertTrue("Expected empty set after container compaction", getBlobMetadataWithRetry(new ArrayList<>(blobIdtoDataMap.keySet()), partitionId.toPathString(), cloudRequestAgent, cloudDestination).isEmpty());
    cleanup();
}
Also used: Container(com.github.ambry.account.Container) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) PartitionId(com.github.ambry.clustermap.PartitionId) BlobId(com.github.ambry.commons.BlobId) Test(org.junit.Test)
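
The lifecycle under test is deprecate, then compact, then verify. A condensed sketch of just the first two steps, assuming the cloudDestination, containers, and partitionId fixtures from the test above:

// Record the containers as deprecated in the cloud destination.
cloudDestination.deprecateContainers(containers);
// Compact the deprecated containers' blobs, restricted to the assigned partitions.
cloudDestination.getContainerCompactor().compactAssignedDeprecatedContainers(Collections.singletonList(partitionId));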

Example 5 with CloudBlobMetadata

Use of com.github.ambry.cloud.CloudBlobMetadata in project ambry by LinkedIn.

From the class AzureTestUtils, method createUnencryptedPermanentBlobs.

/**
 * Utility method to create the specified number of unencrypted blobs with permanent TTL and the specified properties.
 * @param numBlobs number of blobs to create.
 * @param dataCenterId datacenter id.
 * @param accountId account id.
 * @param containerId container id.
 * @param partitionId {@link PartitionId} of the partition in which blobs will be created.
 * @param blobSize size of blobs.
 * @param cloudRequestAgent {@link CloudRequestAgent} object.
 * @param azureDest {@link AzureCloudDestination} object.
 * @param creationTime blob creation time.
 * @return a {@link Map} from each created blob's {@link BlobId} to its data.
 * @throws CloudStorageException in case of any exception while uploading a blob.
 */
static Map<BlobId, byte[]> createUnencryptedPermanentBlobs(int numBlobs, byte dataCenterId, short accountId, short containerId, PartitionId partitionId, int blobSize, CloudRequestAgent cloudRequestAgent, AzureCloudDestination azureDest, long creationTime) throws CloudStorageException {
    Map<BlobId, byte[]> blobIdtoDataMap = new HashMap<>();
    for (int j = 0; j < numBlobs; j++) {
        BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
        byte[] randomBytes = TestUtils.getRandomBytes(blobSize);
        blobIdtoDataMap.put(blobId, randomBytes);
        CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
        assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, new ByteArrayInputStream(randomBytes), cloudRequestAgent, azureDest));
    }
    return blobIdtoDataMap;
}
Also used: HashMap(java.util.HashMap) ByteArrayInputStream(java.io.ByteArrayInputStream) CloudBlobMetadata(com.github.ambry.cloud.CloudBlobMetadata) BlobId(com.github.ambry.commons.BlobId)
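
A hypothetical call site for this helper, creating ten permanent 1 KB blobs and keeping the returned map for later read-back verification; every fixture variable here is assumed rather than taken from ambry.

// Sketch only: dataCenterId, accountId, containerId, partitionId, cloudRequestAgent, and azureDest are fixture values.
Map<BlobId, byte[]> blobIdToDataMap = createUnencryptedPermanentBlobs(10, dataCenterId, accountId, containerId, partitionId, 1024, cloudRequestAgent, azureDest, System.currentTimeMillis());
assertEquals("Expected one entry per created blob", 10, blobIdToDataMap.size());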

Aggregations

CloudBlobMetadata (com.github.ambry.cloud.CloudBlobMetadata): 55 usages
BlobId (com.github.ambry.commons.BlobId): 27 usages
Test (org.junit.Test): 25 usages
ArrayList (java.util.ArrayList): 19 usages
Document (com.microsoft.azure.cosmosdb.Document): 14 usages
PartitionId (com.github.ambry.clustermap.PartitionId): 12 usages
FeedResponse (com.microsoft.azure.cosmosdb.FeedResponse): 12 usages
FeedOptions (com.microsoft.azure.cosmosdb.FeedOptions): 11 usages
SqlQuerySpec (com.microsoft.azure.cosmosdb.SqlQuerySpec): 11 usages
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 10 usages
InputStream (java.io.InputStream): 10 usages
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 9 usages
ByteArrayInputStream (java.io.ByteArrayInputStream): 8 usages
Timer (com.codahale.metrics.Timer): 7 usages
FindResult (com.github.ambry.cloud.FindResult): 7 usages
MetricRegistry (com.codahale.metrics.MetricRegistry): 6 usages
CloudStorageException (com.github.ambry.cloud.CloudStorageException): 6 usages
ChangeFeedOptions (com.microsoft.azure.cosmosdb.ChangeFeedOptions): 6 usages
DocumentClientException (com.microsoft.azure.cosmosdb.DocumentClientException): 6 usages
VcrMetrics (com.github.ambry.cloud.VcrMetrics): 5 usages