
Example 31 with PartitionId

Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.

From the class AzureIntegrationTest, method testCompaction.

/**
 * Test blob compaction.
 * @throws Exception on error
 */
@Test
public void testCompaction() throws Exception {
    cleanup();
    int bucketCount = 20;
    PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
    // Upload blobs in various lifecycle states
    long now = System.currentTimeMillis();
    long creationTime = now - TimeUnit.DAYS.toMillis(7);
    for (int j = 0; j < bucketCount; j++) {
        Thread.sleep(20);
        logger.info("Uploading bucket {}", j);
        // Active blob
        BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
        CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
        InputStream inputStream = getBlobInputStream(blobSize);
        assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest));
        // Blob deleted before retention cutoff (should match)
        long timeOfDeath = now - TimeUnit.DAYS.toMillis(retentionPeriodDays + 1);
        blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
        cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
        cloudBlobMetadata.setDeletionTime(timeOfDeath);
        assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
        // Blob expired before retention cutoff (should match)
        blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
        cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, timeOfDeath, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
        assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
        // Blob deleted after retention cutoff
        timeOfDeath = now - TimeUnit.HOURS.toMillis(1);
        blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
        cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
        cloudBlobMetadata.setDeletionTime(timeOfDeath);
        assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
        // Blob expired after retention cutoff
        blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
        cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, timeOfDeath, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
        assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
    }
    // Run compaction; the dead-blob query should match 2 * bucketCount blobs (the deleted-before-cutoff and expired-before-cutoff blob from each bucket)
    String partitionPath = String.valueOf(testPartition);
    int compactedCount = azureDest.compactPartition(partitionPath);
    assertEquals("Unexpected count compacted", 2 * bucketCount, compactedCount);
    cleanup();
}
Also used: MockPartitionId (com.github.ambry.clustermap.MockPartitionId), CloudBlobMetadata (com.github.ambry.cloud.CloudBlobMetadata), ByteArrayInputStream (java.io.ByteArrayInputStream), InputStream (java.io.InputStream), PartitionId (com.github.ambry.clustermap.PartitionId), BlobId (com.github.ambry.commons.BlobId), Test (org.junit.Test)
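
The five uploads per bucket exercise one rule: a blob is compacted only if its deletion or expiration time falls before the retention cutoff (now minus the retention period), which is why exactly two of the five blobs in each bucket are counted. A minimal standalone sketch of that rule follows; the class and method names are illustrative rather than Ambry's implementation, and the -1 sentinel mirrors Utils.Infinite_Time.

import java.util.concurrent.TimeUnit;

public class RetentionRuleSketch {

    // Sentinel meaning "never expires", mirroring Ambry's Utils.Infinite_Time.
    private static final long INFINITE_TIME = -1;

    /**
     * True when the blob's deletion or expiration time falls before the
     * retention cutoff, i.e. it has been dead longer than the retention period.
     */
    static boolean isCompactible(long deathTimeMs, long nowMs, int retentionPeriodDays) {
        if (deathTimeMs == INFINITE_TIME) {
            return false; // live blob: never compacted
        }
        long retentionCutoffMs = nowMs - TimeUnit.DAYS.toMillis(retentionPeriodDays);
        return deathTimeMs < retentionCutoffMs;
    }
}

Under this rule the deleted-before-cutoff and expired-before-cutoff blobs match, while the active blob and the two that died an hour ago do not, giving the expected 2 * bucketCount.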

Example 32 with PartitionId

Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.

From the class CloudStorageManagerTest, method addStartAndRemoveBlobStoreTest.

/**
 * Tests {@code CloudStorageManager#addBlobStore}, {@code CloudStorageManager#startBlobStore}, and {@code CloudStorageManager#removeBlobStore}.
 * @throws IOException on error
 */
@Test
public void addStartAndRemoveBlobStoreTest() throws IOException {
    CloudStorageManager cloudStorageManager = createNewCloudStorageManager();
    ReplicaId mockReplicaId = clusterMap.getReplicaIds(clusterMap.getDataNodeIds().get(0)).get(0);
    PartitionId partitionId = mockReplicaId.getPartitionId();
    // start store for a PartitionId not added to the manager
    Assert.assertFalse(cloudStorageManager.startBlobStore(partitionId));
    // remove store for a PartitionId not added to the manager
    Assert.assertFalse(cloudStorageManager.removeBlobStore(partitionId));
    // add a replica to the store
    Assert.assertTrue(cloudStorageManager.addBlobStore(mockReplicaId));
    // add an already added replica to the store
    Assert.assertTrue(cloudStorageManager.addBlobStore(mockReplicaId));
    // try start for the added partition
    Assert.assertTrue(cloudStorageManager.startBlobStore(partitionId));
    // try remove for an added partition
    Assert.assertTrue(cloudStorageManager.removeBlobStore(partitionId));
}
Also used: MockPartitionId (com.github.ambry.clustermap.MockPartitionId), PartitionId (com.github.ambry.clustermap.PartitionId), MockReplicaId (com.github.ambry.clustermap.MockReplicaId), ReplicaId (com.github.ambry.clustermap.ReplicaId), Test (org.junit.Test)
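
The assertions pin down a small lifecycle contract: startBlobStore and removeBlobStore fail for a partition that was never added, while addBlobStore succeeds even when the replica is already present. A map-backed sketch of that contract, using a hypothetical class rather than Ambry's CloudStorageManager:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class StoreLifecycleSketch {

    // Value tracks whether the store has been started.
    private final ConcurrentMap<String, Boolean> stores = new ConcurrentHashMap<>();

    boolean addBlobStore(String partition) {
        stores.putIfAbsent(partition, Boolean.FALSE);
        return true; // re-adding an existing store still reports success
    }

    boolean startBlobStore(String partition) {
        return stores.replace(partition, Boolean.TRUE) != null; // false if never added
    }

    boolean removeBlobStore(String partition) {
        return stores.remove(partition) != null; // false if never added
    }
}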

Example 33 with PartitionId

Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.

From the class CosmosDataAccessorTest, method setup.

@Before
public void setup() {
    mockumentClient = mock(AsyncDocumentClient.class);
    byte dataCenterId = 66;
    short accountId = 101;
    short containerId = 5;
    PartitionId partitionId = new MockPartitionId();
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
    blobMetadata = new CloudBlobMetadata(blobId, System.currentTimeMillis(), Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
    azureMetrics = new AzureMetrics(new MetricRegistry());
    VcrMetrics vcrMetrics = new VcrMetrics(new MetricRegistry());
    cosmosAccessor = new CosmosDataAccessor(mockumentClient, "ambry/metadata", "ambry/deletedContainer", vcrMetrics, azureMetrics);
}
Also used: VcrMetrics (com.github.ambry.cloud.VcrMetrics), MockPartitionId (com.github.ambry.clustermap.MockPartitionId), CloudBlobMetadata (com.github.ambry.cloud.CloudBlobMetadata), MetricRegistry (com.codahale.metrics.MetricRegistry), PartitionId (com.github.ambry.clustermap.PartitionId), AsyncDocumentClient (com.microsoft.azure.cosmosdb.rx.AsyncDocumentClient), BlobId (com.github.ambry.commons.BlobId), Before (org.junit.Before)
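
The setup builds the accessor around a Mockito mock (mockumentClient), so individual tests can stub Cosmos calls without a live endpoint. A generic illustration of that pattern, using a hypothetical MetadataStore interface in place of the Cosmos SDK client:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class MockingPatternSketch {

    // Hypothetical stand-in for the external dependency being mocked.
    interface MetadataStore {
        String read(String id);
    }

    public static void main(String[] args) {
        MetadataStore store = mock(MetadataStore.class);
        // Canned response: no network round trip takes place.
        when(store.read("blob-1")).thenReturn("{\"size\":1024}");
        System.out.println(store.read("blob-1")); // prints the canned JSON
        verify(store).read("blob-1"); // asserts the interaction happened
    }
}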

Example 34 with PartitionId

Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.

From the class FrontendIntegrationTest, method getReplicasTest.

/**
 * Tests {@link RestUtils.SubResource#Replicas} requests
 * <p/>
 * For each {@link PartitionId} in the {@link ClusterMap}, a {@link BlobId} is created. The replica list returned by the
 * server is checked for equality against a locally obtained replica list.
 * @throws Exception on error
 */
@Test
public void getReplicasTest() throws Exception {
    List<? extends PartitionId> partitionIds = CLUSTER_MAP.getWritablePartitionIds(null);
    for (PartitionId partitionId : partitionIds) {
        String originalReplicaStr = partitionId.getReplicaIds().toString().replace(", ", ",");
        BlobId blobId = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, Account.UNKNOWN_ACCOUNT_ID, Container.UNKNOWN_CONTAINER_ID, partitionId, false, BlobId.BlobDataType.DATACHUNK);
        FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, blobId.getID() + "/" + RestUtils.SubResource.Replicas, Unpooled.buffer(0));
        ResponseParts responseParts = nettyClient.sendRequest(httpRequest, null, null).get();
        HttpResponse response = getHttpResponse(responseParts);
        assertEquals("Unexpected response status", HttpResponseStatus.OK, response.status());
        verifyTrackingHeaders(response);
        ByteBuffer content = getContent(responseParts.queue, HttpUtil.getContentLength(response));
        JSONObject responseJson = new JSONObject(new String(content.array()));
        String returnedReplicasStr = responseJson.get(GetReplicasHandler.REPLICAS_KEY).toString().replace("\"", "");
        assertEquals("Replica IDs returned for the BlobId do no match with the replicas IDs of partition", originalReplicaStr, returnedReplicasStr);
    }
}
Also used: DefaultFullHttpRequest (io.netty.handler.codec.http.DefaultFullHttpRequest), FullHttpRequest (io.netty.handler.codec.http.FullHttpRequest), JSONObject (org.json.JSONObject), HttpResponse (io.netty.handler.codec.http.HttpResponse), ResponseParts (com.github.ambry.rest.NettyClient.ResponseParts), PartitionId (com.github.ambry.clustermap.PartitionId), BlobId (com.github.ambry.commons.BlobId), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
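
The two replace calls normalize both sides to the same textual form: List.toString() renders elements as "[a, b]" with a space after each comma, while the JSON array renders them quoted, so the test strips the spaces from one and the quotes from the other. A self-contained illustration with hypothetical replica strings:

import java.util.Arrays;

public class ReplicaStringSketch {
    public static void main(String[] args) {
        // Local side: drop the space in the ", " separator.
        String local = Arrays.asList("host1:15088", "host2:15088").toString().replace(", ", ",");
        // Remote side: drop the JSON quotes.
        String remote = "[\"host1:15088\",\"host2:15088\"]".replace("\"", "");
        System.out.println(local.equals(remote)); // true
    }
}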

Example 35 with PartitionId

Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.

From the class BlobIdTest, method getRandomBlobId.

/**
 * Constructs a {@link BlobId} with random fields and the given version.
 * @param version The version of {@link BlobId} to build
 * @return A {@link BlobId} with random fields and the given version.
 */
private BlobId getRandomBlobId(short version) {
    byte[] bytes = new byte[2];
    random.nextBytes(bytes);
    byte datacenterId = bytes[0];
    short accountId = getRandomShort(random);
    short containerId = getRandomShort(random);
    BlobIdType type = random.nextBoolean() ? BlobIdType.NATIVE : BlobIdType.CRAFTED;
    PartitionId partitionId = referenceClusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(random.nextInt(3));
    boolean isEncrypted = random.nextBoolean();
    BlobDataType dataType = BlobDataType.values()[random.nextInt(BlobDataType.values().length)];
    return new BlobId(version, type, datacenterId, accountId, containerId, partitionId, isEncrypted, dataType);
}
Also used: MockPartitionId (com.github.ambry.clustermap.MockPartitionId), PartitionId (com.github.ambry.clustermap.PartitionId), BlobId (com.github.ambry.commons.BlobId)
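
A typical caller uses such a helper for a serialization round trip across versions. A hedged sketch of that usage in the same test class; the deserializing BlobId(String, ClusterMap) constructor is assumed from the Ambry commons API:

@Test
public void blobIdRoundTripSketch() throws Exception {
    BlobId original = getRandomBlobId(BLOB_ID_V6);
    // Re-parse the serialized form; the cluster map resolves the partition.
    BlobId reparsed = new BlobId(original.getID(), referenceClusterMap);
    assertEquals("Round trip changed the serialized form", original.getID(), reparsed.getID());
}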

Aggregations

PartitionId (com.github.ambry.clustermap.PartitionId): 183 usages
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 111 usages
Test (org.junit.Test): 95 usages
ReplicaId (com.github.ambry.clustermap.ReplicaId): 70 usages
ArrayList (java.util.ArrayList): 68 usages
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 53 usages
BlobId (com.github.ambry.commons.BlobId): 50 usages
HashMap (java.util.HashMap): 48 usages
Map (java.util.Map): 41 usages
List (java.util.List): 40 usages
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 39 usages
DataNodeId (com.github.ambry.clustermap.DataNodeId): 36 usages
MetricRegistry (com.codahale.metrics.MetricRegistry): 33 usages
ClusterMap (com.github.ambry.clustermap.ClusterMap): 32 usages
MockReplicaId (com.github.ambry.clustermap.MockReplicaId): 30 usages
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 30 usages
IOException (java.io.IOException): 29 usages
HashSet (java.util.HashSet): 29 usages
StoreKey (com.github.ambry.store.StoreKey): 26 usages
StoreKeyFactory (com.github.ambry.store.StoreKeyFactory): 25 usages