Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
The class AzureIntegrationTest, method testCompaction.
/**
 * Test blob compaction.
 * @throws Exception on error
 */
@Test
public void testCompaction() throws Exception {
  cleanup();
  int bucketCount = 20;
  PartitionId partitionId = new MockPartitionId(testPartition, MockClusterMap.DEFAULT_PARTITION_CLASS);
  // Upload blobs in various lifecycle states
  long now = System.currentTimeMillis();
  long creationTime = now - TimeUnit.DAYS.toMillis(7);
  for (int j = 0; j < bucketCount; j++) {
    Thread.sleep(20);
    logger.info("Uploading bucket {}", j);
    // Active blob (should not match)
    BlobId blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
    CloudBlobMetadata cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
    InputStream inputStream = getBlobInputStream(blobSize);
    assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, inputStream, cloudRequestAgent, azureDest));
    // Blob deleted before retention cutoff (should match)
    long timeOfDeath = now - TimeUnit.DAYS.toMillis(retentionPeriodDays + 1);
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
    cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
    cloudBlobMetadata.setDeletionTime(timeOfDeath);
    assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
    // Blob expired before retention cutoff (should match)
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
    cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, timeOfDeath, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
    assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
    // Blob deleted after retention cutoff (should not match)
    timeOfDeath = now - TimeUnit.HOURS.toMillis(1);
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
    cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
    cloudBlobMetadata.setDeletionTime(timeOfDeath);
    assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
    // Blob expired after retention cutoff (should not match)
    blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
    cloudBlobMetadata = new CloudBlobMetadata(blobId, creationTime, timeOfDeath, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
    assertTrue("Expected upload to return true", uploadBlobWithRetry(blobId, blobSize, cloudBlobMetadata, getBlobInputStream(blobSize), cloudRequestAgent, azureDest));
  }
  // Compaction runs the getDeadBlobs query; it should find the two blobs per bucket
  // that died before the retention cutoff, i.e. 2 * bucketCount in total.
  String partitionPath = String.valueOf(testPartition);
  int compactedCount = azureDest.compactPartition(partitionPath);
  assertEquals("Unexpected count compacted", 2 * bucketCount, compactedCount);
  cleanup();
}
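Each bucket uploads five blobs, of which exactly two (deleted or expired before the retention cutoff) qualify, hence the expected 2 * bucketCount. A minimal sketch of that eligibility rule, written as a hypothetical helper for illustration rather than the predicate Ambry's compaction actually runs:

// Hypothetical helper mirroring the rule the test exercises; not part of Ambry's API.
private static boolean isCompactionCandidate(CloudBlobMetadata metadata, long retentionPeriodDays, long now) {
  long cutoff = now - TimeUnit.DAYS.toMillis(retentionPeriodDays);
  // A blob is a candidate only if its deletion or expiration time precedes the cutoff.
  return (metadata.getDeletionTime() != Utils.Infinite_Time && metadata.getDeletionTime() < cutoff)
      || (metadata.getExpirationTime() != Utils.Infinite_Time && metadata.getExpirationTime() < cutoff);
}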
Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
The class CloudStorageManagerTest, method addStartAndRemoveBlobStoreTest.
/**
 * Test {@code CloudStorageManager#addBlobStore}, {@code CloudStorageManager#startBlobStore}, {@code CloudStorageManager#removeBlobStore}.
 * @throws IOException on error
 */
@Test
public void addStartAndRemoveBlobStoreTest() throws IOException {
  CloudStorageManager cloudStorageManager = createNewCloudStorageManager();
  ReplicaId mockReplicaId = clusterMap.getReplicaIds(clusterMap.getDataNodeIds().get(0)).get(0);
  PartitionId partitionId = mockReplicaId.getPartitionId();
  // start store for a PartitionId that has not been added
  Assert.assertFalse(cloudStorageManager.startBlobStore(partitionId));
  // remove store for a PartitionId that has not been added
  Assert.assertFalse(cloudStorageManager.removeBlobStore(partitionId));
  // add a replica to the store
  Assert.assertTrue(cloudStorageManager.addBlobStore(mockReplicaId));
  // add an already added replica to the store
  Assert.assertTrue(cloudStorageManager.addBlobStore(mockReplicaId));
  // try start for the added partition
  Assert.assertTrue(cloudStorageManager.startBlobStore(partitionId));
  // try remove for an added partition
  Assert.assertTrue(cloudStorageManager.removeBlobStore(partitionId));
}
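The assertions pin down a simple contract: startBlobStore and removeBlobStore fail for a partition that was never added, while addBlobStore is idempotent (re-adding an existing replica still returns true). A short usage sketch built on the same test fixtures, illustrative only:

CloudStorageManager manager = createNewCloudStorageManager();
ReplicaId replica = clusterMap.getReplicaIds(clusterMap.getDataNodeIds().get(0)).get(0);
// A partition's store is usable only between addBlobStore and removeBlobStore.
if (manager.addBlobStore(replica)) {
  manager.startBlobStore(replica.getPartitionId());
  // ... exercise the store ...
  manager.removeBlobStore(replica.getPartitionId());
}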
Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
The class CosmosDataAccessorTest, method setup.
@Before
public void setup() {
  mockumentClient = mock(AsyncDocumentClient.class);
  byte dataCenterId = 66;
  short accountId = 101;
  short containerId = 5;
  PartitionId partitionId = new MockPartitionId();
  blobId = new BlobId(BLOB_ID_V6, BlobIdType.NATIVE, dataCenterId, accountId, containerId, partitionId, false, BlobDataType.DATACHUNK);
  blobMetadata = new CloudBlobMetadata(blobId, System.currentTimeMillis(), Utils.Infinite_Time, blobSize, CloudBlobMetadata.EncryptionOrigin.NONE);
  azureMetrics = new AzureMetrics(new MetricRegistry());
  VcrMetrics vcrMetrics = new VcrMetrics(new MetricRegistry());
  cosmosAccessor = new CosmosDataAccessor(mockumentClient, "ambry/metadata", "ambry/deletedContainer", vcrMetrics, azureMetrics);
}
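Tests built on this fixture stub the rx-based AsyncDocumentClient before exercising cosmosAccessor. A minimal Mockito sketch for readDocument, whose canned response and stubbing style are illustrative rather than copied from the real tests:

private void stubReadDocument(ResourceResponse<Document> cannedResponse) {
  // readDocument returns an rx Observable; wrap the canned response in one.
  when(mockumentClient.readDocument(anyString(), any(RequestOptions.class)))
      .thenReturn(Observable.just(cannedResponse));
}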
Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
The class FrontendIntegrationTest, method getReplicasTest.
/**
 * Tests {@link RestUtils.SubResource#Replicas} requests.
 * <p/>
 * For each {@link PartitionId} in the {@link ClusterMap}, a {@link BlobId} is created. The replica list returned from
 * the server is checked for equality against a locally obtained replica list.
 * @throws Exception on error
 */
@Test
public void getReplicasTest() throws Exception {
  List<? extends PartitionId> partitionIds = CLUSTER_MAP.getWritablePartitionIds(null);
  for (PartitionId partitionId : partitionIds) {
    String originalReplicaStr = partitionId.getReplicaIds().toString().replace(", ", ",");
    BlobId blobId = new BlobId(CommonTestUtils.getCurrentBlobIdVersion(), BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, Account.UNKNOWN_ACCOUNT_ID, Container.UNKNOWN_CONTAINER_ID, partitionId, false, BlobId.BlobDataType.DATACHUNK);
    FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, blobId.getID() + "/" + RestUtils.SubResource.Replicas, Unpooled.buffer(0));
    ResponseParts responseParts = nettyClient.sendRequest(httpRequest, null, null).get();
    HttpResponse response = getHttpResponse(responseParts);
    assertEquals("Unexpected response status", HttpResponseStatus.OK, response.status());
    verifyTrackingHeaders(response);
    ByteBuffer content = getContent(responseParts.queue, HttpUtil.getContentLength(response));
    JSONObject responseJson = new JSONObject(new String(content.array()));
    String returnedReplicasStr = responseJson.get(GetReplicasHandler.REPLICAS_KEY).toString().replace("\"", "");
    assertEquals("Replica IDs returned for the BlobId do not match the replica IDs of the partition", originalReplicaStr, returnedReplicasStr);
  }
}
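The handler publishes the replica list as JSON under GetReplicasHandler.REPLICAS_KEY; the test compares string forms, but a caller could read the array directly. A sketch reusing the content buffer from the loop above, assuming the value is a JSON array of replica strings (which the quote-stripping comparison implies):

JSONObject json = new JSONObject(new String(content.array()));
JSONArray replicas = json.getJSONArray(GetReplicasHandler.REPLICAS_KEY);
for (int i = 0; i < replicas.length(); i++) {
  System.out.println(replicas.getString(i));  // one replica descriptor per element
}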
Use of com.github.ambry.clustermap.PartitionId in project ambry by linkedin.
The class BlobIdTest, method getRandomBlobId.
/**
 * Constructs a {@link BlobId} with random fields and the given version.
 * @param version The version of {@link BlobId} to build.
 * @return A {@link BlobId} with random fields and the given version.
 */
private BlobId getRandomBlobId(short version) {
  byte[] bytes = new byte[2];
  random.nextBytes(bytes);
  byte datacenterId = bytes[0];
  short accountId = getRandomShort(random);
  short containerId = getRandomShort(random);
  BlobIdType type = random.nextBoolean() ? BlobIdType.NATIVE : BlobIdType.CRAFTED;
  PartitionId partitionId = referenceClusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(random.nextInt(3));
  boolean isEncrypted = random.nextBoolean();
  BlobDataType dataType = BlobDataType.values()[random.nextInt(BlobDataType.values().length)];
  return new BlobId(version, type, datacenterId, accountId, containerId, partitionId, isEncrypted, dataType);
}
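A typical use of this helper is a serialization round trip: build a random ID, re-parse its string form against the same cluster map, and compare. An illustrative sketch, not a test taken from BlobIdTest:

BlobId original = getRandomBlobId(BLOB_ID_V6);
// Deserialize from the string form using the same cluster map, then compare IDs.
BlobId roundTripped = new BlobId(original.getID(), referenceClusterMap);
assertEquals("Round trip should preserve the serialized ID", original.getID(), roundTripped.getID());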