use of com.github.ambry.clustermap.Partition in project ambry by linkedin.
the class CloudBlobStoreIntegrationTest method setup.
@Before
public void setup() throws ReflectiveOperationException {
  Properties testProperties = new Properties();
  try (InputStream input = this.getClass().getClassLoader().getResourceAsStream(PROPS_FILE_NAME)) {
    if (input == null) {
      throw new IllegalStateException("Could not find resource: " + PROPS_FILE_NAME);
    }
    testProperties.load(input);
  } catch (IOException ex) {
    throw new IllegalStateException("Could not load properties from resource: " + PROPS_FILE_NAME);
  }
  testProperties.setProperty("clustermap.cluster.name", "Integration-Test");
  testProperties.setProperty("clustermap.datacenter.name", "uswest");
  testProperties.setProperty("clustermap.host.name", "localhost");
  testProperties.setProperty("kms.default.container.key",
      "B374A26A71490437AA024E4FADD5B497FDFF1A8EA6FF12F6FB65AF2720B59CCF");
  testProperties.setProperty(CloudConfig.CLOUD_DELETED_BLOB_RETENTION_DAYS, String.valueOf(1));
  testProperties.setProperty(AzureCloudConfig.AZURE_PURGE_BATCH_SIZE, "10");
  testProperties.setProperty(CloudConfig.CLOUD_IS_VCR, "" + isVcr);
  verifiableProperties = new VerifiableProperties(testProperties);
  azureCloudConfig = new AzureCloudConfig(verifiableProperties);
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(verifiableProperties);
  CloudConfig cloudConfig = new CloudConfig(verifiableProperties);
  // The Partition under test: id 666, default partition class, read-write, 100 GB replica capacity.
  partitionId = new Partition(666, clusterMapConfig.clusterMapDefaultPartitionClass, PartitionState.READ_WRITE,
      100 * 1024 * 1024 * 1024L);
  ClusterMap clusterMap = new MockClusterMap(false, Collections.singletonList(
      new MockDataNodeId(Collections.singletonList(new Port(6666, PortType.PLAINTEXT)),
          Collections.singletonList("test"), "AzureTest")), 1, Collections.singletonList(partitionId), "AzureTest");
  MetricRegistry registry = new MetricRegistry();
  vcrMetrics = new VcrMetrics(registry);
  azureMetrics = new AzureMetrics(registry);
  CloudDestinationFactory cloudDestinationFactory =
      Utils.getObj(cloudConfig.cloudDestinationFactoryClass, verifiableProperties, registry, clusterMap);
  cloudDestination = cloudDestinationFactory.getCloudDestination();
  cloudBlobStore = new CloudBlobStore(verifiableProperties, partitionId, cloudDestination, clusterMap, vcrMetrics);
  cloudBlobStore.start();
}
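For readers scanning this page for the Partition API itself, the key call above is the Partition constructor: a numeric partition id, a partition class name, a PartitionState, and a replica capacity in bytes. Below is a minimal sketch isolating that call, assuming the constructor is accessible from your package as it is in this test; the id, the class name string (standing in for clusterMapConfig.clusterMapDefaultPartitionClass), and the 100 GB capacity are illustrative values mirrored from the snippet.

import com.github.ambry.clustermap.Partition;
import com.github.ambry.clustermap.PartitionState;

public class PartitionConstructionSketch {
  public static void main(String[] args) {
    // Illustrative values mirrored from the test above.
    long id = 666L;
    String partitionClass = "default-partition-class"; // placeholder for the cluster map's default partition class
    long replicaCapacityBytes = 100L * 1024 * 1024 * 1024; // 100 GB
    Partition partition = new Partition(id, partitionClass, PartitionState.READ_WRITE, replicaCapacityBytes);
    System.out.println(partition);
  }
}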
use of com.github.ambry.clustermap.Partition in project ambry by linkedin.
the class FrontendQuotaIntegrationTest method verifyPostRejectsAndReturnRandomBlobId.
/**
 * Verifies that a POST was rejected (429 Too Many Requests) and returns a placeholder blob ID, since the rejected
 * request supplies none.
 * @param responseParts the response received from the server.
 * @param contentSize the size of the content that was posted.
 * @return the blob ID of the blob.
 */
String verifyPostRejectsAndReturnRandomBlobId(NettyClient.ResponseParts responseParts, long contentSize) {
  HttpResponse response = getHttpResponse(responseParts);
  assertEquals("Unexpected response status", HttpResponseStatus.TOO_MANY_REQUESTS, response.status());
  assertTrue("No Date header", response.headers().getTimeMillis(HttpHeaderNames.DATE, -1) != -1);
  assertNull("No " + RestUtils.Headers.CREATION_TIME, response.headers().get(RestUtils.Headers.CREATION_TIME, null));
  assertEquals("Content-Length is not 0", 0, HttpUtil.getContentLength(response));
  String blobId = response.headers().get(HttpHeaderNames.LOCATION, null);
  assertNull("Blob ID from POST should be null", blobId);
  assertNoContent(responseParts.queue, 1);
  assertFalse("Channel should be inactive", HttpUtil.isKeepAlive(response));
  assertEquals("No blob size should be returned in response", null,
      response.headers().get(RestUtils.Headers.BLOB_SIZE));
  verifyTrackingHeaders(response);
  verifyUserQuotaHeaders(response);
  return new BlobId(BlobId.BLOB_ID_V6, BlobId.BlobIdType.NATIVE, (byte) 0, ACCOUNT.getId(), CONTAINER.getId(),
      new Partition(0L, DEFAULT_PARTITION_CLASS, PartitionState.READ_WRITE, 1073741824), false,
      BlobId.BlobDataType.SIMPLE).getID();
}
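In this second example the Partition only supplies a valid PartitionId when encoding a BlobId: the rejected POST carries no Location header, so the test fabricates an ID of its own. A minimal sketch of that pattern follows, assuming BlobId from com.github.ambry.commons and using hypothetical account and container IDs in place of the test's ACCOUNT and CONTAINER constants.

import com.github.ambry.clustermap.Partition;
import com.github.ambry.clustermap.PartitionState;
import com.github.ambry.commons.BlobId;

public class BlobIdFromPartitionSketch {
  public static void main(String[] args) {
    // A placeholder Partition that only serves as the PartitionId embedded in the blob ID.
    Partition partition = new Partition(0L, "default-partition-class", PartitionState.READ_WRITE, 1073741824L);
    // Hypothetical IDs; the test uses ACCOUNT.getId() and CONTAINER.getId().
    short accountId = (short) 101;
    short containerId = (short) 5;
    String blobId = new BlobId(BlobId.BLOB_ID_V6, BlobId.BlobIdType.NATIVE, (byte) 0, accountId, containerId,
        partition, false, BlobId.BlobDataType.SIMPLE).getID();
    System.out.println(blobId);
  }
}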