Use of com.github.ambry.config.VerifiableProperties in project ambry by linkedin.
From the class AmbryUrlSigningServiceTest, method factoryTest.
/**
 * Tests for {@link AmbryUrlSigningServiceFactory}.
 */
@Test
public void factoryTest() {
  Properties properties = new Properties();
  properties.setProperty("frontend.url.signer.upload.endpoint", UPLOAD_ENDPOINT);
  properties.setProperty("frontend.url.signer.download.endpoint", DOWNLOAD_ENDPOINT);
  properties.setProperty("frontend.url.signer.default.url.ttl.secs", Long.toString(DEFAULT_URL_TTL_SECS));
  properties.setProperty("frontend.url.signer.default.max.upload.size.bytes", Long.toString(DEFAULT_MAX_UPLOAD_SIZE));
  properties.setProperty("frontend.url.signer.max.url.ttl.secs", Long.toString(MAX_URL_TTL_SECS));
  UrlSigningService signer =
      new AmbryUrlSigningServiceFactory(new VerifiableProperties(properties), new MetricRegistry())
          .getUrlSigningService();
  assertNotNull("UrlSigningService is null", signer);
  assertTrue("UrlSigningService is AmbryUrlSigningService", signer instanceof AmbryUrlSigningService);
}
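The factory above needs only a VerifiableProperties and a MetricRegistry. As a rough sketch of the same wiring outside a test (an assumption, not code from the Ambry repository; the endpoint and TTL values below are placeholders, not Ambry defaults), a signer could be built like this:

// A minimal sketch, assuming the same constructor calls as the test above; all property values
// here are placeholders.
private static UrlSigningService buildSigner() {
  Properties properties = new Properties();
  properties.setProperty("frontend.url.signer.upload.endpoint", "http://uploads.example.com");
  properties.setProperty("frontend.url.signer.download.endpoint", "http://downloads.example.com");
  properties.setProperty("frontend.url.signer.default.url.ttl.secs", "300");
  properties.setProperty("frontend.url.signer.default.max.upload.size.bytes", "4194304");
  properties.setProperty("frontend.url.signer.max.url.ttl.secs", "3600");
  return new AmbryUrlSigningServiceFactory(new VerifiableProperties(properties), new MetricRegistry())
      .getUrlSigningService();
}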
Use of com.github.ambry.config.VerifiableProperties in project ambry by linkedin.
From the class FrontendIntegrationTest, method buildFrontendVProps.
// BeforeClass helpers
/**
 * Builds properties required to start a {@link RestServer} as an Ambry frontend server.
 * @param trustStoreFile the trust store file to add certificates to for SSL testing.
 * @return a {@link VerifiableProperties} with the parameters for an Ambry frontend server.
 */
private static VerifiableProperties buildFrontendVProps(File trustStoreFile)
    throws IOException, GeneralSecurityException {
  Properties properties = new Properties();
  properties.put("rest.server.blob.storage.service.factory",
      "com.github.ambry.frontend.AmbryBlobStorageServiceFactory");
  properties.put("rest.server.router.factory", "com.github.ambry.router.InMemoryRouterFactory");
  properties.put("netty.server.port", Integer.toString(PLAINTEXT_SERVER_PORT));
  properties.put("netty.server.ssl.port", Integer.toString(SSL_SERVER_PORT));
  properties.put("netty.server.enable.ssl", "true");
  // to test that backpressure does not impede correct operation.
  properties.put("netty.server.request.buffer.watermark", "1");
  // to test that multipart requests over a certain size fail
  properties.put("netty.multipart.post.max.size.bytes", Long.toString(MAX_MULTIPART_POST_SIZE_BYTES));
  TestSSLUtils.addSSLProperties(properties, "", SSLFactory.Mode.SERVER, trustStoreFile, "frontend");
  properties.put("frontend.account.service.factory", "com.github.ambry.account.InMemAccountServiceFactory");
  // add key for singleKeyManagementService
  properties.put("kms.default.container.key", TestUtils.getRandomKey(32));
  return new VerifiableProperties(properties);
}
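For comparison, a plaintext-only variant of the same helper might look like the sketch below. This is an assumption, not part of FrontendIntegrationTest; it presumes SSL is disabled when the SSL-related properties are omitted, and it reuses the constants and property names from the method above.

// A minimal sketch: the same pattern without the SSL-specific settings. PLAINTEXT_SERVER_PORT and
// the factory class names are taken from buildFrontendVProps above; SSL defaults are assumed off.
private static VerifiableProperties buildPlaintextFrontendVProps() {
  Properties properties = new Properties();
  properties.put("rest.server.blob.storage.service.factory",
      "com.github.ambry.frontend.AmbryBlobStorageServiceFactory");
  properties.put("rest.server.router.factory", "com.github.ambry.router.InMemoryRouterFactory");
  properties.put("netty.server.port", Integer.toString(PLAINTEXT_SERVER_PORT));
  properties.put("frontend.account.service.factory", "com.github.ambry.account.InMemAccountServiceFactory");
  properties.put("kms.default.container.key", TestUtils.getRandomKey(32));
  return new VerifiableProperties(properties);
}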
Use of com.github.ambry.config.VerifiableProperties in project ambry by linkedin.
From the class HelixBootstrapUpgradeUtil, method verifyDataNodeAndDiskEquivalencyInDc.
/**
 * Verify that the hardware layout information is in sync - which includes the node and disk information. Also verify
 * that the replicas belonging to disks are in sync between the static cluster map and Helix.
 * @param dc the datacenter whose information is to be verified.
 * @param clusterName the cluster to be verified.
 * @param partitionLayout the {@link PartitionLayout} of the static clustermap.
 */
private void verifyDataNodeAndDiskEquivalencyInDc(Datacenter dc, String clusterName, PartitionLayout partitionLayout)
    throws Exception {
  // The following properties are immaterial for the tool, but the ClusterMapConfig mandates their presence.
  Properties props = new Properties();
  props.setProperty("clustermap.host.name", "localhost");
  props.setProperty("clustermap.cluster.name", clusterName);
  props.setProperty("clustermap.datacenter.name", dc.getName());
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
  StaticClusterManager staticClusterMap =
      (new StaticClusterAgentsFactory(clusterMapConfig, partitionLayout)).getClusterMap();
  HelixAdmin admin = adminForDc.get(dc.getName());
  List<String> allInstancesInHelix = admin.getInstancesInCluster(clusterName);
  for (DataNodeId dataNodeId : dc.getDataNodes()) {
    Map<String, Map<String, String>> mountPathToReplicas = getMountPathToReplicas(staticClusterMap, dataNodeId);
    DataNode dataNode = (DataNode) dataNodeId;
    String instanceName = getInstanceName(dataNode);
    ensureOrThrow(allInstancesInHelix.remove(instanceName), "Instance not present in Helix " + instanceName);
    InstanceConfig instanceConfig = admin.getInstanceConfig(clusterName, instanceName);
    Map<String, Map<String, String>> diskInfos = new HashMap<>(instanceConfig.getRecord().getMapFields());
    for (Disk disk : dataNode.getDisks()) {
      Map<String, String> diskInfoInHelix = diskInfos.remove(disk.getMountPath());
      ensureOrThrow(diskInfoInHelix != null,
          "Disk not present for instance " + instanceName + " disk " + disk.getMountPath());
      ensureOrThrow(
          disk.getRawCapacityInBytes() == Long.valueOf(diskInfoInHelix.get(ClusterMapUtils.DISK_CAPACITY_STR)),
          "Capacity mismatch for instance " + instanceName + " disk " + disk.getMountPath());
      Set<String> replicasInClusterMap;
      Map<String, String> replicaList = mountPathToReplicas.get(disk.getMountPath());
      replicasInClusterMap = new HashSet<>();
      if (replicaList != null) {
        replicasInClusterMap.addAll(replicaList.keySet());
      }
      Set<String> replicasInHelix;
      String replicasStr = diskInfoInHelix.get(ClusterMapUtils.REPLICAS_STR);
      if (replicasStr.isEmpty()) {
        replicasInHelix = new HashSet<>();
      } else {
        replicasInHelix = new HashSet<>();
        List<String> replicaInfoList = Arrays.asList(replicasStr.split(ClusterMapUtils.REPLICAS_DELIM_STR));
        for (String replicaInfo : replicaInfoList) {
          String[] info = replicaInfo.split(ClusterMapUtils.REPLICAS_STR_SEPARATOR);
          replicasInHelix.add(info[0]);
          ensureOrThrow(info[1].equals(replicaList.get(info[0])), "Replica capacity should be the same.");
        }
      }
      ensureOrThrow(replicasInClusterMap.equals(replicasInHelix),
          "Replica information not consistent for instance " + instanceName + " disk " + disk.getMountPath()
              + "\n in Helix: " + replicaList + "\n in static clustermap: " + replicasInClusterMap);
    }
    ensureOrThrow(diskInfos.isEmpty(), "Instance " + instanceName + " has extra disks in Helix: " + diskInfos);
    ensureOrThrow(!dataNode.hasSSLPort() || (dataNode.getSSLPort() == Integer.valueOf(
            instanceConfig.getRecord().getSimpleField(ClusterMapUtils.SSLPORT_STR))),
        "SSL Port mismatch for instance " + instanceName);
    ensureOrThrow(
        dataNode.getDatacenterName().equals(instanceConfig.getRecord().getSimpleField(ClusterMapUtils.DATACENTER_STR)),
        "Datacenter mismatch for instance " + instanceName);
    ensureOrThrow(
        dataNode.getRackId() == Long.valueOf(instanceConfig.getRecord().getSimpleField(ClusterMapUtils.RACKID_STR)),
        "Rack Id mismatch for instance " + instanceName);
    Set<String> sealedReplicasInHelix =
        new HashSet<>(instanceConfig.getRecord().getListField(ClusterMapUtils.SEALED_STR));
    Set<String> sealedReplicasInClusterMap = new HashSet<>();
    for (Replica replica : staticClusterMap.getReplicas(dataNodeId)) {
      if (replica.getPartition().partitionState.equals(PartitionState.READ_ONLY)) {
        sealedReplicasInClusterMap.add(Long.toString(replica.getPartition().getId()));
      }
    }
    ensureOrThrow(sealedReplicasInClusterMap.equals(sealedReplicasInHelix),
        "Sealed replicas info mismatch for instance " + instanceName);
  }
  ensureOrThrow(allInstancesInHelix.isEmpty(),
      "Following instances in Helix not found in the clustermap " + allInstancesInHelix);
}
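The checks above all go through an ensureOrThrow helper that is not shown in this excerpt. A plausible minimal form (an assumption; the actual helper in HelixBootstrapUpgradeUtil may differ, for example by using a dedicated exception type) is:

// Hypothetical reconstruction of the helper used above: fail the verification with the supplied
// message when the condition does not hold.
private void ensureOrThrow(boolean condition, String errStr) throws Exception {
  if (!condition) {
    throw new Exception(errStr);
  }
}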
Use of com.github.ambry.config.VerifiableProperties in project ambry by linkedin.
From the class DataNodeTest, method testSoftState.
@Test
public void testSoftState() throws JSONException, InterruptedException {
  JSONObject jsonObject =
      TestUtils.getJsonDataNode(TestUtils.getLocalHost(), 6666, 7666, HardwareState.AVAILABLE, getDisks());
  Properties props = new Properties();
  props.setProperty("clustermap.fixedtimeout.datanode.retry.backoff.ms", Integer.toString(2000));
  props.setProperty("clustermap.cluster.name", "test");
  props.setProperty("clustermap.datacenter.name", "dc1");
  props.setProperty("clustermap.host.name", "localhost");
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
  int threshold = clusterMapConfig.clusterMapFixedTimeoutDatanodeErrorThreshold;
  long retryBackoffMs = clusterMapConfig.clusterMapFixedTimeoutDataNodeRetryBackoffMs;
  DataNode dataNode = new TestDataNode("datacenter", jsonObject, clusterMapConfig);
  for (int i = 0; i < threshold; i++) {
    ensure(dataNode, HardwareState.AVAILABLE);
    dataNode.onNodeTimeout();
  }
  // After threshold number of continuous errors, the resource should be unavailable
  ensure(dataNode, HardwareState.UNAVAILABLE);
  Thread.sleep(retryBackoffMs + 1);
  // If retryBackoffMs has passed, the resource should be available.
  ensure(dataNode, HardwareState.AVAILABLE);
  // A single timeout should make the node unavailable now
  dataNode.onNodeTimeout();
  ensure(dataNode, HardwareState.UNAVAILABLE);
  // A single response should make the node available now
  dataNode.onNodeResponse();
  ensure(dataNode, HardwareState.AVAILABLE);
}
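The test exercises a counting-and-backoff policy: the node stays available until threshold consecutive timeouts occur, then becomes unavailable for retryBackoffMs, and a single successful response makes it available again. A self-contained sketch of that behavior (a hypothetical illustration only, not Ambry's actual state policy class) is:

// Hypothetical illustration of the behavior asserted above: consecutive timeouts up to a
// threshold mark the resource down until a retry backoff elapses; a successful response resets
// the error count.
class SoftStateSketch {
  private final int errorThreshold;
  private final long retryBackoffMs;
  private int consecutiveErrors = 0;
  private long downUntilMs = 0;

  SoftStateSketch(int errorThreshold, long retryBackoffMs) {
    this.errorThreshold = errorThreshold;
    this.retryBackoffMs = retryBackoffMs;
  }

  void onTimeout() {
    if (++consecutiveErrors >= errorThreshold) {
      downUntilMs = System.currentTimeMillis() + retryBackoffMs;
    }
  }

  void onResponse() {
    consecutiveErrors = 0;
    downUntilMs = 0;
  }

  boolean isAvailable() {
    return consecutiveErrors < errorThreshold || System.currentTimeMillis() > downUntilMs;
  }
}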
Use of com.github.ambry.config.VerifiableProperties in project ambry by linkedin.
From the class DataNodeTest, method validateGetPort.
/**
 * Validate {@link DataNodeId#getPortToConnectTo()} returns port type corresponding to the
 * SSL enabled datacenter list specified in {@link ClusterMapConfig}.
 * @throws Exception
 */
@Test
public void validateGetPort() throws Exception {
  ClusterMapConfig clusterMapConfig;
  Properties props = new Properties();
  props.setProperty("clustermap.ssl.enabled.datacenters", "datacenter1,datacenter2,datacenter3");
  props.setProperty("clustermap.cluster.name", "test");
  props.setProperty("clustermap.datacenter.name", "dc1");
  props.setProperty("clustermap.host.name", "localhost");
  clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
  System.out.println(clusterMapConfig.clusterMapSslEnabledDatacenters);
  JSONObject jsonObject =
      TestUtils.getJsonDataNode(TestUtils.getLocalHost(), 6666, 7666, HardwareState.AVAILABLE, getDisks());
  DataNode dataNode = new TestDataNode("datacenter2", jsonObject, clusterMapConfig);
  assertEquals("The datacenter of the data node is in the ssl enabled datacenter list. SSL port should be returned",
      PortType.SSL, dataNode.getPortToConnectTo().getPortType());
  dataNode = new TestDataNode("datacenter5", jsonObject, clusterMapConfig);
  assertEquals(
      "The datacenter of the data node is not in the ssl enabled datacenter list. Plaintext port should be returned",
      PortType.PLAINTEXT, dataNode.getPortToConnectTo().getPortType());
  jsonObject.remove("sslport");
  dataNode = new TestDataNode("datacenter1", jsonObject, clusterMapConfig);
  try {
    dataNode.getPortToConnectTo();
    fail("Should have thrown Exception because there is no sslPort.");
  } catch (IllegalStateException e) {
    // The datacenter of the data node is in the ssl enabled datacenter list, but the data node does not have an ssl
    // port to connect. Exception should be thrown.
  }
}
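The rule the test verifies can be summarized in a short sketch (hypothetical, not DataNode's actual implementation): return the SSL port only when the node's datacenter appears in clustermap.ssl.enabled.datacenters, fall back to plaintext otherwise, and fail when SSL is required but no SSL port was configured.

// Hypothetical sketch of the selection rule asserted above; the Port type and the
// IllegalStateException mirror what the test observes, not the real DataNode code.
Port portToConnectTo(boolean datacenterIsSslEnabled, Port plainTextPort, Port sslPort) {
  if (!datacenterIsSslEnabled) {
    return plainTextPort;
  }
  if (sslPort == null) {
    throw new IllegalStateException("No SSL port exists for this data node");
  }
  return sslPort;
}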