Use of com.linkedin.d2.balancer.properties.ClusterProperties in the rest.li project by LinkedIn.
From the class TestD2Config, method testSingleClusterHashPartitions.
// Preliminary test for a hash-partitioned cluster: publishes the cluster config
// through D2ConfigTestUtil, then verifies that the "modulo" hash accessor maps a
// numeric key to partitionId = key % partitionCount.
@Test
public static void testSingleClusterHashPartitions() throws Exception {
  // One cluster with two services; partitioning is configured at the cluster level.
  final Map<String, List<String>> clustersData = new HashMap<String, List<String>>();
  clustersData.put("partitioned-cluster",
      Arrays.asList("partitioned-service-1", "partitioned-service-2"));

  // Hash-based partitioning: the key is extracted with partitionKeyRegex, then
  // mapped with the "modulo" algorithm over partitionCount (10) partitions.
  final Map<String, Object> hashBased = new HashMap<String, Object>();
  hashBased.put("partitionKeyRegex", "\\bid\\b=(\\d+)");
  hashBased.put("partitionCount", "10");
  hashBased.put("hashAlgorithm", "modulo");
  hashBased.put("partitionType", "HASH");

  final Map<String, Object> partitionProperties = new HashMap<String, Object>();
  partitionProperties.put("partitionProperties", hashBased);

  // Push the config to ZooKeeper and confirm discovery succeeded (exit code 0).
  D2ConfigTestUtil d2Conf = new D2ConfigTestUtil(clustersData, partitionProperties);
  assertEquals(d2Conf.runDiscovery(_zkHosts), 0);
  verifyPartitionProperties("partitioned-cluster", partitionProperties);

  final ClusterProperties clusterprops = getClusterProperties(_zkclient, "partitioned-cluster");
  final PartitionAccessor accessor =
      PartitionAccessorFactory.getPartitionAccessor(clusterprops.getPartitionProperties());

  // With the modulo algorithm, partitionId == key % 10.
  assertEquals(0, accessor.getPartitionId("0"));
  assertEquals(9, accessor.getPartitionId("99"));
  assertEquals(6, accessor.getPartitionId("176"));
  assertEquals(3, accessor.getPartitionId("833"));
}
Use of com.linkedin.d2.balancer.properties.ClusterProperties in the rest.li project by LinkedIn.
From the class TestD2Config, method testSingleClusterRangePartitions.
// Preliminary test for a range-partitioned cluster: verifies key-to-partition
// mapping for string and URI keys, rejection of out-of-range or unparseable
// keys, and the per-partition weights published by the echo servers.
@Test
public static void testSingleClusterRangePartitions() throws Exception {
  // One cluster with two services; partitioning is configured at the cluster level.
  final Map<String, List<String>> clustersData = new HashMap<String, List<String>>();
  clustersData.put("partitioned-cluster",
      Arrays.asList("partitioned-service-1", "partitioned-service-2"));

  // Range-based partitioning: partition i covers
  // [keyRangeStart + i * partitionSize, keyRangeStart + (i + 1) * partitionSize),
  // i.e. keys [0, 1000) split into 10 partitions of 100 keys each.
  final Map<String, Object> rangeBased = new HashMap<String, Object>();
  rangeBased.put("partitionKeyRegex", "\\bid\\b=(\\d+)");
  rangeBased.put("keyRangeStart", "0");
  rangeBased.put("partitionCount", "10");
  rangeBased.put("partitionSize", "100");
  rangeBased.put("partitionType", "RANGE");

  final Map<String, Object> partitionProperties = new HashMap<String, Object>();
  partitionProperties.put("partitionProperties", rangeBased);

  // Push the config to ZooKeeper and confirm discovery succeeded (exit code 0).
  D2ConfigTestUtil d2Conf = new D2ConfigTestUtil(clustersData, partitionProperties);
  assertEquals(d2Conf.runDiscovery(_zkHosts), 0);
  verifyPartitionProperties("partitioned-cluster", partitionProperties);

  final ClusterProperties clusterprops = getClusterProperties(_zkclient, "partitioned-cluster");
  final PartitionAccessor accessor =
      PartitionAccessorFactory.getPartitionAccessor(clusterprops.getPartitionProperties());

  // Keys outside [0, 1000) must be rejected.
  try {
    accessor.getPartitionId("-1");
    fail("Exception expected");
  } catch (PartitionAccessException expected) {
    // expected: key below keyRangeStart
  }
  try {
    accessor.getPartitionId("1000");
    fail("Exception expected");
  } catch (PartitionAccessException expected) {
    // expected: key beyond the last partition's range
  }

  // For in-range keys, partitionId == key / partitionSize.
  assertEquals(0, accessor.getPartitionId("0"));
  assertEquals(0, accessor.getPartitionId("99"));
  assertEquals(1, accessor.getPartitionId("176"));
  assertEquals(8, accessor.getPartitionId("833"));

  // URI-based access: the key is extracted from the query string via
  // partitionKeyRegex ("id=<digits>").
  final String legalUri1 = "/profiles?field=position&id=100";
  final String legalUri2 = "/profiles?wid=99&id=176&randid=301";
  // No "id" parameter present at all.
  final String illegalUri1 = "/profiles?wid=99";
  // "id" present but far too large for the configured key range.
  final String illegalUri2 = "/profiles?id=1000000000000000000000000000000000000000000000111111111";
  try {
    accessor.getPartitionId(URI.create(illegalUri1));
    fail("Exception expected");
  } catch (PartitionAccessException expected) {
    // expected: the regex does not match, so no key can be extracted
  }
  try {
    accessor.getPartitionId(URI.create(illegalUri2));
    fail("Exception expected");
  } catch (PartitionAccessException expected) {
    // expected: the extracted key overflows the supported range
  }
  assertEquals(1, accessor.getPartitionId(URI.create(legalUri1)));
  assertEquals(1, accessor.getPartitionId(URI.create(legalUri2)));

  // Start Echo server on cluster-1 with explicit per-partition weights, then
  // verify the published URI properties reflect exactly those weights.
  Map<Integer, Double> serverConfig1 = new HashMap<Integer, Double>();
  serverConfig1.put(0, 0.5d);
  serverConfig1.put(3, 0.5d);
  Map<Integer, Double> serverConfig2 = new HashMap<Integer, Double>();
  serverConfig2.put(0, 0.25d);
  serverConfig2.put(1, 0.5d);
  serverConfig2.put(2, 0.5d);
  final int echoServerPort1 = 2346;
  final int echoServerPort2 = 2347;
  _echoServerList.add(startEchoServer(echoServerPort1, "partitioned-cluster", serverConfig1));
  _echoServerList.add(startEchoServer(echoServerPort2, "partitioned-cluster", serverConfig2));

  Map<URI, Map<Integer, Double>> partitionWeights = new HashMap<URI, Map<Integer, Double>>();
  partitionWeights.put(URI.create("http://127.0.0.1:" + echoServerPort1 + "/partitioned-cluster"), serverConfig1);
  partitionWeights.put(URI.create("http://127.0.0.1:" + echoServerPort2 + "/partitioned-cluster"), serverConfig2);
  verifyPartitionedUriProperties("partitioned-cluster", partitionWeights);
}
Use of com.linkedin.d2.balancer.properties.ClusterProperties in the rest.li project by LinkedIn.
From the class ZKFSTest, method testKeyMapper.
// Verifies that KeyMapper.mapKeysV2 spreads a key set across all hosts of a
// cluster registered in ZooKeeper: with two URIs in the default partition,
// every mapping round must yield exactly two batches drawn from the input keys.
@Test
public void testKeyMapper() throws Exception {
final String TEST_SERVICE_NAME = "test-service";
final String TEST_CLUSTER_NAME = "test-cluster";
final URI TEST_SERVER_URI1 = URI.create("http://test-host-1/");
final URI TEST_SERVER_URI2 = URI.create("http://test-host-2/");
final int NUM_ITERATIONS = 5;
startServer();
try {
// Start the load balancer and block until startup completes (30 s timeout).
ZKFSLoadBalancer balancer = getBalancer();
FutureCallback<None> callback = new FutureCallback<None>();
balancer.start(callback);
callback.get(30, TimeUnit.SECONDS);
ZKConnection conn = balancer.zkConnection();
// Register the service -> cluster mapping in the permanent service store.
ZooKeeperPermanentStore<ServiceProperties> serviceStore = new ZooKeeperPermanentStore<ServiceProperties>(conn, new ServicePropertiesJsonSerializer(), ZKFSUtil.servicePath(BASE_PATH));
ServiceProperties props = new ServiceProperties(TEST_SERVICE_NAME, TEST_CLUSTER_NAME, "/test", Arrays.asList("degrader"), Collections.<String, Object>emptyMap(), null, null, Arrays.asList("http"), null);
serviceStore.put(TEST_SERVICE_NAME, props);
// Register the cluster itself in the permanent cluster store.
ClusterProperties clusterProperties = new ClusterProperties(TEST_CLUSTER_NAME);
ZooKeeperPermanentStore<ClusterProperties> clusterStore = new ZooKeeperPermanentStore<ClusterProperties>(conn, new ClusterPropertiesJsonSerializer(), ZKFSUtil.clusterPath(BASE_PATH));
clusterStore.put(TEST_CLUSTER_NAME, clusterProperties);
// Publish both server URIs with weight 1.0 in the default partition; note the
// same partitionData map instance is intentionally shared by both URIs.
ZooKeeperEphemeralStore<UriProperties> uriStore = new ZooKeeperEphemeralStore<UriProperties>(conn, new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), ZKFSUtil.uriPath(BASE_PATH), false, true);
Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<URI, Map<Integer, PartitionData>>();
Map<Integer, PartitionData> partitionData = new HashMap<Integer, PartitionData>(1);
partitionData.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1.0d));
uriData.put(TEST_SERVER_URI1, partitionData);
uriData.put(TEST_SERVER_URI2, partitionData);
UriProperties uriProps = new UriProperties(TEST_CLUSTER_NAME, uriData);
// The callback variable is reused here for the uri store's startup.
callback = new FutureCallback<None>();
uriStore.start(callback);
callback.get(30, TimeUnit.SECONDS);
uriStore.put(TEST_CLUSTER_NAME, uriProps);
// Build the key set 0..99 to be mapped across the two hosts.
Set<Integer> keys = new HashSet<Integer>();
for (int ii = 0; ii < 100; ++ii) {
keys.add(ii);
}
// Repeat the mapping several times to guard against nondeterministic results.
for (int ii = 0; ii < NUM_ITERATIONS; ++ii) {
KeyMapper mapper = balancer.getKeyMapper();
MapKeyResult<URI, Integer> batches = mapper.mapKeysV2(URI.create("d2://" + TEST_SERVICE_NAME), keys);
// Both hosts must receive a batch, and each batch contains only input keys.
Assert.assertEquals(batches.getMapResult().size(), 2);
for (Map.Entry<URI, Collection<Integer>> oneBatch : batches.getMapResult().entrySet()) {
Assert.assertTrue(oneBatch.getKey().toString().startsWith("http://test-host-"));
Assert.assertTrue(keys.containsAll(oneBatch.getValue()));
}
}
} finally {
// Always shut the embedded ZooKeeper server down, even on assertion failure.
stopServer();
}
}
Use of com.linkedin.d2.balancer.properties.ClusterProperties in the rest.li project by LinkedIn.
From the class ZKFSTest, method testClusterDirectory.
// Checks that Directory.getClusterNames reports exactly the clusters stored in
// ZooKeeper: after writing a single cluster, the directory must list only it.
@Test
public void testClusterDirectory() throws Exception {
  final String TEST_CLUSTER_NAME = "testingService";
  startServer();
  try {
    // Start the balancer and block until startup completes.
    ZKFSLoadBalancer loadBalancer = getBalancer();
    FutureCallback<None> startCallback = new FutureCallback<None>();
    loadBalancer.start(startCallback);
    startCallback.get(30, TimeUnit.SECONDS);
    // Open a dedicated ZK connection and write one cluster into the permanent store.
    ZKConnection zkConn = new ZKConnection("localhost:" + PORT, 30000);
    zkConn.start();
    ZooKeeperPermanentStore<ClusterProperties> clusterStore = new ZooKeeperPermanentStore<ClusterProperties>(zkConn, new ClusterPropertiesJsonSerializer(), ZKFSUtil.clusterPath(BASE_PATH));
    FutureCallback<None> storeStartCallback = new FutureCallback<None>();
    clusterStore.start(storeStartCallback);
    storeStartCallback.get(30, TimeUnit.SECONDS);
    clusterStore.put(TEST_CLUSTER_NAME, new ClusterProperties(TEST_CLUSTER_NAME));
    // The directory must report exactly the one cluster just written.
    Directory directory = loadBalancer.getDirectory();
    FutureCallback<List<String>> namesCallback = new FutureCallback<List<String>>();
    directory.getClusterNames(namesCallback);
    Assert.assertEquals(namesCallback.get(30, TimeUnit.SECONDS), Collections.singletonList(TEST_CLUSTER_NAME));
  } finally {
    // Always stop the embedded server, even if an assertion fails.
    stopServer();
  }
}
Use of com.linkedin.d2.balancer.properties.ClusterProperties in the rest.li project by LinkedIn.
From the class TestD2Config, method verifyClusterProperties.
/**
 * Asserts that the cluster stored in ZooKeeper under {@code cluster} carries the
 * default configuration used by these tests: a matching cluster name, the single
 * prioritized scheme "http", a requestTimeout property of 10000 ms, and no
 * banned URIs.
 *
 * @param cluster name of the cluster node to read from ZooKeeper
 */
public static void verifyClusterProperties(String cluster) throws IOException, URISyntaxException, PropertyStoreException {
  ClusterProperties clusterprops = getClusterProperties(_zkclient, cluster);
  assertEquals(clusterprops.getClusterName(), cluster);
  // Varargs form of Arrays.asList instead of an explicit String[] wrapper.
  assertEquals(clusterprops.getPrioritizedSchemes(), Arrays.asList("http"));
  assertEquals(clusterprops.getProperties().get("requestTimeout"), String.valueOf(10000));
  // No hosts banned: the banned set must compare equal to an empty set.
  assertEquals(clusterprops.getBanned(), new TreeSet<URI>());
}
Aggregations