Use of com.linkedin.d2.balancer.properties.PartitionProperties in project rest.li by LinkedIn.
The class TestD2Config, method verifyPartitionProperties.
public static void verifyPartitionProperties(String cluster, Map<String, Object> propertiesMap)
    throws IOException, URISyntaxException, PropertyStoreException {
  final ClusterProperties clusterprops = getClusterProperties(_zkclient, cluster);
  if (propertiesMap.get("partitionProperties") != null) {
    @SuppressWarnings("unchecked")
    Map<String, Object> properties = (Map<String, Object>) propertiesMap.get("partitionProperties");
    PartitionProperties.PartitionType partitionType =
        PartitionProperties.PartitionType.valueOf((String) properties.get("partitionType"));
    switch (partitionType) {
      case RANGE: {
        // Compare the expected range-based settings with what was published for the cluster.
        long keyRangeStart = ((Number) properties.get("keyRangeStart")).longValue();
        long partitionSize = ((Number) properties.get("partitionSize")).longValue();
        int partitionCount = ((Number) properties.get("partitionCount")).intValue();
        String regex = (String) properties.get("partitionKeyRegex");
        RangeBasedPartitionProperties rbp = (RangeBasedPartitionProperties) clusterprops.getPartitionProperties();
        assertEquals(keyRangeStart, rbp.getKeyRangeStart());
        assertEquals(partitionSize, rbp.getPartitionSize());
        assertEquals(partitionCount, rbp.getPartitionCount());
        assertEquals(regex, rbp.getPartitionKeyRegex());
        break;
      }
      case HASH: {
        // Compare the expected hash-based settings with what was published for the cluster.
        int partitionCount = ((Number) properties.get("partitionCount")).intValue();
        String regex = (String) properties.get("partitionKeyRegex");
        String algorithm = (String) properties.get("hashAlgorithm");
        HashBasedPartitionProperties.HashAlgorithm hashAlgorithm =
            HashBasedPartitionProperties.HashAlgorithm.valueOf(algorithm.toUpperCase());
        HashBasedPartitionProperties hbp = (HashBasedPartitionProperties) clusterprops.getPartitionProperties();
        assertEquals(partitionCount, hbp.getPartitionCount());
        assertEquals(regex, hbp.getPartitionKeyRegex());
        assertEquals(hashAlgorithm, hbp.getHashAlgorithm());
        break;
      }
      default:
        break;
    }
  }
}
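For reference, a minimal, hypothetical propertiesMap that the method above would accept for the RANGE case might look like the sketch below. The key names mirror the lookups in the method; note that because of the Number casts, the range fields are stored here as numeric values. This is an illustration only, not code from the rest.li tests.

  // Hypothetical example input for verifyPartitionProperties (not from the rest.li sources).
  Map<String, Object> rangeProps = new HashMap<String, Object>();
  rangeProps.put("partitionType", "RANGE");
  rangeProps.put("keyRangeStart", 0L);
  rangeProps.put("partitionSize", 100L);
  rangeProps.put("partitionCount", 10);
  rangeProps.put("partitionKeyRegex", "\\bid\\b=(\\d+)");

  Map<String, Object> propertiesMap = new HashMap<String, Object>();
  propertiesMap.put("partitionProperties", rangeProps);
  verifyPartitionProperties("partitioned-cluster", propertiesMap);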
Use of com.linkedin.d2.balancer.properties.PartitionProperties in project rest.li by LinkedIn.
The class TestD2Config, method testSingleClusterHashPartitions.
// preliminary test for partitioning cluster
@Test
public static void testSingleClusterHashPartitions() throws IOException, InterruptedException, URISyntaxException, Exception {
  @SuppressWarnings("serial")
  final Map<String, List<String>> clustersData = new HashMap<String, List<String>>() {
    {
      put("partitioned-cluster", Arrays.asList(new String[] { "partitioned-service-1", "partitioned-service-2" }));
    }
  };
  final Map<String, Object> partitionProperties = new HashMap<String, Object>();
  Map<String, Object> hashBased = new HashMap<String, Object>();
  hashBased.put("partitionKeyRegex", "\\bid\\b=(\\d+)");
  hashBased.put("partitionCount", "10");
  hashBased.put("hashAlgorithm", "modulo");
  hashBased.put("partitionType", "HASH");
  partitionProperties.put("partitionProperties", hashBased);
  D2ConfigTestUtil d2Conf = new D2ConfigTestUtil(clustersData, partitionProperties);
  assertEquals(d2Conf.runDiscovery(_zkHosts), 0);
  verifyPartitionProperties("partitioned-cluster", partitionProperties);
  final ClusterProperties clusterprops = getClusterProperties(_zkclient, "partitioned-cluster");
  final PartitionAccessor accessor = PartitionAccessorFactory.getPartitionAccessor(clusterprops.getPartitionProperties());
  assertEquals(0, accessor.getPartitionId(0 + ""));
  assertEquals(9, accessor.getPartitionId(99 + ""));
  assertEquals(6, accessor.getPartitionId(176 + ""));
  assertEquals(3, accessor.getPartitionId(833 + ""));
}
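The last four assertions encode the modulo mapping: with partitionCount = 10, a numeric key lands in partition key % 10 (99 -> 9, 176 -> 6, 833 -> 3). The sketch below reproduces only that arithmetic; it is an illustration of what the assertions rely on, not the d2 PartitionAccessor implementation.

  // Minimal sketch (assumption): the "modulo" hash algorithm reduces a numeric key
  // to key % partitionCount, which is what the assertions above encode.
  public final class ModuloPartitionSketch {
    static int partitionId(long key, int partitionCount) {
      return (int) (key % partitionCount);
    }

    public static void main(String[] args) {
      int partitionCount = 10;
      System.out.println(partitionId(0, partitionCount));    // 0
      System.out.println(partitionId(99, partitionCount));   // 9
      System.out.println(partitionId(176, partitionCount));  // 6
      System.out.println(partitionId(833, partitionCount));  // 3
    }
  }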
Use of com.linkedin.d2.balancer.properties.PartitionProperties in project rest.li by LinkedIn.
The class TestD2Config, method testSingleClusterRangePartitions.
// preliminary test for partitioning cluster
@Test
public static void testSingleClusterRangePartitions() throws IOException, InterruptedException, URISyntaxException, Exception {
  @SuppressWarnings("serial")
  final Map<String, List<String>> clustersData = new HashMap<String, List<String>>() {
    {
      put("partitioned-cluster", Arrays.asList(new String[] { "partitioned-service-1", "partitioned-service-2" }));
    }
  };
  final Map<String, Object> partitionProperties = new HashMap<String, Object>();
  Map<String, Object> rangeBased = new HashMap<String, Object>();
  rangeBased.put("partitionKeyRegex", "\\bid\\b=(\\d+)");
  rangeBased.put("keyRangeStart", "0");
  rangeBased.put("partitionCount", "10");
  rangeBased.put("partitionSize", "100");
  rangeBased.put("partitionType", "RANGE");
  partitionProperties.put("partitionProperties", rangeBased);
  D2ConfigTestUtil d2Conf = new D2ConfigTestUtil(clustersData, partitionProperties);
  assertEquals(d2Conf.runDiscovery(_zkHosts), 0);
  verifyPartitionProperties("partitioned-cluster", partitionProperties);
  final ClusterProperties clusterprops = getClusterProperties(_zkclient, "partitioned-cluster");
  final PartitionAccessor accessor = PartitionAccessorFactory.getPartitionAccessor(clusterprops.getPartitionProperties());
  // Keys outside the configured range must be rejected.
  try {
    accessor.getPartitionId(-1 + "");
    fail("Exception expected");
  } catch (PartitionAccessException e) {
    // expected
  }
  try {
    accessor.getPartitionId(1000 + "");
    fail("Exception expected");
  } catch (PartitionAccessException e) {
    // expected
  }
  assertEquals(0, accessor.getPartitionId(0 + ""));
  assertEquals(0, accessor.getPartitionId(99 + ""));
  assertEquals(1, accessor.getPartitionId(176 + ""));
  assertEquals(8, accessor.getPartitionId(833 + ""));
  final String legalUri1 = "/profiles?field=position&id=100";
  final String legalUri2 = "/profiles?wid=99&id=176&randid=301";
  final String illegalUri1 = "/profiles?wid=99";
  final String illegalUri2 = "/profiles?id=1000000000000000000000000000000000000000000000111111111";
  // URIs whose key cannot be extracted (no id) or parsed as a numeric key must be rejected.
  try {
    accessor.getPartitionId(URI.create(illegalUri1));
    fail("Exception expected");
  } catch (PartitionAccessException e) {
    // expected
  }
  try {
    accessor.getPartitionId(URI.create(illegalUri2));
    fail("Exception expected");
  } catch (PartitionAccessException e) {
    // expected
  }
  assertEquals(1, accessor.getPartitionId(URI.create(legalUri1)));
  assertEquals(1, accessor.getPartitionId(URI.create(legalUri2)));
  // Start echo servers for the partitioned cluster with per-partition weights.
  Map<Integer, Double> serverConfig1 = new HashMap<Integer, Double>();
  serverConfig1.put(0, 0.5d);
  serverConfig1.put(3, 0.5d);
  Map<Integer, Double> serverConfig2 = new HashMap<Integer, Double>();
  serverConfig2.put(0, 0.25d);
  serverConfig2.put(1, 0.5d);
  serverConfig2.put(2, 0.5d);
  final int echoServerPort1 = 2346;
  final int echoServerPort2 = 2347;
  _echoServerList.add(startEchoServer(echoServerPort1, "partitioned-cluster", serverConfig1));
  _echoServerList.add(startEchoServer(echoServerPort2, "partitioned-cluster", serverConfig2));
  Map<URI, Map<Integer, Double>> partitionWeights = new HashMap<URI, Map<Integer, Double>>();
  partitionWeights.put(URI.create("http://127.0.0.1:" + echoServerPort1 + "/partitioned-cluster"), serverConfig1);
  partitionWeights.put(URI.create("http://127.0.0.1:" + echoServerPort2 + "/partitioned-cluster"), serverConfig2);
  verifyPartitionedUriProperties("partitioned-cluster", partitionWeights);
}
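The expected partition IDs follow from the range configuration above (keyRangeStart = 0, partitionSize = 100, partitionCount = 10): a key maps to (key - keyRangeStart) / partitionSize and is only valid inside [0, 1000), which is why -1 and 1000 are rejected while 99 -> 0, 176 -> 1 and 833 -> 8. The following is a minimal sketch of that arithmetic only, under those assumptions; it is not the d2 PartitionAccessor itself.

  // Sketch (assumption): range partitioning assigns a key to
  // (key - keyRangeStart) / partitionSize and rejects keys outside
  // [keyRangeStart, keyRangeStart + partitionSize * partitionCount).
  public final class RangePartitionSketch {
    static int partitionId(long key, long keyRangeStart, long partitionSize, int partitionCount) {
      long upperBound = keyRangeStart + partitionSize * partitionCount;   // exclusive
      if (key < keyRangeStart || key >= upperBound) {
        throw new IllegalArgumentException("key " + key + " is outside [" + keyRangeStart + ", " + upperBound + ")");
      }
      return (int) ((key - keyRangeStart) / partitionSize);
    }

    public static void main(String[] args) {
      // keyRangeStart = 0, partitionSize = 100, partitionCount = 10, as in the test above.
      System.out.println(partitionId(99, 0, 100, 10));   // 0
      System.out.println(partitionId(176, 0, 100, 10));  // 1
      System.out.println(partitionId(833, 0, 100, 10));  // 8
      // partitionId(-1, 0, 100, 10) and partitionId(1000, 0, 100, 10) would throw,
      // matching the PartitionAccessExceptions the test expects.
    }
  }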
Use of com.linkedin.d2.balancer.properties.PartitionProperties in project rest.li by LinkedIn.
The class PartitionPropertiesConverterTest, method testHashMD5PartitionProperties.
@Test
public void testHashMD5PartitionProperties() {
  final String partitionKeyRegex = "/foo/bar/(\\d+)";
  final int partitionCount = 8;
  final HashBasedPartitionProperties.HashAlgorithm hashAlgorithm = HashBasedPartitionProperties.HashAlgorithm.MD5;
  PartitionProperties partitionProperties =
      new HashBasedPartitionProperties(partitionKeyRegex, partitionCount, hashAlgorithm);
  D2ClusterPartitionConfiguration.PartitionTypeSpecificData data =
      new D2ClusterPartitionConfiguration.PartitionTypeSpecificData();
  data.setHashAlgorithm(com.linkedin.d2.hashAlgorithm.MD5);
  D2ClusterPartitionConfiguration partitionConfig = new D2ClusterPartitionConfiguration()
      .setType(PartitionTypeEnum.HASH)
      .setPartitionKeyRegex(partitionKeyRegex)
      .setPartitionCount(partitionCount)
      .setPartitionTypeSpecificData(data);
  Assert.assertEquals(PartitionPropertiesConverter.toProperties(partitionConfig), partitionProperties);
  Assert.assertEquals(PartitionPropertiesConverter.toConfig(partitionProperties), partitionConfig);
}
Use of com.linkedin.d2.balancer.properties.PartitionProperties in project rest.li by LinkedIn.
The class PartitionPropertiesConverterTest, method testRangePartitionProperties.
@Test
public void testRangePartitionProperties() {
  final String partitionKeyRegex = "/foo/bar/(\\d+)";
  final long keyRangeStart = 1;
  final long partitionSize = 1024;
  final int partitionCount = 32;
  PartitionProperties partitionProperties =
      new RangeBasedPartitionProperties(partitionKeyRegex, keyRangeStart, partitionSize, partitionCount);
  D2ClusterPartitionConfiguration.PartitionTypeSpecificData data =
      new D2ClusterPartitionConfiguration.PartitionTypeSpecificData();
  data.setRangedPartitionProperties(
      new rangedPartitionProperties().setKeyRangeStart(keyRangeStart).setPartitionSize(partitionSize));
  D2ClusterPartitionConfiguration partitionConfig = new D2ClusterPartitionConfiguration()
      .setType(PartitionTypeEnum.RANGE)
      .setPartitionKeyRegex(partitionKeyRegex)
      .setPartitionCount(partitionCount)
      .setPartitionTypeSpecificData(data);
  Assert.assertEquals(PartitionPropertiesConverter.toProperties(partitionConfig), partitionProperties);
  Assert.assertEquals(PartitionPropertiesConverter.toConfig(partitionProperties), partitionConfig);
}
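Both converter tests assert the same symmetric round trip between PartitionProperties and D2ClusterPartitionConfiguration. A hypothetical helper, not part of rest.li, that captures the shared pattern could look like this:

  // Hypothetical helper (illustration only): wraps the two assertions
  // that both converter tests above repeat.
  private static void assertRoundTrip(PartitionProperties properties, D2ClusterPartitionConfiguration config) {
    Assert.assertEquals(PartitionPropertiesConverter.toProperties(config), properties);
    Assert.assertEquals(PartitionPropertiesConverter.toConfig(properties), config);
  }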