Use of com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties in project rest.li by linkedin.
The class PartitionPropertiesConverter, method toProperties.
public static PartitionProperties toProperties(D2ClusterPartitionConfiguration config) {
  final PartitionProperties partitionProperties;
  switch (config.getType()) {
    case RANGE:
    {
      rangedPartitionProperties rangedPartitionProperties =
          config.getPartitionTypeSpecificData().getRangedPartitionProperties();
      partitionProperties =
          new RangeBasedPartitionProperties(config.getPartitionKeyRegex(),
                                            rangedPartitionProperties.getKeyRangeStart(),
                                            rangedPartitionProperties.getPartitionSize(),
                                            config.getPartitionCount());
      break;
    }
    case HASH:
      HashBasedPartitionProperties.HashAlgorithm algorithm;
      switch (config.getPartitionTypeSpecificData().getHashAlgorithm()) {
        case MODULO:
          algorithm = HashBasedPartitionProperties.HashAlgorithm.MODULO;
          break;
        case MD5:
          algorithm = HashBasedPartitionProperties.HashAlgorithm.MD5;
          break;
        default:
          throw new IllegalArgumentException("Unsupported hash algorithm: " + config.getPartitionTypeSpecificData().getHashAlgorithm());
      }
      partitionProperties =
          new HashBasedPartitionProperties(config.getPartitionKeyRegex(), config.getPartitionCount(), algorithm);
      break;
    case NONE:
      partitionProperties = NullPartitionProperties.getInstance();
      break;
    default:
      throw new IllegalArgumentException("Unsupported partitionType: " + config.getType());
  }
  return partitionProperties;
}
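A short usage sketch, not taken from the repository: given a D2ClusterPartitionConfiguration already populated with the RANGE type (rangeConfig below is a hypothetical argument), the converter yields a RangeBasedPartitionProperties whose accessors mirror the values read above.

private static void inspectRangeSettings(D2ClusterPartitionConfiguration rangeConfig) {
  // Illustrative only; rangeConfig is assumed to be a pre-populated RANGE configuration.
  PartitionProperties props = PartitionPropertiesConverter.toProperties(rangeConfig);
  if (props instanceof RangeBasedPartitionProperties) {
    RangeBasedPartitionProperties range = (RangeBasedPartitionProperties) props;
    // Accessor names taken from their use elsewhere on this page (toConfig, TestD2Config).
    long keyRangeStart = range.getKeyRangeStart();
    long partitionSize = range.getPartitionSize();
    int partitionCount = range.getPartitionCount();
    String keyRegex = range.getPartitionKeyRegex();
  }
}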
Use of com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties in project rest.li by linkedin.
The class PartitionPropertiesConverter, method toConfig.
public static D2ClusterPartitionConfiguration toConfig(PartitionProperties property) {
  final D2ClusterPartitionConfiguration config;
  final D2ClusterPartitionConfiguration.PartitionTypeSpecificData specificData;
  switch (property.getPartitionType()) {
    case RANGE:
      RangeBasedPartitionProperties range = (RangeBasedPartitionProperties) property;
      config = new D2ClusterPartitionConfiguration();
      config.setType(PartitionTypeEnum.RANGE);
      config.setPartitionKeyRegex(range.getPartitionKeyRegex());
      config.setPartitionCount(range.getPartitionCount());
      specificData = new D2ClusterPartitionConfiguration.PartitionTypeSpecificData();
      rangedPartitionProperties rangedPartitionProperties = new rangedPartitionProperties();
      rangedPartitionProperties.setKeyRangeStart(range.getKeyRangeStart());
      rangedPartitionProperties.setPartitionSize(range.getPartitionSize());
      specificData.setRangedPartitionProperties(rangedPartitionProperties);
      config.setPartitionTypeSpecificData(specificData);
      break;
    case HASH:
      HashBasedPartitionProperties hash = (HashBasedPartitionProperties) property;
      config = new D2ClusterPartitionConfiguration();
      config.setType(PartitionTypeEnum.HASH);
      config.setPartitionKeyRegex(hash.getPartitionKeyRegex());
      config.setPartitionCount(hash.getPartitionCount());
      specificData = new D2ClusterPartitionConfiguration.PartitionTypeSpecificData();
      specificData.setHashAlgorithm(hashAlgorithm.valueOf(hash.getHashAlgorithm().name()));
      config.setPartitionTypeSpecificData(specificData);
      break;
    case NONE:
      config = new D2ClusterPartitionConfiguration();
      config.setType(PartitionTypeEnum.NONE);
      break;
    default:
      throw new IllegalArgumentException("Unsupported partitionType: " + property.getPartitionType());
  }
  return config;
}
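A minimal round-trip sketch, not from the repository, tying the two converter methods together; the constructor argument order (partitionKeyRegex, keyRangeStart, partitionSize, partitionCount) is taken from the RANGE branch of toProperties above.

// Sketch: properties -> config -> properties should preserve the RANGE settings.
RangeBasedPartitionProperties original = new RangeBasedPartitionProperties("id=(\\d+)", 0, 100, 2);
D2ClusterPartitionConfiguration asConfig = PartitionPropertiesConverter.toConfig(original);
RangeBasedPartitionProperties roundTripped =
    (RangeBasedPartitionProperties) PartitionPropertiesConverter.toProperties(asConfig);
// Expected to hold if both converters agree on the RANGE fields.
assert roundTripped.getKeyRangeStart() == original.getKeyRangeStart();
assert roundTripped.getPartitionSize() == original.getPartitionSize();
assert roundTripped.getPartitionCount() == original.getPartitionCount();
assert roundTripped.getPartitionKeyRegex().equals(original.getPartitionKeyRegex());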
Use of com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties in project rest.li by linkedin.
The class SimpleLoadBalancerStateTest, method testRegister.
@Test(groups = { "small", "back-end" })
public void testRegister() {
  reset();
  TestListener listener = new TestListener();
  List<String> schemes = new ArrayList<String>();
  schemes.add("http");
  _state.register(listener);
  assertNull(listener.scheme);
  assertNull(listener.strategy);
  assertNull(listener.serviceName);
  // trigger a strategy add
  // first add a cluster
  _state.listenToCluster("cluster-1", new NullStateListenerCallback());
  _clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
  // then add a service
  _state.listenToService("service-1", new NullStateListenerCallback());
  _serviceRegistry.put("service-1", new ServiceProperties("service-1", "cluster-1", "/test", Arrays.asList("random"), Collections.<String, Object>emptyMap(), null, null, schemes, null));
  // this should trigger a refresh
  assertEquals(listener.scheme, "http");
  assertTrue(listener.strategy instanceof RandomLoadBalancerStrategy);
  assertEquals(listener.serviceName, "service-1");
  // then update the cluster
  _clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
  // this triggered a second refresh, but also an onStrategyRemoved. The onStrategyRemoved should
  // be done first, and then the onStrategyAdd, so we should still see a valid strategy.
  assertEquals(listener.scheme, "http");
  assertTrue(listener.strategy instanceof RandomLoadBalancerStrategy);
  assertEquals(listener.serviceName, "service-1");
  _state.listenToCluster("partition-cluster-1", new NullStateListenerCallback());
  _clusterRegistry.put("partition-cluster-1", new ClusterProperties("partition-cluster-1", null, new HashMap<String, String>(), new HashSet<URI>(), new RangeBasedPartitionProperties("id=(\\d+)", 0, 100, 2)));
  _state.listenToService("partition-service-1", new NullStateListenerCallback());
  _serviceRegistry.put("partition-service-1", new ServiceProperties("partition-service-1", "partition-cluster-1", "/partition-test", Arrays.asList("degraderV3"), Collections.<String, Object>emptyMap(), null, null, schemes, null));
  assertEquals(listener.scheme, "http");
  assertTrue(listener.strategy instanceof DegraderLoadBalancerStrategyV3);
}
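For context on the partition settings used in the test above: RangeBasedPartitionProperties("id=(\\d+)", 0, 100, 2) is assumed to mean that a numeric key extracted by the regex maps to partition (key - keyRangeStart) / partitionSize, which is the usual D2 range interpretation.

// Illustrative only; argument order as used in the test:
// (partitionKeyRegex, keyRangeStart, partitionSize, partitionCount).
RangeBasedPartitionProperties range = new RangeBasedPartitionProperties("id=(\\d+)", 0, 100, 2);
// Under the assumed mapping (key - keyRangeStart) / partitionSize:
//   keys 0..99    -> partition 0
//   keys 100..199 -> partition 1
//   keys outside [0, 200) fall outside the configured partitions.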
Use of com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties in project rest.li by linkedin.
The class TestD2Config, method verifyPartitionProperties.
public static void verifyPartitionProperties(String cluster, Map<String, Object> propertiesMap) throws IOException, URISyntaxException, PropertyStoreException {
  final ClusterProperties clusterprops = getClusterProperties(_zkclient, cluster);
  if (propertiesMap.get("partitionProperties") != null) {
    @SuppressWarnings("unchecked")
    Map<String, Object> properties = (Map<String, Object>) propertiesMap.get("partitionProperties");
    PartitionProperties.PartitionType partitionType = PartitionProperties.PartitionType.valueOf((String) properties.get("partitionType"));
    switch (partitionType) {
      case RANGE:
      {
        long keyRangeStart = ((Number) properties.get("keyRangeStart")).longValue();
        long partitionSize = ((Number) properties.get("partitionSize")).longValue();
        int partitionCount = ((Number) properties.get("partitionCount")).intValue();
        String regex = (String) properties.get("partitionKeyRegex");
        RangeBasedPartitionProperties rbp = (RangeBasedPartitionProperties) clusterprops.getPartitionProperties();
        assertEquals(keyRangeStart, rbp.getKeyRangeStart());
        assertEquals(partitionSize, rbp.getPartitionSize());
        assertEquals(partitionCount, rbp.getPartitionCount());
        assertEquals(regex, rbp.getPartitionKeyRegex());
      }
      break;
      case HASH:
      {
        int partitionCount = ((Number) properties.get("partitionCount")).intValue();
        String regex = (String) properties.get("partitionKeyRegex");
        String algorithm = (String) properties.get("hashAlgorithm");
        HashBasedPartitionProperties.HashAlgorithm hashAlgorithm = HashBasedPartitionProperties.HashAlgorithm.valueOf(algorithm.toUpperCase());
        HashBasedPartitionProperties hbp = (HashBasedPartitionProperties) clusterprops.getPartitionProperties();
        assertEquals(partitionCount, hbp.getPartitionCount());
        assertEquals(regex, hbp.getPartitionKeyRegex());
        assertEquals(hashAlgorithm, hbp.getHashAlgorithm());
      }
      break;
      default:
        break;
    }
  }
}
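A hypothetical propertiesMap that would exercise the RANGE branch of the helper above; the key names are exactly those the method reads, while the concrete values are illustrative only.

// Sketch: building a propertiesMap for the RANGE case.
Map<String, Object> rangeProps = new HashMap<String, Object>();
rangeProps.put("partitionType", "RANGE");
rangeProps.put("keyRangeStart", 0L);
rangeProps.put("partitionSize", 100L);
rangeProps.put("partitionCount", 2);
rangeProps.put("partitionKeyRegex", "id=(\\d+)");
Map<String, Object> propertiesMap = new HashMap<String, Object>();
propertiesMap.put("partitionProperties", rangeProps);
// verifyPartitionProperties("partition-cluster-1", propertiesMap) would then compare these values
// against the RangeBasedPartitionProperties stored for that cluster.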
Use of com.linkedin.d2.balancer.properties.RangeBasedPartitionProperties in project rest.li by linkedin.
The class SimpleLoadBalancerStateTest, method testClientsShutdownAfterPropertyUpdatesStreamRequest.
@Test(groups = { "small", "back-end" })
public void testClientsShutdownAfterPropertyUpdatesStreamRequest() throws URISyntaxException, InterruptedException {
  reset();
  URI uri = URI.create("http://cluster-1/test");
  List<String> schemes = new ArrayList<String>();
  Map<Integer, PartitionData> partitionData = new HashMap<Integer, PartitionData>(1);
  partitionData.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
  Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<URI, Map<Integer, PartitionData>>();
  uriData.put(uri, partitionData);
  schemes.add("http");
  // set up state
  _state.listenToService("service-1", new NullStateListenerCallback());
  _state.listenToCluster("cluster-1", new NullStateListenerCallback());
  _state.setDelayedExecution(0);
  _serviceRegistry.put("service-1", new ServiceProperties("service-1", "cluster-1", "/test", Arrays.asList("random"), Collections.<String, Object>emptyMap(), Collections.<String, Object>emptyMap(), Collections.<String, String>emptyMap(), schemes, Collections.<URI>emptySet()));
  _clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
  _uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));
  URI uri1 = URI.create("http://partition-cluster-1/test1");
  URI uri2 = URI.create("http://partition-cluster-1/test2");
  _state.listenToCluster("partition-cluster-1", new NullStateListenerCallback());
  _clusterRegistry.put("partition-cluster-1", new ClusterProperties("partition-cluster-1", null, new HashMap<String, String>(), new HashSet<URI>(), new RangeBasedPartitionProperties("id=(\\d+)", 0, 100, 2)));
  _state.listenToService("partition-service-1", new NullStateListenerCallback());
  _serviceRegistry.put("partition-service-1", new ServiceProperties("partition-service-1", "partition-cluster-1", "/partition-test", Arrays.asList("degraderV3"), Collections.<String, Object>emptyMap(), Collections.<String, Object>emptyMap(), Collections.<String, String>emptyMap(), schemes, Collections.<URI>emptySet()));
  Map<Integer, PartitionData> partitionWeight = new HashMap<Integer, PartitionData>();
  partitionWeight.put(0, new PartitionData(1d));
  partitionWeight.put(1, new PartitionData(2d));
  Map<URI, Map<Integer, PartitionData>> partitionDesc = new HashMap<URI, Map<Integer, PartitionData>>();
  partitionDesc.put(uri1, partitionWeight);
  partitionWeight.remove(0);
  partitionWeight.put(2, new PartitionData(1d));
  partitionDesc.put(uri2, partitionWeight);
  _uriRegistry.put("partition-cluster-1", new UriProperties("partition-cluster-1", partitionDesc));
  TrackerClient client1 = _state.getClient("partition-service-1", uri1);
  TrackerClient client2 = _state.getClient("partition-service-1", uri2);
  assertEquals(client2.getPartitionWeight(1), 2d);
  assertEquals(client2.getPartitionWeight(2), 1d);
  assertEquals(client1.getPartitionWeight(1), 2d);
  // Get client, then refresh cluster
  TrackerClient client = _state.getClient("service-1", uri);
  client.streamRequest(new StreamRequestBuilder(URI.create("d2://service-1/foo")).build(EntityStreams.emptyStream()), new RequestContext(), Collections.<String, String>emptyMap(), new TransportCallbackAdapter<StreamResponse>(Callbacks.<StreamResponse>empty()));
  // now force a refresh by adding cluster
  _clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
  // Get client, then refresh service
  client = _state.getClient("service-1", uri);
  client.streamRequest(new StreamRequestBuilder(URI.create("d2://service-1/foo")).build(EntityStreams.emptyStream()), new RequestContext(), Collections.<String, String>emptyMap(), new TransportCallbackAdapter<StreamResponse>(Callbacks.<StreamResponse>empty()));
  // refresh by adding service
  _serviceRegistry.put("service-1", new ServiceProperties("service-1", "cluster-1", "/test", Arrays.asList("random"), Collections.<String, Object>emptyMap(), null, null, schemes, null));
  // Get client, then mark server up/down
  client = _state.getClient("service-1", uri);
  client.streamRequest(new StreamRequestBuilder(URI.create("d2://service-1/foo")).build(EntityStreams.emptyStream()), new RequestContext(), Collections.<String, String>emptyMap(), new TransportCallbackAdapter<StreamResponse>(Callbacks.<StreamResponse>empty()));
  _uriRegistry.put("cluster-1", new UriProperties("cluster-1", Collections.<URI, Map<Integer, PartitionData>>emptyMap()));
  _uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));
  // Get the client one last time
  client = _state.getClient("service-1", uri);
  client.streamRequest(new StreamRequestBuilder(URI.create("d2://service-1/foo")).build(EntityStreams.emptyStream()), new RequestContext(), Collections.<String, String>emptyMap(), new TransportCallbackAdapter<StreamResponse>(Callbacks.<StreamResponse>empty()));
  TestShutdownCallback callback = new TestShutdownCallback();
  _state.shutdown(callback);
  assertTrue(callback.await(10, TimeUnit.SECONDS), "Failed to shut down state");
  for (TransportClientFactory factory : _clientFactories.values()) {
    SimpleLoadBalancerTest.DoNothingClientFactory f = (SimpleLoadBalancerTest.DoNothingClientFactory) factory;
    assertEquals(f.getRunningClientCount(), 0, "not all clients were shut down");
  }
}
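Note on the partition-weight assertions in this test: partitionDesc registers uri1 and uri2 against the same partitionWeight map, and that map is mutated after uri1 is added, so by the time UriProperties is constructed both URIs share the weights {1 -> 2.0, 2 -> 1.0}. That shared state is why client1.getPartitionWeight(1) is also expected to be 2.0.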