Use of com.linkedin.d2.balancer.util.hashing.Ring in project rest.li by LinkedIn — class DegraderLoadBalancerTest, method testClusterRecoveryFast1TC.
@Test(groups = { "small", "back-end" })
public void testClusterRecoveryFast1TC() {
  Map<String, Object> strategyProperties = new HashMap<String, Object>();
  Long updateIntervalMs = 5000L;
  TestClock testClock = new TestClock();
  strategyProperties.put(PropertyKeys.CLOCK, testClock);
  // A recovery level of 0.01 puts a single point into the hash ring, which is
  // enough to route traffic to the client since it is the cluster's only member.
  // NOTE(review): raw string key here, unlike the PropertyKeys constants used for
  // the other entries — confirm it matches the key the config parser expects.
  strategyProperties.put("initialRecoverLevel", 0.01);
  strategyProperties.put(PropertyKeys.HTTP_LB_RING_RAMP_FACTOR, 2.0);
  strategyProperties.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_UPDATE_INTERVAL_MS, updateIntervalMs);
  int stepsToFullRecovery = 0;
  // First run the recovery scenario against strategy V3.
  DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(strategyProperties);
  DegraderLoadBalancerStrategyV3 strategyV3 = new DegraderLoadBalancerStrategyV3(config, "DegraderLoadBalancerTest", null);
  DegraderLoadBalancerStrategyAdapter strategy = new DegraderLoadBalancerStrategyAdapter(strategyV3);
  clusterRecovery1TC(strategyProperties, testClock, stepsToFullRecovery, updateIntervalMs, strategy, null, DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
  // Then run the identical scenario against strategy V2.
  config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(strategyProperties);
  DegraderLoadBalancerStrategyV2_1 strategyV2 = new DegraderLoadBalancerStrategyV2_1(config, "DegraderLoadBalancerTest", null);
  strategy = new DegraderLoadBalancerStrategyAdapter(strategyV2);
  clusterRecovery1TC(strategyProperties, testClock, stepsToFullRecovery, updateIntervalMs, strategy, DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState.Strategy.LOAD_BALANCE, null);
}
Use of com.linkedin.d2.balancer.util.hashing.Ring in project rest.li by LinkedIn — class DegraderLoadBalancerTest, method simulateAndTestOneInterval.
/**
 * Simulates calling all the clients with the given QPS over the given interval,
 * then verifies that the DegraderLoadBalancerState behaves as expected.
 *
 * @param timeInterval length of the simulated interval in milliseconds
 * @param clock test clock used to advance simulated time
 * @param qps queries per second to simulate against the clients
 * @param clients tracker clients to call; if null or empty, no verification is done
 * @param adapter adapter wrapping the strategy (V2 or V3) under test
 * @param clusterGenerationId cluster generation id passed to the strategy lookup
 * @param expectedPointsPerClient exact number of hash-ring points each client is
 *                                expected to have (checked with assertEquals)
 * @param isExpectingDropCallStrategyForNewState whether the new state's strategy is expected to be CALL_DROP
 * @param expectedClusterOverrideDropRate expected cluster-wide override drop rate after the interval
 * @param latency latency in milliseconds to report for each simulated call
 * @param isCalledWithError whether the simulated calls report errors
 * @param isCalledWithErrorForLoadBalancing whether the simulated calls report errors that count for load balancing
 * @return the tracker client chosen by the strategy, or null when {@code clients} is null or empty
 */
private TrackerClient simulateAndTestOneInterval(long timeInterval, TestClock clock, double qps, List<TrackerClient> clients, DegraderLoadBalancerStrategyAdapter adapter, long clusterGenerationId, Integer expectedPointsPerClient, boolean isExpectingDropCallStrategyForNewState, double expectedClusterOverrideDropRate, long latency, boolean isCalledWithError, boolean isCalledWithErrorForLoadBalancing) {
callClients(latency, qps, clients, clock, timeInterval, isCalledWithError, isCalledWithErrorForLoadBalancing);
//create any random URIRequest because we just need a URI to be hashed to get the point in hash ring anyway
if (clients != null && !clients.isEmpty()) {
URIRequest request = new URIRequest(clients.get(0).getUri());
// getTrackerClient triggers the state update for this interval before we inspect the ring
TrackerClient client = getTrackerClient(adapter, request, new RequestContext(), clusterGenerationId, clients);
Map<URI, Integer> pointsMap = adapter.getPointsMap();
// every client must have exactly the expected number of points in the ring
for (TrackerClient trackerClient : clients) {
Integer pointsInTheRing = pointsMap.get(trackerClient.getUri());
assertEquals(pointsInTheRing, expectedPointsPerClient);
}
if (isExpectingDropCallStrategyForNewState) {
assertTrue(adapter.isStrategyCallDrop());
} else {
assertFalse(adapter.isStrategyCallDrop());
}
assertEquals(adapter.getCurrentOverrideDropRate(), expectedClusterOverrideDropRate);
return client;
}
return null;
}
Use of com.linkedin.d2.balancer.util.hashing.Ring in project rest.li by LinkedIn — class DegraderRingFactoryTest, method testRandomChangePoints.
@Test(groups = { "small", "back-end" })
public void testRandomChangePoints() throws URISyntaxException {
  int pointNum = 5;
  int loopNum = 100;
  Map<String, Integer> pointsMap = buildPointsMap(pointNum);
  Map<String, Integer> maxPoints = new HashMap<>(pointNum);
  // Fixed seed: the original used an unseeded Random, which made this test
  // nondeterministic and any failure impossible to reproduce.
  Random random = new Random(0xD2L);
  // Every URI starts with 100 points (the factory's initial allocation).
  for (String uri : pointsMap.keySet()) {
    maxPoints.put(uri, 100);
  }
  PointBasedConsistentHashRingFactory<String> ringFactory = new PointBasedConsistentHashRingFactory<>(new DegraderLoadBalancerStrategyConfig(1L));
  Ring<String> ring = ringFactory.createRing(pointsMap);
  assertNotNull(ring.get(1000));
  for (int i = 0; i < loopNum; ++i) {
    // Randomly re-assign each URI's point count for this iteration.
    for (String uri : pointsMap.keySet()) {
      int newPoints = random.nextInt(200);
      if (newPoints == 0) {
        // A zero point count is skipped: the factory never shrinks to zero.
        continue;
      }
      pointsMap.put(uri, newPoints);
      if (newPoints > maxPoints.get(uri)) {
        // The factory grows a URI's point list in chunks of 4, so the expected
        // size is the high-water mark rounded up to the next multiple of 4.
        maxPoints.put(uri, ((newPoints + 3) / 4) * 4);
      }
    }
    ring = ringFactory.createRing(pointsMap);
    assertNotNull(ring.get(1000));
    // The factory must retain (not shrink) each URI's allocated point list.
    Map<String, List<Point<String>>> pointList = ringFactory.getPointsMap();
    for (String uri : pointsMap.keySet()) {
      assertEquals((int) maxPoints.get(uri), pointList.get(uri).size());
    }
  }
}
Use of com.linkedin.d2.balancer.util.hashing.Ring in project rest.li by LinkedIn — class ConsistentHashKeyMapperTest, method getConsistentHashKeyMapper.
/**
 * Builds a ConsistentHashKeyMapper backed by a SimpleLoadBalancer over six test hosts
 * spread across partitions 0, 1, 3 and 4, using the supplied ring factory.
 *
 * @param ringFactory factory used by the test strategy to build per-partition hash rings
 * @return a mapper wired to the partitioned test load balancer state
 * @throws URISyntaxException never in practice; the host URIs are constants
 */
private ConsistentHashKeyMapper getConsistentHashKeyMapper(RingFactory<URI> ringFactory) throws URISyntaxException {
  String serviceName = "articles";
  String clusterName = "cluster";
  String path = "path";
  String strategyName = "degrader";
  // setup partition: partition 0 -> {foo1, foo3}, 1 -> {foo4, foo5, foo6}, 3 -> {foo2}, 4 -> {foo2}
  Map<URI, Map<Integer, PartitionData>> partitionDescriptions = new HashMap<URI, Map<Integer, PartitionData>>();
  registerHost(partitionDescriptions, new URI("http://foo1.com"), 0);
  registerHost(partitionDescriptions, new URI("http://foo2.com"), 3, 4);
  registerHost(partitionDescriptions, new URI("http://foo3.com"), 0);
  registerHost(partitionDescriptions, new URI("http://foo4.com"), 1);
  registerHost(partitionDescriptions, new URI("http://foo5.com"), 1);
  registerHost(partitionDescriptions, new URI("http://foo6.com"), 1);
  // setup strategy which involves tweaking the hash ring to get partitionId -> URI host
  List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = new ArrayList<LoadBalancerState.SchemeStrategyPair>();
  LoadBalancerStrategy strategy = new TestLoadBalancerStrategy(partitionDescriptions, ringFactory);
  orderedStrategies.add(new LoadBalancerState.SchemeStrategyPair("http", strategy));
  // setup the partition accessor which is used to get partitionId -> keys
  PartitionAccessor accessor = new TestPartitionAccessor();
  URI serviceURI = new URI("d2://" + serviceName);
  SimpleLoadBalancer balancer = new SimpleLoadBalancer(new PartitionedLoadBalancerTestState(clusterName, serviceName, path, strategyName, partitionDescriptions, orderedStrategies, accessor));
  return new ConsistentHashKeyMapper(balancer, balancer);
}

/**
 * Registers {@code host} in {@code partitionDescriptions} with weight 1.0 in each
 * of the given partition ids. Extracted from six copy-pasted stanzas.
 */
private static void registerHost(Map<URI, Map<Integer, PartitionData>> partitionDescriptions, URI host, int... partitionIds) {
  Map<Integer, PartitionData> data = new HashMap<Integer, PartitionData>();
  for (int partitionId : partitionIds) {
    data.put(partitionId, new PartitionData(1.0));
  }
  partitionDescriptions.put(host, data);
}
Use of com.linkedin.d2.balancer.util.hashing.Ring in project rest.li by LinkedIn — class TestScatterGather, method getKeyToHostMapper.
/**
 * Builds a ConsistentHashKeyMapper over {@code n} synthetic endpoints ("test0".."testN-1",
 * each with 100 points) split evenly into {@code partitionNum} static rings.
 *
 * @param n number of endpoints to create; must be at least {@code partitionNum}
 * @param partitionNum number of partitions/rings to split the endpoints into; must be positive
 * @return a mapper backed by a StaticRingProvider with one ring per partition
 * @throws URISyntaxException never in practice; the endpoint URIs are well-formed
 * @throws IllegalArgumentException if partitionNum is non-positive or exceeds n
 */
private static ConsistentHashKeyMapper getKeyToHostMapper(int n, int partitionNum) throws URISyntaxException {
  if (partitionNum <= 0) {
    throw new IllegalArgumentException("partitionNum must be positive: " + partitionNum);
  }
  if (n < partitionNum) {
    // Guard: the original divided by partitionSize below, which would be 0 here
    // and throw an opaque ArithmeticException.
    throw new IllegalArgumentException("need at least one endpoint per partition: n=" + n + ", partitionNum=" + partitionNum);
  }
  Map<URI, Integer> endpoints = new HashMap<>();
  for (int ii = 0; ii < n; ++ii) {
    endpoints.put(new URI("test" + ii), 100);
  }
  // Split the endpoints into partitionNum groups of partitionSize; when n is not
  // evenly divisible, the remainder spills into one extra trailing group.
  final int partitionSize = endpoints.size() / partitionNum;
  List<Map<URI, Integer>> mapList = new ArrayList<>();
  int count = 0;
  for (final URI uri : endpoints.keySet()) {
    final int index = count / partitionSize;
    if (index == mapList.size()) {
      mapList.add(new HashMap<URI, Integer>());
    }
    mapList.get(index).put(uri, endpoints.get(uri));
    count++;
  }
  // One consistent-hash ring per partition group.
  List<Ring<URI>> rings = new ArrayList<>();
  for (final Map<URI, Integer> map : mapList) {
    rings.add(new ConsistentHashRing<URI>(map));
  }
  return new ConsistentHashKeyMapper(new StaticRingProvider(rings), new TestPartitionInfoProvider());
}
Aggregations