Use of com.linkedin.d2.balancer.strategies.degrader.DegraderRingFactory in project rest.li by LinkedIn.
The class DegraderLoadBalancerTest, method testDegraderLoadBalancerStateComparison:
@Test(groups = { "small", "back-end" })
public void testDegraderLoadBalancerStateComparison() throws URISyntaxException {
  long clusterGenerationId = 1;
  long lastUpdated = 29999;
  long updateIntervalMs = 5000;
  DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState.Strategy strategy =
      DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState.Strategy.LOAD_BALANCE;
  long currentAverageClusterLatency = 3000;
  Map<String, Object> configMap = new HashMap<>();
  configMap.put(PropertyKeys.HTTP_LB_LOW_WATER_MARK, 500d);
  configMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_POINTS_PER_WEIGHT, 120);
  DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(configMap);
  long clusterCallCount = 15;
  Map<TrackerClient, DegraderLoadBalancerQuarantine> quarantineMap = new HashMap<>();
  Map<TrackerClient, DegraderLoadBalancerQuarantine> quarantineStore = new HashMap<>();
  double currentOverrideDropRate = 0.4;
  boolean initialized = true;
  String name = "degraderV2";
  Map<URI, Integer> points = new HashMap<>();
  Map<TrackerClient, Double> recoveryMap = new HashMap<>();
  URI uri1 = new URI("http://test.linkedin.com:10010/abc0");
  URI uri2 = new URI("http://test.linkedin.com:10010/abc1");
  URI uri3 = new URI("http://test.linkedin.com:10010/abc2");
  points.put(uri1, 100);
  points.put(uri2, 50);
  points.put(uri3, 120);
  RingFactory<URI> ringFactory = new DegraderRingFactory<>(config);
  TestClock clock = new TestClock();
  List<TrackerClient> clients = createTrackerClient(3, clock, null);
  List<TrackerClientUpdater> clientUpdaters = new ArrayList<>();
  for (TrackerClient client : clients) {
    recoveryMap.put(client, 0.0);
    clientUpdaters.add(new TrackerClientUpdater(client, DEFAULT_PARTITION_ID));
  }
  DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState oldStateV2 =
      new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points,
          lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap,
          name, null, clusterCallCount);
  DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState newStateV2 =
      new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points,
          lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap,
          name, null, clusterCallCount);
  assertTrue(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  // a different cluster generation id means a different state
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs,
      clusterGenerationId + 1, points, lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency,
      initialized, recoveryMap, name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  // we don't care about lastUpdated
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId,
      points, lastUpdated + 30, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized,
      recoveryMap, name, null, clusterCallCount);
  assertTrue(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  // a different points map means a different state
  points.put(uri1, 30);
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId,
      points, lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap,
      name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  points.put(uri1, 100);
  // a different override drop rate means a different state
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId,
      points, lastUpdated, strategy, currentOverrideDropRate - 0.1, currentAverageClusterLatency, initialized,
      recoveryMap, name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  // we don't care about averageClusterLatency as far as printing the state is concerned
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId,
      points, lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency + 3, initialized,
      recoveryMap, name, null, clusterCallCount);
  assertTrue(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  // a different recovery map means a different state
  for (TrackerClient client : clients) {
    recoveryMap.put(client, 0.3);
  }
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId,
      points, lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap,
      name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  // test state health comparison
  assertFalse(DegraderLoadBalancerStrategyV2_1.isNewStateHealthy(newStateV2, config, clientUpdaters));
  // set the cluster average latency to 300, below the low water mark of 500; the state is still not
  // healthy because the points map has clients with less than perfect health
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId,
      points, lastUpdated, strategy, currentOverrideDropRate, 300, initialized, recoveryMap, name, null,
      clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isNewStateHealthy(newStateV2, config, clientUpdaters));
  // set all points to 120 (the configured points per weight, i.e. perfect health) so the cluster becomes "healthy"
  points.put(uri1, 120);
  points.put(uri2, 120);
  points.put(uri3, 120);
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId,
      points, lastUpdated, strategy, currentOverrideDropRate, 300, initialized, recoveryMap, name, null,
      clusterCallCount);
  assertTrue(DegraderLoadBalancerStrategyV2_1.isNewStateHealthy(newStateV2, config, clientUpdaters));
  // if currentAverageClusterLatency is above the low water mark, the cluster becomes unhealthy again
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId,
      points, lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap,
      name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isNewStateHealthy(newStateV2, config, clientUpdaters));
  // test DegraderLoadBalancerV3
  points.put(uri1, 100);
  points.put(uri2, 50);
  points.put(uri3, 120);
  DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy strategyV3 =
      DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.CALL_DROPPING;
  DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState oldStateV3 =
      new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
          initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
          recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState newStateV3 =
      new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
          initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
          recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  // unlike V2_1 above, the V3 comparison does not consider the cluster generation id
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId + 1,
      lastUpdated, initialized, ringFactory, points, strategyV3, currentOverrideDropRate,
      currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  // lastUpdated is not considered either
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId,
      lastUpdated + 300, initialized, ringFactory, points, strategyV3, currentOverrideDropRate,
      currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  // a different points map means a different state
  points.put(uri2, 77);
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
      recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  points.put(uri2, 50);
  // a different override drop rate means a different state
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate + 0.4, currentAverageClusterLatency,
      recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  // we don't care about averageClusterLatency for comparing states
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency + 55,
      recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  // a different recovery map means a different state
  for (TrackerClient client : clients) {
    recoveryMap.put(client, 0.5);
  }
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
      recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  // test state health comparison
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
  // set the cluster average latency to 300, below the low water mark of 500; the state is still not
  // healthy because the points map has clients with less than perfect health
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, 300, recoveryMap, name, null,
      clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
  // set all points to 120 so the cluster becomes "healthy"
  points.put(uri1, 120);
  points.put(uri2, 120);
  points.put(uri3, 120);
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, 300, recoveryMap, name, null,
      clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
  // if currentAverageClusterLatency is above the low water mark, the cluster becomes unhealthy again
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
      recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
}
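
Taken together, the assertions above pin down what drives state equality: the points map, the override drop rate, and the recovery map (plus the cluster generation id for V2_1 only), while lastUpdated and the average cluster latency are ignored. A minimal sketch of what the V2_1 comparison presumably boils down to; the getter names here are assumptions for illustration, not taken from the actual rest.li API:

// Hypothetical reconstruction of the V2_1 comparison implied by the test's
// assertions above; getter names are illustrative assumptions.
static boolean statesLookTheSame(DegraderLoadBalancerState oldState,
                                 DegraderLoadBalancerState newState) {
  return oldState.getClusterGenerationId() == newState.getClusterGenerationId() // skipped by V3
      && oldState.getPointsMap().equals(newState.getPointsMap())
      && oldState.getCurrentOverrideDropRate() == newState.getCurrentOverrideDropRate()
      && oldState.getRecoveryMap().equals(newState.getRecoveryMap());
  // lastUpdated and currentAverageClusterLatency are deliberately left out
}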
Use of com.linkedin.d2.balancer.strategies.degrader.DegraderRingFactory in project rest.li by LinkedIn.
The class PointBasedConsistentHashRingFactory, method clearPoints:
/**
 * Check and clean up the points maintained by the factory.
 *
 * DegraderRingFactory keeps a copy of all points. It is possible that some of the URIs are already
 * dead, in which case keeping their points around is meaningless. However, we do not want to react
 * immediately to individual URI changes either, because: 1. it is costly; 2. when a service is
 * bounced, its URI disappears and then comes back, so it makes sense to keep the URI around for
 * some time.
 *
 * We decided to use the number of unused entries as the criterion -- the more unused entries, the
 * higher the probability that some of them are dead. When the number of unused entries reaches the
 * given threshold, it is time to do the cleanup. For simplicity, all points are purged and
 * re-generated.
 *
 * HTTP_LB_HASHRING_POINT_CLEANUP_RATE defines the ratio of unused entries to total entries. It is
 * configurable from cfg2. Also, POINTS_CLEANUP_MIN_UNUSED_ENTRY is used to make sure we do not
 * waste time on cleanup when the total host number is small.
 *
 * @param size the size of the new URI list
 */
private void clearPoints(int size) {
  int unusedEntries = _ringPoints.size() - size;
  int unusedEntryThreshold = (int) (_ringPoints.size() * _config.getHashRingPointCleanUpRate());
  if (unusedEntries > Math.max(unusedEntryThreshold, POINTS_CLEANUP_MIN_UNUSED_ENTRY)) {
    _ringPoints.clear();
  }
}
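
To make the cleanup criterion concrete, here is a small standalone sketch of the same arithmetic. The cleanup rate and minimum-unused-entry constant below are illustrative stand-ins, not the library's actual defaults:

// Standalone illustration of the cleanup criterion in clearPoints().
// The concrete values are hypothetical, chosen only to show the math.
public class CleanupCriterionSketch {
  // Assumed stand-ins for _config.getHashRingPointCleanUpRate() and
  // POINTS_CLEANUP_MIN_UNUSED_ENTRY; the real defaults may differ.
  static final double HASH_RING_POINT_CLEANUP_RATE = 0.2;
  static final int POINTS_CLEANUP_MIN_UNUSED_ENTRY = 10;

  static boolean shouldClear(int ringPointsSize, int newUriListSize) {
    int unusedEntries = ringPointsSize - newUriListSize;
    int unusedEntryThreshold = (int) (ringPointsSize * HASH_RING_POINT_CLEANUP_RATE);
    return unusedEntries > Math.max(unusedEntryThreshold, POINTS_CLEANUP_MIN_UNUSED_ENTRY);
  }

  public static void main(String[] args) {
    // 100 cached entries, 95 live URIs: 5 unused <= max(20, 10), keep the cache.
    System.out.println(shouldClear(100, 95)); // false
    // 100 cached entries, 70 live URIs: 30 unused > max(20, 10), purge and regenerate.
    System.out.println(shouldClear(100, 70)); // true
  }
}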
Use of com.linkedin.d2.balancer.strategies.degrader.DegraderRingFactory in project rest.li by LinkedIn.
The class ConsistentHashRingTest, method testManyItemsEqualWeight:
@Test(groups = { "small", "back-end" })
public void testManyItemsEqualWeight() {
  Map<String, Integer> many = new HashMap<>();
  Map<String, AtomicInteger> counts = new HashMap<>();
  for (int i = 0; i < 100; ++i) {
    many.put("test" + i, 10);
    counts.put("test" + i, new AtomicInteger());
  }
  DegraderRingFactory<String> ringFactory = new DegraderRingFactory<>(new DegraderLoadBalancerStrategyConfig(1L));
  ConsistentHashRing<String> test = (ConsistentHashRing<String>) ringFactory.createRing(many);
  assertNotNull(test.get(0));
  // verify that each test item has 10 points on the ring
  List<ConsistentHashRing.Point<String>> points = test.getPoints();
  for (int i = 0; i < points.size(); ++i) {
    counts.get(points.get(i).getT()).incrementAndGet();
  }
  for (Entry<String, AtomicInteger> count : counts.entrySet()) {
    assertEquals(count.getValue().get(), 10);
  }
  double highLowDiff = test.getHighLowDiffOfAreaRing();
  assertTrue(highLowDiff < 1.54, "Hash Ring area diff is greater than it should be, saw diff of: " + highLowDiff);
}
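
As a usage note: the map handed to createRing assigns each item its number of points on the ring, and the returned ring can then be probed with an int hash to pick an item, as the test's test.get(0) call shows. A minimal sketch using the same constructor as the test above; the host names and the hashing of the request key are illustrative assumptions:

// Minimal usage sketch of DegraderRingFactory.createRing; host names and the
// way the request key is hashed are illustrative, not prescribed by the test.
Map<String, Integer> pointsMap = new HashMap<>();
pointsMap.put("host-a", 10); // 10 points on the ring
pointsMap.put("host-b", 10);
DegraderRingFactory<String> factory =
    new DegraderRingFactory<>(new DegraderLoadBalancerStrategyConfig(1L));
Ring<String> ring = factory.createRing(pointsMap);
// A request key hashed to an int lands on a deterministic point of the ring,
// so the same key is routed to the same host while the ring is unchanged.
String host = ring.get("/resource/1234".hashCode());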