Use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyConfig in project rest.li by LinkedIn.
From class DegraderLoadBalancerTest, method testClusterRecoveryFast1TC.
@Test(groups = { "small", "back-end" })
public void testClusterRecoveryFast1TC() {
  Map<String, Object> myMap = new HashMap<String, Object>();
  Long timeInterval = 5000L;
  TestClock clock = new TestClock();
  myMap.put(PropertyKeys.CLOCK, clock);
  // This recovery level will put one point into the hash ring, which is good enough to
  // send traffic to it because it is the only member of the cluster.
  myMap.put(PropertyKeys.HTTP_LB_INITIAL_RECOVERY_LEVEL, 0.01);
  myMap.put(PropertyKeys.HTTP_LB_RING_RAMP_FACTOR, 2.0);
  myMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_UPDATE_INTERVAL_MS, timeInterval);
  int stepsToFullRecovery = 0;

  // test strategy V3
  DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
  DegraderLoadBalancerStrategyV3 strategyV3 =
      new DegraderLoadBalancerStrategyV3(config, "DegraderLoadBalancerTest", null);
  DegraderLoadBalancerStrategyAdapter strategy = new DegraderLoadBalancerStrategyAdapter(strategyV3);
  clusterRecovery1TC(myMap, clock, stepsToFullRecovery, timeInterval, strategy, null,
      DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);

  // test strategy V2
  config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
  DegraderLoadBalancerStrategyV2_1 strategyV2 =
      new DegraderLoadBalancerStrategyV2_1(config, "DegraderLoadBalancerTest", null);
  strategy = new DegraderLoadBalancerStrategyAdapter(strategyV2);
  clusterRecovery1TC(myMap, clock, stepsToFullRecovery, timeInterval, strategy,
      DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState.Strategy.LOAD_BALANCE, null);
}
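The TestClock above is the test suite's controllable clock: time only moves when the test says so, which makes the recovery steps deterministic. A minimal sketch of such a clock follows; this is a hypothetical stand-in, not the project's class, and the Clock interface location and the addDuration name are assumptions based on how the tests use it:

import java.util.concurrent.atomic.AtomicLong;
import com.linkedin.util.clock.Clock;

// Hypothetical stand-in for TestClock: time advances only when the test
// explicitly adds a duration, e.g. to step past the 5000 ms update interval
// so the strategy recomputes its load balancer state.
class SettableClock implements Clock {
  private final AtomicLong _now = new AtomicLong(0);

  @Override
  public long currentTimeMillis() {
    return _now.get();
  }

  void addDuration(long durationMs) {
    _now.addAndGet(durationMs);
  }
}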
Use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyConfig in project rest.li by LinkedIn.
From class DegraderLoadBalancerTest, method testDegraderLoadBalancerStateComparison.
@Test(groups = { "small", "back-end" })
public void testDegraderLoadBalancerStateComparison() throws URISyntaxException {
  long clusterGenerationId = 1;
  long lastUpdated = 29999;
  long updateIntervalMs = 5000;
  DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState.Strategy strategy =
      DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState.Strategy.LOAD_BALANCE;
  long currentAverageClusterLatency = 3000;
  Map<String, Object> configMap = new HashMap<String, Object>();
  configMap.put(PropertyKeys.HTTP_LB_LOW_WATER_MARK, 500d);
  configMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_POINTS_PER_WEIGHT, 120);
  DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(configMap);
  long clusterCallCount = 15;
  Map<TrackerClient, DegraderLoadBalancerQuarantine> quarantineMap = new HashMap<>();
  Map<TrackerClient, DegraderLoadBalancerQuarantine> quarantineStore = new HashMap<>();
  double currentOverrideDropRate = 0.4;
  boolean initialized = true;
  String name = "degraderV2";
  Map<URI, Integer> points = new HashMap<URI, Integer>();
  Map<TrackerClient, Double> recoveryMap = new HashMap<TrackerClient, Double>();
  URI uri1 = new URI("http://test.linkedin.com:10010/abc0");
  URI uri2 = new URI("http://test.linkedin.com:10010/abc1");
  URI uri3 = new URI("http://test.linkedin.com:10010/abc2");
  points.put(uri1, 100);
  points.put(uri2, 50);
  points.put(uri3, 120);
  RingFactory<URI> ringFactory = new DegraderRingFactory<>(config);
  TestClock clock = new TestClock();
  List<TrackerClient> clients = createTrackerClient(3, clock, null);
  List<TrackerClientUpdater> clientUpdaters = new ArrayList<TrackerClientUpdater>();
  for (TrackerClient client : clients) {
    recoveryMap.put(client, 0.0);
    clientUpdaters.add(new TrackerClientUpdater(client, DEFAULT_PARTITION_ID));
  }

  DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState oldStateV2 =
      new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points,
          lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap,
          name, null, clusterCallCount);
  DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState newStateV2 =
      new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points,
          lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap,
          name, null, clusterCallCount);
  assertTrue(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));

  // a different cluster generation id makes the states differ
  newStateV2 =
      new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId + 1, points,
          lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap,
          name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));

  // the comparison ignores lastUpdated
  newStateV2 =
      new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points,
          lastUpdated + 30, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap,
          name, null, clusterCallCount);
  assertTrue(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));

  // changing a client's points makes the states differ
  points.put(uri1, 30);
  newStateV2 =
      new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points,
          lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap,
          name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  points.put(uri1, 100);

  // a different override drop rate makes the states differ
  newStateV2 =
      new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points,
          lastUpdated, strategy, currentOverrideDropRate - 0.1, currentAverageClusterLatency, initialized, recoveryMap,
          name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));

  // the comparison ignores averageClusterLatency; it only affects how the state is printed
  newStateV2 =
      new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points,
          lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency + 3, initialized, recoveryMap,
          name, null, clusterCallCount);
  assertTrue(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));

  // a different recovery map makes the states differ
  for (TrackerClient client : clients) {
    recoveryMap.put(client, 0.3);
  }
  newStateV2 =
      new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points,
          lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap,
          name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));

  // test state health comparison
  assertFalse(DegraderLoadBalancerStrategyV2_1.isNewStateHealthy(newStateV2, config, clientUpdaters));

  // drop the cluster average latency to 300, below the low water mark; the state is still
  // not healthy because the points map has clients with less-than-perfect health
  newStateV2 =
      new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points,
          lastUpdated, strategy, currentOverrideDropRate, 300, initialized, recoveryMap,
          name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isNewStateHealthy(newStateV2, config, clientUpdaters));

  // give every client the full 120 points so the cluster becomes "healthy"
  points.put(uri1, 120);
  points.put(uri2, 120);
  points.put(uri3, 120);
  newStateV2 =
      new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points,
          lastUpdated, strategy, currentOverrideDropRate, 300, initialized, recoveryMap,
          name, null, clusterCallCount);
  assertTrue(DegraderLoadBalancerStrategyV2_1.isNewStateHealthy(newStateV2, config, clientUpdaters));

  // if currentAverageClusterLatency is above the low water mark, the cluster is unhealthy again
  newStateV2 =
      new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points,
          lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap,
          name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isNewStateHealthy(newStateV2, config, clientUpdaters));

  // test strategy V3
  points.put(uri1, 100);
  points.put(uri2, 50);
  points.put(uri3, 120);
  DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy strategyV3 =
      DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.CALL_DROPPING;
  DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState oldStateV3 =
      new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
          initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
          recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState newStateV3 =
      new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
          initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
          recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));

  // unlike V2, the V3 comparison ignores clusterGenerationId
  newStateV3 =
      new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId + 1, lastUpdated,
          initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
          recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));

  // it likewise ignores lastUpdated
  newStateV3 =
      new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated + 300,
          initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
          recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));

  // changing a client's points makes the states differ
  points.put(uri2, 77);
  newStateV3 =
      new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
          initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
          recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  points.put(uri2, 50);

  // a different override drop rate makes the states differ
  newStateV3 =
      new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
          initialized, ringFactory, points, strategyV3, currentOverrideDropRate + 0.4, currentAverageClusterLatency,
          recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));

  // the comparison ignores averageClusterLatency
  newStateV3 =
      new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
          initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency + 55,
          recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));

  // a different recovery map makes the states differ
  for (TrackerClient client : clients) {
    recoveryMap.put(client, 0.5);
  }
  newStateV3 =
      new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
          initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
          recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));

  // test state health comparison
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));

  // drop the cluster average latency to 300, below the low water mark; the state is still
  // not healthy because the points map has clients with less-than-perfect health
  newStateV3 =
      new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
          initialized, ringFactory, points, strategyV3, currentOverrideDropRate, 300,
          recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));

  // give every client the full 120 points so the cluster becomes "healthy"
  points.put(uri1, 120);
  points.put(uri2, 120);
  points.put(uri3, 120);
  newStateV3 =
      new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
          initialized, ringFactory, points, strategyV3, currentOverrideDropRate, 300,
          recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));

  // if currentAverageClusterLatency is above the low water mark, the cluster is unhealthy again
  newStateV3 =
      new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
          initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
          recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
}
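Taken together, the assertions pin down the comparison contract: generation id (for V2 only), points, override drop rate, and recovery map are significant, while lastUpdated and average cluster latency are not. A hypothetical distillation of that contract, not the project's code:

import java.net.URI;
import java.util.Map;

// Hypothetical distillation of the behavior the assertions above exercise:
// two states compare "the same" when the fields that would drive an update
// match, while pure bookkeeping fields are ignored.
final class StateComparisonSketch {
  static boolean sameState(boolean compareGenerationId, // true for V2; the V3 asserts show it is skipped
      long genIdA, Map<URI, Integer> pointsA, double dropRateA, Map<Object, Double> recoveryA,
      long genIdB, Map<URI, Integer> pointsB, double dropRateB, Map<Object, Double> recoveryB) {
    return (!compareGenerationId || genIdA == genIdB)
        && pointsA.equals(pointsB)         // hash-ring points per URI
        && dropRateA == dropRateB          // override drop rate
        && recoveryA.equals(recoveryB);    // clients still ramping back up
  }
}

Health is a stricter, separate check: the asserts show isNewStateHealthy requires both an average latency below the low water mark and every client at full points.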
Use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyConfig in project rest.li by LinkedIn.
From class DegraderRingFactoryTest, method testRandomChangePoints.
@Test(groups = { "small", "back-end" })
public void testRandomChangePoints() throws URISyntaxException {
  int pointNum = 5;
  int loopNum = 100;
  Map<String, Integer> pointsMp = buildPointsMap(pointNum);
  Map<String, Integer> maxPoints = new HashMap<>(pointNum);
  Random random = new Random();
  for (String uri : pointsMp.keySet()) {
    maxPoints.put(uri, 100);
  }
  PointBasedConsistentHashRingFactory<String> ringFactory =
      new PointBasedConsistentHashRingFactory<>(new DegraderLoadBalancerStrategyConfig(1L));
  Ring<String> ring = ringFactory.createRing(pointsMp);
  assertNotNull(ring.get(1000));
  for (int i = 0; i < loopNum; ++i) {
    // assign a new random point count to each URI
    for (String uri : pointsMp.keySet()) {
      int newPoints = random.nextInt(200);
      if (newPoints == 0) {
        continue;
      }
      pointsMp.put(uri, newPoints);
      if (newPoints > maxPoints.get(uri)) {
        // the expected list size is the historical maximum rounded up to a multiple of 4
        maxPoints.put(uri, ((newPoints + 3) / 4) * 4);
      }
    }
    ring = ringFactory.createRing(pointsMp);
    assertNotNull(ring.get(1000));
    Map<String, List<Point<String>>> pointList = ringFactory.getPointsMap();
    for (String uri : pointsMp.keySet()) {
      assertEquals((int) maxPoints.get(uri), pointList.get(uri).size());
    }
  }
}
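The maxPoints bookkeeping mirrors the growth policy that the assertions encode: during this test each URI's point list never shrinks, and when it grows it is sized at the requested point count rounded up to a multiple of 4. A small sketch of that expectation, using only arithmetic taken from the test:

// Expected size of a URI's point list after requesting `requestedPoints`,
// given the largest list size seen so far; mirrors the test's bookkeeping.
static int expectedPointListSize(int previousSize, int requestedPoints) {
  if (requestedPoints <= previousSize) {
    return previousSize;                    // the existing list is reused as-is
  }
  return ((requestedPoints + 3) / 4) * 4;   // grow, rounded up to a multiple of 4
}

For example, a list that has already held 100 points stays at 100 when 50 points are requested, and grows to 104 when 101 points are requested.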
Use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyConfig in project rest.li by LinkedIn.
From class ConsistentHashKeyMapperTest, method testMapKeysConcurrency.
@SuppressWarnings("rawtypes")
@Test
public void testMapKeysConcurrency() throws Exception {
  String serviceName = "articles";
  String clusterName = "cluster";
  String path = "path";
  String strategyName = "degrader";
  int numPartitions = 500;

  // set up a single host that serves all partitions
  Map<URI, Map<Integer, PartitionData>> partitionDescriptions = new HashMap<URI, Map<Integer, PartitionData>>();
  final URI foo1 = new URI("http://foo1.com");
  Map<Integer, PartitionData> foo1Data = new HashMap<Integer, PartitionData>();
  for (int i = 0; i < numPartitions; i++) {
    foo1Data.put(i, new PartitionData(1.0));
  }
  partitionDescriptions.put(foo1, foo1Data);

  DegraderLoadBalancerStrategyV3 strategy =
      new DegraderLoadBalancerStrategyV3(new DegraderLoadBalancerStrategyConfig(5000), serviceName, null);
  List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = new ArrayList<LoadBalancerState.SchemeStrategyPair>();
  orderedStrategies.add(new LoadBalancerState.SchemeStrategyPair("http", strategy));
  PartitionAccessor accessor = new TestDeadlockPartitionAccessor(numPartitions);
  SimpleLoadBalancer balancer = new SimpleLoadBalancer(new PartitionedLoadBalancerTestState(
      clusterName, serviceName, path, strategyName, partitionDescriptions, orderedStrategies, accessor));
  ConsistentHashKeyMapper mapper = new ConsistentHashKeyMapper(balancer, balancer);

  // run one key-mapping task per partition concurrently; the timed get below fails the test if any task deadlocks
  CountDownLatch latch = new CountDownLatch(numPartitions);
  List<Runnable> runnables = createRunnables(numPartitions, mapper, serviceName, latch);
  final ExecutorService executor = Executors.newFixedThreadPool(numPartitions);
  List<Future> futures = new ArrayList<Future>();
  for (int i = 0; i < numPartitions; i++) {
    futures.add(executor.submit(runnables.get(i)));
  }
  for (Future future : futures) {
    future.get(30, TimeUnit.SECONDS);
  }
}
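The createRunnables helper and TestDeadlockPartitionAccessor are not shown in this excerpt. A plausible sketch of the helper is below, assuming each runnable waits at the shared latch so all the mapping calls race into the mapper at roughly the same moment; the mapKeysV2 call and the d2 URI form are assumptions based on typical ConsistentHashKeyMapper usage, not taken from this excerpt:

import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;

// Hypothetical reconstruction of the createRunnables helper used above.
private List<Runnable> createRunnables(int num, final ConsistentHashKeyMapper mapper,
    String serviceName, final CountDownLatch latch) throws URISyntaxException {
  final URI serviceUri = new URI("d2://" + serviceName);
  List<Runnable> runnables = new ArrayList<>();
  for (int i = 0; i < num; i++) {
    final int keyValue = i;
    runnables.add(() -> {
      try {
        // Park every task at the latch so all mapping calls start together,
        // maximizing the chance of hitting the deadlock under test.
        latch.countDown();
        latch.await();
        mapper.mapKeysV2(serviceUri, Collections.singleton(keyValue));
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    });
  }
  return runnables;
}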
Use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyConfig in project rest.li by LinkedIn.
From class DegraderLoadBalancerStateTest, method testConcurrentResizeAndSet.
/**
 * Resizing the array of partitions doesn't interfere with setting partition state.
 */
@Test(groups = { "small", "back-end" })
public void testConcurrentResizeAndSet() throws InterruptedException {
  // This test aims to reproduce a specific bug, which occurs when one thread sets a
  // partition state while another thread is in the middle of resizing the array of states.
  // To reproduce this, we inject a tricky Clock, which pauses execution of the latter
  // thread in the middle of resizing (when constructing the new partition state).
  // This depends on DegraderLoadBalancerState to call the clock at least once to initialize
  // partition 1. If that changes, you'll have to change clock-related constants below.
  final PauseClock clock = new PauseClock();
  final DegraderLoadBalancerState subject = new DegraderLoadBalancerStrategyV3(
      new DegraderLoadBalancerStrategyConfig(5000, true, 1, null, Collections.<String, Object>emptyMap(),
          clock, 1, 1, 1, 1, 1, 1, 1, 1, 0.2, null, 21, null, 0.1, null, null, null, null, 100),
      SERVICE_NAME, null).getState();
  Thread getPartition1 = new Thread() {
    @Override
    public void run() {
      // resize the array as a side effect
      subject.getPartitionState(1);
    }
  };
  assertNotNull(subject.getPartitionState(0));
  final long clockCalled = clock._calls.get();
  // 1 partition initialized (so far)
  assertTrue(clockCalled > 0, "clock not called");
  clock._paused = new CountDownLatch(1);
  clock._resume = new CountDownLatch(1);
  getPartition1.start();
  assertTrue(clock._paused.await(60, TimeUnit.SECONDS));
  // Now getPartition1 has started resizing the array.
  final PartitionDegraderLoadBalancerState newState = newPartitionState(0, 0);
  assertNotSame(subject.getPartitionState(0), newState);
  subject.setPartitionState(0, newState);
  assertSame(subject.getPartitionState(0), newState);
  clock._resume.countDown();
  getPartition1.join(60000);
  assertFalse(getPartition1.isAlive());
  // Now getPartition1 has finished resizing the array.
  assertSame(subject.getPartitionState(0), newState); // as before
  // 2 partitions initialized
  assertTrue(clock._calls.get() > clockCalled, "clock not called again");
}
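PauseClock is not shown in this excerpt either. From its usage above (the _calls counter and the _paused and _resume latches), a plausible reconstruction is a Clock that counts its calls and can block once at a test-controlled pause point; the field semantics here are inferred, not taken from the project:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import com.linkedin.util.clock.Clock;

// Hypothetical reconstruction of PauseClock: every call is counted, and when
// _paused is armed the calling thread signals the test and then blocks until
// the test counts down _resume, freezing it mid-resize.
class PauseClock implements Clock {
  final AtomicLong _calls = new AtomicLong();
  volatile CountDownLatch _paused; // armed by the test before the pause point
  volatile CountDownLatch _resume; // counted down by the test to continue

  @Override
  public long currentTimeMillis() {
    _calls.incrementAndGet();
    CountDownLatch paused = _paused;
    if (paused != null) {
      _paused = null;        // pause only once
      paused.countDown();    // tell the test we reached the pause point
      try {
        _resume.await(60, TimeUnit.SECONDS); // hold here while the test sets partition state
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
    return _calls.get();
  }
}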