use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyV3 in project rest.li by linkedin.
the class DegraderLoadBalancerTest method testResizeProblem.
@Test(groups = { "small", "back-end" })
public void testResizeProblem() {
  URI uri = URI.create("http://linkedin.com:9999");
  final ExecutorService executor = Executors.newFixedThreadPool(2);
  int totalSuccessfulInitialization = 0;
  try {
    for (int i = 0; i < 20000; i++) {
      CountDownLatch joinLatch = new CountDownLatch(2);
      TestClock clock = new TestClock();
      final DegraderLoadBalancerStrategyV3 strategy = getStrategy();
      for (Runnable runnable : createRaceCondition(uri, clock, strategy, joinLatch)) {
        executor.submit(runnable);
      }
      try {
        if (!joinLatch.await(10, TimeUnit.SECONDS)) {
          fail("Update or resize failed to finish within 10 seconds");
        }
        // Before the fix for the resize problem, initialization for partition 0 would fail
        // if the following race condition happened: thread A sets the updateStarted flag for
        // partition 0; thread B resizes the partition count and copies the state for
        // partition 0 with updateStarted == true; thread A clears the flag and updates the
        // state for partition 0; thread B swaps in the new array of states for the enlarged
        // number of partitions and finishes the resize, discarding thread A's update.
        // With the fix, we expect the above not to happen.
        DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState state = strategy.getState().getPartitionState(0);
        totalSuccessfulInitialization += state.isInitialized() ? 1 : 0;
      } catch (InterruptedException ex) {
        // If interrupted, this iteration is not counted as a successful initialization,
        // so the final assertion below will fail.
      }
    }
  } finally {
    executor.shutdownNow();
  }
  assertEquals(totalSuccessfulInitialization, 20000);
}
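The lost-update race described in the comment above is easiest to see in a stripped-down model. The sketch below is not rest.li's actual fix; it is a minimal illustration, with invented names, of how a compare-and-swap retry loop over the whole state array prevents a concurrent resize from discarding an in-flight partition update: if the resize swaps the array first, the updater's CAS fails and the update is re-applied to the new array.
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicReference;

// Hypothetical model of the race: an updater and a resizer both replace the
// shared states array via CAS, so neither can silently overwrite the other.
class ResizeRaceSketch {
  static final class PartitionState {
    final boolean initialized;
    PartitionState(boolean initialized) { this.initialized = initialized; }
  }

  private final AtomicReference<PartitionState[]> states =
      new AtomicReference<>(new PartitionState[] { new PartitionState(false) });

  // Thread A: mark partition 0 initialized; retry if a resize races with us.
  void updatePartitionZero() {
    while (true) {
      PartitionState[] old = states.get();
      PartitionState[] copy = old.clone();
      copy[0] = new PartitionState(true);
      if (states.compareAndSet(old, copy)) {
        return; // update landed in the current array, so it cannot be lost
      }
      // CAS failed: a resize swapped the array; loop and re-apply the update.
    }
  }

  // Thread B: grow the array to newSize partitions; retry if an update races.
  void resize(int newSize) {
    while (true) {
      PartitionState[] old = states.get();
      PartitionState[] grown = Arrays.copyOf(old, newSize);
      for (int i = old.length; i < newSize; i++) {
        grown[i] = new PartitionState(false);
      }
      if (states.compareAndSet(old, grown)) {
        return; // resize kept every update that had already landed
      }
    }
  }
}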
use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyV3 in project rest.li by linkedin.
the class DegraderLoadBalancerTest method testClusterRecoveryFast1TC.
@Test(groups = { "small", "back-end" })
public void testClusterRecoveryFast1TC() {
  Map<String, Object> myMap = new HashMap<String, Object>();
  Long timeInterval = 5000L;
  TestClock clock = new TestClock();
  myMap.put(PropertyKeys.CLOCK, clock);
  // This recovery level will put one point into the hash ring, which is good enough to
  // send traffic to it because it is the only member of the cluster.
myMap.put("initialRecoverLevel", 0.01);
  myMap.put(PropertyKeys.HTTP_LB_RING_RAMP_FACTOR, 2.0);
  myMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_UPDATE_INTERVAL_MS, timeInterval);
  int stepsToFullRecovery = 0;
  // test strategy V3
  DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
  DegraderLoadBalancerStrategyV3 strategyV3 = new DegraderLoadBalancerStrategyV3(config, "DegraderLoadBalancerTest", null);
  DegraderLoadBalancerStrategyAdapter strategy = new DegraderLoadBalancerStrategyAdapter(strategyV3);
  clusterRecovery1TC(myMap, clock, stepsToFullRecovery, timeInterval, strategy, null, DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
  // test strategy V2
  config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
  DegraderLoadBalancerStrategyV2_1 strategyV2 = new DegraderLoadBalancerStrategyV2_1(config, "DegraderLoadBalancerTest", null);
  strategy = new DegraderLoadBalancerStrategyAdapter(strategyV2);
  clusterRecovery1TC(myMap, clock, stepsToFullRecovery, timeInterval, strategy, DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState.Strategy.LOAD_BALANCE, null);
}
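To see why a 0.01 recovery level already yields one ring point, and how the 2.0 ramp factor doubles a recovering client's share each update interval, here is a hedged arithmetic sketch. It assumes a default of 100 points per weight and the simple formula points = max(1, pointsPerWeight * recoveryLevel); the real strategy's rounding and interactions may differ.
// Illustrative ramp-up only; the constants and formula are assumptions, not
// rest.li's exact implementation.
class RecoveryRampSketch {
  public static void main(String[] args) {
    int pointsPerWeight = 100;   // assumed default points for a fully healthy client
    double recoveryLevel = 0.01; // initial recovery level from the test config
    double rampFactor = 2.0;     // HTTP_LB_RING_RAMP_FACTOR from the test config
    for (int interval = 0; recoveryLevel < 1.0; interval++) {
      int points = Math.max(1, (int) (pointsPerWeight * recoveryLevel));
      System.out.printf("interval %d: recoveryLevel=%.2f -> %d ring points%n",
          interval, recoveryLevel, points);
      recoveryLevel = Math.min(1.0, recoveryLevel * rampFactor); // double each interval
    }
  }
}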
use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyV3 in project rest.li by linkedin.
the class DegraderLoadBalancerTest method testDegraderLoadBalancerStateComparison.
@Test(groups = { "small", "back-end" })
public void testDegraderLoadBalancerStateComparison() throws URISyntaxException {
  long clusterGenerationId = 1;
  long lastUpdated = 29999;
  long updateIntervalMs = 5000;
  DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState.Strategy strategy = DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState.Strategy.LOAD_BALANCE;
  long currentAverageClusterLatency = 3000;
  Map<String, Object> configMap = new HashMap<String, Object>();
  configMap.put(PropertyKeys.HTTP_LB_LOW_WATER_MARK, 500d);
  configMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_POINTS_PER_WEIGHT, 120);
  DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(configMap);
  long clusterCallCount = 15;
  Map<TrackerClient, DegraderLoadBalancerQuarantine> quarantineMap = new HashMap<>();
  Map<TrackerClient, DegraderLoadBalancerQuarantine> quarantineStore = new HashMap<>();
  double currentOverrideDropRate = 0.4;
  boolean initialized = true;
  String name = "degraderV2";
  Map<URI, Integer> points = new HashMap<URI, Integer>();
  Map<TrackerClient, Double> recoveryMap = new HashMap<TrackerClient, Double>();
  URI uri1 = new URI("http://test.linkedin.com:10010/abc0");
  URI uri2 = new URI("http://test.linkedin.com:10010/abc1");
  URI uri3 = new URI("http://test.linkedin.com:10010/abc2");
  points.put(uri1, 100);
  points.put(uri2, 50);
  points.put(uri3, 120);
  RingFactory<URI> ringFactory = new DegraderRingFactory<>(config);
  TestClock clock = new TestClock();
  List<TrackerClient> clients = createTrackerClient(3, clock, null);
  List<TrackerClientUpdater> clientUpdaters = new ArrayList<TrackerClientUpdater>();
  for (TrackerClient client : clients) {
    recoveryMap.put(client, 0.0);
    clientUpdaters.add(new TrackerClientUpdater(client, DEFAULT_PARTITION_ID));
  }
  DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState oldStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points, lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap, name, null, clusterCallCount);
  DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points, lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap, name, null, clusterCallCount);
  assertTrue(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId + 1, points, lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap, name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  // we don't care about lastUpdated
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points, lastUpdated + 30, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap, name, null, clusterCallCount);
  assertTrue(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  points.put(uri1, 30);
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points, lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap, name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  points.put(uri1, 100);
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points, lastUpdated, strategy, currentOverrideDropRate - 0.1, currentAverageClusterLatency, initialized, recoveryMap, name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  // averageClusterLatency does not affect the comparison; it is only printed with the state
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points, lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency + 3, initialized, recoveryMap, name, null, clusterCallCount);
  assertTrue(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  for (TrackerClient client : clients) {
    recoveryMap.put(client, 0.3);
  }
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points, lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap, name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isOldStateTheSameAsNewState(oldStateV2, newStateV2));
  // test state health comparison
  assertFalse(DegraderLoadBalancerStrategyV2_1.isNewStateHealthy(newStateV2, config, clientUpdaters));
  // set the cluster average latency to 300, below the low water mark; the state is still not
  // healthy because the points map has clients with less than perfect health
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points, lastUpdated, strategy, currentOverrideDropRate, 300, initialized, recoveryMap, name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isNewStateHealthy(newStateV2, config, clientUpdaters));
  // set all points to 120 so the cluster becomes "healthy"
  points.put(uri1, 120);
  points.put(uri2, 120);
  points.put(uri3, 120);
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points, lastUpdated, strategy, currentOverrideDropRate, 300, initialized, recoveryMap, name, null, clusterCallCount);
  assertTrue(DegraderLoadBalancerStrategyV2_1.isNewStateHealthy(newStateV2, config, clientUpdaters));
  // if currentAverageClusterLatency is above the low water mark, the cluster becomes unhealthy again
  newStateV2 = new DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState(updateIntervalMs, clusterGenerationId, points, lastUpdated, strategy, currentOverrideDropRate, currentAverageClusterLatency, initialized, recoveryMap, name, null, clusterCallCount);
  assertFalse(DegraderLoadBalancerStrategyV2_1.isNewStateHealthy(newStateV2, config, clientUpdaters));
  // test DegraderLoadBalancerV3
  points.put(uri1, 100);
  points.put(uri2, 50);
  points.put(uri3, 120);
  DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy strategyV3 = DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.CALL_DROPPING;
  DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState oldStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  // unlike V2, the V3 comparison ignores the cluster generation id
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId + 1, lastUpdated, initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated + 300, initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  points.put(uri2, 77);
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  points.put(uri2, 50);
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points, strategyV3, currentOverrideDropRate + 0.4, currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  // we don't care about averageClusterLatency when comparing states
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency + 55, recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  for (TrackerClient client : clients) {
    recoveryMap.put(client, 0.5);
  }
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  // test state health comparison
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
  // set the cluster average latency to 300, below the low water mark; the state is still not
  // healthy because the points map has clients with less than perfect health
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points, strategyV3, currentOverrideDropRate, 300, recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
  // set all points to 120 so the cluster becomes "healthy"
  points.put(uri1, 120);
  points.put(uri2, 120);
  points.put(uri3, 120);
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points, strategyV3, currentOverrideDropRate, 300, recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertTrue(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
  // if currentAverageClusterLatency is above the low water mark, the cluster becomes unhealthy again
  newStateV3 = new DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount, quarantineMap, quarantineStore);
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
}
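Read together, the assertions pin down which fields the comparison considers: changes to the points map, the override drop rate, or the recovery map make two states differ, while lastUpdated and the average cluster latency are ignored (V2 additionally compares the cluster generation id; V3 does not). As a rough illustration only, with invented parameter names rather than rest.li's actual fields, the comparison behaves like:
import java.net.URI;
import java.util.Map;

// Sketch of the comparison semantics exercised above; not rest.li's code.
final class StateComparisonSketch {
  // Two states are "the same" when the fields that drive load balancing match;
  // timestamps and observed latency are deliberately excluded so that pure
  // bookkeeping changes do not register as a state change.
  static boolean isOldStateTheSameAsNewState(Map<URI, Integer> oldPoints,
                                             Map<URI, Integer> newPoints,
                                             double oldOverrideDropRate,
                                             double newOverrideDropRate,
                                             Map<?, Double> oldRecoveryMap,
                                             Map<?, Double> newRecoveryMap) {
    return oldPoints.equals(newPoints)
        && oldOverrideDropRate == newOverrideDropRate
        && oldRecoveryMap.equals(newRecoveryMap);
  }
}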
use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyV3 in project rest.li by linkedin.
the class ConsistentHashKeyMapperTest method testMapKeysConcurrency.
@SuppressWarnings("rawtypes")
@Test
public void testMapKeysConcurrency() throws Exception {
  String serviceName = "articles";
  String clusterName = "cluster";
  String path = "path";
  String strategyName = "degrader";
  int numPartitions = 500;
  // set up the partitions
  Map<URI, Map<Integer, PartitionData>> partitionDescriptions = new HashMap<URI, Map<Integer, PartitionData>>();
  final URI foo1 = new URI("http://foo1.com");
  Map<Integer, PartitionData> foo1Data = new HashMap<Integer, PartitionData>();
  for (int i = 0; i < numPartitions; i++) {
    foo1Data.put(i, new PartitionData(1.0));
  }
  partitionDescriptions.put(foo1, foo1Data);
  DegraderLoadBalancerStrategyV3 strategy = new DegraderLoadBalancerStrategyV3(new DegraderLoadBalancerStrategyConfig(5000), serviceName, null);
  List<LoadBalancerState.SchemeStrategyPair> orderedStrategies = new ArrayList<LoadBalancerState.SchemeStrategyPair>();
  orderedStrategies.add(new LoadBalancerState.SchemeStrategyPair("http", strategy));
  PartitionAccessor accessor = new TestDeadlockPartitionAccessor(numPartitions);
  SimpleLoadBalancer balancer = new SimpleLoadBalancer(new PartitionedLoadBalancerTestState(clusterName, serviceName, path, strategyName, partitionDescriptions, orderedStrategies, accessor));
  ConsistentHashKeyMapper mapper = new ConsistentHashKeyMapper(balancer, balancer);
  CountDownLatch latch = new CountDownLatch(numPartitions);
  List<Runnable> runnables = createRunnables(numPartitions, mapper, serviceName, latch);
  final ExecutorService executor = Executors.newFixedThreadPool(numPartitions);
  List<Future> futures = new ArrayList<Future>();
  for (int i = 0; i < numPartitions; i++) {
    futures.add(executor.submit(runnables.get(i)));
  }
  // a deadlock in the mapper would make these gets time out
  for (Future future : futures) {
    future.get(30, TimeUnit.SECONDS);
  }
}
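The createRunnables helper is defined elsewhere in the test class. A plausible shape for it, sketched here with invented names and the actual key-mapping call replaced by a generic Runnable, is a set of tasks that rendezvous on the shared latch so that all 500 threads hit the mapper at the same instant:
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;

// Hypothetical sketch only: each task counts down and then awaits the shared
// latch, so no task starts its work until all of them are ready. If the work
// (the key-mapping call in the real test) could deadlock, the timed
// future.get(...) calls above would fail instead of hanging forever.
final class ConcurrencySketch {
  static List<Runnable> barrierRunnables(int n, CountDownLatch latch, Runnable work) {
    List<Runnable> tasks = new ArrayList<>();
    for (int i = 0; i < n; i++) {
      tasks.add(() -> {
        latch.countDown();
        try {
          latch.await(); // rendezvous point for all n threads
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          return;
        }
        work.run(); // the concurrent call under test
      });
    }
    return tasks;
  }
}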
use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyV3 in project rest.li by linkedin.
the class DegraderLoadBalancerTest method testBalancingRing.
@Test(groups = { "small", "back-end" }, dataProvider = "consistentHashAlgorithms")
public void testBalancingRing(String consistentHashAlgorithm) throws URISyntaxException {
  DegraderLoadBalancerStrategyV3 strategy = getStrategy(consistentHashAlgorithm);
  List<TrackerClient> clients = new ArrayList<TrackerClient>();
  URI uri1 = URI.create("http://someTestService/someTestUrl");
  URI uri2 = URI.create("http://abcxfweuoeueoueoueoukeueoueoueoueoueouo/2354");
  TestClock clock1 = new TestClock();
  TestClock clock2 = new TestClock();
  TrackerClient client1 = getClient(uri1, clock1);
  TrackerClient client2 = getClient(uri2, clock2);
  clients.add(client1);
  clients.add(client2);
  // degrade client2: with a zero high error rate and an upStep of 0.4, a single
  // errored call pushes its drop rate to 0.4
  DegraderControl dcClient2Default = client2.getDegraderControl(DEFAULT_PARTITION_ID);
  dcClient2Default.setOverrideMinCallCount(1);
  dcClient2Default.setMinCallCount(1);
  dcClient2Default.setMaxDropRate(1d);
  dcClient2Default.setUpStep(0.4d);
  dcClient2Default.setHighErrorRate(0);
  CallCompletion cc = client2.getCallTracker().startCall();
  clock2.addMs(1);
  cc.endCallWithError();
  clock1.addMs(15000);
  clock2.addMs(5000);
  // now do a basic verification that getTrackerClient is weighting things properly
  double calls = 10000d;
  int client1Count = 0;
  int client2Count = 0;
  double tolerance = 0.05d;
  for (int i = 0; i < calls; ++i) {
    TrackerClient client = getTrackerClient(strategy, null, new RequestContext(), 1, clients);
    assertNotNull(client);
    if (client.getUri().equals(uri1)) {
      ++client1Count;
    } else {
      ++client2Count;
    }
  }
  assertTrue(Math.abs((client1Count / calls) - (100 / 160d)) < tolerance);
  assertTrue(Math.abs((client2Count / calls) - (60 / 160d)) < tolerance);
}
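The expected fractions in the final assertions follow from the ring arithmetic, assuming a healthy client contributes 100 points (an assumption for illustration, not stated in the test): client2's 0.4 drop rate cuts its points to 60, so of the 160 total points client1 owns 100/160 = 0.625 and client2 owns 60/160 = 0.375. A quick worked check:
// Worked arithmetic behind the two assertions above.
class RingShareSketch {
  public static void main(String[] args) {
    int client1Points = 100;                      // assumed healthy baseline
    int client2Points = (int) (100 * (1 - 0.4)); // upStep 0.4 after one error -> 60
    double total = client1Points + client2Points; // 160 points in the ring
    System.out.println("client1 share = " + client1Points / total); // 0.625
    System.out.println("client2 share = " + client2Points / total); // 0.375
  }
}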