Use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyV3 in project rest.li by linkedin.
The class DegraderLoadBalancerTest, method testLowTrafficHighLatency10Clients.
@Test(groups = { "small", "back-end" })
public void testLowTrafficHighLatency10Clients() {
Map<String, Object> myMap = new HashMap<String, Object>();
Long timeInterval = 5000L;
TestClock clock = new TestClock();
myMap.put(PropertyKeys.CLOCK, clock);
myMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_UPDATE_INTERVAL_MS, timeInterval);
//we need to override the min call count to 0 because we're testing a service with low traffic.
//if we don't do this, the computedDropRate will not change and we will never be able to recover
//after we degraded the cluster.
Map<String, String> degraderImplProperties = new HashMap<String, String>();
degraderImplProperties.put(PropertyKeys.DEGRADER_MIN_CALL_COUNT, "1");
degraderImplProperties.put(PropertyKeys.DEGRADER_HIGH_ERROR_RATE, "0.5");
degraderImplProperties.put(PropertyKeys.DEGRADER_LOW_ERROR_RATE, "0.2");
DegraderImpl.Config degraderConfig = DegraderConfigFactory.toDegraderConfig(degraderImplProperties);
double qps = 0.3;
//test Strategy V3
List<TrackerClient> clients = createTrackerClient(10, clock, degraderConfig);
DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
DegraderLoadBalancerStrategyV3 strategyV3 = new DegraderLoadBalancerStrategyV3(config, "DegraderLoadBalancerTest", null);
DegraderLoadBalancerStrategyAdapter strategy = new DegraderLoadBalancerStrategyAdapter(strategyV3);
testDegraderLoadBalancerSimulator(strategy, clock, timeInterval, clients, qps, degraderConfig);
//test Strategy V2
clients = createTrackerClient(10, clock, degraderConfig);
config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
DegraderLoadBalancerStrategyV2_1 strategyV2 = new DegraderLoadBalancerStrategyV2_1(config, "DegraderLoadBalancerTest", null);
strategy = new DegraderLoadBalancerStrategyAdapter(strategyV2);
testDegraderLoadBalancerSimulator(strategy, clock, timeInterval, clients, qps, degraderConfig);
}
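The createTrackerClient helper is not shown on this page. A minimal sketch of what it could look like, assuming the same TrackerClient constructor used in testClusterRecovery2TC below (URI, default partition data, TestLoadBalancerClient, clock, degrader config); the port numbering is illustrative only.
// Hypothetical helper, assuming the TrackerClient constructor used elsewhere in this test class.
private List<TrackerClient> createTrackerClient(int n, TestClock clock, DegraderImpl.Config config) {
  List<TrackerClient> result = new ArrayList<TrackerClient>();
  for (int i = 0; i < n; i++) {
    URI uri = URI.create("http://test.linkedin.com:" + (3242 + i) + "/fdsaf");
    // every client shares the test clock and the (possibly null) degrader config
    result.add(new TrackerClient(uri, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri), clock, config));
  }
  return result;
}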
Use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyV3 in project rest.li by linkedin.
The class DegraderLoadBalancerTest, method testLoadBalancerCallDroppingMode.
/**
 * Tests that overrideDropRate is affected by minCallCount: if there are not enough calls,
 * the cluster drop rate should not be affected. This test also exercises several other
 * call-dropping pathways.
 */
@Test(groups = { "small", "back-end" })
public void testLoadBalancerCallDroppingMode() {
Map<String, Object> myMap = new HashMap<String, Object>();
Long timeInterval = 5000L;
double highWaterMark = 1000;
double lowWaterMark = 500;
long minCallHighWaterMark = 50L;
long minCallLowWaterMark = 20L;
TestClock clock = new TestClock();
myMap.put(PropertyKeys.CLOCK, clock);
myMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_UPDATE_INTERVAL_MS, timeInterval);
myMap.put(PropertyKeys.HTTP_LB_HIGH_WATER_MARK, highWaterMark);
myMap.put(PropertyKeys.HTTP_LB_LOW_WATER_MARK, lowWaterMark);
myMap.put(PropertyKeys.HTTP_LB_CLUSTER_MIN_CALL_COUNT_HIGH_WATER_MARK, minCallHighWaterMark);
myMap.put(PropertyKeys.HTTP_LB_CLUSTER_MIN_CALL_COUNT_LOW_WATER_MARK, minCallLowWaterMark);
DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
//test strategy v3
DegraderLoadBalancerStrategyAdapter strategyAdapter = new DegraderLoadBalancerStrategyAdapter(new DegraderLoadBalancerStrategyV3(config, "DegraderLoadBalancerTest", null));
final List<TrackerClient> clients = createTrackerClient(3, clock, null);
testCallDroppingHelper(strategyAdapter, clients, clock, timeInterval);
//test strategy v2
strategyAdapter = new DegraderLoadBalancerStrategyAdapter(new DegraderLoadBalancerStrategyV2_1(config, "DegraderLoadBalancerTest", null));
testCallDroppingHelper(strategyAdapter, clients, clock, timeInterval);
}
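testCallDroppingHelper is also not shown here. A rough sketch of the kind of assertion it would make, using only the DegraderControl accessors that appear elsewhere on this page, is that a low-traffic interval must leave the override drop rate untouched even when latency exceeds highWaterMark:
// Hypothetical check, assuming the DegraderControl accessors used in the other tests on this page.
// With fewer calls than minCallLowWaterMark in the interval, high latency alone
// should not raise the cluster-level override drop rate.
for (TrackerClient client : clients) {
  assertEquals(client.getDegraderControl(DEFAULT_PARTITION_ID).getOverrideDropRate(), 0.0);
}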
Use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyV3 in project rest.li by linkedin.
The class DegraderLoadBalancerTest, method testClusterRecovery2TC.
@Test(groups = { "small", "back-end" })
public void testClusterRecovery2TC() {
final int NUM_CHECKS = 5;
final Long TIME_INTERVAL = 5000L;
Map<String, Object> myMap = new HashMap<String, Object>();
// 1,2,4,8,16,32,64,100% steps, given a 2x recovery step coefficient
int localStepsToFullRecovery = 8;
myMap.put(PropertyKeys.HTTP_LB_INITIAL_RECOVERY_LEVEL, 0.005);
myMap.put(PropertyKeys.HTTP_LB_RING_RAMP_FACTOR, 2d);
myMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_UPDATE_INTERVAL_MS, TIME_INTERVAL);
TestClock clock = new TestClock();
myMap.put(PropertyKeys.CLOCK, clock);
DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
DegraderLoadBalancerStrategyV3 strategy = new DegraderLoadBalancerStrategyV3(config, "DegraderLoadBalancerTest", null);
List<TrackerClient> clients = new ArrayList<TrackerClient>();
URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
URI uri2 = URI.create("http://test.linkedin.com:3243/fdsaf");
URIRequest request = new URIRequest(uri1);
List<CallCompletion> ccList = new ArrayList<CallCompletion>();
CallCompletion cc;
TrackerClient client1 = new TrackerClient(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);
TrackerClient client2 = new TrackerClient(uri2, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri2), clock, null);
clients.add(client1);
clients.add(client2);
// force client1 to be disabled if we encounter errors/high latency
DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
dcClient1Default.setMinCallCount(5);
dcClient1Default.setOverrideMinCallCount(5);
dcClient1Default.setUpStep(1.0);
// force client2 to be disabled if we encounter errors/high latency
DegraderControl dcClient2Default = client2.getDegraderControl(DEFAULT_PARTITION_ID);
dcClient2Default.setOverrideMinCallCount(5);
dcClient2Default.setMinCallCount(5);
dcClient2Default.setUpStep(0.4);
// Have one cycle of successful calls to verify valid tracker clients returned.
// try load balancing on this updateState, need to updateState before forcing the strategy.
TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
strategy.setStrategy(DEFAULT_PARTITION_ID, DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
assertNotNull(resultTC, "expected non-null trackerclient");
for (int j = 0; j < NUM_CHECKS; j++) {
  ccList.add(client1.getCallTracker().startCall());
  ccList.add(client2.getCallTracker().startCall());
}
clock.addMs(1);
for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext(); ) {
  cc = iter.next();
  cc.endCall();
}
// bump to next interval, and get stats.
clock.addMs(5000);
// try Load balancing on this updateState
strategy.setStrategy(DEFAULT_PARTITION_ID, DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
assertNotNull(resultTC, "expected non-null trackerclient");
Assert.assertEquals(dcClient1Default.getCurrentComputedDropRate(), 0.0);
Assert.assertEquals(dcClient2Default.getCurrentComputedDropRate(), 0.0);
// now simulate a bad cluster state with high error and high latency
for (int j = 0; j < NUM_CHECKS; j++) {
  ccList.add(client1.getCallTracker().startCall());
  ccList.add(client2.getCallTracker().startCall());
}
clock.addMs(3500);
for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext(); ) {
  cc = iter.next();
  cc.endCallWithError();
}
// go to next interval
clock.addMs(5000);
Assert.assertEquals(dcClient1Default.getCurrentComputedDropRate(), 1.0);
Assert.assertEquals(dcClient2Default.getCurrentComputedDropRate(), 0.4);
// trigger a state update, the returned TrackerClient should be client2
// because client 1 should have gone up to a 1.0 drop rate, and the cluster should
// be unhealthy
strategy.setStrategy(DEFAULT_PARTITION_ID, DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
assertEquals(resultTC, client2);
// step through recovery intervals with no tracked calls; the ring recovery mechanism ramps
// client1 back up to full weight in the hash ring while client2 stays at its current drop rate.
do {
  // go to next time interval.
  clock.addMs(TIME_INTERVAL);
  // adjust the hash ring this time.
  strategy.setStrategy(DEFAULT_PARTITION_ID, DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
  resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
  localStepsToFullRecovery--;
} while (localStepsToFullRecovery > 0);
assertNotNull(resultTC, "expected non-null trackerclient");
assertTrue(strategy.getState().getPartitionState(DEFAULT_PARTITION_ID).getPointsMap().get(client1.getUri()) == client1.getPartitionWeight(DEFAULT_PARTITION_ID) * config.getPointsPerWeight(), "client1 did not recover to full weight in hash map.");
Assert.assertEquals(dcClient2Default.getCurrentComputedDropRate(), 0.4, "client2 drop rate not as expected");
cc = client1.getCallTracker().startCall();
clock.addMs(10);
cc.endCall();
clock.addMs(TIME_INTERVAL);
resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
assertNotNull(resultTC, "expected non-null trackerclient");
}
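The localStepsToFullRecovery value of 8 used above follows directly from the configured initial recovery level (0.005) and the 2x ring ramp factor: the recovery level roughly doubles each update interval until the client is back to full weight. A stand-alone illustration of that arithmetic (not part of the load balancer API):
// Illustration only: doubling from the configured initial recovery level until full weight.
double recoveryLevel = 0.005;  // HTTP_LB_INITIAL_RECOVERY_LEVEL
double rampFactor = 2d;        // HTTP_LB_RING_RAMP_FACTOR
int steps = 0;
while (recoveryLevel < 1.0) {
  recoveryLevel = Math.min(1.0, recoveryLevel * rampFactor);
  steps++;
}
System.out.println(steps);  // 8 steps: 1%, 2%, 4%, 8%, 16%, 32%, 64%, 100%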
Use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyV3 in project rest.li by linkedin.
The class DegraderLoadBalancerTest, method testStateIsNullAndCallCountIsGreaterThanZeroWithLatency.
@Test(groups = { "small", "back-end" })
public void testStateIsNullAndCallCountIsGreaterThanZeroWithLatency() throws URISyntaxException, InterruptedException {
// check for average cluster latency < max latency
// max so we don't time out from lag on testing machine
DegraderLoadBalancerStrategyV3 strategy = new DegraderLoadBalancerStrategyV3(new DegraderLoadBalancerStrategyConfig(5000), "DegraderLoadBalancerTest", null);
List<TrackerClient> clients = new ArrayList<TrackerClient>();
TestClock clock1 = new TestClock();
TestClock clock2 = new TestClock();
clients.add(getClient(URI.create("http://test.linkedin.com:3242/fdsaf"), clock1));
clients.add(getClient(URI.create("http://test.linkedin.com:3243/fdsaf"), clock2));
for (int i = 0; i < 1000; ++i) {
  clients.get(i % 2).getCallTracker().startCall().endCall();
}
clock1.addMs(5000);
// this should trigger setting _state (state is null and count > 0)
getTrackerClient(strategy, null, new RequestContext(), -1, clients);
// we chose to initialize the state first.
for (TrackerClient client : clients) {
  assertEquals(client.getDegraderControl(DEFAULT_PARTITION_ID).getOverrideDropRate(), 0.0);
}
}
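The getClient helper is not shown on this page either; under the same assumption as above about the TrackerClient constructor, it could look roughly like this:
// Hypothetical helper, assuming the constructor used in the other tests on this page.
private TrackerClient getClient(URI uri, TestClock clock) {
  return new TrackerClient(uri, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri), clock, null);
}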
Use of com.linkedin.d2.balancer.strategies.degrader.DegraderLoadBalancerStrategyV3 in project rest.li by linkedin.
The class DegraderLoadBalancerTest, method testWeightedAndLatencyDegradationBalancingRing.
@Test(groups = { "small", "back-end" }, dataProvider = "consistentHashAlgorithms")
public void testWeightedAndLatencyDegradationBalancingRing(String consistentHashAlgorithm) throws URISyntaxException {
DegraderLoadBalancerStrategyV3 strategy = getStrategy(consistentHashAlgorithm);
List<TrackerClient> clients = new ArrayList<TrackerClient>();
URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
URI uri2 = URI.create("http://test.linkedin.com:3243/fdsaf");
TestClock clock1 = new TestClock();
TestClock clock2 = new TestClock();
TrackerClient client1 = new TrackerClient(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock1, null);
TrackerClient client2 = new TrackerClient(uri2, getDefaultPartitionData(0.8d), new TestLoadBalancerClient(uri2), clock2, null);
clients.add(client1);
clients.add(client2);
DegraderControl dcClient2Default = client2.getDegraderControl(DEFAULT_PARTITION_ID);
dcClient2Default.setOverrideMinCallCount(1);
dcClient2Default.setMinCallCount(1);
dcClient2Default.setMaxDropRate(1d);
dcClient2Default.setUpStep(0.4d);
dcClient2Default.setHighErrorRate(0);
CallCompletion cc = client2.getCallTracker().startCall();
clock2.addMs(1);
cc.endCallWithError();
clock1.addMs(15000);
clock2.addMs(5000);
// trigger a state update
assertNotNull(getTrackerClient(strategy, null, new RequestContext(), 1, clients));
// now do a basic check to verify getTrackerClient is properly weighting the two clients
double calls = 10000d;
int client1Count = 0;
int client2Count = 0;
double tolerance = 0.05d;
for (int i = 0; i < calls; ++i) {
  TrackerClient client = getTrackerClient(strategy, null, new RequestContext(), 1, clients);
  assertNotNull(client);
  if (client.getUri().equals(uri1)) {
    ++client1Count;
  } else {
    ++client2Count;
  }
}
assertTrue(Math.abs((client1Count / calls) - (100 / 148d)) < tolerance);
assertTrue(Math.abs((client2Count / calls) - (48 / 148d)) < tolerance);
}
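The expected 100/148 and 48/148 fractions come from the points each client gets in the hash ring: client1 has weight 1.0 and no degradation, while client2 has weight 0.8 and a 0.4 computed drop rate after its error call. Assuming the default 100 points per weight (the same config.getPointsPerWeight() factor checked in testClusterRecovery2TC above), the split works out as:
// Stand-alone arithmetic for the expected call split; assumes 100 points per weight.
double pointsPerWeight = 100d;
double client1Points = 1.0 * (1.0 - 0.0) * pointsPerWeight;  // 100: full weight, no drop
double client2Points = 0.8 * (1.0 - 0.4) * pointsPerWeight;  // 48: weight 0.8, 40% dropped
double total = client1Points + client2Points;                // 148
System.out.println(client1Points / total);                   // ~0.676 == 100/148
System.out.println(client2Points / total);                   // ~0.324 == 48/148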