Use of com.linkedin.d2.balancer.clients.DegraderTrackerClient in project rest.li by LinkedIn.
The class DegraderLoadBalancerTest, method testAdjustedMinCallCount.
@Test(groups = { "small", "back-end" })
public void testAdjustedMinCallCount() {
final int NUM_CHECKS = 5;
final Long TIME_INTERVAL = 5000L;
Map<String, Object> myMap = lbDefaultConfig();
// myMap.put(PropertyKeys.LB_INITIAL_RECOVERY_LEVEL, 0.01);
// myMap.put("rampFactor", 2d);
myMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_UPDATE_INTERVAL_MS, TIME_INTERVAL);
TestClock clock = new TestClock();
myMap.put(PropertyKeys.CLOCK, clock);
DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
DegraderLoadBalancerStrategyV3 strategy = new DegraderLoadBalancerStrategyV3(config, "DegraderLoadBalancerTest", null, DEGRADER_STATE_LISTENER_FACTORIES);
List<DegraderTrackerClient> clients = new ArrayList<>();
URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
URIRequest request = new URIRequest(uri1);
List<CallCompletion> ccList = new ArrayList<>();
CallCompletion cc;
DegraderTrackerClient client1 = new DegraderTrackerClientImpl(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);
clients.add(client1);
// force client1 to be disabled if we encounter errors/high latency
DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
dcClient1Default.setOverrideMinCallCount(5);
dcClient1Default.setMinCallCount(5);
dcClient1Default.setUpStep(1.0);
dcClient1Default.setHighErrorRate(0);
// Issue high latency calls to reduce client1 to the minimum number of hash points allowed.
// (1 in this case)
TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
assertNotNull(resultTC, "expected non-null trackerclient");
for (int j = 0; j < NUM_CHECKS; j++) {
cc = ((DegraderTrackerClient) resultTC).getCallTracker().startCall();
ccList.add(cc);
}
clock.addMs(3500);
for (int j = 0; j < NUM_CHECKS; j++) {
cc = ccList.get(j);
cc.endCall();
}
// bump to next interval, and get stats.
clock.addMs(5000);
// because we want to test out the adjusted min drop rate, force the hash ring adjustment now.
strategy.setStrategy(DEFAULT_PARTITION_ID, PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
// client1 should be reduced to 1 hash point, but since it is the only TC, it should be the
// TC returned.
assertEquals(resultTC, client1, "expected resultTC to be client1");
assertEquals((int) (strategy.getState().getPartitionState(DEFAULT_PARTITION_ID).getPointsMap().get(client1.getUri())), 1, "expected client1 to have only 1 point in hash map");
// make low latency call, we expect the computedDropRate to be adjusted because the minimum
// call count was also scaled down.
cc = client1.getCallTracker().startCall();
clock.addMs(10);
cc.endCall();
clock.addMs(TIME_INTERVAL);
Assert.assertTrue(dcClient1Default.getCurrentComputedDropRate() < 1.0, "client1 drop rate not less than 1.");
}
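The calls to getTrackerClient(strategy, request, ...) above go through a helper defined elsewhere in DegraderLoadBalancerTest and not shown in this excerpt. A minimal sketch of what such a helper could look like, assuming it only converts the client list into the URI-keyed map that DegraderLoadBalancerStrategyV3.getTrackerClient expects (as the TestGetTrackerClients example below does with toMap(clients)); the real helper may take extra arguments or overloads:
// Hypothetical sketch of the test helper used above; not the actual helper
// from DegraderLoadBalancerTest.
private TrackerClient getTrackerClient(DegraderLoadBalancerStrategyV3 strategy, URIRequest request, RequestContext requestContext, long clusterGenerationId, List<DegraderTrackerClient> clients) {
    Map<URI, TrackerClient> clientMap = new HashMap<>();
    for (DegraderTrackerClient client : clients) {
        clientMap.put(client.getUri(), client);
    }
    return strategy.getTrackerClient(request, requestContext, clusterGenerationId, DefaultPartitionAccessor.DEFAULT_PARTITION_ID, clientMap);
}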
Use of com.linkedin.d2.balancer.clients.DegraderTrackerClient in project rest.li by LinkedIn.
The class DegraderLoadBalancerTest, method TestGetTrackerClients.
// Performance test, disabled by default
@Test(groups = { "small", "back-end" }, enabled = false)
public void TestGetTrackerClients() {
final DegraderLoadBalancerStrategyV3 strategy = getStrategy();
TestClock testClock = new TestClock();
String baseUri = "http://linkedin.com:9999";
int numberOfClients = 100;
int loopNumber = 100000;
Map<String, String> degraderProperties = new HashMap<>();
degraderProperties.put(PropertyKeys.DEGRADER_HIGH_ERROR_RATE, "0.5");
degraderProperties.put(PropertyKeys.DEGRADER_LOW_ERROR_RATE, "0.2");
DegraderImpl.Config degraderConfig = DegraderConfigFactory.toDegraderConfig(degraderProperties);
RequestContext requestContext = new RequestContext();
Random random = new Random();
final List<DegraderTrackerClient> clients = new ArrayList<>(numberOfClients);
Map<TrackerClient, Integer> clientCount = new HashMap<>();
// create trackerclients
for (int i = 0; i < numberOfClients; i++) {
URI uri = URI.create(baseUri + i);
DegraderTrackerClient client = new DegraderTrackerClientImpl(uri, getDefaultPartitionData(1, 1), new TestLoadBalancerClient(uri), testClock, degraderConfig);
clients.add(client);
}
for (int i = 0; i < loopNumber; ++i) {
TrackerClient client = strategy.getTrackerClient(null, requestContext, 1, DefaultPartitionAccessor.DEFAULT_PARTITION_ID, toMap(clients));
assertNotNull(client);
Integer count = clientCount.get(client);
if (count == null) {
clientCount.put(client, 1);
} else {
clientCount.put(client, count + 1);
}
}
int i = 0;
int avg_count = (loopNumber * 5) / (numberOfClients * 10);
for (Integer count : clientCount.values()) {
assertTrue(count >= avg_count);
i++;
}
assertTrue(i == numberOfClients);
}
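Two notes on this example: avg_count works out to 500, i.e. half of the uniform expectation of loopNumber / numberOfClients = 1000 picks per client, so the assertion only checks that no client falls below 50% of its fair share. Also, the null-check-then-put tally can be written more compactly with Map.merge, as in this equivalent sketch:
// Equivalent tally of how often each client is picked, using Map.merge
// instead of the explicit null check.
for (int i = 0; i < loopNumber; ++i) {
    TrackerClient client = strategy.getTrackerClient(null, requestContext, 1, DefaultPartitionAccessor.DEFAULT_PARTITION_ID, toMap(clients));
    assertNotNull(client);
    clientCount.merge(client, 1, Integer::sum);
}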
Use of com.linkedin.d2.balancer.clients.DegraderTrackerClient in project rest.li by LinkedIn.
The class DegraderLoadBalancerTest, method testDropDueToDegrader.
@Test(groups = { "small", "back-end" })
public void testDropDueToDegrader() throws URISyntaxException {
DegraderLoadBalancerStrategyV3 strategy = getStrategy();
List<DegraderTrackerClient> clients = new ArrayList<>();
List<DegraderTrackerClientUpdater> clientUpdaters = new ArrayList<>();
clients.add(getClient(URI.create("http://test.linkedin.com:3242/fdsaf"), new TestClock()));
clients.add(getClient(URI.create("http://test.linkedin.com:3243/fdsaf"), new TestClock()));
for (DegraderTrackerClient client : clients) {
clientUpdaters.add(new DegraderTrackerClientUpdater(client, DEFAULT_PARTITION_ID));
}
// first verify that we're getting clients
assertNotNull(getTrackerClient(strategy, null, new RequestContext(), 0, clients));
assertFalse(clients.get(0).getDegrader(DEFAULT_PARTITION_ID).checkDrop());
assertFalse(clients.get(1).getDegrader(DEFAULT_PARTITION_ID).checkDrop());
// now force drop rate to 100% for entire cluster
DegraderLoadBalancerStrategyV3.overrideClusterDropRate(DEFAULT_PARTITION_ID, 1d, clientUpdaters);
for (DegraderTrackerClientUpdater clientUpdater : clientUpdaters) {
clientUpdater.update();
}
// now verify that everything is dropping
assertNull(getTrackerClient(strategy, null, new RequestContext(), 1, clients));
assertTrue(clients.get(0).getDegrader(DEFAULT_PARTITION_ID).checkDrop());
assertTrue(clients.get(1).getDegrader(DEFAULT_PARTITION_ID).checkDrop());
}
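This test builds its clients through a getClient(uri, clock) helper that is not shown in this excerpt. Judging from the explicit constructor call in testAdjustedMinCallCount above, it presumably looks roughly like the following sketch (an assumption; the real helper may apply additional degrader configuration):
// Assumed shape of the getClient helper, modeled on the DegraderTrackerClientImpl
// constructor call shown in testAdjustedMinCallCount.
private DegraderTrackerClient getClient(URI uri, TestClock clock) {
    return new DegraderTrackerClientImpl(uri, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri), clock, null);
}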
Use of com.linkedin.d2.balancer.clients.DegraderTrackerClient in project rest.li by LinkedIn.
The class DegraderLoadBalancerTest, method testCallDroppingHelper.
private void testCallDroppingHelper(DegraderLoadBalancerStrategyAdapter strategyAdapter, List<DegraderTrackerClient> clients, TestClock clock, Long timeInterval) {
// test clusterOverrideDropRate won't increase even though latency is 3000 ms because the traffic is low
callClients(3000, 0.2, clients, clock, timeInterval, false, false);
strategyAdapter.setStrategyToCallDrop();
URIRequest request = new URIRequest(clients.get(0).getUri());
getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
assertTrue(isEqual(0.0d, strategyAdapter.getCurrentOverrideDropRate()));
// if we increase the QPS from 0.2 to 25, then we'll start dropping calls
callClients(3000, 25, clients, clock, timeInterval, false, false);
strategyAdapter.setStrategyToCallDrop();
getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
assertTrue(isEqual(0.2d, strategyAdapter.getCurrentOverrideDropRate()));
// if we set the QPS to be somewhere below high and low water mark then the drop rate stays the same
// even though the latency is high
callClients(3000, 2, clients, clock, timeInterval, false, false);
strategyAdapter.setStrategyToCallDrop();
getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
assertTrue(isEqual(0.2d, strategyAdapter.getCurrentOverrideDropRate()));
// now we want to degrade the cluster even further
callClients(3000, 25, clients, clock, timeInterval, false, false);
strategyAdapter.setStrategyToCallDrop();
getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
assertTrue(isEqual(0.4d, strategyAdapter.getCurrentOverrideDropRate()));
callClients(3000, 25, clients, clock, timeInterval, false, false);
strategyAdapter.setStrategyToCallDrop();
getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
assertTrue(isEqual(0.6d, strategyAdapter.getCurrentOverrideDropRate()));
callClients(3000, 25, clients, clock, timeInterval, false, false);
strategyAdapter.setStrategyToCallDrop();
getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
assertTrue(isEqual(0.8d, strategyAdapter.getCurrentOverrideDropRate()));
callClients(3000, 25, clients, clock, timeInterval, false, false);
strategyAdapter.setStrategyToCallDrop();
getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
assertTrue(isEqual(1.0d, strategyAdapter.getCurrentOverrideDropRate()));
// if we have qps below lowWaterMark, we will reduce drop rate even though latency is high
callClients(3000, 0.5, clients, clock, timeInterval, false, false);
strategyAdapter.setStrategyToCallDrop();
getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
assertTrue(isEqual(0.8d, strategyAdapter.getCurrentOverrideDropRate()));
// if we have qps below lowWaterMark and qps is low, we will also reduce drop rate
callClients(100, 0.5, clients, clock, timeInterval, false, false);
strategyAdapter.setStrategyToCallDrop();
getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
assertTrue(isEqual(0.6d, strategyAdapter.getCurrentOverrideDropRate()));
// if we have qps between lowWaterMark and highWaterMark and latency is low, we will reduce drop rate
callClients(100, 2, clients, clock, timeInterval, false, false);
strategyAdapter.setStrategyToCallDrop();
getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
assertTrue(isEqual(0.4d, strategyAdapter.getCurrentOverrideDropRate()));
// if we have qps higher than highWaterMark and latency is low, we will reduce drop rate
callClients(100, 25, clients, clock, timeInterval, false, false);
strategyAdapter.setStrategyToCallDrop();
getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
assertTrue(isEqual(0.2d, strategyAdapter.getCurrentOverrideDropRate()));
}
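The assertions above trace the cluster override drop rate stepping up by 0.2 per balancing interval while latency sits at the high water mark and there is enough traffic to judge, holding steady when traffic falls between the two call-count thresholds, and stepping back down by 0.2 once latency is low or traffic drops off. A simplified, illustrative sketch of that ramp under those assumptions (this is not the actual DegraderLoadBalancerStrategyV3 code, and the parameter names are invented):
// Illustrative ramp logic implied by the assertions above; the real strategy
// update is more involved.
double adjustOverrideDropRate(double current, double clusterLatencyMs, long clusterCallCount, double highWaterMarkMs, double lowWaterMarkMs, long minCallCountHigh, long minCallCountLow, double stepUp, double stepDown) {
    if (clusterLatencyMs >= highWaterMarkMs && clusterCallCount >= minCallCountHigh) {
        return Math.min(1.0, current + stepUp);   // high latency with enough traffic: drop more (+0.2 per interval here)
    }
    if (clusterLatencyMs <= lowWaterMarkMs || clusterCallCount <= minCallCountLow) {
        return Math.max(0.0, current - stepDown); // low latency or too little traffic: recover (-0.2 per interval here)
    }
    return current;                               // otherwise hold the current rate
}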
Use of com.linkedin.d2.balancer.clients.DegraderTrackerClient in project rest.li by LinkedIn.
The class DegraderLoadBalancerTest, method testStateIsNullAndCallCountIsGreaterThanZero.
@Test(groups = { "small", "back-end" })
public void testStateIsNullAndCallCountIsGreaterThanZero() throws URISyntaxException, InterruptedException {
// check that average cluster latency < max latency;
// use a high max so we don't time out due to lag on the testing machine
DegraderLoadBalancerStrategyV3 strategy = new DegraderLoadBalancerStrategyV3(new DegraderLoadBalancerStrategyConfig(5000), "DegraderLoadBalancerTest", null, DEGRADER_STATE_LISTENER_FACTORIES);
List<DegraderTrackerClient> clients = new ArrayList<>();
TestClock clock1 = new TestClock();
TestClock clock2 = new TestClock();
clients.add(getClient(URI.create("http://test.linkedin.com:3242/fdsaf"), clock1));
clients.add(getClient(URI.create("http://test.linkedin.com:3243/fdsaf"), clock2));
for (int i = 0; i < 1000; ++i) {
clients.get(i % 2).getCallTracker().startCall().endCall();
}
clock1.addMs(5000);
// this should trigger setting _state (state is null and count > 0) with an override
// of 0d
getTrackerClient(strategy, null, new RequestContext(), -1, clients);
for (DegraderTrackerClient client : clients) {
assertEquals(client.getDegraderControl(DEFAULT_PARTITION_ID).getOverrideDropRate(), 0d);
}
}
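All of these examples drive time with a TestClock rather than the system clock, advancing it manually with addMs to push the strategy across update intervals. A minimal sketch of such a clock, assuming it only needs to implement com.linkedin.util.clock.Clock; the actual TestClock used by DegraderLoadBalancerTest may differ:
// Minimal manually-advanced clock for tests.
public class TestClock implements com.linkedin.util.clock.Clock {
    private long _currentTimeMillis = 0;

    public void addMs(long ms) {
        _currentTimeMillis += ms;
    }

    @Override
    public long currentTimeMillis() {
        return _currentTimeMillis;
    }
}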