Use of com.linkedin.d2.discovery.util.Stats in project rest.li by linkedin.
Example from class DegraderLoadBalancerTest, method testClusterRecovery2TC.
@Test(groups = { "small", "back-end" })
public void testClusterRecovery2TC() {
final int NUM_CHECKS = 5;
final Long TIME_INTERVAL = 5000L;
Map<String, Object> myMap = lbDefaultConfig();
// 1,2,4,8,16,32,64,100% steps, given a 2x recovery step coefficient
int localStepsToFullRecovery = 8;
myMap.put(PropertyKeys.HTTP_LB_INITIAL_RECOVERY_LEVEL, 0.005);
myMap.put(PropertyKeys.HTTP_LB_RING_RAMP_FACTOR, 2d);
myMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_UPDATE_INTERVAL_MS, TIME_INTERVAL);
TestClock clock = new TestClock();
myMap.put(PropertyKeys.CLOCK, clock);
DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
DegraderLoadBalancerStrategyV3 strategy = new DegraderLoadBalancerStrategyV3(config, "DegraderLoadBalancerTest", null, DEGRADER_STATE_LISTENER_FACTORIES);
List<DegraderTrackerClient> clients = new ArrayList<>();
URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
URI uri2 = URI.create("http://test.linkedin.com:3243/fdsaf");
URIRequest request = new URIRequest(uri1);
List<CallCompletion> ccList = new ArrayList<>();
CallCompletion cc;
DegraderTrackerClient client1 = new DegraderTrackerClientImpl(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);
DegraderTrackerClient client2 = new DegraderTrackerClientImpl(uri2, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri2), clock, null);
clients.add(client1);
clients.add(client2);
// force client1 to be disabled if we encounter errors/high latency
DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
dcClient1Default.setMinCallCount(5);
dcClient1Default.setOverrideMinCallCount(5);
dcClient1Default.setUpStep(1.0);
// force client2 to be disabled if we encounter errors/high latency
DegraderControl dcClient2Default = client2.getDegraderControl(DEFAULT_PARTITION_ID);
dcClient2Default.setOverrideMinCallCount(5);
dcClient2Default.setMinCallCount(5);
dcClient2Default.setUpStep(0.4);
// Have one cycle of successful calls to verify that valid tracker clients are returned.
// Try load balancing on this updateState; we need to updateState before forcing the strategy.
TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
strategy.setStrategy(DEFAULT_PARTITION_ID, PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
assertNotNull(resultTC, "expected non-null trackerclient");
for (int j = 0; j < NUM_CHECKS; j++) {
ccList.add(client1.getCallTracker().startCall());
ccList.add(client2.getCallTracker().startCall());
}
clock.addMs(1);
for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext(); ) {
cc = iter.next();
cc.endCall();
}
// bump to next interval, and get stats.
clock.addMs(5000);
// try load balancing on this updateState
strategy.setStrategy(DEFAULT_PARTITION_ID, PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
assertNotNull(resultTC, "expected non-null trackerclient");
Assert.assertEquals(dcClient1Default.getCurrentComputedDropRate(), 0.0);
Assert.assertEquals(dcClient2Default.getCurrentComputedDropRate(), 0.0);
// now simulate a bad cluster state with high error and high latency
for (int j = 0; j < NUM_CHECKS; j++) {
ccList.add(client1.getCallTracker().startCall());
ccList.add(client2.getCallTracker().startCall());
}
clock.addMs(3500);
for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext(); ) {
cc = iter.next();
cc.endCallWithError();
}
// go to next interval
clock.addMs(5000);
Assert.assertEquals(dcClient1Default.getCurrentComputedDropRate(), 1.0);
Assert.assertEquals(dcClient2Default.getCurrentComputedDropRate(), 0.4);
// trigger a state update, the returned TrackerClient should be client2
// because client 1 should have gone up to a 1.0 drop rate, and the cluster should
// be unhealthy
strategy.setStrategy(DEFAULT_PARTITION_ID, PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
assertEquals(resultTC, client2);
// Simulate recovery: with no further calls, each new interval ramps client1 back up
// through the hash ring (1, 2, 4, ... 100% of points) from its current drop rate.
do {
// go to next time interval.
clock.addMs(TIME_INTERVAL);
// adjust the hash ring this time.
strategy.setStrategy(DEFAULT_PARTITION_ID, PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
localStepsToFullRecovery--;
} while (localStepsToFullRecovery > 0);
assertNotNull(resultTC, "expected non-null trackerclient");
assertTrue(strategy.getState().getPartitionState(DEFAULT_PARTITION_ID).getPointsMap().get(client1.getUri())
    == client1.getPartitionWeight(DEFAULT_PARTITION_ID) * config.getPointsPerWeight(),
    "client1 did not recover to full weight in hash map.");
Assert.assertEquals(dcClient2Default.getCurrentComputedDropRate(), 0.4, "client2 drop rate not as expected");
cc = client1.getCallTracker().startCall();
clock.addMs(10);
cc.endCall();
clock.addMs(TIME_INTERVAL);
resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
assertNotNull(resultTC, "expected non-null trackerclient");
}
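The recovery arithmetic behind localStepsToFullRecovery = 8 can be sanity-checked: starting from the configured HTTP_LB_INITIAL_RECOVERY_LEVEL of 0.005 and multiplying by the 2x HTTP_LB_RING_RAMP_FACTOR once per update interval, eight ramps reach full weight. The helper below is a hypothetical illustration of that arithmetic, not part of rest.li.

public final class RecoverySteps {
    // Counts the update intervals a fully degraded client needs to climb back
    // to 100% of its hash-ring points, given the config used in the test above.
    static int stepsToFullRecovery(double initialRecoveryLevel, double ringRampFactor) {
        int steps = 0;
        double level = initialRecoveryLevel;
        while (level < 1.0) {
            level *= ringRampFactor; // one ramp per update interval
            steps++;
        }
        return steps;
    }

    public static void main(String[] args) {
        // 0.005 -> 0.01 -> 0.02 -> 0.04 -> 0.08 -> 0.16 -> 0.32 -> 0.64 -> full (capped at 100%)
        System.out.println(stepsToFullRecovery(0.005, 2d)); // prints 8
    }
}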
Use of com.linkedin.d2.discovery.util.Stats in project rest.li by linkedin.
Example from class TrackerClientMockHelper, method mockTrackerClients.
/**
* Mocks a list of {@link TrackerClient} instances without call stats
*
* @param numTrackerClients The number of hosts to be mocked
* @return A list of mocked {@link TrackerClient}
*/
public static List<TrackerClient> mockTrackerClients(int numTrackerClients) {
List<TrackerClient> trackerClients = new ArrayList<>();
for (int index = 0; index < numTrackerClients; index++) {
URI uri = URI.create("URI/" + index);
TrackerClient trackerClient = Mockito.mock(TrackerClient.class);
Mockito.when(trackerClient.getCallTracker()).thenReturn(new CallTrackerImpl(RelativeLoadBalancerStrategyFactory.DEFAULT_UPDATE_INTERVAL_MS));
Mockito.when(trackerClient.getUri()).thenReturn(uri);
Mockito.when(trackerClient.getPartitionWeight(anyInt())).thenReturn(1.0);
Mockito.when(trackerClient.getSubsetWeight(anyInt())).thenReturn(1.0);
trackerClients.add(trackerClient);
}
return trackerClients;
}
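A usage sketch for the helper follows; the assertions simply mirror what the mocks are stubbed to return (URIs of the form "URI/<index>" plus unit partition and subset weights). The import paths, and the example class and method names, are assumptions and may need adjusting to the actual rest.li packages.

import java.net.URI;
import java.util.List;
import org.testng.Assert;
import org.testng.annotations.Test;
// Assumed package for TrackerClient; TrackerClientMockHelper lives in rest.li's test sources.
import com.linkedin.d2.balancer.clients.TrackerClient;

public class TrackerClientMockHelperExampleTest {
    @Test
    public void testMockTrackerClients() {
        List<TrackerClient> mocks = TrackerClientMockHelper.mockTrackerClients(3);
        Assert.assertEquals(mocks.size(), 3);
        // URIs are generated as "URI/<index>".
        Assert.assertEquals(mocks.get(0).getUri(), URI.create("URI/0"));
        // Every mock is stubbed with unit partition and subset weights.
        Assert.assertEquals(mocks.get(1).getPartitionWeight(0), 1.0);
        Assert.assertEquals(mocks.get(2).getSubsetWeight(0), 1.0);
    }
}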
Use of com.linkedin.d2.discovery.util.Stats in project rest.li by linkedin.
Example from class DegraderLoadBalancerTest, method testAdjustedMinCallCount.
@Test(groups = { "small", "back-end" })
public void testAdjustedMinCallCount() {
final int NUM_CHECKS = 5;
final Long TIME_INTERVAL = 5000L;
Map<String, Object> myMap = lbDefaultConfig();
// myMap.put(PropertyKeys.LB_INITIAL_RECOVERY_LEVEL, 0.01);
// myMap.put("rampFactor", 2d);
myMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_UPDATE_INTERVAL_MS, TIME_INTERVAL);
TestClock clock = new TestClock();
myMap.put(PropertyKeys.CLOCK, clock);
DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
DegraderLoadBalancerStrategyV3 strategy = new DegraderLoadBalancerStrategyV3(config, "DegraderLoadBalancerTest", null, DEGRADER_STATE_LISTENER_FACTORIES);
List<DegraderTrackerClient> clients = new ArrayList<>();
URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
URIRequest request = new URIRequest(uri1);
List<CallCompletion> ccList = new ArrayList<>();
CallCompletion cc;
DegraderTrackerClient client1 = new DegraderTrackerClientImpl(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);
clients.add(client1);
// force client1 to be disabled if we encounter errors/high latency
DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
dcClient1Default.setOverrideMinCallCount(5);
dcClient1Default.setMinCallCount(5);
dcClient1Default.setUpStep(1.0);
dcClient1Default.setHighErrorRate(0);
// Issue high latency calls to reduce client1 to the minimum number of hash points allowed.
// (1 in this case)
TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
assertNotNull(resultTC, "expected non-null trackerclient");
for (int j = 0; j < NUM_CHECKS; j++) {
cc = ((DegraderTrackerClient) resultTC).getCallTracker().startCall();
ccList.add(cc);
}
clock.addMs(3500);
for (int j = 0; j < NUM_CHECKS; j++) {
cc = ccList.get(j);
cc.endCall();
}
// bump to next interval, and get stats.
clock.addMs(5000);
// because we want to test out the adjusted min drop rate, force the hash ring adjustment now.
strategy.setStrategy(DEFAULT_PARTITION_ID, PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
// client1 should be reduced to 1 hash point, but since it is the only TC, it should be the
// TC returned.
assertEquals(resultTC, client1, "expected client1 to be the returned trackerclient");
assertEquals((int) (strategy.getState().getPartitionState(DEFAULT_PARTITION_ID).getPointsMap().get(client1.getUri())),
    1, "expected client1 to have only 1 point in hash map");
// Make a low-latency call; we expect the computedDropRate to be adjusted because the
// minimum call count was also scaled down.
cc = client1.getCallTracker().startCall();
clock.addMs(10);
cc.endCall();
clock.addMs(TIME_INTERVAL);
Assert.assertTrue(dcClient1Default.getCurrentComputedDropRate() < 1.0, "client1 drop rate not less than 1.");
}
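The test's premise is that the degrader scales its minimum call count down along with the hash points, so a single fast call can already move the drop rate. The sketch below is an assumption about the shape of that scaling, not code copied from DegraderImpl.

// Assumption-level sketch, NOT the DegraderImpl source: one plausible way the
// effective minimum call count shrinks as the computed drop rate grows.
static int adjustedMinCallCount(int minCallCount, double computedDropRate) {
    // At computedDropRate = 1.0 this floors at 1, which is why the single
    // 10ms call in the test above is enough to pull client1's rate below 1.0.
    return Math.max((int) Math.round((1.0 - computedDropRate) * minCallCount), 1);
}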