Use of com.linkedin.d2.balancer.util.URIRequest in project rest.li by linkedin: class DegraderLoadBalancerTest, method simulateAndTestOneInterval.
/**
 * Simulates calling all the clients at the given QPS for the given time interval,
 * then verifies that the DegraderLoadBalancerState behaves as expected.
 * @param expectedPointsPerClient the exact number of hash-ring points we expect each client to have
 * @param isCalledWithError whether each simulated call completes with an error
 * @param isCalledWithErrorForLoadBalancing whether the error should also count toward the load-balancing (degrader) statistics
 */
private TrackerClient simulateAndTestOneInterval(long timeInterval, TestClock clock, double qps,
    List<TrackerClient> clients, DegraderLoadBalancerStrategyAdapter adapter, long clusterGenerationId,
    Integer expectedPointsPerClient, boolean isExpectingDropCallStrategyForNewState,
    double expectedClusterOverrideDropRate, long latency, boolean isCalledWithError,
    boolean isCalledWithErrorForLoadBalancing) {
  callClients(latency, qps, clients, clock, timeInterval, isCalledWithError, isCalledWithErrorForLoadBalancing);
  // create any URIRequest; we just need some URI to hash so we can look up a point in the hash ring
  if (clients != null && !clients.isEmpty()) {
    URIRequest request = new URIRequest(clients.get(0).getUri());
    TrackerClient client = getTrackerClient(adapter, request, new RequestContext(), clusterGenerationId, clients);
    Map<URI, Integer> pointsMap = adapter.getPointsMap();
    for (TrackerClient trackerClient : clients) {
      Integer pointsInTheRing = pointsMap.get(trackerClient.getUri());
      assertEquals(pointsInTheRing, expectedPointsPerClient);
    }
    if (isExpectingDropCallStrategyForNewState) {
      assertTrue(adapter.isStrategyCallDrop());
    } else {
      assertFalse(adapter.isStrategyCallDrop());
    }
    assertEquals(adapter.getCurrentOverrideDropRate(), expectedClusterOverrideDropRate);
    return client;
  }
  return null;
}
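A hypothetical invocation of this helper, as a sketch only: the argument values are illustrative, and clock, clients, and adapter are assumed to be set up the way the surrounding tests set them up.

// Illustrative only: one healthy interval, so each client should keep its full
// complement of hash-ring points and the override drop rate should stay at 0.
TrackerClient picked = simulateAndTestOneInterval(
    5000L,   // timeInterval in ms
    clock,   // TestClock shared with the tracker clients
    10.0,    // qps
    clients, // tracker clients under test
    adapter, // wraps the V2 or V3 strategy
    1L,      // clusterGenerationId
    100,     // expectedPointsPerClient, assuming the common 100-points-per-weight setup
    true,    // expect the new state to use the call-dropping strategy
    0.0,     // expectedClusterOverrideDropRate
    300L,    // latency in ms, well below the degrader's high-latency threshold
    false,   // isCalledWithError
    false);  // isCalledWithErrorForLoadBalancing
assertNotNull(picked);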
Use of com.linkedin.d2.balancer.util.URIRequest in project rest.li by linkedin: class DegraderLoadBalancerTest, method testCallDroppingHelper.
private void testCallDroppingHelper(DegraderLoadBalancerStrategyAdapter strategyAdapter, List<TrackerClient> clients, TestClock clock, Long timeInterval) {
  // test that clusterOverrideDropRate won't increase even though latency is 3000 ms, because the traffic is low
  callClients(3000, 0.2, clients, clock, timeInterval, false, false);
  strategyAdapter.setStrategyToCallDrop();
  URIRequest request = new URIRequest(clients.get(0).getUri());
  getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
  assertTrue(isEqual(0.0d, strategyAdapter.getCurrentOverrideDropRate()));
  // if we increase the QPS from 0.2 to 25, we'll start dropping calls
  callClients(3000, 25, clients, clock, timeInterval, false, false);
  strategyAdapter.setStrategyToCallDrop();
  getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
  assertTrue(isEqual(0.2d, strategyAdapter.getCurrentOverrideDropRate()));
  // if we set the QPS somewhere between the low and high water marks, the drop rate stays
  // the same even though the latency is high
  callClients(3000, 2, clients, clock, timeInterval, false, false);
  strategyAdapter.setStrategyToCallDrop();
  getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
  assertTrue(isEqual(0.2d, strategyAdapter.getCurrentOverrideDropRate()));
  // now we degrade the cluster even further: each high-traffic, high-latency interval adds another step
  callClients(3000, 25, clients, clock, timeInterval, false, false);
  strategyAdapter.setStrategyToCallDrop();
  getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
  assertTrue(isEqual(0.4d, strategyAdapter.getCurrentOverrideDropRate()));
  callClients(3000, 25, clients, clock, timeInterval, false, false);
  strategyAdapter.setStrategyToCallDrop();
  getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
  assertTrue(isEqual(0.6d, strategyAdapter.getCurrentOverrideDropRate()));
  callClients(3000, 25, clients, clock, timeInterval, false, false);
  strategyAdapter.setStrategyToCallDrop();
  getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
  assertTrue(isEqual(0.8d, strategyAdapter.getCurrentOverrideDropRate()));
  callClients(3000, 25, clients, clock, timeInterval, false, false);
  strategyAdapter.setStrategyToCallDrop();
  getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
  assertTrue(isEqual(1.0d, strategyAdapter.getCurrentOverrideDropRate()));
  // if qps drops below lowWaterMark, we reduce the drop rate even though latency is still high
  callClients(3000, 0.5, clients, clock, timeInterval, false, false);
  strategyAdapter.setStrategyToCallDrop();
  getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
  assertTrue(isEqual(0.8d, strategyAdapter.getCurrentOverrideDropRate()));
  // if qps is below lowWaterMark and latency is low, we also reduce the drop rate
  callClients(100, 0.5, clients, clock, timeInterval, false, false);
  strategyAdapter.setStrategyToCallDrop();
  getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
  assertTrue(isEqual(0.6d, strategyAdapter.getCurrentOverrideDropRate()));
  // if qps is between lowWaterMark and highWaterMark and latency is low, we reduce the drop rate
  callClients(100, 2, clients, clock, timeInterval, false, false);
  strategyAdapter.setStrategyToCallDrop();
  getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
  assertTrue(isEqual(0.4d, strategyAdapter.getCurrentOverrideDropRate()));
  // even if qps is above highWaterMark, low latency means we keep reducing the drop rate
  callClients(100, 25, clients, clock, timeInterval, false, false);
  strategyAdapter.setStrategyToCallDrop();
  getTrackerClient(strategyAdapter, request, new RequestContext(), 1, clients);
  assertTrue(isEqual(0.2d, strategyAdapter.getCurrentOverrideDropRate()));
}
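The sequence above traces a simple state machine: the override drop rate steps up only when both traffic and latency are high, holds steady when traffic sits between the water marks under high latency, and steps down when either latency recovers or traffic falls below the low water mark. The following is a minimal model of that rule written from the observed behavior; it is an illustration, not rest.li's actual updateState implementation.

// Simplified model of the call-dropping adjustment the test above exercises.
static double nextOverrideDropRate(double current, double qps, double latencyMs,
                                   double lowWaterMark, double highWaterMark,
                                   double highLatencyMs,
                                   double globalStepUp, double globalStepDown) {
  if (latencyMs >= highLatencyMs && qps >= highWaterMark) {
    return Math.min(1.0, current + globalStepUp);   // overloaded: drop more traffic
  }
  if (latencyMs < highLatencyMs || qps <= lowWaterMark) {
    return Math.max(0.0, current - globalStepDown); // recovering: drop less traffic
  }
  return current; // high latency but moderate traffic: hold steady
}

With globalStepUp and globalStepDown both at 0.2 (the step size the assertions above imply), this model reproduces the whole trace: five high-traffic, high-latency intervals walk the rate from 0.0 up to 1.0, and the mixed intervals that follow walk it back down in 0.2 steps.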
Use of com.linkedin.d2.balancer.util.URIRequest in project rest.li by linkedin: class DegraderLoadBalancerTest, method clusterTotalRecovery1TC.
/**
 * Simulates the situation where cluster latency gets so high that we reduce the number of
 * points in the hash ring to 0 and then increase the call drop rate to 1.0.
 * This causes the cluster to receive no traffic, and we want to see whether the cluster
 * can recover from such a situation.
 * @param myMap configuration map used to build the DegraderLoadBalancerStrategyConfig
 * @param clock test clock used to advance simulated time
 * @param timeInterval length of one update interval in milliseconds
 * @param strategy adapter wrapping the strategy under test
 */
public void clusterTotalRecovery1TC(Map<String, Object> myMap, TestClock clock, Long timeInterval, DegraderLoadBalancerStrategyAdapter strategy) {
  final int NUM_CHECKS = 5;
  final Long TIME_INTERVAL = timeInterval;
  DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
  List<TrackerClient> clients = new ArrayList<TrackerClient>();
  URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
  URIRequest request = new URIRequest(uri1);
  TrackerClient client1 = new TrackerClient(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);
  clients.add(client1);
  // force client1 to be disabled
  DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
  dcClient1Default.setOverrideMinCallCount(5);
  dcClient1Default.setMinCallCount(5);
  dcClient1Default.setMaxDropRate(1d);
  dcClient1Default.setUpStep(1.0d);
  List<CallCompletion> ccList = new ArrayList<CallCompletion>();
  CallCompletion cc;
  for (int j = 0; j < NUM_CHECKS; j++) {
    cc = client1.getCallTracker().startCall();
    ccList.add(cc);
  }
  // add high latency and errors to shut off traffic to this tracker client
  clock.addMs(3500);
  for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext(); ) {
    cc = iter.next();
    cc.endCallWithError();
    iter.remove();
  }
  // go to the next time interval
  clock.addMs(TIME_INTERVAL);
  Assert.assertEquals(dcClient1Default.getCurrentComputedDropRate(), 1.0);
  // trigger a state update
  TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
  // now we mimic the high latency and force the state to drop all calls so that
  // the overrideClusterDropRate becomes 1.0
  ccList = new ArrayList<CallCompletion>();
  for (int j = 0; j < NUM_CHECKS; j++) {
    cc = client1.getCallTracker().startCall();
    ccList.add(cc);
  }
  // make sure that the latency is really high
  clock.addMs(3500);
  for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext(); ) {
    cc = iter.next();
    cc.endCallWithError();
    iter.remove();
  }
  // go to the next time interval
  clock.addMs(TIME_INTERVAL);
  // trigger a state update
  resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
  // this time the cluster override drop rate has stepped up to globalStepUp (1.0 with the
  // config this test is given), so resultTC should be null because we drop the client
  assertNull(resultTC);
  assertEquals(strategy.getCurrentOverrideDropRate(), config.getGlobalStepUp());
  // add another time interval
  clock.addMs(TIME_INTERVAL);
  // usually we alternate between the LoadBalancing and CallDropping strategies, but here
  // we want to test the call-dropping strategy
  strategy.setStrategyToCallDrop();
  // we simulate dropped calls by not calling CallCompletion.endCall() or endCallWithError()
  // as we did above; because the override drop rate is 1.0, every call will be dropped,
  // so resultTC should be null
  resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
  // this time the cluster override drop rate steps down to 1 - globalStepDown (0.2 with
  // this test's config) because we're recovering
  assertEquals(strategy.getCurrentOverrideDropRate(), 1 - config.getGlobalStepDown());
  // add another time interval
  clock.addMs(TIME_INTERVAL);
  // set the strategy to callDropping again
  strategy.setStrategyToCallDrop();
  // the override drop rate is 0.2 and we simulate still receiving no calls,
  // so this cycle the override drop rate goes to 0
  resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
  assertEquals(strategy.getCurrentOverrideDropRate(), 0.0);
}
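For the inline comments above to line up with the assertions (an override drop rate of 1.0 after one step up and 0.2 after one step down), the caller's myMap must configure globalStepUp = 1.0 and globalStepDown = 0.8. A hypothetical setup follows; the property-key strings are assumptions about the config-map format, not values confirmed by this page.

// Hypothetical caller; the exact property-key strings are assumed.
Map<String, Object> myMap = new HashMap<String, Object>();
myMap.put("http.loadBalancer.globalStepUp", 1.0);   // 0.0 -> 1.0 in a single step up
myMap.put("http.loadBalancer.globalStepDown", 0.8); // 1.0 -> 0.2 in a single step down
TestClock clock = new TestClock();
clusterTotalRecovery1TC(myMap, clock, 5000L, strategyAdapter);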
Use of com.linkedin.d2.balancer.util.URIRequest in project rest.li by linkedin: class DegraderLoadBalancerTest, method clusterRecovery1TC.
/**
 * Helper method to test DegraderLoadBalancerStrategy recovery with 1 TrackerClient.
 *
 * We want to test DegraderV2 and V3 with 2 different strategies, LoadBalancing and CallDropping,
 * so this method needs to be able to handle all 4 permutations.
 *
 * @param myMap configuration map used to build the DegraderLoadBalancerStrategyConfig
 * @param clock test clock used to advance simulated time
 * @param stepsToFullRecovery number of update intervals we expect recovery to take
 * @param timeInterval length of one update interval in milliseconds
 * @param strategy adapter wrapping the strategy under test
 * @param strategyV2 strategy to force on a V2 adapter, or null when testing V3
 * @param strategyV3 strategy to force on a V3 adapter, or null when testing V2
 */
public void clusterRecovery1TC(Map<String, Object> myMap, TestClock clock, int stepsToFullRecovery, Long timeInterval, DegraderLoadBalancerStrategyAdapter strategy, DegraderLoadBalancerStrategyV2_1.DegraderLoadBalancerState.Strategy strategyV2, DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy strategyV3) {
  final int NUM_CHECKS = 5;
  final Long TIME_INTERVAL = timeInterval;
  int localStepsToFullRecovery = stepsToFullRecovery;
  DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
  List<TrackerClient> clients = new ArrayList<TrackerClient>();
  URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
  URIRequest request = new URIRequest(uri1);
  TrackerClient client1 = new TrackerClient(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);
  clients.add(client1);
  // force client1 to be disabled
  DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
  dcClient1Default.setOverrideMinCallCount(5);
  dcClient1Default.setMinCallCount(5);
  dcClient1Default.setMaxDropRate(1d);
  dcClient1Default.setUpStep(1.0d);
  List<CallCompletion> ccList = new ArrayList<CallCompletion>();
  CallCompletion cc;
  for (int j = 0; j < NUM_CHECKS; j++) {
    cc = client1.getCallTracker().startCall();
    ccList.add(cc);
  }
  // add high latency and errors to shut off traffic to this tracker client.
  // note: the default values for highError and lowError in the degrader are 1.1,
  // which means we don't use error rates when deciding when to load-balance or degrade.
  clock.addMs(3500);
  for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext(); ) {
    cc = iter.next();
    cc.endCallWithError();
    iter.remove();
  }
  // go to the next time interval
  clock.addMs(TIME_INTERVAL);
  Assert.assertEquals(dcClient1Default.getCurrentComputedDropRate(), 1.0);
  // trigger a state update
  TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
  if (config.getInitialRecoveryLevel() < 0.01) {
    // the returned TrackerClient should be null
    assertNull(resultTC, "expected null trackerclient");
    // each interval the recovery mechanism puts points back in the hash ring for the
    // tracker client, so it's time to try it out. We need to enter this code at least once.
    do {
      // go to the next time interval
      clock.addMs(TIME_INTERVAL);
      // try adjusting the hash ring on this updateState
      if (strategyV3 != null) {
        strategy.setStrategyV3(DEFAULT_PARTITION_ID, strategyV3);
      } else if (strategyV2 != null) {
        strategy.setStrategyV2(strategyV2);
      } else {
        fail("should set strategy (either LoadBalance or Degrader)");
      }
      resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
      localStepsToFullRecovery--;
    } while (localStepsToFullRecovery > 0);
  }
  assertNotNull(resultTC, "expected non-null trackerclient");
  // make calls to the tracker client to verify that it's on the road to healthy status
  for (int j = 0; j < NUM_CHECKS; j++) {
    cc = resultTC.getCallTracker().startCall();
    ccList.add(cc);
  }
  clock.addMs(10);
  for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext(); ) {
    cc = iter.next();
    cc.endCall();
    iter.remove();
  }
  // go to the next time interval
  clock.addMs(TIME_INTERVAL);
  Assert.assertTrue(dcClient1Default.getCurrentComputedDropRate() < 1d);
  resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
  assertNotNull(resultTC, "expected non-null trackerclient");
}
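As a sketch of how one of the four permutations might be driven, assuming the enum constant LOAD_BALANCE exists on the V3 strategy type named in the signature, and with myMap, clock, and strategyAdapter being illustrative stand-ins for the caller's fixtures:

// Hypothetical invocation for the V3 + LOAD_BALANCE permutation.
clusterRecovery1TC(myMap, clock,
    1,      // stepsToFullRecovery
    5000L,  // timeInterval in ms
    strategyAdapter,
    null,   // strategyV2 is unused in this permutation
    DegraderLoadBalancerStrategyV3.PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);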
Use of com.linkedin.d2.balancer.util.URIRequest in project rest.li by linkedin: class ZKFSTest, method testZKDown.
@Test
public void testZKDown() throws Exception {
  final String TEST_SERVICE_NAME = "testingService";
  final String TEST_CLUSTER_NAME = "someCluster";
  startServer();
  try {
    ZKFSLoadBalancer balancer = getBalancer();
    FutureCallback<None> callback = new FutureCallback<None>();
    balancer.start(callback);
    callback.get(30, TimeUnit.SECONDS);
    ZKConnection conn = new ZKConnection("localhost:" + PORT, 30000);
    conn.start();
    ZooKeeperPermanentStore<ServiceProperties> store = new ZooKeeperPermanentStore<ServiceProperties>(conn, new ServicePropertiesJsonSerializer(), ZKFSUtil.servicePath(BASE_PATH));
    callback = new FutureCallback<None>();
    store.start(callback);
    callback.get(30, TimeUnit.SECONDS);
    ServiceProperties props = new ServiceProperties(TEST_SERVICE_NAME, TEST_CLUSTER_NAME, "/somePath", Arrays.asList("degrader"), Collections.<String, Object>emptyMap(), null, null, Arrays.asList("http"), null);
    store.put(TEST_SERVICE_NAME, props);
    ZooKeeperPermanentStore<ClusterProperties> clusterStore = new ZooKeeperPermanentStore<ClusterProperties>(conn, new ClusterPropertiesJsonSerializer(), ZKFSUtil.clusterPath(BASE_PATH));
    callback = new FutureCallback<None>();
    clusterStore.start(callback);
    callback.get(30, TimeUnit.SECONDS);
    ClusterProperties clusterProps = new ClusterProperties(TEST_CLUSTER_NAME);
    clusterStore.put(TEST_CLUSTER_NAME, clusterProps);
    ZKConnection serverConn = new ZKConnection("localhost:" + PORT, 30000);
    serverConn.start();
    ZooKeeperEphemeralStore<UriProperties> uriStore = new ZooKeeperEphemeralStore<UriProperties>(serverConn, new UriPropertiesJsonSerializer(), new UriPropertiesMerger(), ZKFSUtil.uriPath(BASE_PATH));
    callback = new FutureCallback<None>();
    uriStore.start(callback);
    callback.get(30, TimeUnit.SECONDS);
    ZooKeeperServer server = new ZooKeeperServer(uriStore);
    callback = new FutureCallback<None>();
    Map<Integer, PartitionData> partitionDataMap = new HashMap<Integer, PartitionData>();
    partitionDataMap.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1.0));
    server.markUp(TEST_CLUSTER_NAME, URI.create("http://test.uri"), partitionDataMap, callback);
    callback.get(30, TimeUnit.SECONDS);
    URIRequest request = new URIRequest("d2://" + TEST_SERVICE_NAME + "/foo");
    TransportClient client = balancer.getClient(request, new RequestContext());
    // stop the server to cause a disconnect event
    stopServer();
    // sleep to ensure the disconnect has propagated; ideally the Toggle should expose
    // some interface to allow detection that the toggle occurred
    Thread.sleep(1000);
    // now see if it still works after the disconnect
    client = balancer.getClient(request, new RequestContext());
  } finally {
    stopServer();
  }
}