Use of com.linkedin.d2.balancer.util.URIRequest in project rest.li by LinkedIn.
The class SimpleLoadBalancerTest, method testLoadBalancerWithWait.
@Test(groups = { "small", "back-end" })
public void testLoadBalancerWithWait() throws URISyntaxException, ServiceUnavailableException, InterruptedException {
  URIRequest uriRequest = new URIRequest("d2://NonExistentService");
  LoadBalancerTestState state = new LoadBalancerTestState();
  SimpleLoadBalancer balancer = new SimpleLoadBalancer(state, 2, TimeUnit.SECONDS, _d2Executor);
  try {
    balancer.getClient(uriRequest, new RequestContext());
    fail("should have received a service unavailable exception, case 1");
  } catch (ServiceUnavailableException e) {
  }
  state.listenToService = true;
  try {
    balancer.getClient(uriRequest, new RequestContext());
    fail("should have received a service unavailable exception, case 2");
  } catch (ServiceUnavailableException e) {
  }
  state.isListeningToService = true;
  try {
    balancer.getClient(uriRequest, new RequestContext());
    fail("should have received a service unavailable exception, case 3");
  } catch (ServiceUnavailableException e) {
  }
  state.getServiceProperties = true;
  try {
    balancer.getClient(uriRequest, new RequestContext());
    fail("should have received a service unavailable exception, case 4");
  } catch (ServiceUnavailableException e) {
  }
  state.listenToCluster = true;
  try {
    balancer.getClient(uriRequest, new RequestContext());
    fail("should have received a service unavailable exception, case 5");
  } catch (ServiceUnavailableException e) {
  }
  state.isListeningToCluster = true;
  try {
    balancer.getClient(uriRequest, new RequestContext());
    fail("should have received a service unavailable exception, case 6");
  } catch (ServiceUnavailableException e) {
  }
  state.getClusterProperties = true;
  try {
    balancer.getClient(uriRequest, new RequestContext());
    fail("should have received a service unavailable exception, case 7");
  } catch (ServiceUnavailableException e) {
  }
  state.getUriProperties = true;
  try {
    balancer.getClient(uriRequest, new RequestContext());
    fail("should have received a service unavailable exception, case 8");
  } catch (ServiceUnavailableException e) {
  }
  state.getClient = true;
  try {
    balancer.getClient(uriRequest, new RequestContext());
    fail("should have received a service unavailable exception, case 9");
  } catch (ServiceUnavailableException e) {
  }
  state.getStrategy = true;
  try {
    balancer.getClient(uriRequest, new RequestContext());
    fail("should have received a service unavailable exception, case 10");
  } catch (ServiceUnavailableException e) {
  }
  state.getPartitionAccessor = true;
  // victory
  assertNotNull(balancer.getClient(uriRequest, new RequestContext()));
}
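The ten try/catch blocks above all exercise the same expectation: getClient keeps failing until every piece of balancer state has been registered. As an editorial aside, a small helper could express that pattern once. The method below is a hypothetical sketch, not part of the rest.li test; it assumes it lives in the same test class and only uses calls already shown above (SimpleLoadBalancer.getClient, ServiceUnavailableException, TestNG's fail).

  // Hypothetical helper, sketched from the pattern above: getClient must keep throwing
  // ServiceUnavailableException until every dependency (service, cluster, URIs,
  // client, strategy, partition accessor) has been registered on the test state.
  private static void expectServiceUnavailable(SimpleLoadBalancer balancer, URIRequest uriRequest, String caseName) {
    try {
      balancer.getClient(uriRequest, new RequestContext());
      fail("should have received a service unavailable exception, " + caseName);
    } catch (ServiceUnavailableException e) {
      // expected while the balancer's state is still incomplete
    }
  }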
Use of com.linkedin.d2.balancer.util.URIRequest in project rest.li by LinkedIn.
The class WarmUpLoadBalancerTest, method testNotDeletingFilesGetClient.
/**
 * Since the list from the fetcher might not be complete (updated service, old data, etc.), and the user might
 * require additional services at runtime, we have to check that those services are not cleared from the cache;
 * otherwise it would incur a penalty at the next deployment.
 */
@Test(timeOut = 10000)
public void testNotDeletingFilesGetClient() throws InterruptedException, ExecutionException, TimeoutException, ServiceUnavailableException {
  createDefaultServicesIniFiles();
  TestLoadBalancer balancer = new TestLoadBalancer();
  List<String> allServicesBeforeShutdown = getAllDownstreamServices();
  DownstreamServicesFetcher returnNoDownstreams = callback -> callback.onSuccess(Collections.emptyList());
  String pickOneService = allServicesBeforeShutdown.get(0);
  LoadBalancer warmUpLoadBalancer = new WarmUpLoadBalancer(balancer, balancer, Executors.newSingleThreadScheduledExecutor(), _tmpdir.getAbsolutePath(), MY_SERVICES_FS, returnNoDownstreams, WarmUpLoadBalancer.DEFAULT_SEND_REQUESTS_TIMEOUT_SECONDS, WarmUpLoadBalancer.DEFAULT_CONCURRENT_REQUESTS);
  FutureCallback<None> callback = new FutureCallback<>();
  warmUpLoadBalancer.start(callback);
  callback.get(5000, TimeUnit.MILLISECONDS);
  warmUpLoadBalancer.getClient(new URIRequest("d2://" + pickOneService), new RequestContext());
  FutureCallback<None> shutdownCallback = new FutureCallback<>();
  warmUpLoadBalancer.shutdown(() -> shutdownCallback.onSuccess(None.none()));
  shutdownCallback.get(5000, TimeUnit.MILLISECONDS);
  List<String> allServicesAfterShutdown = getAllDownstreamServices();
  Assert.assertEquals(allServicesAfterShutdown.size(), 1, "After shutdown there should be just one service, the one that we 'get the client' on");
}
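The returnNoDownstreams fetcher above simulates a stale or empty fetcher result. For contrast, a fetcher that reports previously used services would follow the same callback shape. The snippet below is a minimal sketch only; "serviceA" and "serviceB" are placeholder names, not services from the rest.li test fixtures, and java.util.Arrays is assumed to be imported.

  // Hypothetical fetcher for comparison: reports a fixed list of downstream services,
  // the way a persisted fetcher would after a previous deployment.
  DownstreamServicesFetcher fixedListFetcher =
      callback -> callback.onSuccess(Arrays.asList("serviceA", "serviceB"));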
Use of com.linkedin.d2.balancer.util.URIRequest in project rest.li by LinkedIn.
The class DegraderLoadBalancerTest, method clusterTotalRecovery1TC.
/**
 * Simulates the situation where cluster latency gets so high that we reduce the number of
 * points in the hash ring to 0 and then increase the call drop rate to 1.0.
 * This causes the cluster to receive no traffic, and we want to see whether the cluster can recover
 * from such a situation.
 * @param myMap strategy configuration properties
 * @param clock test clock used to advance time between intervals
 * @param timeInterval strategy update interval in milliseconds
 * @param strategy the load balancer strategy adapter under test
 */
public void clusterTotalRecovery1TC(Map<String, Object> myMap, TestClock clock, Long timeInterval, DegraderLoadBalancerStrategyAdapter strategy) {
  final int NUM_CHECKS = 5;
  final Long TIME_INTERVAL = timeInterval;
  DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
  List<DegraderTrackerClient> clients = new ArrayList<>();
  URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
  URIRequest request = new URIRequest(uri1);
  DegraderTrackerClient client1 = new DegraderTrackerClientImpl(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);
  clients.add(client1);
  // force client1 to be disabled
  DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
  dcClient1Default.setOverrideMinCallCount(5);
  dcClient1Default.setMinCallCount(5);
  dcClient1Default.setMaxDropRate(1d);
  dcClient1Default.setUpStep(1.0d);
  List<CallCompletion> ccList = new ArrayList<>();
  CallCompletion cc;
  for (int j = 0; j < NUM_CHECKS; j++) {
    cc = client1.getCallTracker().startCall();
    ccList.add(cc);
  }
  // add high latency and errors to shut off traffic to this tracker client.
  clock.addMs(3500);
  for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext(); ) {
    cc = iter.next();
    cc.endCallWithError();
    iter.remove();
  }
  // go to the next time interval.
  clock.addMs(TIME_INTERVAL);
  Assert.assertEquals(dcClient1Default.getCurrentComputedDropRate(), 1.0);
  // trigger a state update
  TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
  // now we mimic the high latency and force the state to drop all calls so as to push
  // the overrideClusterDropRate to 1.0
  ccList = new ArrayList<>();
  for (int j = 0; j < NUM_CHECKS; j++) {
    cc = client1.getCallTracker().startCall();
    ccList.add(cc);
  }
  // make sure that the latency is really high
  clock.addMs(3500);
  for (Iterator<CallCompletion> iter = ccList.listIterator(); iter.hasNext(); ) {
    cc = iter.next();
    cc.endCallWithError();
    iter.remove();
  }
  // go to the next time interval.
  clock.addMs(TIME_INTERVAL);
  // trigger a state update
  resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
  // this time the cluster override drop rate is set to 1.0, so resultTC should be null because we drop the client
  assertNull(resultTC);
  assertEquals(strategy.getCurrentOverrideDropRate(), config.getGlobalStepUp());
  // add another time interval
  clock.addMs(TIME_INTERVAL);
  // usually we alternate between the LoadBalancing and CallDropping strategies, but we want to test
  // the call dropping strategy
  strategy.setStrategyToCallDrop();
  // we simulate call drops by not calling CallCompletion.endCall() or endCallWithError() as we did above;
  // because the override drop rate is set to 1.0, all calls will be dropped, so resultTC should be null
  resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
  // this time the cluster override drop rate is reduced by globalStepDown because we're recovering
  assertEquals(strategy.getCurrentOverrideDropRate(), 1 - config.getGlobalStepDown());
  // add another time interval
  clock.addMs(TIME_INTERVAL);
  // set the strategy to call dropping again
  strategy.setStrategyToCallDrop();
  // because the override drop rate was already reduced and we simulate as if we still don't get any calls,
  // this cycle will set the override drop rate to 0
  resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
  assertEquals(strategy.getCurrentOverrideDropRate(), 0.0);
}
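Note that clusterTotalRecovery1TC is a parameterized helper rather than a @Test method; a caller supplies the config map, clock, interval, and strategy adapter. The snippet below is a hedged sketch of such a caller, mirroring the setup used in testAdjustedMinCallCount further down; the DegraderLoadBalancerStrategyAdapter constructor taking a V3 strategy is assumed from the test class, and any step-up/step-down properties the real callers configure are omitted here.

  // Sketch of a possible caller (assumptions noted above).
  Map<String, Object> myMap = lbDefaultConfig();
  Long timeInterval = 5000L;
  TestClock clock = new TestClock();
  myMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_UPDATE_INTERVAL_MS, timeInterval);
  myMap.put(PropertyKeys.CLOCK, clock);
  DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
  DegraderLoadBalancerStrategyV3 strategyV3 =
      new DegraderLoadBalancerStrategyV3(config, "DegraderLoadBalancerTest", null, DEGRADER_STATE_LISTENER_FACTORIES);
  // Assumption: the test class's adapter can wrap a V3 strategy directly.
  clusterTotalRecovery1TC(myMap, clock, timeInterval, new DegraderLoadBalancerStrategyAdapter(strategyV3));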
Use of com.linkedin.d2.balancer.util.URIRequest in project rest.li by LinkedIn.
The class SimpleLoadBalancerSimulation, method verifyState.
/**
* Compare the simulator's view of reality with the load balancer's. This method should
* be called after every step is performed and all threads have finished.
*/
public void verifyState() {
  // verify that we consumed all messages before we do anything
  for (int i = 0; i < _queues.length; ++i) {
    if (_queues[i].size() > 0) {
      fail("there were messages left in the queue. all messages should have been consumed during this simulation step.");
    }
  }
  // verify that all clients have been shut down
  for (Map.Entry<String, TransportClientFactory> e : _clientFactories.entrySet()) {
    DoNothingClientFactory factory = (DoNothingClientFactory) e.getValue();
    if (factory.getRunningClientCount() != 0) {
      fail("Not all clients were shut down from factory " + e.getKey());
    }
  }
  try {
    final CountDownLatch latch = new CountDownLatch(1);
    PropertyEventShutdownCallback callback = new PropertyEventShutdownCallback() {
      @Override
      public void done() {
        latch.countDown();
      }
    };
    _state.shutdown(callback);
    if (!latch.await(60, TimeUnit.SECONDS)) {
      fail("unable to shutdown state");
    }
  } catch (InterruptedException e) {
    fail("unable to shutdown state in verifyState.");
  }
  // New load balancer with no timeout; the code below checks for services that don't exist,
  // and a load balancer with a non-zero timeout would just time out waiting for them to be
  // registered, which will never happen because the PropertyEventThread is shut down.
  _loadBalancer = new SimpleLoadBalancer(_state, 0, TimeUnit.SECONDS, _executorService);
  // verify services are as we expect
  for (String possibleService : _possibleServices) {
    // if this service isn't expected or the state isn't listening to it,
    // the load balancer shouldn't know about it
    if (!_expectedServiceProperties.containsKey(possibleService) || !_state.isListeningToService(possibleService)) {
      LoadBalancerStateItem<ServiceProperties> serviceItem = _state.getServiceProperties(possibleService);
      assertTrue(serviceItem == null || serviceItem.getProperty() == null);
    } else {
      ServiceProperties serviceProperties = _expectedServiceProperties.get(possibleService);
      ClusterProperties clusterProperties = _expectedClusterProperties.get(serviceProperties.getClusterName());
      UriProperties uriProperties = _expectedUriProperties.get(serviceProperties.getClusterName());
      assertEquals(_state.getServiceProperties(possibleService).getProperty(), serviceProperties);
      // verify round robin'ing of the hosts for this service
      for (int i = 0; i < 100; ++i) {
        try {
          // this call will queue up messages if we're not listening to the service, but
          // it's ok, because all of the messengers have been stopped.
          final TransportClient client = _loadBalancer.getClient(new URIRequest("d2://" + possibleService + random(_possiblePaths)), new RequestContext());
          // if we didn't receive service unavailable, we should
          // get a client back
          assertNotNull(client, "Not found client for: d2://" + possibleService + random(_possiblePaths));
        } catch (ServiceUnavailableException e) {
          if (uriProperties != null && clusterProperties != null) {
            // only way to get here is if the prioritized
            // schemes could find no available uris in the
            // cluster. let's see if we can find a URI that
            // matches a prioritized scheme in the cluster.
            Set<String> schemes = new HashSet<>();
            for (URI uri : uriProperties.Uris()) {
              schemes.add(uri.getScheme());
            }
            for (String scheme : clusterProperties.getPrioritizedSchemes()) {
              // if a prioritized scheme has both an available URI and a client factory,
              // getClient should not have failed
              if (schemes.contains(scheme) && _clientFactories.containsKey(scheme)) {
                break;
              }
              assertFalse(schemes.contains(scheme) && _clientFactories.containsKey(scheme), "why couldn't a client be found for schemes " + clusterProperties.getPrioritizedSchemes() + " with URIs: " + uriProperties.Uris());
            }
          }
        }
      }
    }
  }
  // verify clusters are as we expect
  for (String possibleCluster : _possibleClusters) {
    LoadBalancerStateItem<ClusterProperties> clusterItem = _state.getClusterProperties(possibleCluster);
    if (!_expectedClusterProperties.containsKey(possibleCluster) || !_state.isListeningToCluster(possibleCluster)) {
      assertTrue(clusterItem == null || clusterItem.getProperty() == null, "cluster item for " + possibleCluster + " is not null: " + clusterItem);
    } else {
      assertNotNull(clusterItem, "Item for cluster " + possibleCluster + " should not be null, listening: " + _state.isListeningToCluster(possibleCluster) + ", keys: " + _expectedClusterProperties.keySet());
      assertEquals(clusterItem.getProperty(), _expectedClusterProperties.get(possibleCluster));
    }
  }
  // verify uris are as we expect
  for (String possibleCluster : _possibleClusters) {
    LoadBalancerStateItem<UriProperties> uriItem = _state.getUriProperties(possibleCluster);
    if (!_expectedUriProperties.containsKey(possibleCluster) || !_state.isListeningToCluster(possibleCluster)) {
      assertTrue(uriItem == null || uriItem.getProperty() == null);
    } else {
      assertNotNull(uriItem);
      assertEquals(uriItem.getProperty(), _expectedUriProperties.get(possibleCluster));
    }
  }
}
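Since PropertyEventShutdownCallback exposes a single done() method, the latch wiring in the shutdown block above could also be written with a lambda. The sketch below is a drop-in for the same three lines inside the existing try/catch, assuming nothing beyond what the anonymous class already shows.

  // Equivalent shutdown wait using a lambda for the single-method callback interface.
  final CountDownLatch latch = new CountDownLatch(1);
  _state.shutdown(() -> latch.countDown());
  if (!latch.await(60, TimeUnit.SECONDS)) {
    fail("unable to shutdown state");
  }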
Use of com.linkedin.d2.balancer.util.URIRequest in project rest.li by LinkedIn.
The class DegraderLoadBalancerTest, method testAdjustedMinCallCount.
@Test(groups = { "small", "back-end" })
public void testAdjustedMinCallCount() {
  final int NUM_CHECKS = 5;
  final Long TIME_INTERVAL = 5000L;
  Map<String, Object> myMap = lbDefaultConfig();
  // myMap.put(PropertyKeys.LB_INITIAL_RECOVERY_LEVEL, 0.01);
  // myMap.put("rampFactor", 2d);
  myMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_UPDATE_INTERVAL_MS, TIME_INTERVAL);
  TestClock clock = new TestClock();
  myMap.put(PropertyKeys.CLOCK, clock);
  DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(myMap);
  DegraderLoadBalancerStrategyV3 strategy = new DegraderLoadBalancerStrategyV3(config, "DegraderLoadBalancerTest", null, DEGRADER_STATE_LISTENER_FACTORIES);
  List<DegraderTrackerClient> clients = new ArrayList<>();
  URI uri1 = URI.create("http://test.linkedin.com:3242/fdsaf");
  URIRequest request = new URIRequest(uri1);
  List<CallCompletion> ccList = new ArrayList<>();
  CallCompletion cc;
  DegraderTrackerClient client1 = new DegraderTrackerClientImpl(uri1, getDefaultPartitionData(1d), new TestLoadBalancerClient(uri1), clock, null);
  clients.add(client1);
  // force client1 to be disabled if we encounter errors/high latency
  DegraderControl dcClient1Default = client1.getDegraderControl(DEFAULT_PARTITION_ID);
  dcClient1Default.setOverrideMinCallCount(5);
  dcClient1Default.setMinCallCount(5);
  dcClient1Default.setUpStep(1.0);
  dcClient1Default.setHighErrorRate(0);
  // Issue high latency calls to reduce client1 to the minimum number of hash points allowed.
  // (1 in this case)
  TrackerClient resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
  assertNotNull(resultTC, "expected non-null trackerclient");
  for (int j = 0; j < NUM_CHECKS; j++) {
    cc = ((DegraderTrackerClient) resultTC).getCallTracker().startCall();
    ccList.add(cc);
  }
  clock.addMs(3500);
  for (int j = 0; j < NUM_CHECKS; j++) {
    cc = ccList.get(j);
    cc.endCall();
  }
  // bump to the next interval, and get stats.
  clock.addMs(5000);
  // because we want to test out the adjusted min drop rate, force the hash ring adjustment now.
  strategy.setStrategy(DEFAULT_PARTITION_ID, PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE);
  resultTC = getTrackerClient(strategy, request, new RequestContext(), 1, clients);
  // client1 should be reduced to 1 hash point, but since it is the only TC, it should be the
  // TC returned.
  assertEquals(resultTC, client1, "expected client1 to be the returned trackerclient");
  assertEquals((int) (strategy.getState().getPartitionState(DEFAULT_PARTITION_ID).getPointsMap().get(client1.getUri())), 1, "expected client1 to have only 1 point in hash map");
  // make a low latency call; we expect the computedDropRate to be adjusted because the minimum
  // call count was also scaled down.
  cc = client1.getCallTracker().startCall();
  clock.addMs(10);
  cc.endCall();
  clock.addMs(TIME_INTERVAL);
  Assert.assertTrue(dcClient1Default.getCurrentComputedDropRate() < 1.0, "client1 drop rate not less than 1.");
}
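Both DegraderLoadBalancerTest methods above repeat the same call-simulation rhythm: start several calls, advance the TestClock by the desired latency, then complete them. The helper below is a hypothetical sketch (not in the rest.li sources) capturing that rhythm, using only the calls the tests already make; it assumes it sits in the same test class with the same imports.

  // Hypothetical helper: simulate numCalls calls against a tracker client at a fixed
  // latency, ending them either normally or as errors, exactly as the tests above do inline.
  private static void simulateCalls(DegraderTrackerClient client, TestClock clock,
                                    int numCalls, long latencyMs, boolean endAsErrors) {
    List<CallCompletion> completions = new ArrayList<>();
    for (int i = 0; i < numCalls; i++) {
      completions.add(client.getCallTracker().startCall());
    }
    clock.addMs(latencyMs);
    for (CallCompletion completion : completions) {
      if (endAsErrors) {
        completion.endCallWithError();
      } else {
        completion.endCall();
      }
    }
  }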