Use of com.linkedin.d2.balancer.strategies.LoadBalancerQuarantine in project rest.li by linkedin.
The class D2MonitorEventEmitter, method createUriInfoBuilder.
// Create a UriInfoBuilder from the corresponding TrackerClient
private D2MonitorBuilder.D2MonitorUriInfoBuilder createUriInfoBuilder(TrackerClient client,
    Map<URI, Integer> pointsMap, Map<TrackerClient, LoadBalancerQuarantine> quarantineMap) {
  D2MonitorBuilder.D2MonitorUriInfoBuilder uriInfoBuilder =
      new D2MonitorBuilder.D2MonitorUriInfoBuilder(client.getUri());
  // Copy the latest call stats and the client's current points in the hash ring.
  uriInfoBuilder.copyStats(client.getLatestCallStats());
  uriInfoBuilder.setTransmissionPoints(pointsMap.get(client.getUri()));
  // Quarantine info is optional: set the duration only when the client is quarantined.
  LoadBalancerQuarantine quarantine = quarantineMap.get(client);
  if (quarantine != null) {
    uriInfoBuilder.setQuarantineDuration(quarantine.getTimeTilNextCheck());
  }
  return uriInfoBuilder;
}
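This builder is per-host, so a caller assembling a full D2Monitor event would invoke it once per tracker client. A minimal sketch of such a loop follows; the helper name createAllUriInfoBuilders is hypothetical, not taken from rest.li, and reuses only the types and maps shown above.

// Hypothetical helper (not in rest.li): collect one UriInfoBuilder per client so a
// D2Monitor event can cover every tracked host. Assumes access to the same maps
// passed to createUriInfoBuilder above.
private List<D2MonitorBuilder.D2MonitorUriInfoBuilder> createAllUriInfoBuilders(Collection<TrackerClient> clients,
    Map<URI, Integer> pointsMap, Map<TrackerClient, LoadBalancerQuarantine> quarantineMap) {
  List<D2MonitorBuilder.D2MonitorUriInfoBuilder> builders = new ArrayList<>(clients.size());
  for (TrackerClient client : clients) {
    builders.add(createUriInfoBuilder(client, pointsMap, quarantineMap));
  }
  return builders;
}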
Use of com.linkedin.d2.balancer.strategies.LoadBalancerQuarantine in project rest.li by linkedin.
The class QuarantineManager, method checkAndRemoveQuarantine.
/**
 * Check whether the quarantine still applies for each tracker client.
 * Remove it from the map if the quarantine is no longer applicable, and put the client
 * into the recovery state right after the quarantine ends.
 *
 * @param partitionState The current state of the partition
 */
private void checkAndRemoveQuarantine(PartitionState partitionState) {
  Map<TrackerClient, LoadBalancerQuarantine> quarantineMap = partitionState.getQuarantineMap();
  Map<TrackerClient, LoadBalancerQuarantine> quarantineHistory = partitionState.getQuarantineHistory();
  Set<TrackerClient> recoverySet = partitionState.getRecoveryTrackerClients();
  for (TrackerClient trackerClient : partitionState.getTrackerClients()) {
    LoadBalancerQuarantine quarantine = quarantineMap.get(trackerClient);
    if (quarantine != null && quarantine.checkUpdateQuarantineState()) {
      // Evict the client from quarantine, archive the quarantine object, and start recovery.
      quarantineMap.remove(trackerClient);
      quarantineHistory.put(trackerClient, quarantine);
      recoverySet.add(trackerClient);
    }
  }
}
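Note that the loop mutates quarantineMap while iterating, which is safe only because it iterates over the partition's tracker-client list rather than over the map itself. A standalone sketch of the same eviction pattern with plain collections (hypothetical demo class, not rest.li code):

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class QuarantineEvictionDemo {
  public static void main(String[] args) {
    List<String> clients = Arrays.asList("a", "b", "c");
    Map<String, String> quarantineMap = new HashMap<>();
    Map<String, String> quarantineHistory = new HashMap<>();
    Set<String> recoverySet = new HashSet<>();
    quarantineMap.put("b", "quarantine-b");

    // Iterate over the stable client list, not quarantineMap, so remove() is safe.
    for (String client : clients) {
      String quarantine = quarantineMap.get(client);
      boolean quarantineOver = quarantine != null; // stands in for checkUpdateQuarantineState()
      if (quarantineOver) {
        quarantineMap.remove(client);              // evict from quarantine
        quarantineHistory.put(client, quarantine); // archive the quarantine object
        recoverySet.add(client);                   // start recovery
      }
    }
    System.out.println(quarantineMap + " " + quarantineHistory + " " + recoverySet);
    // prints: {} {b=quarantine-b} [b]
  }
}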
Use of com.linkedin.d2.balancer.strategies.LoadBalancerQuarantine in project rest.li by linkedin.
The class DegraderLoadBalancerTest, method testHealthCheckRequestContextNotShared.
@Test
public void testHealthCheckRequestContextNotShared() {
  final DegraderLoadBalancerStrategyConfig config = new DegraderLoadBalancerStrategyConfig(1000);
  final TestClock clock = new TestClock();
  final DegraderImpl.Config degraderConfig = DegraderConfigFactory.toDegraderConfig(Collections.emptyMap());
  final DegraderTrackerClient trackerClient = createTrackerClient(1, clock, degraderConfig).get(0);
  final TestLoadBalancerClient testLoadBalancerClient = (TestLoadBalancerClient) trackerClient.getTransportClient();
  final DegraderTrackerClientUpdater degraderTrackerClientUpdater =
      new DegraderTrackerClientUpdater(trackerClient, DEFAULT_PARTITION_ID);
  final LoadBalancerQuarantine quarantine =
      new LoadBalancerQuarantine(degraderTrackerClientUpdater.getTrackerClient(), config, "abc0");
  final TransportHealthCheck healthCheck = (TransportHealthCheck) quarantine.getHealthCheckClient();

  // The first health-check request captures one RequestContext and wire-attribute map ...
  healthCheck.checkHealth(Callbacks.empty());
  final RequestContext requestContext1 = testLoadBalancerClient._requestContext;
  final Map<String, String> wireAttrs1 = testLoadBalancerClient._wireAttrs;

  // ... and a second request must produce equal content in fresh instances.
  healthCheck.checkHealth(Callbacks.empty());
  final RequestContext requestContext2 = testLoadBalancerClient._requestContext;
  final Map<String, String> wireAttrs2 = testLoadBalancerClient._wireAttrs;

  Assert.assertEquals(requestContext1, requestContext2);
  Assert.assertNotSame(requestContext1, requestContext2, "RequestContext should not be shared between requests.");
  Assert.assertEquals(wireAttrs1, wireAttrs2);
  Assert.assertNotSame(wireAttrs1, wireAttrs2, "Wire attributes should not be shared between requests.");
}
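The assertEquals/assertNotSame pairing is the crux of the test: each checkHealth call must rebuild the RequestContext and wire-attribute map with identical content in distinct instances. As a standalone illustration of that distinction (plain TestNG, unrelated to rest.li):

@Test
public void equalButNotSameDemo() {
  Map<String, String> a = new HashMap<>();
  Map<String, String> b = new HashMap<>(); // equal content, distinct instance
  Assert.assertEquals(a, b);  // passes: value equality
  Assert.assertNotSame(a, b); // passes: different references
}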
Use of com.linkedin.d2.balancer.strategies.LoadBalancerQuarantine in project rest.li by linkedin.
The class DegraderLoadBalancerTest, method testDegraderLoadBalancerStateComparison.
@Test(groups = { "small", "back-end" })
public void testDegraderLoadBalancerStateComparison() throws URISyntaxException {
  long clusterGenerationId = 1;
  long lastUpdated = 29999;
  long currentAverageClusterLatency = 3000;
  Map<String, Object> configMap = new HashMap<>();
  configMap.put(PropertyKeys.HTTP_LB_LOW_WATER_MARK, 500d);
  configMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_POINTS_PER_WEIGHT, 120);
  DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(configMap);
  long clusterCallCount = 15;
  Map<DegraderTrackerClient, LoadBalancerQuarantine> quarantineMap = new HashMap<>();
  Map<DegraderTrackerClient, LoadBalancerQuarantine> quarantineStore = new HashMap<>();
  double currentOverrideDropRate = 0.4;
  boolean initialized = true;
  String name = "degraderV2";
  Map<URI, Integer> points = new HashMap<>();
  Map<DegraderTrackerClient, Double> recoveryMap = new HashMap<>();
  URI uri1 = new URI("http://test.linkedin.com:10010/abc0");
  URI uri2 = new URI("http://test.linkedin.com:10010/abc1");
  URI uri3 = new URI("http://test.linkedin.com:10010/abc2");
  points.put(uri1, 100);
  points.put(uri2, 50);
  points.put(uri3, 120);
  RingFactory<URI> ringFactory = new DelegatingRingFactory<>(config);
  TestClock clock = new TestClock();
  List<DegraderTrackerClient> clients = createTrackerClient(3, clock, null);
  List<DegraderTrackerClientUpdater> clientUpdaters = new ArrayList<>();
  for (DegraderTrackerClient client : clients) {
    recoveryMap.put(client, 0.0);
    clientUpdaters.add(new DegraderTrackerClientUpdater(client, DEFAULT_PARTITION_ID));
  }

  // test DegraderLoadBalancerV3
  points.put(uri1, 100);
  points.put(uri2, 50);
  points.put(uri3, 120);
  PartitionDegraderLoadBalancerState.Strategy strategyV3 = PartitionDegraderLoadBalancerState.Strategy.CALL_DROPPING;
  PartitionDegraderLoadBalancerState oldStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
      recoveryMap, name, null, clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  PartitionDegraderLoadBalancerState newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
      recoveryMap, name, null, clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));

  // the cluster generation id is not part of the state comparison
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId + 1, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
      recoveryMap, name, null, clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));

  // neither is the lastUpdated timestamp
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated + 300,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
      recoveryMap, name, null, clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));

  // changing a client's points does make the states different
  points.put(uri2, 77);
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
      recoveryMap, name, null, clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  points.put(uri2, 50);

  // so does changing the override drop rate
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate + 0.4, currentAverageClusterLatency,
      recoveryMap, name, null, clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));

  // we don't care about averageClusterLatency for comparing states
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency + 55,
      recoveryMap, name, null, clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));

  // but a change in the recovery map does matter
  for (DegraderTrackerClient client : clients) {
    recoveryMap.put(client, 0.5);
  }
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
      recoveryMap, name, null, clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));

  // test state health comparison
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));

  // Set the cluster average latency to 300, below the low water mark; the state is still
  // unhealthy because the points map has clients with less-than-perfect health.
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, 300,
      recoveryMap, name, null, clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));

  // give every client 120 points so the cluster becomes "healthy"
  points.put(uri1, 120);
  points.put(uri2, 120);
  points.put(uri3, 120);
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, 300,
      recoveryMap, name, null, clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  assertTrue(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));

  // if currentAverageClusterLatency is above the low water mark, the cluster becomes unhealthy again
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency,
      recoveryMap, name, null, clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
}
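Each of the 18-argument constructor calls above differs from the baseline in at most two values. If the test's locals were hoisted into fields, a small factory such as this hypothetical helper (not in rest.li) would reduce every comparison to a one-line delta:

// Hypothetical helper; assumes initialized, ringFactory, points, strategyV3, recoveryMap,
// name, clusterCallCount, quarantineMap and quarantineStore were promoted to fields.
private PartitionDegraderLoadBalancerState buildStateV3(long generationId, long updated,
    double overrideDropRate, long avgClusterLatency) {
  return new PartitionDegraderLoadBalancerState(generationId, updated, initialized, ringFactory,
      points, strategyV3, overrideDropRate, avgClusterLatency, recoveryMap, name, null,
      clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
}

For example, the generation-id comparison would then read buildStateV3(clusterGenerationId + 1, lastUpdated, currentOverrideDropRate, currentAverageClusterLatency).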
Use of com.linkedin.d2.balancer.strategies.LoadBalancerQuarantine in project rest.li by linkedin.
The class QuarantineManagerTest, method testEnrollOneQuarantineOneRecovery.
@Test
public void testEnrollOneQuarantineOneRecovery() {
  LoadBalancerQuarantine quarantine = Mockito.mock(LoadBalancerQuarantine.class);
  List<TrackerClient> trackerClients = TrackerClientMockHelper.mockTrackerClients(3);
  Map<TrackerClient, LoadBalancerQuarantine> existingQuarantineMap = new HashMap<>();
  existingQuarantineMap.put(trackerClients.get(1), quarantine);
  // The mocked quarantine reports that its quarantine period is over.
  Mockito.when(quarantine.checkUpdateQuarantineState()).thenReturn(true);
  setup(0.5, true, true);
  _quarantineManager.tryEnableQuarantine();
  PartitionState state = new PartitionStateTestDataBuilder()
      .setTrackerClientStateMap(trackerClients,
          Arrays.asList(StateUpdater.MIN_HEALTH_SCORE, StateUpdater.MIN_HEALTH_SCORE,
              QuarantineManager.INITIAL_RECOVERY_HEALTH_SCORE),
          Arrays.asList(TrackerClientState.HealthState.UNHEALTHY, TrackerClientState.HealthState.NEUTRAL,
              TrackerClientState.HealthState.UNHEALTHY),
          Arrays.asList(20, 20, 20))
      // Install the pre-existing quarantine so the update can evict it; without this wiring,
      // existingQuarantineMap would be unused and the recovery assertions below could not pass.
      .setQuarantineMap(existingQuarantineMap)
      .build();
  _quarantineManager.updateQuarantineState(state, state, DEFAULT_AVG_CLUSTER_LATENCY);
  // Client 1 leaves quarantine and enters recovery; client 0 is newly quarantined.
  assertEquals(state.getRecoveryTrackerClients().size(), 1);
  assertTrue(state.getRecoveryTrackerClients().contains(trackerClients.get(1)));
  assertEquals(state.getQuarantineMap().size(), 1);
  assertTrue(state.getQuarantineMap().containsKey(trackerClients.get(0)));
}
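Two clients change buckets in this single update: index 1 leaves quarantine (its mocked checkUpdateQuarantineState() returns true) and enters recovery, while index 0, pinned at MIN_HEALTH_SCORE and UNHEALTHY, is newly enrolled. Assuming the eviction also archives the old quarantine in the history map, as checkAndRemoveQuarantine above does, a natural follow-up assertion would be:

// Hypothetical extra check, assuming the evicted quarantine is archived in the history map.
assertTrue(state.getQuarantineHistory().containsKey(trackerClients.get(1)));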