Use of com.linkedin.d2.balancer.strategies.DelegatingRingFactory in project rest.li by LinkedIn.
The class DegraderLoadBalancerState, method getPartition:
public Partition getPartition(int partitionId) {
  Partition partition = _partitions.get(partitionId);
  if (partition == null) {
    // this is mainly executed in bootstrap time
    // after the system is stabilized, i.e. after all partitionIds have been seen,
    // there will be no need to initialize the map
    // Note that we do this trick because partition count is not available in
    // service configuration (it's in cluster configuration) and we do not want to
    // intermingle the two configurations
    Partition newValue = new Partition(partitionId, new ReentrantLock(),
        new PartitionDegraderLoadBalancerState(-1, _config.getClock().currentTimeMillis(), false,
            new DelegatingRingFactory<>(_config), new HashMap<>(),
            PartitionDegraderLoadBalancerState.Strategy.LOAD_BALANCE, 0, 0, new HashMap<>(),
            _serviceName, _degraderProperties, 0, 0, 0, new HashMap<>(), new HashMap<>(), null, 0),
        _degraderStateListenerFactories.stream()
            .map(factory -> factory.create(partitionId, _config))
            .collect(Collectors.toList()));
    Partition oldValue = _partitions.putIfAbsent(partitionId, newValue);
    if (oldValue == null) {
      partition = newValue;
    } else {
      // another thread already initialized this partition
      // newValue is discarded
      partition = oldValue;
    }
  }
  return partition;
}
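The lazy initialization above avoids holding a lock while building partition state: several threads may race to construct a Partition for the same id, but putIfAbsent on the (presumably concurrent) _partitions map publishes exactly one of them and the losers adopt the winner. A minimal, self-contained sketch of the same idiom follows; the registry class and the String value are hypothetical stand-ins, not rest.li types:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class LazyPartitionRegistry {
  // Hypothetical stand-in for the _partitions map in DegraderLoadBalancerState.
  private final ConcurrentMap<Integer, String> _partitions = new ConcurrentHashMap<>();

  public String getPartition(int partitionId) {
    String partition = _partitions.get(partitionId);
    if (partition == null) {
      // Build a candidate value; concurrent callers may each build their own.
      String newValue = "partition-" + partitionId;
      // putIfAbsent keeps only the first value published for this key.
      String oldValue = _partitions.putIfAbsent(partitionId, newValue);
      // If another thread won the race, discard our candidate and use theirs.
      partition = (oldValue == null) ? newValue : oldValue;
    }
    return partition;
  }
}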
Use of com.linkedin.d2.balancer.strategies.DelegatingRingFactory in project rest.li by LinkedIn.
The class DegraderLoadBalancerTest, method testDegraderLoadBalancerStateComparison:
@Test(groups = { "small", "back-end" })
public void testDegraderLoadBalancerStateComparison() throws URISyntaxException {
  long clusterGenerationId = 1;
  long lastUpdated = 29999;
  long currentAverageClusterLatency = 3000;
  Map<String, Object> configMap = new HashMap<>();
  configMap.put(PropertyKeys.HTTP_LB_LOW_WATER_MARK, 500d);
  configMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_POINTS_PER_WEIGHT, 120);
  DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(configMap);
  long clusterCallCount = 15;
  Map<DegraderTrackerClient, LoadBalancerQuarantine> quarantineMap = new HashMap<>();
  Map<DegraderTrackerClient, LoadBalancerQuarantine> quarantineStore = new HashMap<>();
  double currentOverrideDropRate = 0.4;
  boolean initialized = true;
  String name = "degraderV2";
  Map<URI, Integer> points = new HashMap<>();
  Map<DegraderTrackerClient, Double> recoveryMap = new HashMap<>();
  URI uri1 = new URI("http://test.linkedin.com:10010/abc0");
  URI uri2 = new URI("http://test.linkedin.com:10010/abc1");
  URI uri3 = new URI("http://test.linkedin.com:10010/abc2");
  points.put(uri1, 100);
  points.put(uri2, 50);
  points.put(uri3, 120);
  RingFactory<URI> ringFactory = new DelegatingRingFactory<>(config);
  TestClock clock = new TestClock();
  List<DegraderTrackerClient> clients = createTrackerClient(3, clock, null);
  List<DegraderTrackerClientUpdater> clientUpdaters = new ArrayList<>();
  for (DegraderTrackerClient client : clients) {
    recoveryMap.put(client, 0.0);
    clientUpdaters.add(new DegraderTrackerClientUpdater(client, DEFAULT_PARTITION_ID));
  }
  // test DegraderLoadBalancerV3
  points.put(uri1, 100);
  points.put(uri2, 50);
  points.put(uri3, 120);
  PartitionDegraderLoadBalancerState.Strategy strategyV3 = PartitionDegraderLoadBalancerState.Strategy.CALL_DROPPING;
  PartitionDegraderLoadBalancerState oldStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap,
      name, null, clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  PartitionDegraderLoadBalancerState newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated,
      initialized, ringFactory, points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap,
      name, null, clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId + 1, lastUpdated, initialized, ringFactory,
      points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap, name, null,
      clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated + 300, initialized, ringFactory,
      points, strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap, name, null,
      clusterCallCount, 0, 0, quarantineMap, quarantineStore, null, 0);
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  points.put(uri2, 77);
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points,
      strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount,
      0, 0, quarantineMap, quarantineStore, null, 0);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  points.put(uri2, 50);
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points,
      strategyV3, currentOverrideDropRate + 0.4, currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount,
      0, 0, quarantineMap, quarantineStore, null, 0);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points,
      strategyV3, currentOverrideDropRate, currentAverageClusterLatency + 55, recoveryMap, name, null, clusterCallCount,
      0, 0, quarantineMap, quarantineStore, null, 0);
  // we don't care about averageClusterLatency for comparing states
  assertTrue(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  for (DegraderTrackerClient client : clients) {
    recoveryMap.put(client, 0.5);
  }
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points,
      strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount,
      0, 0, quarantineMap, quarantineStore, null, 0);
  assertFalse(DegraderLoadBalancerStrategyV3.isOldStateTheSameAsNewState(oldStateV3, newStateV3));
  // test state health comparison
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
  // make cluster average latency to be 300 to be lower than lowWaterMark but still not healthy because
  // points map has clients with less than perfect health
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points,
      strategyV3, currentOverrideDropRate, 300, recoveryMap, name, null, clusterCallCount, 0, 0, quarantineMap,
      quarantineStore, null, 0);
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
  // make all points to have 120 so the cluster becomes "healthy"
  points.put(uri1, 120);
  points.put(uri2, 120);
  points.put(uri3, 120);
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points,
      strategyV3, currentOverrideDropRate, 300, recoveryMap, name, null, clusterCallCount, 0, 0, quarantineMap,
      quarantineStore, null, 0);
  assertTrue(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
  // if currentAverageClusterLatency is > low water mark then cluster becomes unhealthy
  newStateV3 = new PartitionDegraderLoadBalancerState(clusterGenerationId, lastUpdated, initialized, ringFactory, points,
      strategyV3, currentOverrideDropRate, currentAverageClusterLatency, recoveryMap, name, null, clusterCallCount,
      0, 0, quarantineMap, quarantineStore, null, 0);
  assertFalse(DegraderLoadBalancerStrategyV3.isNewStateHealthy(newStateV3, config, clientUpdaters, DEFAULT_PARTITION_ID));
}
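Besides the comparison details, the test shows the basic DelegatingRingFactory workflow: a strategy config built from a property map, a points map keyed by host URI, and a ring created from those points. A minimal usage sketch under that reading; the host URIs and the request key below are invented for illustration:

Map<String, Object> configMap = new HashMap<>();
configMap.put(PropertyKeys.HTTP_LB_STRATEGY_PROPERTIES_POINTS_PER_WEIGHT, 120);
DegraderLoadBalancerStrategyConfig config = DegraderLoadBalancerStrategyConfig.createHttpConfigFromMap(configMap);
RingFactory<URI> ringFactory = new DelegatingRingFactory<>(config);

Map<URI, Integer> points = new HashMap<>();
points.put(URI.create("http://hostA.example.com:10010/ctx"), 100); // healthy host
points.put(URI.create("http://hostB.example.com:10010/ctx"), 50);  // degraded host gets fewer points
Ring<URI> ring = ringFactory.createRing(points);
// Hosts with more points own more of the hash space, so they are returned more often.
URI target = ring.get("request-key-123".hashCode());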
Use of com.linkedin.d2.balancer.strategies.DelegatingRingFactory in project rest.li by LinkedIn.
The class ConsistentHashRingTest, method testManyItemsEqualWeight:
@Test(groups = { "small", "back-end" })
public void testManyItemsEqualWeight() {
  Map<String, Integer> many = new HashMap<>();
  Map<String, AtomicInteger> counts = new HashMap<>();
  for (int i = 0; i < 100; ++i) {
    many.put("test" + i, 10);
    counts.put("test" + i, new AtomicInteger());
  }
  DelegatingRingFactory<String> ringFactory = new DelegatingRingFactory<>(new DegraderLoadBalancerStrategyConfig(1L));
  ConsistentHashRing<String> test = (ConsistentHashRing<String>) ringFactory.createRing(many);
  assertNotNull(test.get(0));
  // verify that each test item has 10 points on the ring
  List<ConsistentHashRing.Point<String>> points = test.getPoints();
  for (int i = 0; i < points.size(); ++i) {
    counts.get(points.get(i).getT()).incrementAndGet();
  }
  for (Entry<String, AtomicInteger> count : counts.entrySet()) {
    assertEquals(count.getValue().get(), 10);
  }
  double highLowDiff = test.getHighLowDiffOfAreaRing();
  assertTrue(highLowDiff < 1.54, "Hash Ring area diff is greater than it should be, saw diff of: " + highLowDiff);
}
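Beyond the exact point counts, a quick way to sanity-check such a ring is to route a batch of synthetic keys and count how often each item is returned; with 100 equally weighted items, each should receive roughly 1% of the traffic. The snippet below is an illustrative continuation of the test above, not part of it, and the key count and bounds are arbitrary:

Random random = new Random(42);
Map<String, Integer> hits = new HashMap<>();
for (int i = 0; i < 100_000; i++) {
  // Route a synthetic request key and record which item it lands on.
  hits.merge(test.get(random.nextInt()), 1, Integer::sum);
}
assertEquals(hits.size(), many.size()); // every item owns some share of the hash space
for (int count : hits.values()) {
  // With only 10 points per item the split is not perfectly even, so assert only a loose bound.
  assertTrue(count > 100 && count < 10_000, "Unexpectedly skewed share: " + count);
}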
Use of com.linkedin.d2.balancer.strategies.DelegatingRingFactory in project rest.li by LinkedIn.
The class RelativeLoadBalancerStrategyFactory, method putDefaultValues:
static D2RelativeStrategyProperties putDefaultValues(D2RelativeStrategyProperties properties) {
  properties.setUpStep(getOrDefault(properties.getUpStep(), DEFAULT_UP_STEP));
  properties.setDownStep(getOrDefault(properties.getDownStep(), DEFAULT_DOWN_STEP));
  properties.setHighErrorRate(getOrDefault(properties.getHighErrorRate(), DEFAULT_HIGH_ERROR_RATE));
  properties.setLowErrorRate(getOrDefault(properties.getLowErrorRate(), DEFAULT_LOW_ERROR_RATE));
  properties.setRelativeLatencyHighThresholdFactor(
      getOrDefault(properties.getRelativeLatencyHighThresholdFactor(), DEFAULT_RELATIVE_LATENCY_HIGH_THRESHOLD_FACTOR));
  properties.setRelativeLatencyLowThresholdFactor(
      getOrDefault(properties.getRelativeLatencyLowThresholdFactor(), DEFAULT_RELATIVE_LATENCY_LOW_THRESHOLD_FACTOR));
  properties.setMinCallCount(getOrDefault(properties.getMinCallCount(), DEFAULT_MIN_CALL_COUNT));
  properties.setUpdateIntervalMs(getOrDefault(properties.getUpdateIntervalMs(), DEFAULT_UPDATE_INTERVAL_MS));
  properties.setInitialHealthScore(getOrDefault(properties.getInitialHealthScore(), DEFAULT_INITIAL_HEALTH_SCORE));
  properties.setSlowStartThreshold(getOrDefault(properties.getSlowStartThreshold(), DEFAULT_SLOW_START_THRESHOLD));
  properties.setErrorStatusFilter(getOrDefault(properties.getErrorStatusFilter(), DEFAULT_ERROR_STATUS_FILTER));
  properties.setEmittingIntervalMs(getOrDefault(properties.getEmittingIntervalMs(), DEFAULT_EMITTING_INTERVAL_MS));
  properties.setEnableFastRecovery(getOrDefault(properties.isEnableFastRecovery(), DEFAULT_ENABLE_FAST_RECOVERY));
  D2QuarantineProperties quarantineProperties = properties.hasQuarantineProperties()
      ? properties.getQuarantineProperties() : new D2QuarantineProperties();
  quarantineProperties.setQuarantineMaxPercent(
      getOrDefault(quarantineProperties.getQuarantineMaxPercent(), DEFAULT_QUARANTINE_MAX_PERCENT));
  quarantineProperties.setHealthCheckMethod(getOrDefault(quarantineProperties.getHealthCheckMethod(), DEFAULT_HTTP_METHOD));
  properties.setQuarantineProperties(quarantineProperties);
  // Most ring properties are initialized in {@link DelegatingRingFactory}
  D2RingProperties ringProperties = properties.hasRingProperties()
      ? properties.getRingProperties() : new D2RingProperties();
  ringProperties.setPointsPerWeight(getOrDefault(ringProperties.getPointsPerWeight(), DEFAULT_POINTS_PER_WEIGHT));
  properties.setRingProperties(ringProperties);
  return properties;
}
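putDefaultValues leans entirely on a small getOrDefault helper whose body is not shown in the excerpt. The behavior implied above is simply "use the configured value unless it is unset"; a plausible sketch, which may differ from the actual helper in RelativeLoadBalancerStrategyFactory:

private static <T> T getOrDefault(T value, T defaultValue) {
  // A field that was never set comes back as null, so fall back to the library default;
  // any explicitly configured value, including zero or false, is kept as-is.
  return value == null ? defaultValue : value;
}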
Use of com.linkedin.d2.balancer.strategies.DelegatingRingFactory in project rest.li by LinkedIn.
The class ConsistentHashRingSimulatorConfig, method toSimulator:
/**
* Creates a {@link ConsistentHashRingSimulator} from the config
*
* @return A ConsistentHashRingSimulator instance
*/
public ConsistentHashRingSimulator toSimulator() {
  String hashingAlgorithm = getHashingAlgorithm();
  double balancingFactor = getBoundedLoadBalancingFactor();
  DegraderLoadBalancerStrategyConfig degraderLoadBalancerStrategyConfig = getConfig(hashingAlgorithm, balancingFactor);
  RingFactory<String> testFactory = new DelegatingRingFactory<>(degraderLoadBalancerStrategyConfig);
  Map<String, Integer> pointsMap = new HashMap<>();
  int serverID = 0;
  for (Server server : getServers()) {
    for (int i = 0; i < server.getNumber(); i++) {
      pointsMap.put("Server" + serverID, server.getPoints());
      serverID += 1;
    }
  }
  DegraderLoadBalancerStrategyConfig consistentConfig = getConfig(hashingAlgorithm, Double.POSITIVE_INFINITY);
  RingFactory<String> consistentFactory = new DelegatingRingFactory<>(consistentConfig);
  List<com.linkedin.d2.balancer.util.hashing.simulator.Client> clients = new ArrayList<>();
  int clientID = 0;
  for (ConsistentHashRingSimulatorConfig.Client client : getClients()) {
    for (int i = 0; i < client.getNumber(); i++) {
      clients.add(new com.linkedin.d2.balancer.util.hashing.simulator.Client("Client" + clientID, client, getShuffleRequests()));
      clientID++;
    }
  }
  int serverCapacity = getServerCapacity();
  return new ConsistentHashRingSimulator(testFactory, consistentFactory, clients, pointsMap, serverCapacity);
}
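The method deliberately builds two factories: testFactory honors the configured bounded-load balancing factor, while consistentFactory is created with Double.POSITIVE_INFINITY and so presumably acts as a plain, unbounded consistent hash ring to compare against. As a usage sketch, the config would typically be deserialized from JSON before calling toSimulator; the file name below is invented and loading with Jackson's ObjectMapper is an assumption, not something shown in the excerpt:

ObjectMapper mapper = new ObjectMapper();
ConsistentHashRingSimulatorConfig simulatorConfig =
    mapper.readValue(new File("ring-simulator-config.json"), ConsistentHashRingSimulatorConfig.class);
// Build the simulator, which compares the bounded-load ring against the unbounded baseline.
ConsistentHashRingSimulator simulator = simulatorConfig.toSimulator();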