Use of com.linkedin.r2.transport.http.client.ConstantQpsRateLimiter in project rest.li by LinkedIn.
Class TestDarkClusterStrategyFactory, method setup.
@BeforeMethod
public void setup() {
  // Register a pre-existing dark cluster with the mock provider before the
  // factory starts, so startup discovery has something to find.
  _clusterInfoProvider = new MockClusterInfoProvider();
  DarkClusterConfig preexistingConfig = createRelativeTrafficMultiplierConfig(0.5f);
  _clusterInfoProvider.addDarkClusterConfig(SOURCE_CLUSTER_NAME, PREEXISTING_DARK_CLUSTER_NAME, preexistingConfig);

  Facilities mockFacilities = new MockFacilities(_clusterInfoProvider);
  DarkClusterDispatcher dispatcher = new DefaultDarkClusterDispatcher(new MockClient(false));
  // A single simulated clock drives scheduling, execution, and the circular buffer.
  ClockedExecutor clockedExecutor = new ClockedExecutor();
  _rateLimiterSupplier = () -> new ConstantQpsRateLimiter(clockedExecutor, clockedExecutor, clockedExecutor,
      TestConstantQpsDarkClusterStrategy.getBuffer(clockedExecutor));

  _strategyFactory = new DarkClusterStrategyFactoryImpl(mockFacilities, SOURCE_CLUSTER_NAME, dispatcher,
      new DoNothingNotifier(), new Random(SEED), new CountingVerifierManager(), _rateLimiterSupplier);
  _strategyFactory.start();
}
Use of com.linkedin.r2.transport.http.client.ConstantQpsRateLimiter in project rest.li by LinkedIn.
Class TestConstantQpsRateLimiter, method submitOnceGetMany.
@Test(timeOut = TEST_TIMEOUT)
public void submitOnceGetMany() {
  // Separate clocks for the rate limiter and the circular buffer backing it.
  ClockedExecutor clock = new ClockedExecutor();
  ClockedExecutor bufferClock = new ClockedExecutor();
  ConstantQpsRateLimiter limiter =
      new ConstantQpsRateLimiter(clock, clock, clock, TestEvictingCircularBuffer.getBuffer(bufferClock));
  limiter.setRate(TEST_QPS, ONE_SECOND, UNLIMITED_BURST);
  limiter.setBufferCapacity(1);

  TattlingCallback<None> callback = new TattlingCallback<>(clock);
  limiter.submit(callback);
  clock.runFor(ONE_SECOND * TEST_NUM_CYCLES);
  // A single submitted callback should be replayed multiple times by the limiter.
  Assert.assertTrue(callback.getInteractCount() > 1);
}
Use of com.linkedin.r2.transport.http.client.ConstantQpsRateLimiter in project rest.li by LinkedIn.
Class TestConstantQpsRateLimiter, method eventLoopStopsWhenTtlExpiresAllRequests.
@Test(timeOut = TEST_TIMEOUT)
public void eventLoopStopsWhenTtlExpiresAllRequests() {
  ClockedExecutor executor = new ClockedExecutor();
  ConstantQpsRateLimiter rateLimiter =
      new ConstantQpsRateLimiter(executor, executor, executor, TestEvictingCircularBuffer.getBuffer(executor));
  rateLimiter.setRate(TEST_QPS, ONE_SECOND, UNLIMITED_BURST);
  // TTL is shorter than one dispatch period, so the buffered request expires
  // after the first second of replay.
  rateLimiter.setBufferTtl(ONE_SECOND - 1, ChronoUnit.MILLIS);
  TattlingCallback<None> tattler = new TattlingCallback<>(executor);
  rateLimiter.submit(tattler);
  executor.runFor(ONE_SECOND * TEST_NUM_CYCLES);
  // Fixed: was assertSame, which autoboxes the int operands and compares object
  // identity — unreliable for values outside the Integer cache (-128..127).
  // assertEquals compares the values themselves.
  Assert.assertEquals(tattler.getInteractCount(), (int) TEST_QPS);
  long prevTaskCount = executor.getExecutedTaskCount();
  executor.runFor(ONE_SECOND * TEST_NUM_CYCLES);
  // EventLoop continues by scheduling itself at the end. If executed task count remains the same,
  // then EventLoop hasn't re-scheduled itself.
  Assert.assertEquals(executor.getExecutedTaskCount(), prevTaskCount);
}
Use of com.linkedin.r2.transport.http.client.ConstantQpsRateLimiter in project rest.li by LinkedIn.
Class TestConstantQpsRateLimiter, method testLowRateHighlyParallelConsistentRandomness.
// Known to be flaky in CI
// Known to be flaky in CI
@Test(retryAnalyzer = ThreeRetries.class)
public void testLowRateHighlyParallelConsistentRandomness() {
  // Simulate a large production cluster dispatching a very low rate of traffic.
  // This test verifies that the resulting qps from a distributed collection of dispatchers
  // follows a predictable pattern within the defined tolerances.
  int maxBurstFailCount = 0;
  int burstFreqFailCount = 0;
  int zeroFreqFailCount = 0;
  for (int n = 0; n < TEST_NUM_CYCLES; n++) {
    // Set simulated test time such that each replica sends exactly one request.
    int totalRuntime = (int) (ONE_SECOND / (TEST_QPS / LARGE_TEST_NUM_REPLICAS));
    List<Long> queryTimes = new ArrayList<>();
    for (int i = 0; i < LARGE_TEST_NUM_REPLICAS; i++) {
      ClockedExecutor executor = new ClockedExecutor();
      ConstantQpsRateLimiter rateLimiter =
          new ConstantQpsRateLimiter(executor, executor, executor, TestEvictingCircularBuffer.getBuffer(executor));
      // Effectively infinite TTL: entries only leave the buffer via capacity eviction.
      rateLimiter.setBufferTtl(Integer.MAX_VALUE, ChronoUnit.DAYS);
      rateLimiter.setBufferCapacity(1);
      // Split an already low TEST_QPS across a large number of replicas
      rateLimiter.setRate(TEST_QPS / LARGE_TEST_NUM_REPLICAS, ONE_SECOND, 1);
      TattlingCallback<None> tattler = new TattlingCallback<>(executor);
      rateLimiter.submit(tattler);
      // Intermix inbound queries while running clock at the defined rate
      for (int x = 0; x < totalRuntime; x = x + ONE_SECOND / LARGE_TEST_INBOUND_QPS_PER_REPLICA) {
        // ensure that calling setRate before submitting a new callback does not
        // detrimentally affect random distribution
        rateLimiter.setRate(TEST_QPS / LARGE_TEST_NUM_REPLICAS, ONE_SECOND, 1);
        rateLimiter.submit(tattler);
        executor.runFor(ONE_SECOND / LARGE_TEST_INBOUND_QPS_PER_REPLICA);
      }
      for (Long stamp : tattler.getOccurrences()) {
        // Prefer this over making totalRuntime 1ms shorter since it keeps the math clean
        if (stamp != totalRuntime) {
          queryTimes.add(stamp);
        }
      }
    }
    // Each replica should have sent exactly one request.
    // Fixed: bare `assert` statements are silently skipped unless the JVM runs
    // with -ea; use TestNG assertions so these checks always execute.
    Assert.assertTrue(queryTimes.size() == LARGE_TEST_NUM_REPLICAS);
    int[] queriesPerBucketedSecond = new int[totalRuntime / ONE_SECOND];
    for (Long stamp : queryTimes) {
      int idx = (int) (stamp / ONE_SECOND);
      queriesPerBucketedSecond[idx]++;
    }
    // ensure the cluster sent an average of the TEST_QPS
    Assert.assertTrue(Arrays.stream(queriesPerBucketedSecond).average().getAsDouble() == TEST_QPS);
    // Ensure our bursts in queries in a given second aren't too high
    if (Arrays.stream(queriesPerBucketedSecond).max().getAsInt() > TEST_QPS * LARGE_TEST_MAX_BURST_MULTIPLE) {
      maxBurstFailCount++;
    }
    // Make sure though that we don't see too many seconds with high query volume
    if (Arrays.stream(queriesPerBucketedSecond)
        .filter(a -> a > TEST_QPS * LARGE_TEST_MAX_BURST_MULTIPLE * 0.67).count() > LARGE_TEST_MAX_BURST_FREQUENCY_COUNT) {
      burstFreqFailCount++;
    }
    // Make sure we don't have too many cases of sending zero qps.
    if (Arrays.stream(queriesPerBucketedSecond).filter(a -> a == 0).count() > LARGE_TEST_MAX_ZERO_FREQUENCY_COUNT) {
      zeroFreqFailCount++;
    }
  }
  // Query volume stability assertions should be true within the defined confidence value
  int acceptableFailCount = Math.round((TEST_NUM_CYCLES * (1 - LARGE_TEST_QUERY_VOLUME_CONSISTENCY_CONFIDENCE)));
  Assert.assertTrue(maxBurstFailCount <= acceptableFailCount);
  Assert.assertTrue(burstFreqFailCount <= acceptableFailCount);
  Assert.assertTrue(zeroFreqFailCount <= acceptableFailCount);
}
Use of com.linkedin.r2.transport.http.client.ConstantQpsRateLimiter in project rest.li by LinkedIn.
Class TestConstantQpsRateLimiter, method ensureRandomButConstantRate.
@Test
public void ensureRandomButConstantRate() {
  ClockedExecutor executor = new ClockedExecutor();
  ClockedExecutor circularBufferExecutor = new ClockedExecutor();
  ConstantQpsRateLimiter rateLimiter =
      new ConstantQpsRateLimiter(executor, executor, executor,
          TestEvictingCircularBuffer.getBuffer(circularBufferExecutor));
  rateLimiter.setRate(200d, ONE_SECOND, 1);
  rateLimiter.setBufferCapacity(1);
  TattlingCallback<None> tattler = new TattlingCallback<>(executor);
  rateLimiter.submit(tattler);
  executor.runFor(ONE_SECOND * TEST_NUM_CYCLES);
  // Collect the gaps between consecutive dispatch timestamps.
  long prevTime = 0;
  List<Long> timeDeltas = new ArrayList<>();
  for (Long stamp : tattler.getOccurrences()) {
    timeDeltas.add(stamp - prevTime);
    prevTime = stamp;
  }
  // Ensure variance up to 10 possible time deltas given a rate of 200 requests per second.
  // Fixed: was a bare `assert`, which is silently skipped unless the JVM runs
  // with -ea; TestNG's Assert always executes.
  Set<Long> uniqueTimeDeltas = new HashSet<>(timeDeltas);
  Assert.assertTrue(uniqueTimeDeltas.size() > 8 && uniqueTimeDeltas.size() < 11);
}
Aggregations