Use of com.linkedin.test.util.ClockedExecutor in the rest.li project by LinkedIn.
From the class TestConstantQpsRateLimiter, method submitOnceGetMany.
@Test(timeOut = TEST_TIMEOUT)
public void submitOnceGetMany() {
  // A single submitted callback should be re-dispatched on every cycle of the
  // constant-rate event loop, so one submit yields more than one interaction.
  ClockedExecutor loopExecutor = new ClockedExecutor();
  ClockedExecutor bufferExecutor = new ClockedExecutor();
  ConstantQpsRateLimiter limiter =
      new ConstantQpsRateLimiter(loopExecutor, loopExecutor, loopExecutor,
          TestEvictingCircularBuffer.getBuffer(bufferExecutor));
  limiter.setRate(TEST_QPS, ONE_SECOND, UNLIMITED_BURST);
  limiter.setBufferCapacity(1);
  TattlingCallback<None> tattler = new TattlingCallback<>(loopExecutor);
  limiter.submit(tattler);
  // Advance the simulated clock across several full periods and verify the
  // buffered callback was replayed repeatedly.
  loopExecutor.runFor(ONE_SECOND * TEST_NUM_CYCLES);
  Assert.assertTrue(tattler.getInteractCount() > 1);
}
Use of com.linkedin.test.util.ClockedExecutor in the rest.li project by LinkedIn.
From the class TestConstantQpsRateLimiter, method eventLoopStopsWhenTtlExpiresAllRequests.
@Test(timeOut = TEST_TIMEOUT)
public void eventLoopStopsWhenTtlExpiresAllRequests() {
  // Verifies the event loop shuts itself down once the buffer TTL has expired
  // every stored request, instead of rescheduling itself forever.
  ClockedExecutor executor = new ClockedExecutor();
  ConstantQpsRateLimiter rateLimiter =
      new ConstantQpsRateLimiter(executor, executor, executor,
          TestEvictingCircularBuffer.getBuffer(executor));
  rateLimiter.setRate(TEST_QPS, ONE_SECOND, UNLIMITED_BURST);
  // TTL is just under one full period, so every buffered request expires before
  // a second cycle of dispatches can start.
  rateLimiter.setBufferTtl(ONE_SECOND - 1, ChronoUnit.MILLIS);
  TattlingCallback<None> tattler = new TattlingCallback<>(executor);
  rateLimiter.submit(tattler);
  executor.runFor(ONE_SECOND * TEST_NUM_CYCLES);
  // assertEquals, not assertSame: assertSame compares boxed object identity, which is
  // only reliable for values inside the Integer autobox cache (-128..127) and would
  // make this assertion spuriously fail for larger counts.
  Assert.assertEquals(tattler.getInteractCount(), (int) TEST_QPS);
  long prevTaskCount = executor.getExecutedTaskCount();
  executor.runFor(ONE_SECOND * TEST_NUM_CYCLES);
  // EventLoop continues by scheduling itself at the end. If executed task count remains the same,
  // then EventLoop hasn't re-scheduled itself. Same boxed-identity pitfall applies to
  // Long here, hence assertEquals rather than assertSame.
  Assert.assertEquals(executor.getExecutedTaskCount(), prevTaskCount);
}
Use of com.linkedin.test.util.ClockedExecutor in the rest.li project by LinkedIn.
From the class TestConstantQpsRateLimiter, method testLowRateHighlyParallelConsistentRandomness.
// Known to be flaky in CI
@Test(retryAnalyzer = ThreeRetries.class)
public void testLowRateHighlyParallelConsistentRandomness() {
  // Simulate a large production cluster dispatching a very low rate of traffic.
  // This test verifies that the resulting qps from a distributed collection of dispatchers
  // follows a predictable pattern within the defined tolerances.
  int maxBurstFailCount = 0;
  int burstFreqFailCount = 0;
  int zeroFreqFailCount = 0;
  for (int n = 0; n < TEST_NUM_CYCLES; n++) {
    // Set simulated test time such that each replica sends exactly one request.
    int totalRuntime = (int) (ONE_SECOND / (TEST_QPS / LARGE_TEST_NUM_REPLICAS));
    List<Long> queryTimes = new ArrayList<>();
    for (int i = 0; i < LARGE_TEST_NUM_REPLICAS; i++) {
      ClockedExecutor executor = new ClockedExecutor();
      ConstantQpsRateLimiter rateLimiter =
          new ConstantQpsRateLimiter(executor, executor, executor,
              TestEvictingCircularBuffer.getBuffer(executor));
      rateLimiter.setBufferTtl(Integer.MAX_VALUE, ChronoUnit.DAYS);
      rateLimiter.setBufferCapacity(1);
      // Split an already low TEST_QPS across a large number of replicas
      rateLimiter.setRate(TEST_QPS / LARGE_TEST_NUM_REPLICAS, ONE_SECOND, 1);
      TattlingCallback<None> tattler = new TattlingCallback<>(executor);
      rateLimiter.submit(tattler);
      // Intermix inbound queries while running clock at the defined rate
      for (int x = 0; x < totalRuntime; x = x + ONE_SECOND / LARGE_TEST_INBOUND_QPS_PER_REPLICA) {
        // ensure that calling setRate before submitting a new callback does not detrimentally affect random distribution
        rateLimiter.setRate(TEST_QPS / LARGE_TEST_NUM_REPLICAS, ONE_SECOND, 1);
        rateLimiter.submit(tattler);
        executor.runFor(ONE_SECOND / LARGE_TEST_INBOUND_QPS_PER_REPLICA);
      }
      for (Long stamp : tattler.getOccurrences()) {
        // Prefer this over making totalRuntime 1ms shorter since it keeps the math clean
        if (stamp != totalRuntime) {
          queryTimes.add(stamp);
        }
      }
    }
    // Each replica should have only sent one request.
    // Use Assert.assertTrue rather than a bare "assert": bare Java asserts are no-ops
    // unless the JVM runs with -ea, so these checks could otherwise silently pass.
    Assert.assertTrue(queryTimes.size() == LARGE_TEST_NUM_REPLICAS,
        "expected one query per replica, got " + queryTimes.size());
    int[] queriesPerBucketedSecond = new int[totalRuntime / ONE_SECOND];
    for (Long stamp : queryTimes) {
      int idx = (int) (stamp / ONE_SECOND);
      queriesPerBucketedSecond[idx]++;
    }
    // ensure the cluster sent an average of the TEST_QPS
    Assert.assertTrue(
        Arrays.stream(queriesPerBucketedSecond).average().getAsDouble() == TEST_QPS,
        "cluster-wide average qps did not match TEST_QPS");
    // Ensure our bursts in queries in a given second aren't too high
    if (Arrays.stream(queriesPerBucketedSecond).max().getAsInt() > TEST_QPS * LARGE_TEST_MAX_BURST_MULTIPLE) {
      maxBurstFailCount++;
    }
    // Make sure though that we don't see too many seconds with high query volume
    if (Arrays.stream(queriesPerBucketedSecond)
        .filter(a -> a > TEST_QPS * LARGE_TEST_MAX_BURST_MULTIPLE * 0.67)
        .count() > LARGE_TEST_MAX_BURST_FREQUENCY_COUNT) {
      burstFreqFailCount++;
    }
    // Make sure we don't have too many cases of sending zero qps.
    if (Arrays.stream(queriesPerBucketedSecond).filter(a -> a == 0).count() > LARGE_TEST_MAX_ZERO_FREQUENCY_COUNT) {
      zeroFreqFailCount++;
    }
  }
  // Query volume stability assertions should be true within the defined confidence value
  int acceptableFailCount = Math.round((TEST_NUM_CYCLES * (1 - LARGE_TEST_QUERY_VOLUME_CONSISTENCY_CONFIDENCE)));
  Assert.assertTrue(maxBurstFailCount <= acceptableFailCount,
      "too many cycles exceeded the max burst size: " + maxBurstFailCount);
  Assert.assertTrue(burstFreqFailCount <= acceptableFailCount,
      "too many cycles had frequent bursts: " + burstFreqFailCount);
  Assert.assertTrue(zeroFreqFailCount <= acceptableFailCount,
      "too many cycles had frequent zero-qps seconds: " + zeroFreqFailCount);
}
Use of com.linkedin.test.util.ClockedExecutor in the rest.li project by LinkedIn.
From the class BaseTestSmoothRateLimiter, method testSetRateInstantaneous.
// Verifies that the most recent setRate call takes effect immediately rather than
// waiting for the next period boundary.
@Test(timeOut = TEST_TIMEOUT)
public void testSetRateInstantaneous() {
ClockedExecutor clockedExecutor = new ClockedExecutor();
AsyncRateLimiter rateLimiter = getRateLimiter(clockedExecutor, clockedExecutor, clockedExecutor);
List<FutureCallback<None>> callbacks = new ArrayList<>();
IntStream.range(0, 10).forEachOrdered(i -> {
FutureCallback<None> callback = new FutureCallback<>();
rateLimiter.submit(callback);
callbacks.add(callback);
});
// the last set should take immediately effect, and therefore at ms 0, we should have 3 permits available
rateLimiter.setRate(0d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
rateLimiter.setRate(1d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
rateLimiter.setRate(2d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
rateLimiter.setRate(3d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
// trigger task to run them until current time
clockedExecutor.runFor(0);
// The final rate (3 per period) applies instantly: exactly the first 3 callbacks
// complete at time 0, with the remaining 7 still pending.
IntStream.range(0, 3).forEach(i -> assertTrue(callbacks.get(i).isDone(), i + " should have been executed " + callbacks.get(i)));
IntStream.range(3, 10).forEach(i -> assertFalse(callbacks.get(i).isDone(), i + " should not have been executed"));
// Advancing one full period issues 3 more permits: callbacks 3-5 complete, 6-9 remain.
clockedExecutor.runFor(ONE_MILLISECOND_PERIOD);
IntStream.range(3, 6).forEach(i -> assertTrue(callbacks.get(i).isDone(), i + " should have been executed " + callbacks.get(i)));
IntStream.range(6, 10).forEach(i -> assertFalse(callbacks.get(i).isDone(), i + " should not have been executed"));
}
Use of com.linkedin.test.util.ClockedExecutor in the rest.li project by LinkedIn.
From the class BaseTestSmoothRateLimiter, method testSetRate.
@Test(timeOut = TEST_TIMEOUT)
public void testSetRate() throws Exception {
  // Exercises rate changes mid-stream: raising the rate releases more callbacks per
  // period, lowering it releases fewer, all under a deterministic simulated clock.
  ClockedExecutor clock = new ClockedExecutor();
  AsyncRateLimiter limiter = getRateLimiter(clock, clock, clock);
  limiter.setRate(ONE_PERMIT_PER_PERIOD, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
  List<FutureCallback<None>> pending = new ArrayList<>();
  for (int i = 0; i < 5; i++) {
    FutureCallback<None> cb = new FutureCallback<>();
    limiter.submit(cb);
    pending.add(cb);
  }
  // trigger task to run them until current time
  clock.runFor(0);
  // We have one permit to begin with so the first task should run immediate and left with four pending
  pending.get(0).get();
  assertTrue(pending.get(0).isDone());
  for (int i = 1; i < 5; i++) {
    assertFalse(pending.get(i).isDone(), i + " should not have been executed");
  }
  clock.runFor(ONE_MILLISECOND_PERIOD);
  // We set the permit rate to two per period and increment the clock by one millisecond. We expect two
  // more callbacks to be invoked at the next permit issuance
  limiter.setRate(2d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
  clock.runFor(0);
  pending.get(1).get();
  pending.get(2).get();
  for (int i = 0; i < 3; i++) {
    assertTrue(pending.get(i).isDone());
  }
  for (int i = 3; i < 5; i++) {
    assertFalse(pending.get(i).isDone(), i + " should not have been executed");
  }
  // We set the permit rate back to one per period and increment the clock by one millisecond. We expect
  // only one more callbacks to be invoked at the next permit issuance
  limiter.setRate(1d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
  clock.runFor(ONE_MILLISECOND_PERIOD);
  pending.get(3).get();
  for (int i = 0; i < 4; i++) {
    assertTrue(pending.get(i).isDone());
  }
  assertFalse(pending.get(4).isDone(), 4 + " should not have been executed");
  // We set the permit rate to two per period again and increment the clock by one millisecond. We expect
  // only one more callbacks to be invoked at the next permit issuance because only one is left
  limiter.setRate(2d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
  clock.runFor(ONE_MILLISECOND_PERIOD);
  pending.get(4).get();
  for (int i = 0; i < 5; i++) {
    assertTrue(pending.get(i).isDone());
  }
}
Aggregations