
Example 6 with ClockedExecutor

Use of com.linkedin.test.util.ClockedExecutor in project rest.li by linkedin.

The class TestConstantQpsRateLimiter, method submitOnceGetMany.

@Test(timeOut = TEST_TIMEOUT)
public void submitOnceGetMany() {
    ClockedExecutor executor = new ClockedExecutor();
    ClockedExecutor circularBufferExecutor = new ClockedExecutor();
    ConstantQpsRateLimiter rateLimiter = new ConstantQpsRateLimiter(executor, executor, executor, TestEvictingCircularBuffer.getBuffer(circularBufferExecutor));
    rateLimiter.setRate(TEST_QPS, ONE_SECOND, UNLIMITED_BURST);
    rateLimiter.setBufferCapacity(1);
    TattlingCallback<None> tattler = new TattlingCallback<>(executor);
    rateLimiter.submit(tattler);
    executor.runFor(ONE_SECOND * TEST_NUM_CYCLES);
    Assert.assertTrue(tattler.getInteractCount() > 1);
}
Also used: ConstantQpsRateLimiter (com.linkedin.r2.transport.http.client.ConstantQpsRateLimiter), ClockedExecutor (com.linkedin.test.util.ClockedExecutor), None (com.linkedin.common.util.None), Test (org.testng.annotations.Test)
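
TattlingCallback is a helper from the rest.li test sources and is not reproduced on this page. As a rough sketch of what these tests rely on, the hypothetical CountingCallback below records how many times (and, via a clock supplier, when) the rate limiter invoked it. Only the standard com.linkedin.common.callback.Callback contract is assumed; the LongSupplier clock is a stand-in, not the real ClockedExecutor API.

import java.util.ArrayList;
import java.util.List;
import java.util.function.LongSupplier;

import com.linkedin.common.callback.Callback;
import com.linkedin.common.util.None;

// Hypothetical stand-in for TattlingCallback: counts interactions and records the
// (virtual) time of each one, so a test can assert how often the rate limiter
// drained it from the buffer.
class CountingCallback implements Callback<None> {
    private final LongSupplier _clock;                 // e.g. a reference to the test clock (assumed accessor)
    private final List<Long> _occurrences = new ArrayList<>();

    CountingCallback(LongSupplier clock) {
        _clock = clock;
    }

    @Override
    public void onSuccess(None result) {
        _occurrences.add(_clock.getAsLong());          // record when the rate limiter invoked us
    }

    @Override
    public void onError(Throwable e) {
        _occurrences.add(_clock.getAsLong());          // errors count as interactions too
    }

    int getInteractCount() {
        return _occurrences.size();
    }

    List<Long> getOccurrences() {
        return _occurrences;
    }
}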

Example 7 with ClockedExecutor

Use of com.linkedin.test.util.ClockedExecutor in project rest.li by linkedin.

The class TestConstantQpsRateLimiter, method eventLoopStopsWhenTtlExpiresAllRequests.

@Test(timeOut = TEST_TIMEOUT)
public void eventLoopStopsWhenTtlExpiresAllRequests() {
    ClockedExecutor executor = new ClockedExecutor();
    ConstantQpsRateLimiter rateLimiter = new ConstantQpsRateLimiter(executor, executor, executor, TestEvictingCircularBuffer.getBuffer(executor));
    rateLimiter.setRate(TEST_QPS, ONE_SECOND, UNLIMITED_BURST);
    rateLimiter.setBufferTtl(ONE_SECOND - 1, ChronoUnit.MILLIS);
    TattlingCallback<None> tattler = new TattlingCallback<>(executor);
    rateLimiter.submit(tattler);
    executor.runFor(ONE_SECOND * TEST_NUM_CYCLES);
    Assert.assertSame(tattler.getInteractCount(), (int) TEST_QPS);
    long prevTaskCount = executor.getExecutedTaskCount();
    executor.runFor(ONE_SECOND * TEST_NUM_CYCLES);
    // EventLoop continues by scheduling itself at the end. If executed task count remains the same,
    // then EventLoop hasn't re-scheduled itself.
    Assert.assertSame(executor.getExecutedTaskCount(), prevTaskCount);
}
Also used: ConstantQpsRateLimiter (com.linkedin.r2.transport.http.client.ConstantQpsRateLimiter), ClockedExecutor (com.linkedin.test.util.ClockedExecutor), None (com.linkedin.common.util.None), Test (org.testng.annotations.Test)
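
These assertions work because ClockedExecutor runs everything on a virtual clock: runFor advances simulated time and executes whatever was scheduled in that window, and getExecutedTaskCount reports how many tasks have run so far. The sketch below is not the com.linkedin.test.util.ClockedExecutor implementation, just a minimal single-threaded illustration of that idea using a delay-ordered queue.

import java.util.PriorityQueue;
import java.util.concurrent.atomic.AtomicLong;

// Minimal illustration of a virtual-clock executor: tasks are scheduled at an
// absolute virtual time and run only when runFor() advances the clock past them.
// This is a sketch of the concept, not the real ClockedExecutor API.
class VirtualClockExecutor {
    private static final class Scheduled implements Comparable<Scheduled> {
        final long time;
        final Runnable task;
        Scheduled(long time, Runnable task) { this.time = time; this.task = task; }
        public int compareTo(Scheduled o) { return Long.compare(time, o.time); }
    }

    private final PriorityQueue<Scheduled> _queue = new PriorityQueue<>();
    private final AtomicLong _executedTaskCount = new AtomicLong();
    private long _now = 0;

    void schedule(Runnable task, long delayMs) {
        _queue.add(new Scheduled(_now + delayMs, task));
    }

    // Advance the virtual clock by durationMs, running every task due in that window.
    void runFor(long durationMs) {
        long deadline = _now + durationMs;
        while (!_queue.isEmpty() && _queue.peek().time <= deadline) {
            Scheduled next = _queue.poll();
            _now = next.time;                          // jump the clock to the task's due time
            next.task.run();                           // the task may schedule follow-up work
            _executedTaskCount.incrementAndGet();
        }
        _now = deadline;
    }

    long currentTimeMillis() { return _now; }

    long getExecutedTaskCount() { return _executedTaskCount.get(); }
}

A self-rescheduling event loop keeps bumping the executed-task count on every runFor call, which is exactly the signal eventLoopStopsWhenTtlExpiresAllRequests checks for, in reverse, once the buffer TTL has expired every stored request.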

Example 8 with ClockedExecutor

Use of com.linkedin.test.util.ClockedExecutor in project rest.li by linkedin.

The class TestConstantQpsRateLimiter, method testLowRateHighlyParallelConsistentRandomness.

// Known to be flaky in CI
@Test(retryAnalyzer = ThreeRetries.class)
public void testLowRateHighlyParallelConsistentRandomness() {
    // Simulate a large production cluster dispatching a very low rate of traffic.
    // This test verifies that the resulting qps from a distributed collection of dispatchers
    // follows a predictable pattern within the defined tolerances.
    int maxBurstFailCount = 0;
    int burstFreqFailCount = 0;
    int zeroFreqFailCount = 0;
    for (int n = 0; n < TEST_NUM_CYCLES; n++) {
        // Set simulated test time such that each replica sends exactly one request.
        int totalRuntime = (int) (ONE_SECOND / (TEST_QPS / LARGE_TEST_NUM_REPLICAS));
        List<Long> queryTimes = new ArrayList<>();
        for (int i = 0; i < LARGE_TEST_NUM_REPLICAS; i++) {
            ClockedExecutor executor = new ClockedExecutor();
            ConstantQpsRateLimiter rateLimiter = new ConstantQpsRateLimiter(executor, executor, executor, TestEvictingCircularBuffer.getBuffer(executor));
            rateLimiter.setBufferTtl(Integer.MAX_VALUE, ChronoUnit.DAYS);
            rateLimiter.setBufferCapacity(1);
            // Split an already low TEST_QPS across a large number of replicas
            rateLimiter.setRate(TEST_QPS / LARGE_TEST_NUM_REPLICAS, ONE_SECOND, 1);
            TattlingCallback<None> tattler = new TattlingCallback<>(executor);
            rateLimiter.submit(tattler);
            // Intermix inbound queries while running clock at the defined rate
            for (int x = 0; x < totalRuntime; x = x + ONE_SECOND / LARGE_TEST_INBOUND_QPS_PER_REPLICA) {
                // ensure that calling setRate before submitting a new callback does not detrimentally affect random distribution
                rateLimiter.setRate(TEST_QPS / LARGE_TEST_NUM_REPLICAS, ONE_SECOND, 1);
                rateLimiter.submit(tattler);
                executor.runFor(ONE_SECOND / LARGE_TEST_INBOUND_QPS_PER_REPLICA);
            }
            for (Long stamp : tattler.getOccurrences()) {
                // Prefer this over making totalRuntime 1ms shorter since it keeps the math clean
                if (stamp != totalRuntime) {
                    queryTimes.add(stamp);
                }
            }
        }
        // each replica should have only sent one request
        assert (queryTimes.size() == LARGE_TEST_NUM_REPLICAS);
        int[] queriesPerBucketedSecond = new int[totalRuntime / ONE_SECOND];
        for (Long stamp : queryTimes) {
            int idx = (int) (stamp / ONE_SECOND);
            queriesPerBucketedSecond[idx]++;
        }
        // ensure the cluster sent an average of the TEST_QPS
        assert (Arrays.stream(queriesPerBucketedSecond).average().getAsDouble() == TEST_QPS);
        // Ensure our bursts in queries in a given second aren't too high
        if (Arrays.stream(queriesPerBucketedSecond).max().getAsInt() > TEST_QPS * LARGE_TEST_MAX_BURST_MULTIPLE) {
            maxBurstFailCount++;
        }
        // Make sure though that we don't see too many seconds with high query volume
        if (Arrays.stream(queriesPerBucketedSecond).filter(a -> a > TEST_QPS * LARGE_TEST_MAX_BURST_MULTIPLE * 0.67).count() > LARGE_TEST_MAX_BURST_FREQUENCY_COUNT) {
            burstFreqFailCount++;
        }
        // Make sure we don't have too many cases of sending zero qps.
        if (Arrays.stream(queriesPerBucketedSecond).filter(a -> a == 0).count() > LARGE_TEST_MAX_ZERO_FREQUENCY_COUNT) {
            zeroFreqFailCount++;
        }
    }
    // Query volume stability assertions should be true within the defined confidence value
    int acceptableFailCount = Math.round((TEST_NUM_CYCLES * (1 - LARGE_TEST_QUERY_VOLUME_CONSISTENCY_CONFIDENCE)));
    assert (maxBurstFailCount <= acceptableFailCount);
    assert (burstFreqFailCount <= acceptableFailCount);
    assert (zeroFreqFailCount <= acceptableFailCount);
}
Also used: ConstantQpsRateLimiter (com.linkedin.r2.transport.http.client.ConstantQpsRateLimiter), ArrayList (java.util.ArrayList), ClockedExecutor (com.linkedin.test.util.ClockedExecutor), None (com.linkedin.common.util.None), Test (org.testng.annotations.Test)
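
The burst and idle-second checks above reduce to bucketing the recorded timestamps into one-second windows and inspecting the resulting distribution. The standalone sketch below pulls that bookkeeping out of the test, using made-up timestamps and a 4-second window rather than the TEST_* constants.

import java.util.Arrays;
import java.util.List;

// Buckets query timestamps (millis on a virtual clock) into one-second windows
// and reports the average rate, the worst-case burst, and how many seconds were idle.
final class QueryBuckets {
    static int[] bucketPerSecond(List<Long> queryTimesMs, long totalRuntimeMs, long oneSecondMs) {
        int[] buckets = new int[(int) (totalRuntimeMs / oneSecondMs)];
        for (long stamp : queryTimesMs) {
            buckets[(int) (stamp / oneSecondMs)]++;
        }
        return buckets;
    }

    public static void main(String[] args) {
        long oneSecond = 1000L;
        List<Long> stamps = Arrays.asList(120L, 980L, 1500L, 3100L);   // made-up timestamps
        int[] buckets = bucketPerSecond(stamps, 4 * oneSecond, oneSecond);

        double averageQps = Arrays.stream(buckets).average().orElse(0);
        int maxBurst = Arrays.stream(buckets).max().orElse(0);
        long idleSeconds = Arrays.stream(buckets).filter(b -> b == 0).count();

        System.out.printf("avg=%.2f qps, max burst=%d, idle seconds=%d%n",
            averageQps, maxBurst, idleSeconds);        // prints avg=1.00 qps, max burst=2, idle seconds=1
    }
}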

Example 9 with ClockedExecutor

Use of com.linkedin.test.util.ClockedExecutor in project rest.li by linkedin.

The class BaseTestSmoothRateLimiter, method testSetRateInstantaneous.

@Test(timeOut = TEST_TIMEOUT)
public void testSetRateInstantaneous() {
    ClockedExecutor clockedExecutor = new ClockedExecutor();
    AsyncRateLimiter rateLimiter = getRateLimiter(clockedExecutor, clockedExecutor, clockedExecutor);
    List<FutureCallback<None>> callbacks = new ArrayList<>();
    IntStream.range(0, 10).forEachOrdered(i -> {
        FutureCallback<None> callback = new FutureCallback<>();
        rateLimiter.submit(callback);
        callbacks.add(callback);
    });
    // The last setRate call takes effect immediately, so at ms 0 we should have 3 permits available
    rateLimiter.setRate(0d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
    rateLimiter.setRate(1d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
    rateLimiter.setRate(2d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
    rateLimiter.setRate(3d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
    // run the clock for 0 ms to trigger any tasks due at the current time
    clockedExecutor.runFor(0);
    // Three permits are available at ms 0, so the first three tasks run immediately, leaving seven pending
    IntStream.range(0, 3).forEach(i -> assertTrue(callbacks.get(i).isDone(), i + " should have been executed " + callbacks.get(i)));
    IntStream.range(3, 10).forEach(i -> assertFalse(callbacks.get(i).isDone(), i + " should not have been executed"));
    clockedExecutor.runFor(ONE_MILLISECOND_PERIOD);
    IntStream.range(3, 6).forEach(i -> assertTrue(callbacks.get(i).isDone(), i + " should have been executed " + callbacks.get(i)));
    IntStream.range(6, 10).forEach(i -> assertFalse(callbacks.get(i).isDone(), i + " should not have been executed"));
}
Also used: AsyncRateLimiter (com.linkedin.r2.transport.http.client.AsyncRateLimiter), ArrayList (java.util.ArrayList), ClockedExecutor (com.linkedin.test.util.ClockedExecutor), None (com.linkedin.common.util.None), FutureCallback (com.linkedin.common.callback.FutureCallback), Test (org.testng.annotations.Test)

Example 10 with ClockedExecutor

Use of com.linkedin.test.util.ClockedExecutor in project rest.li by linkedin.

The class BaseTestSmoothRateLimiter, method testSetRate.

@Test(timeOut = TEST_TIMEOUT)
public void testSetRate() throws Exception {
    ClockedExecutor clockedExecutor = new ClockedExecutor();
    AsyncRateLimiter rateLimiter = getRateLimiter(clockedExecutor, clockedExecutor, clockedExecutor);
    rateLimiter.setRate(ONE_PERMIT_PER_PERIOD, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
    List<FutureCallback<None>> callbacks = new ArrayList<>();
    IntStream.range(0, 5).forEach(i -> {
        FutureCallback<None> callback = new FutureCallback<>();
        rateLimiter.submit(callback);
        callbacks.add(callback);
    });
    // run the clock for 0 ms to trigger any tasks due at the current time
    clockedExecutor.runFor(0);
    // We have one permit to begin with, so the first task should run immediately, leaving four pending
    callbacks.get(0).get();
    IntStream.range(0, 1).forEach(i -> assertTrue(callbacks.get(i).isDone()));
    IntStream.range(1, 5).forEach(i -> assertFalse(callbacks.get(i).isDone(), i + " should not have been executed"));
    clockedExecutor.runFor(ONE_MILLISECOND_PERIOD);
    // We set the permit rate to two per period and increment the clock by one millisecond. We expect two
    // more callbacks to be invoked at the next permit issuance
    rateLimiter.setRate(2d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
    clockedExecutor.runFor(0);
    callbacks.get(1).get();
    callbacks.get(2).get();
    IntStream.range(0, 3).forEach(i -> assertTrue(callbacks.get(i).isDone()));
    IntStream.range(3, 5).forEach(i -> assertFalse(callbacks.get(i).isDone(), i + " should not have been executed"));
    // We set the permit rate back to one per period and increment the clock by one millisecond. We expect
    // only one more callback to be invoked at the next permit issuance
    rateLimiter.setRate(1d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
    clockedExecutor.runFor(ONE_MILLISECOND_PERIOD);
    callbacks.get(3).get();
    IntStream.range(0, 4).forEach(i -> assertTrue(callbacks.get(i).isDone()));
    IntStream.range(4, 5).forEach(i -> assertFalse(callbacks.get(i).isDone(), i + " should not have been executed"));
    // We set the permit rate to two per period again and increment the clock by one millisecond. We expect
    // only one more callback to be invoked at the next permit issuance because only one is left
    rateLimiter.setRate(2d, ONE_MILLISECOND_PERIOD, UNLIMITED_BURST);
    clockedExecutor.runFor(ONE_MILLISECOND_PERIOD);
    callbacks.get(4).get();
    IntStream.range(0, 5).forEach(i -> assertTrue(callbacks.get(i).isDone()));
}
Also used: AsyncRateLimiter (com.linkedin.r2.transport.http.client.AsyncRateLimiter), ArrayList (java.util.ArrayList), ClockedExecutor (com.linkedin.test.util.ClockedExecutor), None (com.linkedin.common.util.None), FutureCallback (com.linkedin.common.callback.FutureCallback), Test (org.testng.annotations.Test)
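
Both SmoothRateLimiter tests follow the same arithmetic: with r permits per period and one permit batch available at time zero, at most r * (k + 1) of the queued callbacks should be done after k elapsed periods, and a setRate call simply changes the slope at the next permit issuance. A small sketch of that expectation, independent of the rest.li classes, using the numbers from testSetRate at its initial rate of one permit per period:

// Sketch: at a constant rate of r permits per period, after k elapsed periods
// (counting the permits available at time zero as period 0) at most r * (k + 1)
// of the queued callbacks should have completed. Rate changes, as in testSetRate
// above, take effect at the next permit issuance, so the running total just
// switches slope from that point on.
final class PermitMath {
    static long expectedDone(long queued, long permitsPerPeriod, long elapsedPeriods) {
        return Math.min(queued, permitsPerPeriod * (elapsedPeriods + 1));
    }

    public static void main(String[] args) {
        // 5 callbacks queued at 1 permit per period, matching the setup in testSetRate:
        System.out.println(expectedDone(5, 1, 0));     // 1 -> callbacks.get(0) is done
        System.out.println(expectedDone(5, 1, 1));     // 2 -> one more completes per period
        System.out.println(expectedDone(5, 1, 4));     // 5 -> all drained after four more periods
    }
}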

Aggregations

ClockedExecutor (com.linkedin.test.util.ClockedExecutor) 16
Test (org.testng.annotations.Test) 14
None (com.linkedin.common.util.None) 12
ArrayList (java.util.ArrayList) 9
ConstantQpsRateLimiter (com.linkedin.r2.transport.http.client.ConstantQpsRateLimiter) 7
FutureCallback (com.linkedin.common.callback.FutureCallback) 6
AsyncRateLimiter (com.linkedin.r2.transport.http.client.AsyncRateLimiter) 3
ExecutionException (java.util.concurrent.ExecutionException) 3
LongTracking (com.linkedin.common.stats.LongTracking) 2
DarkClusterDispatcher (com.linkedin.darkcluster.api.DarkClusterDispatcher) 2
DefaultDarkClusterDispatcher (com.linkedin.darkcluster.impl.DefaultDarkClusterDispatcher) 2
RestRequest (com.linkedin.r2.message.rest.RestRequest) 2
RestRequestBuilder (com.linkedin.r2.message.rest.RestRequestBuilder) 2
AsyncPoolImpl (com.linkedin.r2.transport.http.client.AsyncPoolImpl) 2
ExponentialBackOffRateLimiter (com.linkedin.r2.transport.http.client.ExponentialBackOffRateLimiter) 2
ObjectCreationTimeoutException (com.linkedin.r2.transport.http.client.ObjectCreationTimeoutException) 2
PoolStats (com.linkedin.r2.transport.http.client.PoolStats) 2
SmoothRateLimiter (com.linkedin.r2.transport.http.client.SmoothRateLimiter) 2
TimeoutException (java.util.concurrent.TimeoutException) 2
DarkClusterConfig (com.linkedin.d2.DarkClusterConfig) 1