Use of com.google.common.util.concurrent.RateLimiter in project bookkeeper by apache.
From the class Etcd64bitIdGeneratorTest, method testGenerateIdParallel:
/**
 * Test generating id in parallel and ensure there is no duplicated id.
 */
@Test
public void testGenerateIdParallel() throws Exception {
    final int numThreads = 10;
    @Cleanup("shutdown")
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);
    final int numIds = 10000;
    final AtomicLong totalIds = new AtomicLong(numIds);
    final Set<Long> ids = Collections.newSetFromMap(new ConcurrentHashMap<>());
    final RateLimiter limiter = RateLimiter.create(1000);
    final CompletableFuture<Void> doneFuture = new CompletableFuture<>();
    for (int i = 0; i < numThreads; i++) {
        executor.submit(() -> {
            Client client = Client.builder().endpoints(etcdContainer.getClientEndpoint()).build();
            Etcd64bitIdGenerator gen = new Etcd64bitIdGenerator(client.getKVClient(), scope);
            AtomicBoolean running = new AtomicBoolean(true);
            while (running.get()) {
                limiter.acquire();
                GenericCallbackFuture<Long> genFuture = new GenericCallbackFuture<>();
                gen.generateLedgerId(genFuture);
                genFuture.thenAccept(lid -> {
                    boolean duplicatedFound = !(ids.add(lid));
                    if (duplicatedFound) {
                        running.set(false);
                        doneFuture.completeExceptionally(
                            new IllegalStateException("Duplicated id " + lid + " generated : " + ids));
                        return;
                    } else {
                        if (totalIds.decrementAndGet() <= 0) {
                            running.set(false);
                            doneFuture.complete(null);
                        }
                    }
                }).exceptionally(cause -> {
                    running.set(false);
                    doneFuture.completeExceptionally(cause);
                    return null;
                });
            }
        });
    }
    FutureUtils.result(doneFuture);
    assertTrue(totalIds.get() <= 0);
    assertTrue(ids.size() >= numIds);
}
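The core of this test is a single RateLimiter shared by every worker thread, so the aggregate request rate across all threads, not the per-thread rate, is capped at 1000 permits per second. Below is a minimal, self-contained sketch of that pattern using only Guava and java.util.concurrent; it is not BookKeeper code, and the class name SharedLimiterDemo plus the counter standing in for a real id generator are illustrative assumptions only:

import com.google.common.util.concurrent.RateLimiter;

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class SharedLimiterDemo {
    public static void main(String[] args) throws InterruptedException {
        // One limiter shared by all workers: the aggregate rate across threads
        // is capped at 1000 permits per second.
        final RateLimiter limiter = RateLimiter.create(1000);
        final Set<Long> ids = Collections.newSetFromMap(new ConcurrentHashMap<>());
        final AtomicLong remaining = new AtomicLong(10_000);
        final AtomicLong nextId = new AtomicLong(); // stand-in for a real id generator

        ExecutorService executor = Executors.newFixedThreadPool(10);
        for (int i = 0; i < 10; i++) {
            executor.submit(() -> {
                // Each worker claims work items until the shared budget is exhausted.
                while (remaining.getAndDecrement() > 0) {
                    limiter.acquire();                 // blocks until a permit is available
                    ids.add(nextId.incrementAndGet()); // "generate" an id
                }
            });
        }
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
        System.out.println("generated " + ids.size() + " unique ids");
    }
}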
Use of com.google.common.util.concurrent.RateLimiter in project bookkeeper by apache.
From the class TestNonBlockingReadsMultiReader, method testMultiReaders:
@Test(timeout = 60000)
public void testMultiReaders() throws Exception {
    String name = "distrlog-multireaders";
    final RateLimiter limiter = RateLimiter.create(1000);
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);
    DistributedLogManager dlmwrite = createNewDLM(confLocal, name);
    final AsyncLogWriter writer = dlmwrite.startAsyncLogSegmentNonPartitioned();
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(0)));
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(1)));
    final AtomicInteger writeCount = new AtomicInteger(2);
    DistributedLogManager dlmread = createNewDLM(conf, name);
    BKSyncLogReader reader0 = (BKSyncLogReader) dlmread.getInputStream(0);
    try {
        ReaderThread[] readerThreads = new ReaderThread[1];
        readerThreads[0] = new ReaderThread("reader0-non-blocking", reader0, false);
        // readerThreads[1] = new ReaderThread("reader1-non-blocking", reader0, false);
        final AtomicBoolean running = new AtomicBoolean(true);
        Thread writerThread = new Thread("WriteThread") {
            @Override
            public void run() {
                try {
                    long txid = 2;
                    DLSN dlsn = DLSN.InvalidDLSN;
                    while (running.get()) {
                        limiter.acquire();
                        long curTxId = txid++;
                        dlsn = Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
                        writeCount.incrementAndGet();
                        if (curTxId % 1000 == 0) {
                            LOG.info("writer write {}", curTxId);
                        }
                    }
                    LOG.info("Completed writing record at {}", dlsn);
                    Utils.close(writer);
                } catch (DLInterruptedException die) {
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                }
            }
        };
        for (ReaderThread rt : readerThreads) {
            rt.start();
        }
        writerThread.start();
        TimeUnit.SECONDS.sleep(5);
        LOG.info("Stopping writer");
        running.set(false);
        writerThread.join();
        LOG.info("Writer stopped after writing {} records, waiting for reader to complete", writeCount.get());
        while (writeCount.get() > (readerThreads[0].getReadCount())) {
            LOG.info("Write Count = {}, Read Count = {}",
                new Object[] { writeCount.get(), readerThreads[0].getReadCount() });
            TimeUnit.MILLISECONDS.sleep(100);
        }
        assertEquals(writeCount.get(), (readerThreads[0].getReadCount()));
        for (ReaderThread readerThread : readerThreads) {
            readerThread.stopReading();
        }
    } finally {
        dlmwrite.close();
        reader0.close();
        dlmread.close();
    }
}
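Here the limiter paces a single background writer thread, which the main thread later stops cooperatively through an AtomicBoolean flag after a fixed interval. The following stripped-down sketch shows just that writer loop, assuming only Guava on the classpath; the class name PacedWriterDemo and the counter standing in for writer.write(record) are illustrative, not DistributedLog code:

import com.google.common.util.concurrent.RateLimiter;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

public class PacedWriterDemo {
    public static void main(String[] args) throws InterruptedException {
        final RateLimiter limiter = RateLimiter.create(1000); // ~1000 writes per second
        final AtomicBoolean running = new AtomicBoolean(true);
        final AtomicInteger writeCount = new AtomicInteger();

        Thread writerThread = new Thread(() -> {
            while (running.get()) {
                limiter.acquire();            // pace every write
                writeCount.incrementAndGet(); // stand-in for writer.write(record)
            }
        }, "WriteThread");
        writerThread.start();

        TimeUnit.SECONDS.sleep(5); // let the writer run for a while
        running.set(false);        // ask it to stop ...
        writerThread.join();       // ... and wait for it to finish
        System.out.println("wrote " + writeCount.get() + " records, expected roughly 5000");
    }
}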
Use of com.google.common.util.concurrent.RateLimiter in project bookkeeper by apache.
From the class JournalWriter, method write:
void write(int threadIdx, Journal[] journals, int numLedgersForThisThread, double writeRate,
           int maxOutstandingBytesForThisThread, long numRecordsForThisThread,
           long numBytesForThisThread) throws Exception {
    log.info("Write thread {} started with : rate = {},"
            + " num records = {}, num bytes = {}, max outstanding bytes = {}",
        threadIdx, writeRate, numRecordsForThisThread, numBytesForThisThread, maxOutstandingBytesForThisThread);
    RateLimiter limiter;
    if (writeRate > 0) {
        limiter = RateLimiter.create(writeRate);
    } else {
        limiter = null;
    }
    final Semaphore semaphore;
    if (maxOutstandingBytesForThisThread > 0) {
        semaphore = new Semaphore(maxOutstandingBytesForThisThread);
    } else {
        semaphore = null;
    }
    // Acquire 1 second worth of records to have a slower ramp-up
    if (limiter != null) {
        limiter.acquire((int) writeRate);
    }
    long totalWritten = 0L;
    long totalBytesWritten = 0L;
    final int numJournals = journals.length;
    byte[] payload = new byte[flags.recordSize];
    ThreadLocalRandom.current().nextBytes(payload);
    ByteBuf payloadBuf = Unpooled.wrappedBuffer(payload);
    long[] entryIds = new long[numLedgersForThisThread];
    Arrays.fill(entryIds, 0L);
    while (true) {
        for (int i = 0; i < numJournals; i++) {
            int ledgerIdx = ThreadLocalRandom.current().nextInt(numLedgersForThisThread);
            long lid = threadIdx * numLedgersForThisThread + ledgerIdx;
            long eid = entryIds[ledgerIdx]++;
            ByteBuf buf = payloadBuf.retainedDuplicate();
            int len = buf.readableBytes();
            if (numRecordsForThisThread > 0 && totalWritten >= numRecordsForThisThread) {
                markPerfDone();
            }
            if (numBytesForThisThread > 0 && totalBytesWritten >= numBytesForThisThread) {
                markPerfDone();
            }
            if (null != semaphore) {
                semaphore.acquire(len);
            }
            totalWritten++;
            totalBytesWritten += len;
            if (null != limiter) {
                limiter.acquire(len);
            }
            final long sendTime = System.nanoTime();
            journals[i].logAddEntry(lid, eid, buf, false, (rc, ledgerId, entryId, addr, ctx) -> {
                buf.release();
                if (0 == rc) {
                    if (null != semaphore) {
                        semaphore.release(len);
                    }
                    recordsWritten.increment();
                    bytesWritten.add(len);
                    long latencyMicros = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                    recorder.recordValue(latencyMicros);
                    cumulativeRecorder.recordValue(latencyMicros);
                } else {
                    log.warn("Error at writing records : ", BookieException.create(rc));
                    Runtime.getRuntime().exit(-1);
                }
            }, null);
        }
    }
}
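Unlike the previous examples, this writer acquires len permits per entry, so the configured rate is interpreted as bytes per second rather than records per second; it also burns one second's worth of permits up front for a gentler ramp-up, and a Semaphore separately bounds the bytes still in flight. The compact sketch below isolates those three ideas, assuming only Guava on the classpath; the class name ByteRateDemo, the 1 MB/s rate, and the 4 MB outstanding cap are illustrative, and the semaphore is released inline because the sketch performs no real asynchronous I/O:

import com.google.common.util.concurrent.RateLimiter;

import java.util.concurrent.Semaphore;
import java.util.concurrent.ThreadLocalRandom;

public class ByteRateDemo {
    public static void main(String[] args) throws InterruptedException {
        double byteRate = 1_000_000;                            // target throughput: ~1 MB/s
        RateLimiter limiter = RateLimiter.create(byteRate);
        Semaphore outstandingBytes = new Semaphore(4_000_000);  // cap on in-flight bytes

        // Burn one second's worth of permits up front so the first burst ramps up slowly,
        // mirroring the "slower ramp-up" acquire in the writer above.
        limiter.acquire((int) byteRate);

        byte[] payload = new byte[1024];
        ThreadLocalRandom.current().nextBytes(payload);

        for (int i = 0; i < 10_000; i++) {
            int len = payload.length;
            outstandingBytes.acquire(len); // blocks while too many bytes are unacknowledged
            limiter.acquire(len);          // one permit per byte => bytes-per-second limiting
            // A real writer would hand the payload to asynchronous I/O here and call
            // outstandingBytes.release(len) from the completion callback.
            outstandingBytes.release(len); // released inline because this sketch does no I/O
        }
        System.out.println("paced 10000 writes at roughly " + (int) byteRate + " bytes/s");
    }
}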
Use of com.google.common.util.concurrent.RateLimiter in project promregator by promregator.
From the class ReactiveCFPaginatedRequestFetcherTest, method testInfiniteRateLimitPossible:
@Test
void testInfiniteRateLimitPossible() {
    RateLimiter rl = RateLimiter.create(Double.POSITIVE_INFINITY);
    boolean acquired = rl.tryAcquire(10000, Duration.ofMillis(100));
    Assertions.assertTrue(acquired);
}
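The test relies on tryAcquire's timeout semantics: a request succeeds only if the limiter's outstanding debt can clear within the timeout, and with an infinite rate there is never any debt. The small sketch below contrasts the two cases using the (permits, timeout, TimeUnit) overload; the class name TryAcquireDemo is illustrative, and the expected values in the comments follow from RateLimiter charging a large acquisition to the next caller:

import com.google.common.util.concurrent.RateLimiter;

import java.util.concurrent.TimeUnit;

public class TryAcquireDemo {
    public static void main(String[] args) {
        // A large acquisition returns immediately but leaves its cost as a debt
        // that the next caller has to wait out.
        RateLimiter finite = RateLimiter.create(100);
        finite.acquire(100); // roughly 1 second of debt at 100 permits/s
        // The pending debt cannot clear within 100 ms, so tryAcquire gives up.
        System.out.println(finite.tryAcquire(1, 100, TimeUnit.MILLISECONDS)); // expected: false

        // With an infinite rate there is never any debt, so even a huge request
        // is granted immediately, which is what the test above asserts.
        RateLimiter infinite = RateLimiter.create(Double.POSITIVE_INFINITY);
        System.out.println(infinite.tryAcquire(10_000, 100, TimeUnit.MILLISECONDS)); // expected: true
    }
}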