Use of com.google.common.util.concurrent.RateLimiter in project incubator-pulsar by apache.
The class SimpleProducerConsumerStatTest, method testBatchMessagesRateOut.
public void testBatchMessagesRateOut() throws PulsarClientException, InterruptedException, PulsarAdminException {
    log.info("-- Starting {} test --", methodName);
    String topicName = "persistent://my-property/cluster/my-ns/testBatchMessagesRateOut";
    double produceRate = 17;
    int batchSize = 5;
    Consumer<byte[]> consumer = pulsarClient.newConsumer()
            .topic(topicName)
            .subscriptionName("my-subscriber-name")
            .subscribe();
    Producer<byte[]> producer = pulsarClient.newProducer()
            .topic(topicName)
            .batchingMaxMessages(batchSize)
            .enableBatching(true)
            .batchingMaxPublishDelay(2, TimeUnit.SECONDS)
            .create();
    AtomicBoolean runTest = new AtomicBoolean(true);
    Thread t1 = new Thread(() -> {
        // Pace the producer at produceRate messages per second.
        RateLimiter r = RateLimiter.create(produceRate);
        while (runTest.get()) {
            r.acquire();
            producer.sendAsync("Hello World".getBytes());
            consumer.receiveAsync().thenAccept(consumer::acknowledgeAsync);
        }
    });
    t1.start();
    // Let the producer run for two seconds before sampling broker rates.
    Thread.sleep(2000);
    runTest.set(false);
    pulsar.getBrokerService().updateRates();
    double actualRate = admin.persistentTopics().getStats(topicName).msgRateOut;
    // With batching enabled, the outbound rate should be at least produceRate / batchSize.
    assertTrue(actualRate > (produceRate / batchSize));
    consumer.unsubscribe();
    log.info("-- Exiting {} test --", methodName);
}
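Guava's RateLimiter.create(double) builds a smoothly pacing limiter: the first acquire() returns immediately, and subsequent calls block just long enough to hold the average rate. A standalone illustration of the pacing primitive the test relies on (the rate value is arbitrary):

    RateLimiter r = RateLimiter.create(17.0); // 17 permits per second
    double waited = r.acquire();              // blocks if needed; returns seconds spent waiting
    boolean got = r.tryAcquire();             // non-blocking variant; false when no permit is free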
Use of com.google.common.util.concurrent.RateLimiter in project gateway-dubbox by zhuzhong.
The class SystemConfigParaUtil, method setPermitsOfRateLimiter.
public static void setPermitsOfRateLimiter(double permitsPerSecond) {
    // Swap in a fresh limiter at the requested rate; container and
    // ratelimit_key are fields of SystemConfigParaUtil, not shown here.
    RateLimiter rateLimiter = RateLimiter.create(permitsPerSecond);
    container.put(ratelimit_key, rateLimiter);
}
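Replacing the limiter wholesale discards any accumulated permit state; Guava's RateLimiter.setRate(double) would retarget the existing limiter in place instead. On the consuming side, callers presumably fetch the shared limiter from the same container before doing throttled work. A minimal caller-side sketch, assuming container is a ConcurrentMap<String, RateLimiter> and handleRequest is a hypothetical piece of rate-limited work:

    RateLimiter limiter = container.get(ratelimit_key);
    if (limiter != null) {
        limiter.acquire(); // block until a permit is available
    }
    handleRequest();       // hypothetical rate-limited work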
Use of com.google.common.util.concurrent.RateLimiter in project RestyPass by darren-fu.
The class TrafficLimitFilter, method getRateLimiter.
/**
 * Get the rate limiter for the given command.
 *
 * @param restyCommand the command whose limiter is looked up
 * @return limiter
 */
private RateLimiter getRateLimiter(RestyCommand restyCommand) {
    String key = restyCommand.getServiceMethod() + "@" + restyCommand.getPath();
    RateLimiter rateLimiter = limiterMap.get(key);
    if (rateLimiter == null) {
        // First use of this key: create the limiter; putIfAbsent ensures all
        // racing threads end up sharing the same instance.
        limiterMap.putIfAbsent(key, RateLimiter.create(restyCommand.getRestyCommandConfig().getLimit()));
        rateLimiter = limiterMap.get(key);
    } else if (rateLimiter.getRate() != restyCommand.getRestyCommandConfig().getLimit()) {
        // The configured limit has changed: update the rate in place.
        rateLimiter.setRate(restyCommand.getRestyCommandConfig().getLimit());
    }
    return rateLimiter;
}
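The null-check / putIfAbsent / re-get sequence is the classic pre-Java-8 idiom for lazily populating a concurrent map: a losing thread may create a limiter that is immediately discarded, but every caller ends up with the same shared instance. On Java 8+, ConcurrentMap.computeIfAbsent expresses this more directly. A sketch of an equivalent method, under the same assumptions about limiterMap and the config API:

    private RateLimiter getRateLimiter(RestyCommand restyCommand) {
        String key = restyCommand.getServiceMethod() + "@" + restyCommand.getPath();
        double limit = restyCommand.getRestyCommandConfig().getLimit();
        // Atomically create the limiter on first use for this key...
        RateLimiter rateLimiter = limiterMap.computeIfAbsent(key, k -> RateLimiter.create(limit));
        // ...and keep its rate in sync with the (possibly reloaded) configuration.
        if (rateLimiter.getRate() != limit) {
            rateLimiter.setRate(limit);
        }
        return rateLimiter;
    }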
Use of com.google.common.util.concurrent.RateLimiter in project cassandra by apache.
The class SimpleClientPerfTest, method perfTest.
@SuppressWarnings({ "UnstableApiUsage", "UseOfSystemOutOrSystemErr", "ResultOfMethodCallIgnored" })
public void perfTest(SizeCaps requestCaps, SizeCaps responseCaps,
                     AssertUtil.ThrowingSupplier<SimpleClient> clientSupplier,
                     ProtocolVersion version) throws Throwable {
    ResultMessage.Rows response = generateRows(0, responseCaps);
    QueryMessage requestMessage = generateQueryMessage(0, requestCaps, version);
    Envelope message = requestMessage.encode(version);
    int requestSize = message.body.readableBytes();
    message.release();
    message = response.encode(version);
    int responseSize = message.body.readableBytes();
    message.release();

    Server server = new Server.Builder().withHost(address).withPort(port).build();
    ClientMetrics.instance.init(Collections.singleton(server));
    server.start();

    Message.Type.QUERY.unsafeSetCodec(new Message.Codec<QueryMessage>() {
        public QueryMessage decode(ByteBuf body, ProtocolVersion version) {
            QueryMessage queryMessage = QueryMessage.codec.decode(body, version);
            return new QueryMessage(queryMessage.query, queryMessage.options) {
                protected Message.Response execute(QueryState state, long queryStartNanoTime, boolean traceRequest) {
                    // unused
                    int idx = Integer.parseInt(queryMessage.query);
                    return generateRows(idx, responseCaps);
                }
            };
        }

        public void encode(QueryMessage queryMessage, ByteBuf dest, ProtocolVersion version) {
            QueryMessage.codec.encode(queryMessage, dest, version);
        }

        public int encodedSize(QueryMessage queryMessage, ProtocolVersion version) {
            return 0;
        }
    });

    int threads = 1;
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    AtomicReference<Throwable> error = new AtomicReference<>();
    CountDownLatch signal = new CountDownLatch(1);
    AtomicBoolean measure = new AtomicBoolean(false);
    DescriptiveStatistics stats = new DescriptiveStatistics();
    Lock lock = new ReentrantLock();
    RateLimiter limiter = RateLimiter.create(2000);
    AtomicLong overloadedExceptions = new AtomicLong(0);

    // TODO: exercise client -> server large messages
    for (int t = 0; t < threads; t++) {
        executor.execute(() -> {
            try (SimpleClient client = clientSupplier.get()) {
                while (!executor.isShutdown() && error.get() == null) {
                    List<Message.Request> messages = new ArrayList<>();
                    for (int j = 0; j < 1; j++)
                        messages.add(requestMessage);

                    if (measure.get()) {
                        try {
                            limiter.acquire();
                            long nanoStart = nanoTime();
                            client.execute(messages);
                            long elapsed = nanoTime() - nanoStart;
                            lock.lock();
                            try {
                                stats.addValue(TimeUnit.NANOSECONDS.toMicros(elapsed));
                            } finally {
                                lock.unlock();
                            }
                        } catch (RuntimeException e) {
                            if (Throwables.anyCauseMatches(e, cause -> cause instanceof OverloadedException)) {
                                overloadedExceptions.incrementAndGet();
                            } else {
                                throw e;
                            }
                        }
                    } else {
                        try {
                            limiter.acquire();
                            // warm-up
                            client.execute(messages);
                        } catch (RuntimeException e) {
                            // Ignore overloads during warmup...
                            if (!Throwables.anyCauseMatches(e, cause -> cause instanceof OverloadedException)) {
                                throw e;
                            }
                        }
                    }
                }
            } catch (Throwable e) {
                e.printStackTrace();
                error.set(e);
                signal.countDown();
            }
        });
    }

    // Warm up for 30 seconds; the latch only counts down on error, so a
    // false return (timeout) means no worker has failed.
    Assert.assertFalse(signal.await(30, TimeUnit.SECONDS));
    measure.set(true);
    // Measure for 60 seconds.
    Assert.assertFalse(signal.await(60, TimeUnit.SECONDS));
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.SECONDS);

    System.out.println("requestSize = " + requestSize);
    System.out.println("responseSize = " + responseSize);
    System.out.println("Latencies (in microseconds)");
    System.out.println("Elements: " + stats.getN());
    System.out.println("Mean: " + stats.getMean());
    System.out.println("Variance: " + stats.getVariance());
    // DescriptiveStatistics.getPercentile expects values on a 0-100 scale.
    System.out.println("Median: " + stats.getPercentile(50));
    System.out.println("90p: " + stats.getPercentile(90));
    System.out.println("95p: " + stats.getPercentile(95));
    System.out.println("99p: " + stats.getPercentile(99));
    System.out.println("Max: " + stats.getMax());
    System.out.println("Failed due to overload: " + overloadedExceptions.get());
    server.stop();
}
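One detail worth noting in the measured branch: limiter.acquire() completes before the nanoTime() start stamp is taken, so time spent waiting for a permit never inflates the recorded latency, and the 2000-permits/sec limiter fixes the offered load independently of how fast the server responds. The shape of the measured path, reduced to its essentials (doCall and record are placeholders):

    limiter.acquire();                      // throttle first; wait time is excluded
    long start = System.nanoTime();
    doCall();                               // the request being measured
    record(System.nanoTime() - start);      // only the round-trip latency is recorded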
Use of com.google.common.util.concurrent.RateLimiter in project cassandra by apache.
The class CompactionTask, method runMayThrow.
/**
* For internal use and testing only. The rest of the system should go through the submit* methods,
* which are properly serialized.
* Caller is in charge of marking/unmarking the sstables as compacting.
*/
protected void runMayThrow() throws Exception {
    // The collection of sstables passed may be empty (but not null); even if
    // it is not empty, it may compact down to nothing if all rows are deleted.
    assert transaction != null;
    if (transaction.originals().isEmpty())
        return;

    // Note that the current compaction strategy is not necessarily the one this task was created under.
    // This should be harmless; see comments to CFS.maybeReloadCompactionStrategy.
    CompactionStrategyManager strategy = cfs.getCompactionStrategyManager();
    if (DatabaseDescriptor.isSnapshotBeforeCompaction()) {
        long epochMilli = currentTimeMillis();
        Instant creationTime = Instant.ofEpochMilli(epochMilli);
        cfs.snapshotWithoutFlush(epochMilli + "-compact-" + cfs.name, creationTime);
    }

    try (CompactionController controller = getCompactionController(transaction.originals())) {
        final Set<SSTableReader> fullyExpiredSSTables = controller.getFullyExpiredSSTables();

        // select SSTables to compact based on available disk space.
        buildCompactionCandidatesForAvailableDiskSpace(fullyExpiredSSTables);

        // sanity check: all sstables must belong to the same cfs
        assert !Iterables.any(transaction.originals(), new Predicate<SSTableReader>() {
            @Override
            public boolean apply(SSTableReader sstable) {
                return !sstable.descriptor.cfname.equals(cfs.name);
            }
        });

        UUID taskId = transaction.opId();

        // new sstables from flush can be added during a compaction, but only the compaction can remove them,
        // so in our single-threaded compaction world this is a valid way of determining if we're compacting
        // all the sstables (that existed when we started)
        StringBuilder ssTableLoggerMsg = new StringBuilder("[");
        for (SSTableReader sstr : transaction.originals()) {
            ssTableLoggerMsg.append(String.format("%s:level=%d, ", sstr.getFilename(), sstr.getSSTableLevel()));
        }
        ssTableLoggerMsg.append("]");
        logger.info("Compacting ({}) {}", taskId, ssTableLoggerMsg);

        RateLimiter limiter = CompactionManager.instance.getRateLimiter();
        long start = nanoTime();
        long startTime = currentTimeMillis();
        long totalKeysWritten = 0;
        long estimatedKeys = 0;
        long inputSizeBytes;
        long timeSpentWritingKeys;

        Set<SSTableReader> actuallyCompact = Sets.difference(transaction.originals(), fullyExpiredSSTables);
        Collection<SSTableReader> newSStables;
        long[] mergedRowCounts;
        long totalSourceCQLRows;

        int nowInSec = FBUtilities.nowInSeconds();
        try (Refs<SSTableReader> refs = Refs.ref(actuallyCompact);
             AbstractCompactionStrategy.ScannerList scanners = strategy.getScanners(actuallyCompact);
             CompactionIterator ci = new CompactionIterator(compactionType, scanners.scanners, controller, nowInSec, taskId)) {
            long lastCheckObsoletion = start;
            inputSizeBytes = scanners.getTotalCompressedSize();
            double compressionRatio = scanners.getCompressionRatio();
            if (compressionRatio == MetadataCollector.NO_COMPRESSION_RATIO)
                compressionRatio = 1.0;
            long lastBytesScanned = 0;

            activeCompactions.beginCompaction(ci);
            try (CompactionAwareWriter writer = getCompactionAwareWriter(cfs, getDirectories(), transaction, actuallyCompact)) {
                // Note that we need to re-check this flag after calling beginCompaction above to avoid a window
                // where the compaction does not exist in activeCompactions but the CSM gets paused.
                // We already have the sstables marked compacting here, so CompactionManager#waitForCessation will
                // block until the below exception is thrown and the transaction is cancelled.
                if (!controller.cfs.getCompactionStrategyManager().isActive())
                    throw new CompactionInterruptedException(ci.getCompactionInfo());
                estimatedKeys = writer.estimatedKeys();
                while (ci.hasNext()) {
                    if (writer.append(ci.next()))
                        totalKeysWritten++;

                    long bytesScanned = scanners.getTotalBytesScanned();

                    // Rate limit the scanners, and account for compression
                    CompactionManager.compactionRateLimiterAcquire(limiter, bytesScanned, lastBytesScanned, compressionRatio);
                    lastBytesScanned = bytesScanned;

                    if (nanoTime() - lastCheckObsoletion > TimeUnit.MINUTES.toNanos(1L)) {
                        controller.maybeRefreshOverlaps();
                        lastCheckObsoletion = nanoTime();
                    }
                }
                timeSpentWritingKeys = TimeUnit.NANOSECONDS.toMillis(nanoTime() - start);

                // point of no return
                newSStables = writer.finish();
            } finally {
                activeCompactions.finishCompaction(ci);
                mergedRowCounts = ci.getMergedRowCounts();
                totalSourceCQLRows = ci.getTotalSourceCQLRows();
            }
        }

        if (transaction.isOffline())
            return;

        // log a bunch of statistics about the result and save to system table compaction_history
        long durationInNano = nanoTime() - start;
        long dTime = TimeUnit.NANOSECONDS.toMillis(durationInNano);
        long startsize = inputSizeBytes;
        long endsize = SSTableReader.getTotalBytes(newSStables);
        double ratio = (double) endsize / (double) startsize;

        StringBuilder newSSTableNames = new StringBuilder();
        for (SSTableReader reader : newSStables)
            newSSTableNames.append(reader.descriptor.baseFilename()).append(",");
        long totalSourceRows = 0;
        for (int i = 0; i < mergedRowCounts.length; i++)
            totalSourceRows += mergedRowCounts[i] * (i + 1);

        String mergeSummary = updateCompactionHistory(cfs.keyspace.getName(), cfs.getTableName(), mergedRowCounts, startsize, endsize);
        logger.info(String.format("Compacted (%s) %d sstables to [%s] to level=%d. %s to %s (~%d%% of original) in %,dms. " +
                                  "Read Throughput = %s, Write Throughput = %s, Row Throughput = ~%,d/s. " +
                                  "%,d total partitions merged to %,d. Partition merge counts were {%s}. " +
                                  "Time spent writing keys = %,dms",
                                  taskId, transaction.originals().size(), newSSTableNames.toString(), getLevel(),
                                  FBUtilities.prettyPrintMemory(startsize), FBUtilities.prettyPrintMemory(endsize),
                                  (int) (ratio * 100), dTime,
                                  FBUtilities.prettyPrintMemoryPerSecond(startsize, durationInNano),
                                  FBUtilities.prettyPrintMemoryPerSecond(endsize, durationInNano),
                                  (int) totalSourceCQLRows / (TimeUnit.NANOSECONDS.toSeconds(durationInNano) + 1),
                                  totalSourceRows, totalKeysWritten, mergeSummary, timeSpentWritingKeys));
        if (logger.isTraceEnabled()) {
            logger.trace("CF Total Bytes Compacted: {}", FBUtilities.prettyPrintMemory(CompactionTask.addToTotalBytesCompacted(endsize)));
            logger.trace("Actual #keys: {}, Estimated #keys:{}, Err%: {}",
                         totalKeysWritten, estimatedKeys, ((double) (totalKeysWritten - estimatedKeys) / totalKeysWritten));
        }
        cfs.getCompactionStrategyManager().compactionLogger.compaction(startTime, transaction.originals(), currentTimeMillis(), newSStables);

        // update the metrics
        cfs.metric.compactionBytesWritten.inc(endsize);
    }
}
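The rate-limiting call inside the scan loop is the one RateLimiter-specific piece not shown here. The scanners report progress in uncompressed bytes, so the delta since the previous call is scaled by the compression ratio to approximate the bytes actually read from disk before permits are taken. A minimal sketch of what such a helper has to do under those assumptions; an illustration of the technique, not the verbatim Cassandra implementation:

    // Sketch: convert scan progress into on-disk bytes and take that many permits.
    static void compactionRateLimiterAcquire(RateLimiter limiter, long bytesScanned,
                                             long lastBytesScanned, double compressionRatio) {
        // compressionRatio ~= compressed size / uncompressed size, so this
        // approximates the compressed bytes read since the previous call.
        long lengthRead = (long) ((bytesScanned - lastBytesScanned) * compressionRatio) + 1;
        // RateLimiter.acquire takes an int permit count, so huge deltas are split up.
        while (lengthRead >= Integer.MAX_VALUE) {
            limiter.acquire(Integer.MAX_VALUE);
            lengthRead -= Integer.MAX_VALUE;
        }
        if (lengthRead > 0)
            limiter.acquire((int) lengthRead);
    }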