Use of com.google.common.util.concurrent.RateLimiter in project opennms by OpenNMS.
From the class SyslogKafkaElasticsearchBufferingIT, the method testMinionSyslogsOverKafkaToEsRest.
@Test
public void testMinionSyslogsOverKafkaToEsRest() throws Exception {
    Date startOfTest = new Date();
    int numMessages = 1000;
    int packetsPerSecond = 50;
    InetSocketAddress minionSshAddr = testEnvironment.getServiceAddress(ContainerAlias.MINION, 8201);
    InetSocketAddress opennmsSshAddr = testEnvironment.getServiceAddress(ContainerAlias.OPENNMS, 8101);
    InetSocketAddress kafkaAddress = testEnvironment.getServiceAddress(ContainerAlias.KAFKA, 9092);
    InetSocketAddress zookeeperAddress = testEnvironment.getServiceAddress(ContainerAlias.KAFKA, 2181);
    // Install the Kafka syslog and trap handlers on the Minion system
    installFeaturesOnMinion(minionSshAddr, kafkaAddress);
    // Install the Kafka and Elasticsearch features on the OpenNMS system
    installFeaturesOnOpenNMS(opennmsSshAddr, kafkaAddress, zookeeperAddress);
    final String sender = testEnvironment.getContainerInfo(ContainerAlias.SNMPD).networkSettings().ipAddress();
    // Wait for the minion to show up
    await().atMost(90, SECONDS).pollInterval(5, SECONDS)
            .until(DaoUtils.countMatchingCallable(getDaoFactory().getDao(MinionDaoHibernate.class),
                    new CriteriaBuilder(OnmsMinion.class)
                            .gt("lastUpdated", startOfTest)
                            .eq("location", "MINION")
                            .toCriteria()),
                    is(1));
    // Shut down OpenNMS. Syslog messages will accumulate in the Kafka
    // message queue while it is down.
    stopContainer(ContainerAlias.OPENNMS);
    LOG.info("Warming up syslog routes by sending 100 packets");
    // Warm up the routes
    sendMessage(ContainerAlias.MINION, sender, 100);
    for (int i = 0; i < 20; i++) {
        LOG.info("Slept for " + i + " seconds");
        Thread.sleep(1000);
    }
    // Make sure that this evenly divides into numMessages
    final int chunk = 50;
    // Make sure that this is an even multiple of chunk
    final int logEvery = 100;
    int count = 0;
    long start = System.currentTimeMillis();
    // Send ${numMessages} syslog messages
    RateLimiter limiter = RateLimiter.create(packetsPerSecond);
    for (int i = 0; i < (numMessages / chunk); i++) {
        limiter.acquire(chunk);
        sendMessage(ContainerAlias.MINION, sender, chunk);
        count += chunk;
        if (count % logEvery == 0) {
            long mid = System.currentTimeMillis();
            LOG.info(String.format("Sent %d packets in %d milliseconds", logEvery, mid - start));
            start = System.currentTimeMillis();
        }
    }
    // Start OpenNMS. It should begin to consume syslog messages and forward
    // them to Elasticsearch without dropping messages.
    startContainer(ContainerAlias.OPENNMS);
    // 100 warm-up messages plus ${numMessages} messages
    pollForElasticsearchEventsUsingJest(this::getEs5Address, 100 + numMessages);
}
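This test caps throughput with chunked permits: RateLimiter.create(packetsPerSecond) sets the average rate, and acquire(chunk) blocks until the limiter can grant a whole burst of 50 permits, so sends are bursty but the long-run rate holds. Below is a minimal, self-contained sketch of the same pattern; sendChunk() is a hypothetical stand-in for sendMessage(...).

import com.google.common.util.concurrent.RateLimiter;

public class ChunkedRateLimitSketch {

    public static void main(String[] args) {
        final int numMessages = 1000;
        final int packetsPerSecond = 50;
        final int chunk = 50; // must evenly divide numMessages
        RateLimiter limiter = RateLimiter.create(packetsPerSecond);
        for (int i = 0; i < numMessages / chunk; i++) {
            // Blocks until 'chunk' permits are available; the average rate
            // stays at packetsPerSecond even though sends happen in bursts.
            limiter.acquire(chunk);
            sendChunk(chunk);
        }
    }

    private static void sendChunk(int n) {
        System.out.println("sent " + n + " packets");
    }
}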
Use of com.google.common.util.concurrent.RateLimiter in project bookkeeper by apache.
From the class TestNonBlockingReadsMultiReader, the method testMultiReaders.
@Test(timeout = 60000)
public void testMultiReaders() throws Exception {
    String name = "distrlog-multireaders";
    final RateLimiter limiter = RateLimiter.create(1000);
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);
    DistributedLogManager dlmwrite = createNewDLM(confLocal, name);
    final AsyncLogWriter writer = dlmwrite.startAsyncLogSegmentNonPartitioned();
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(0)));
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(1)));
    final AtomicInteger writeCount = new AtomicInteger(2);
    DistributedLogManager dlmread = createNewDLM(conf, name);
    BKSyncLogReader reader0 = (BKSyncLogReader) dlmread.getInputStream(0);
    try {
        ReaderThread[] readerThreads = new ReaderThread[1];
        readerThreads[0] = new ReaderThread("reader0-non-blocking", reader0, false);
        // readerThreads[1] = new ReaderThread("reader1-non-blocking", reader0, false);
        final AtomicBoolean running = new AtomicBoolean(true);
        Thread writerThread = new Thread("WriteThread") {

            @Override
            public void run() {
                try {
                    long txid = 2;
                    DLSN dlsn = DLSN.InvalidDLSN;
                    while (running.get()) {
                        // One permit per record caps the writer at 1000 records/sec
                        limiter.acquire();
                        long curTxId = txid++;
                        dlsn = Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
                        writeCount.incrementAndGet();
                        if (curTxId % 1000 == 0) {
                            LOG.info("writer write {}", curTxId);
                        }
                    }
                    LOG.info("Completed writing record at {}", dlsn);
                    Utils.close(writer);
                } catch (DLInterruptedException die) {
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                    // Ignored: the test asserts on the read/write counts below
                }
            }
        };
        for (ReaderThread rt : readerThreads) {
            rt.start();
        }
        writerThread.start();
        TimeUnit.SECONDS.sleep(5);
        LOG.info("Stopping writer");
        running.set(false);
        writerThread.join();
        LOG.info("Writer stopped after writing {} records, waiting for reader to complete", writeCount.get());
        while (writeCount.get() > readerThreads[0].getReadCount()) {
            LOG.info("Write Count = {}, Read Count = {}",
                    new Object[] { writeCount.get(), readerThreads[0].getReadCount() });
            TimeUnit.MILLISECONDS.sleep(100);
        }
        assertEquals(writeCount.get(), readerThreads[0].getReadCount());
        for (ReaderThread readerThread : readerThreads) {
            readerThread.stopReading();
        }
    } finally {
        dlmwrite.close();
        reader0.close();
        dlmread.close();
    }
}
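Here the limiter paces the producer: the writer thread calls acquire() once per record, so writes never exceed 1000 records/sec while readers drain concurrently. A self-contained sketch of that shape follows; writeRecord() is a hypothetical stand-in for writer.write(...).

import com.google.common.util.concurrent.RateLimiter;
import java.util.concurrent.atomic.AtomicBoolean;

public class ThrottledWriterSketch {

    public static void main(String[] args) throws InterruptedException {
        final RateLimiter limiter = RateLimiter.create(1000); // records per second
        final AtomicBoolean running = new AtomicBoolean(true);
        Thread writerThread = new Thread(() -> {
            long txid = 0;
            while (running.get()) {
                limiter.acquire(); // ~1ms per permit at 1000/sec
                writeRecord(txid++);
            }
        }, "WriteThread");
        writerThread.start();
        Thread.sleep(2000); // let the writer run for a while
        running.set(false);
        writerThread.join();
    }

    private static void writeRecord(long txid) {
        if (txid % 1000 == 0) {
            System.out.println("wrote record " + txid);
        }
    }
}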
Use of com.google.common.util.concurrent.RateLimiter in project bookkeeper by apache.
From the class UpdateLedgerOp, the method updateBookieIdInLedgers.
/**
 * Update the bookie id present in the ledger metadata.
 *
 * @param oldBookieId
 *            current bookie id
 * @param newBookieId
 *            new bookie id
 * @param rate
 *            number of ledgers to update per second (default: 5 per second)
 * @param limit
 *            maximum number of ledgers to update (default: no limit); stop
 *            updating once the limit is reached
 * @param progressable
 *            reports progress of the ledger updates
 * @throws IOException
 *             if there is an error updating the bookie id in the ledger
 *             metadata
 * @throws InterruptedException
 *             if interrupted while updating ledger metadata
 */
public void updateBookieIdInLedgers(final BookieSocketAddress oldBookieId, final BookieSocketAddress newBookieId,
        final int rate, final int limit, final UpdateLedgerNotifier progressable) throws IOException {
    final ExecutorService executor = Executors.newSingleThreadExecutor(
            new DefaultThreadFactory("UpdateLedgerThread", true));
    final AtomicInteger issuedLedgerCnt = new AtomicInteger();
    final AtomicInteger updatedLedgerCnt = new AtomicInteger();
    final Future<?> updateBookieCb = executor.submit(new Runnable() {

        @Override
        public void run() {
            updateLedgers(oldBookieId, newBookieId, rate, limit, progressable);
        }

        private void updateLedgers(final BookieSocketAddress oldBookieId, final BookieSocketAddress newBookieId,
                final int rate, final int limit, final UpdateLedgerNotifier progressable) {
            try {
                final AtomicBoolean stop = new AtomicBoolean(false);
                final Set<Long> outstandings = Collections.newSetFromMap(new ConcurrentHashMap<Long, Boolean>());
                final RateLimiter throttler = RateLimiter.create(rate);
                final Iterator<Long> ledgerItr = admin.listLedgers().iterator();
                final CountDownLatch syncObj = new CountDownLatch(1);
                // Iterate through all the ledgers
                while (ledgerItr.hasNext() && !stop.get()) {
                    // Throttler to control updates per second
                    throttler.acquire();
                    final Long lId = ledgerItr.next();
                    final ReadLedgerMetadataCb readCb = new ReadLedgerMetadataCb(bkc, lId, oldBookieId, newBookieId);
                    outstandings.add(lId);
                    FutureCallback<Void> updateLedgerCb = new UpdateLedgerCb(lId, stop, issuedLedgerCnt,
                            updatedLedgerCnt, outstandings, syncObj, progressable);
                    Futures.addCallback(readCb.getFutureListener(), updateLedgerCb);
                    issuedLedgerCnt.incrementAndGet();
                    if ((limit != Integer.MIN_VALUE && issuedLedgerCnt.get() >= limit) || !ledgerItr.hasNext()) {
                        stop.set(true);
                    }
                    bkc.getLedgerManager().readLedgerMetadata(lId, readCb);
                }
                // Wait until all the issued ledgers are finished
                syncObj.await();
            } catch (IOException ioe) {
                LOG.error("Exception while updating ledger", ioe);
                throw new RuntimeException("Exception while updating ledger", ioe);
            } catch (InterruptedException ie) {
                LOG.error("Exception while updating ledger metadata", ie);
                Thread.currentThread().interrupt();
                throw new RuntimeException("Exception while updating ledger", ie);
            }
        }
    });
    try {
        // Wait for the issued ledgers to finish.
        updateBookieCb.get();
    } catch (ExecutionException ee) {
        throw new IOException("Exception while updating ledger", ee);
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        throw new IOException("Exception while updating ledger", ie);
    } finally {
        executor.shutdown();
    }
}
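The throttler here spaces out administrative work: one permit per ledger keeps metadata reads at `rate` per second regardless of how fast the iterator yields IDs. A minimal sketch of that loop shape follows, with a hypothetical listItems() standing in for admin.listLedgers().

import com.google.common.util.concurrent.RateLimiter;
import java.util.Arrays;
import java.util.List;

public class ThrottledIterationSketch {

    public static void main(String[] args) {
        final int rate = 5; // the documented default: 5 ledgers per second
        RateLimiter throttler = RateLimiter.create(rate);
        for (Long ledgerId : listItems()) {
            throttler.acquire(); // paces each metadata read
            System.out.println("updating ledger " + ledgerId);
        }
    }

    private static List<Long> listItems() {
        return Arrays.asList(1L, 2L, 3L, 4L, 5L);
    }
}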
Use of com.google.common.util.concurrent.RateLimiter in project web3sdk by FISCO-BCOS.
From the class Perfomance, the method main.
public static void main(String[] args) throws Exception {
    if (args.length < 6) {
        System.out.println("Usage: <from> <to> <total requests> <send TPS> <package size> <timeout>");
        return;
    }
    String from = args[0];
    String to = args[1];
    Integer count = Integer.parseInt(args[2]);
    Integer tps = Integer.parseInt(args[3]);
    Integer packageSize = Integer.parseInt(args[4]);
    Integer timeout = Integer.parseInt(args[5]);
    logger.debug("Initializing");
    ApplicationContext context = new ClassPathXmlApplicationContext("classpath:applicationContext.xml");
    Service service = context.getBean(Service.class);
    service.setPushCallback(new PushCallback());
    service.run();
    System.out.println("Test starts in 3s...");
    Thread.sleep(1000);
    System.out.println("Test starts in 2s...");
    Thread.sleep(1000);
    System.out.println("Test starts in 1s...");
    Thread.sleep(1000);
    System.out.println("Starting test");
    System.out.println("===================================================================");
    ChannelRequest request = new ChannelRequest();
    request.setAppName("");
    request.setBankNO("");
    request.setFromOrg(from);
    request.setOrgApp("");
    request.setToOrg(to);
    request.setTimeout(timeout);
    request.setVersion("");
    StringBuilder messageBuilder = new StringBuilder(packageSize);
    for (int i = 0; i < packageSize; ++i) {
        messageBuilder.append('z');
    }
    String message = messageBuilder.toString();
    Map<Integer, RequestTimer> resultMap = new ConcurrentHashMap<Integer, RequestTimer>();
    PerfomanceCollector collector = new PerfomanceCollector();
    collector.total = count;
    collector.resultMap = resultMap;
    collector.startTimestamp = System.currentTimeMillis();
    collector.tps = tps;
    collector.packageSize = packageSize;
    RateLimiter limiter = RateLimiter.create((double) tps);
    for (Integer seq = 0; seq < count; ++seq) {
        limiter.acquire();
        if ((seq + 1) % (count / 10) == 0) {
            System.out.println("Sent: " + String.valueOf((seq + 1) * 100 / count) + "%");
        }
        request.setContent(message);
        request.setMessageID(service.newSeq());
        RequestTimer timer = new RequestTimer();
        timer.sendTimestamp = System.currentTimeMillis();
        resultMap.put(seq, timer);
        PerfomanceCallback callback = new PerfomanceCallback();
        callback.collector = collector;
        service.asyncSendChannelMessage(request, callback);
    }
    System.out.println("Total sent: " + String.valueOf(count) + " requests");
}
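The benchmark throttles submission, not completion: acquire() runs in the sending loop before each asynchronous call, so requests enter the channel at the requested TPS while responses arrive via callbacks. A stripped-down sketch of that loop follows; asyncSend() is a hypothetical stand-in for service.asyncSendChannelMessage(...).

import com.google.common.util.concurrent.RateLimiter;

public class TpsLimitedSendSketch {

    public static void main(String[] args) {
        final int count = 1000;
        final int tps = 100;
        RateLimiter limiter = RateLimiter.create(tps);
        for (int seq = 0; seq < count; ++seq) {
            limiter.acquire(); // one permit per request keeps submissions at ~tps
            if ((seq + 1) % (count / 10) == 0) {
                System.out.println("Sent: " + ((seq + 1) * 100 / count) + "%");
            }
            asyncSend(seq);
        }
        System.out.println("Total sent: " + count + " requests");
    }

    private static void asyncSend(int seq) {
        // fire-and-forget; completions would be reported via a callback
    }
}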
Use of com.google.common.util.concurrent.RateLimiter in project web3sdk by FISCO-BCOS.
From the class PerformanceDTTest, the method userTransferTest.
public void userTransferTest(BigInteger count, BigInteger qps, BigInteger deci, BigInteger queryAccountQPS) {
    System.out.println("Start UserTransfer test...");
    System.out.println("===================================================================");
    String dirName = "./.signed_transactions";
    File dir = new File(dirName);
    if (dir.exists()) {
        File[] fileList = dir.listFiles();
        for (File file : fileList) {
            if (!file.delete()) {
                System.out.printf("Can't clean %s%n", dirName);
                System.exit(0);
            }
        }
    } else {
        if (!dir.mkdir()) {
            System.out.printf("Can't create directory %s%n", dirName);
            System.exit(0);
        }
    }
    try {
        System.out.println(dateFormat.format(new Date()) + " Querying account state...");
        List<DagTransferUser> allUser = dagUserMgr.getUserList();
        ThreadPoolTaskExecutor threadPool = new ThreadPoolTaskExecutor();
        threadPool.setCorePoolSize(200);
        threadPool.setMaxPoolSize(500);
        threadPool.setQueueCapacity(Math.max(count.intValue(), allUser.size()));
        threadPool.initialize();
        final DagTransfer _dagTransfer = dagTransfer;
        AtomicInteger geted = new AtomicInteger(0);
        RateLimiter queryAccountLimiter = RateLimiter.create(queryAccountQPS.intValue());
        for (int i = 0; i < allUser.size(); ++i) {
            final Integer _i = i;
            queryAccountLimiter.acquire();
            threadPool.execute(new Runnable() {

                @Override
                public void run() {
                    try {
                        Tuple2<BigInteger, BigInteger> result =
                                _dagTransfer.userBalance(allUser.get(_i).getUser()).send();
                        if (result.getValue1().compareTo(new BigInteger("0")) == 0) {
                            allUser.get(_i).setAmount(result.getValue2());
                        } else {
                            System.out.println(" Query failed, user is " + allUser.get(_i).getUser());
                            System.exit(0);
                        }
                        int all = geted.incrementAndGet();
                        if (all >= allUser.size()) {
                            System.out.println(dateFormat.format(new Date()) + " Query account finished");
                        }
                    } catch (Exception e) {
                        System.out.println(" Query failed, user is " + allUser.get(_i).getUser());
                        System.exit(0);
                    }
                }
            });
        }
        while (geted.get() < allUser.size()) {
            Thread.sleep(50);
        }
        System.out.println("");
        AtomicLong signed = new AtomicLong(0);
        int segmentSize = 200000;
        int segmentCount = count.intValue() / segmentSize;
        if (count.intValue() % segmentSize != 0) {
            segmentCount++;
        }
        AtomicLong totalWrited = new AtomicLong(0);
        for (int i = 0; i < segmentCount; ++i) {
            int start = i * segmentSize;
            int end = start + segmentSize;
            if (end > count.intValue()) {
                end = count.intValue();
            }
            String fileName = dirName + "/signed_transactions_" + i;
            Lock fileLock = new ReentrantLock();
            BufferedWriter writer = null;
            AtomicLong writed = new AtomicLong(0);
            final int totalWrite = end - start;
            try {
                writer = new BufferedWriter(new FileWriter(fileName));
                for (int j = start; j < end; ++j) {
                    final int index = j;
                    final BufferedWriter finalWriter = writer;
                    threadPool.execute(new Runnable() {

                        @Override
                        public void run() {
                            while (true) {
                                DagTransferUser from = dagUserMgr.getFrom(index);
                                DagTransferUser to = dagUserMgr.getTo(index);
                                if ((deci.intValue() > 0) && (deci.intValue() >= (index % 10 + 1))) {
                                    to = dagUserMgr.getNext(index);
                                }
                                Random random = new Random();
                                int r = random.nextInt(100) + 1;
                                BigInteger amount = BigInteger.valueOf(r);
                                try {
                                    String signedTransaction =
                                            dagTransfer.userTransferSeq(from.getUser(), to.getUser(), amount);
                                    String content = String.format("%s %d %d%n", signedTransaction, index, r);
                                    // Take the lock only around the shared writer; unlocking in a
                                    // nested finally guarantees it is held whenever it is released.
                                    fileLock.lock();
                                    try {
                                        finalWriter.write(content);
                                        long totalSigned = signed.incrementAndGet();
                                        if (totalSigned % (count.longValue() / 10) == 0) {
                                            System.out.println("Signed transaction: "
                                                    + String.valueOf(totalSigned * 100 / count.longValue()) + "%");
                                        }
                                        long writedCount = writed.incrementAndGet();
                                        totalWrited.incrementAndGet();
                                        if (writedCount >= totalWrite) {
                                            finalWriter.close();
                                        }
                                    } finally {
                                        fileLock.unlock();
                                    }
                                    break;
                                } catch (Exception e) {
                                    e.printStackTrace();
                                    continue;
                                }
                            }
                        }
                    });
                }
            } catch (Exception e) {
                if (writer != null) {
                    writer.close();
                }
                e.printStackTrace();
                System.exit(0);
            } finally {
                if ((writed.get() >= totalWrite) && (writer != null)) {
                    writer.close();
                    writer = null;
                }
            }
        }
        while (totalWrited.get() < count.intValue()) {
            Thread.sleep(50);
        }
        System.out.print(dateFormat.format(new Date()) + " Prepare transactions finished");
        System.out.println("");
        long sent = 0;
        File[] fileList = dir.listFiles();
        System.out.println(dateFormat.format(new Date()) + " Sending signed transactions...");
        long startTime = System.currentTimeMillis();
        collector.setStartTimestamp(startTime);
        for (int i = 0; i < fileList.length; ++i) {
            BufferedReader reader = null;
            try {
                reader = new BufferedReader(new FileReader(fileList[i]));
                List<String> signedTransactions = new ArrayList<String>();
                List<PerformanceDTCallback> callbacks = new ArrayList<PerformanceDTCallback>();
                String line = null;
                while ((line = reader.readLine()) != null) {
                    String[] fields = line.split(" ");
                    signedTransactions.add(fields[0]);
                    int index = Integer.parseInt(fields[1]);
                    BigInteger amount = new BigInteger(fields[2]);
                    DagTransferUser from = dagUserMgr.getFrom(index);
                    DagTransferUser to = dagUserMgr.getTo(index);
                    if ((deci.intValue() > 0) && (deci.intValue() >= (index % 10 + 1))) {
                        to = dagUserMgr.getNext(index);
                    }
                    PerformanceDTCallback callback = new PerformanceDTCallback();
                    callback.setCallBackType("transfer");
                    callback.setCollector(collector);
                    callback.setDagUserMgr(getDagUserMgr());
                    callback.setFromUser(from);
                    callback.setToUser(to);
                    callback.setAmount(amount);
                    callbacks.add(callback);
                }
                latch = new CountDownLatch(signedTransactions.size());
                RateLimiter limiter = RateLimiter.create(qps.intValue());
                for (int j = 0; j < signedTransactions.size(); ++j) {
                    limiter.acquire();
                    final int index = j;
                    threadPool.execute(new Runnable() {

                        @Override
                        public void run() {
                            while (true) {
                                try {
                                    callbacks.get(index).recordStartTime();
                                    transactionManager.sendTransaction(signedTransactions.get(index),
                                            callbacks.get(index));
                                    break;
                                } catch (Exception e) {
                                    logger.error("Send transaction error: ", e);
                                    continue;
                                }
                            }
                            latch.countDown();
                        }
                    });
                }
                latch.await();
                long elapsed = System.currentTimeMillis() - startTime;
                sent += signedTransactions.size();
                double sendSpeed = sent / ((double) elapsed / 1000);
                System.out.println("Already sent: " + sent + "/" + count + " transactions" + ", QPS=" + sendSpeed);
            } catch (Exception e) {
                e.printStackTrace();
                System.exit(0);
            } finally {
                if (reader != null) {
                    reader.close();
                }
            }
        }
        while (!collector.isEnd()) {
            Thread.sleep(2000);
            logger.info(" received: {}, total: {}", collector.getReceived().intValue(), collector.getTotal());
        }
        logger.info("End to send");
        System.out.println(dateFormat.format(new Date()) + " Verifying result...");
        veryTransferData(threadPool, queryAccountQPS);
        System.exit(0);
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(0);
    }
}
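A recurring shape in this test is a rate limiter in front of a thread pool: acquire() blocks the single submitting thread, so the pool receives at most qps tasks per second no matter how many workers it has. A minimal sketch of that producer-side pacing follows; process() is a hypothetical stand-in for the transaction send.

import com.google.common.util.concurrent.RateLimiter;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class PacedSubmitSketch {

    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(8);
        RateLimiter qps = RateLimiter.create(200); // assumed QPS budget
        for (int i = 0; i < 1000; i++) {
            qps.acquire(); // throttle the producer, not the workers
            final int task = i;
            pool.execute(() -> process(task));
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
    }

    private static void process(int task) {
        // e.g. send one signed transaction and record the result
    }
}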