
Example 1 with RateLimiter

Use of com.google.common.util.concurrent.RateLimiter in project opennms by OpenNMS.

From the class PingSweepRpcModule, method execute:

@Override
public CompletableFuture<PingSweepResponseDTO> execute(PingSweepRequestDTO request) {
    final Pinger pinger = pingerFactory.getInstance();
    final PingSweepResultTracker tracker = new PingSweepResultTracker();
    String location = request.getLocation();
    int packetSize = request.getPacketSize();
    List<IPPollRange> ranges = new ArrayList<>();
    for (IPRangeDTO dto : request.getIpRanges()) {
        IPPollRange pollRange = new IPPollRange(null, location, dto.getBegin(), dto.getEnd(), dto.getTimeout(), dto.getRetries());
        ranges.add(pollRange);
    }
    // Use a RateLimiter to limit the ping packets per second that we send
    RateLimiter limiter = RateLimiter.create(request.getPacketsPerSecond());
    List<IPPollAddress> addresses = StreamSupport.stream(getAddresses(ranges).spliterator(), false).filter(j -> j.getAddress() != null).collect(Collectors.toList());
    return CompletableFuture.supplyAsync(() -> {
        addresses.stream().forEach(pollAddress -> {
            try {
                tracker.expectCallbackFor(pollAddress.getAddress());
                limiter.acquire();
                pinger.ping(pollAddress.getAddress(), pollAddress.getTimeout(), pollAddress.getRetries(), packetSize, 1, tracker);
            } catch (Exception e) {
                tracker.handleError(pollAddress.getAddress(), null, e);
                tracker.completeExceptionally(e);
            }
        });
        try {
            tracker.getLatch().await();
        } catch (InterruptedException e) {
            throw Throwables.propagate(e);
        }
        tracker.complete();
        return tracker.getResponse();
    }, executor);
}
Also used : ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) PingerFactory(org.opennms.netmgt.icmp.PingerFactory) Autowired(org.springframework.beans.factory.annotation.Autowired) CompletableFuture(java.util.concurrent.CompletableFuture) RateLimiter(com.google.common.util.concurrent.RateLimiter) ArrayList(java.util.ArrayList) InetAddress(java.net.InetAddress) EchoPacket(org.opennms.netmgt.icmp.EchoPacket) PingResponseCallback(org.opennms.netmgt.icmp.PingResponseCallback) IPPollRange(org.opennms.netmgt.model.discovery.IPPollRange) StreamSupport(java.util.stream.StreamSupport) ThreadFactory(java.util.concurrent.ThreadFactory) ExecutorService(java.util.concurrent.ExecutorService) IteratorUtils(org.opennms.core.utils.IteratorUtils) Iterator(java.util.Iterator) Set(java.util.Set) Throwables(com.google.common.base.Throwables) Pinger(org.opennms.netmgt.icmp.Pinger) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) Sets(com.google.common.collect.Sets) IPPollAddress(org.opennms.netmgt.model.discovery.IPPollAddress) TimeUnit(java.util.concurrent.TimeUnit) CountDownLatch(java.util.concurrent.CountDownLatch) AbstractXmlRpcModule(org.opennms.core.rpc.xml.AbstractXmlRpcModule) List(java.util.List) Component(org.springframework.stereotype.Component)
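
The pattern this example relies on is straightforward: RateLimiter.create(permitsPerSecond) builds a limiter that issues permits at the requested rate, and acquire() blocks the calling thread just long enough to honor that rate before each ping is sent. The following is a minimal, self-contained sketch of the same throttling idea using plain Guava; the sendPing helper and the target list are hypothetical stand-ins for the pinger calls above, not OpenNMS code.

import com.google.common.util.concurrent.RateLimiter;

import java.util.Arrays;
import java.util.List;

public class ThrottledPingSketch {

    public static void main(String[] args) {
        List<String> targets = Arrays.asList("192.0.2.1", "192.0.2.2", "192.0.2.3");
        // Limit outgoing pings to 2 per second.
        RateLimiter limiter = RateLimiter.create(2.0);
        for (String target : targets) {
            limiter.acquire(); // blocks until a permit is available
            sendPing(target);  // hypothetical helper standing in for pinger.ping(...)
        }
    }

    private static void sendPing(String target) {
        System.out.println("ping " + target);
    }
}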

Example 2 with RateLimiter

Use of com.google.common.util.concurrent.RateLimiter in project opennms by OpenNMS.

From the class SyslogKafkaElasticsearch2IT, method testMinionSyslogsOverKafkaToEsRest:

@Test
public void testMinionSyslogsOverKafkaToEsRest() throws Exception {
    Date startOfTest = new Date();
    int numMessages = 10000;
    int packetsPerSecond = 500;
    InetSocketAddress minionSshAddr = testEnvironment.getServiceAddress(ContainerAlias.MINION, 8201);
    InetSocketAddress esRestAddr = testEnvironment.getServiceAddress(ContainerAlias.ELASTICSEARCH_2, 9200);
    InetSocketAddress opennmsSshAddr = testEnvironment.getServiceAddress(ContainerAlias.OPENNMS, 8101);
    InetSocketAddress kafkaAddress = testEnvironment.getServiceAddress(ContainerAlias.KAFKA, 9092);
    InetSocketAddress zookeeperAddress = testEnvironment.getServiceAddress(ContainerAlias.KAFKA, 2181);
    // Install the Kafka syslog and trap handlers on the Minion system
    installFeaturesOnMinion(minionSshAddr, kafkaAddress);
    // Install the Kafka and Elasticsearch features on the OpenNMS system
    installFeaturesOnOpenNMS(opennmsSshAddr, kafkaAddress, zookeeperAddress);
    final String sender = testEnvironment.getContainerInfo(ContainerAlias.SNMPD).networkSettings().ipAddress();
    // Wait for the minion to show up
    await().atMost(90, SECONDS).pollInterval(5, SECONDS).until(DaoUtils.countMatchingCallable(getDaoFactory().getDao(MinionDaoHibernate.class), new CriteriaBuilder(OnmsMinion.class).gt("lastUpdated", startOfTest).eq("location", "MINION").toCriteria()), is(1));
    LOG.info("Warming up syslog routes by sending 100 packets");
    // Warm up the routes
    sendMessage(ContainerAlias.MINION, sender, 100);
    for (int i = 0; i < 10; i++) {
        LOG.info("Slept for " + i + " seconds");
        Thread.sleep(1000);
    }
    LOG.info("Resetting statistics");
    resetRouteStatistics(opennmsSshAddr, minionSshAddr);
    for (int i = 0; i < 20; i++) {
        LOG.info("Slept for " + i + " seconds");
        Thread.sleep(1000);
    }
    // Make sure that this evenly divides into the numMessages
    final int chunk = 500;
    // Make sure that this is an even multiple of chunk
    final int logEvery = 1000;
    int count = 0;
    long start = System.currentTimeMillis();
    // Send ${numMessages} syslog messages
    RateLimiter limiter = RateLimiter.create(packetsPerSecond);
    for (int i = 0; i < (numMessages / chunk); i++) {
        limiter.acquire(chunk);
        sendMessage(ContainerAlias.MINION, sender, chunk);
        count += chunk;
        if (count % logEvery == 0) {
            long mid = System.currentTimeMillis();
            LOG.info(String.format("Sent %d packets in %d milliseconds", logEvery, mid - start));
            start = System.currentTimeMillis();
        }
    }
    // 100 warm-up messages plus ${numMessages} messages
    pollForElasticsearchEventsUsingJest(esRestAddr, 100 + numMessages);
}
Also used : CriteriaBuilder(org.opennms.core.criteria.CriteriaBuilder) OnmsMinion(org.opennms.netmgt.model.minion.OnmsMinion) InetSocketAddress(java.net.InetSocketAddress) Date(java.util.Date) RateLimiter(com.google.common.util.concurrent.RateLimiter) Test(org.junit.Test)
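
Note that the send loop above calls limiter.acquire(chunk) rather than acquire(): requesting several permits at once lets the test send syslog messages in bursts of 500 while the limiter still meters out permits at packetsPerSecond on average. Below is a small sketch of that bulk-acquire pattern, with a hypothetical sendChunk helper in place of sendMessage(...):

import com.google.common.util.concurrent.RateLimiter;

public class BurstySenderSketch {

    public static void main(String[] args) {
        int numMessages = 10000;
        int chunk = 500;
        // 500 permits/second on average, acquired in blocks of 500.
        RateLimiter limiter = RateLimiter.create(500);
        for (int i = 0; i < numMessages / chunk; i++) {
            limiter.acquire(chunk); // permits for the whole burst; the wait is spread across iterations
            sendChunk(chunk);       // hypothetical helper standing in for sendMessage(...)
        }
    }

    private static void sendChunk(int size) {
        System.out.println("sent " + size + " messages");
    }
}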

Example 3 with RateLimiter

Use of com.google.common.util.concurrent.RateLimiter in project opennms by OpenNMS.

From the class SyslogKafkaElasticsearch5IT, method testMinionSyslogsOverKafkaToEsRest:

@Test
public void testMinionSyslogsOverKafkaToEsRest() throws Exception {
    Date startOfTest = new Date();
    int numMessages = 10000;
    int packetsPerSecond = 500;
    InetSocketAddress minionSshAddr = testEnvironment.getServiceAddress(ContainerAlias.MINION, 8201);
    InetSocketAddress esRestAddr = testEnvironment.getServiceAddress(ContainerAlias.ELASTICSEARCH_5, 9200);
    InetSocketAddress opennmsSshAddr = testEnvironment.getServiceAddress(ContainerAlias.OPENNMS, 8101);
    InetSocketAddress kafkaAddress = testEnvironment.getServiceAddress(ContainerAlias.KAFKA, 9092);
    InetSocketAddress zookeeperAddress = testEnvironment.getServiceAddress(ContainerAlias.KAFKA, 2181);
    // Install the Kafka syslog and trap handlers on the Minion system
    installFeaturesOnMinion(minionSshAddr, kafkaAddress);
    // Install the Kafka and Elasticsearch features on the OpenNMS system
    installFeaturesOnOpenNMS(opennmsSshAddr, kafkaAddress, zookeeperAddress);
    final String sender = testEnvironment.getContainerInfo(ContainerAlias.SNMPD).networkSettings().ipAddress();
    // Wait for the minion to show up
    await().atMost(90, SECONDS).pollInterval(5, SECONDS).until(DaoUtils.countMatchingCallable(getDaoFactory().getDao(MinionDaoHibernate.class), new CriteriaBuilder(OnmsMinion.class).gt("lastUpdated", startOfTest).eq("location", "MINION").toCriteria()), is(1));
    LOG.info("Warming up syslog routes by sending 100 packets");
    // Warm up the routes
    sendMessage(ContainerAlias.MINION, sender, 100);
    for (int i = 0; i < 10; i++) {
        LOG.info("Slept for " + i + " seconds");
        Thread.sleep(1000);
    }
    LOG.info("Resetting statistics");
    resetRouteStatistics(opennmsSshAddr, minionSshAddr);
    for (int i = 0; i < 20; i++) {
        LOG.info("Slept for " + i + " seconds");
        Thread.sleep(1000);
    }
    // Make sure that this evenly divides into the numMessages
    final int chunk = 500;
    // Make sure that this is an even multiple of chunk
    final int logEvery = 1000;
    int count = 0;
    long start = System.currentTimeMillis();
    // Send ${numMessages} syslog messages
    RateLimiter limiter = RateLimiter.create(packetsPerSecond);
    for (int i = 0; i < (numMessages / chunk); i++) {
        limiter.acquire(chunk);
        sendMessage(ContainerAlias.MINION, sender, chunk);
        count += chunk;
        if (count % logEvery == 0) {
            long mid = System.currentTimeMillis();
            LOG.info(String.format("Sent %d packets in %d milliseconds", logEvery, mid - start));
            start = System.currentTimeMillis();
        }
    }
    // 100 warm-up messages plus ${numMessages} messages
    pollForElasticsearchEventsUsingJest(esRestAddr, 100 + numMessages);
}
Also used : CriteriaBuilder(org.opennms.core.criteria.CriteriaBuilder) OnmsMinion(org.opennms.netmgt.model.minion.OnmsMinion) InetSocketAddress(java.net.InetSocketAddress) Date(java.util.Date) RateLimiter(com.google.common.util.concurrent.RateLimiter) Test(org.junit.Test)
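
This test is the Elasticsearch 5 variant of the previous one and uses the same steady-rate limiter. Worth noting alongside it: Guava also offers RateLimiter.create(permitsPerSecond, warmupPeriod, unit), which ramps up gradually to the stable rate instead of granting it immediately. That overload is not used in these tests; the sketch below only illustrates the API:

import com.google.common.util.concurrent.RateLimiter;

import java.util.concurrent.TimeUnit;

public class WarmupLimiterSketch {

    public static void main(String[] args) {
        // Ramp up to 500 permits/second over a 10-second warm-up window.
        RateLimiter limiter = RateLimiter.create(500, 10, TimeUnit.SECONDS);
        for (int i = 0; i < 5; i++) {
            double waited = limiter.acquire(); // returns the time spent sleeping, in seconds
            System.out.printf("permit %d acquired after %.3f s%n", i, waited);
        }
    }
}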

Example 4 with RateLimiter

Use of com.google.common.util.concurrent.RateLimiter in project opennms by OpenNMS.

From the class SyslogKafkaElasticsearch5OutageIT, method testMinionSyslogsOverKafkaToEsRest:

@Test
public void testMinionSyslogsOverKafkaToEsRest() throws Exception {
    Date startOfTest = new Date();
    int numMessages = 10000;
    int packetsPerSecond = 250;
    InetSocketAddress minionSshAddr = testEnvironment.getServiceAddress(ContainerAlias.MINION, 8201);
    InetSocketAddress opennmsSshAddr = testEnvironment.getServiceAddress(ContainerAlias.OPENNMS, 8101);
    InetSocketAddress kafkaAddress = testEnvironment.getServiceAddress(ContainerAlias.KAFKA, 9092);
    InetSocketAddress zookeeperAddress = testEnvironment.getServiceAddress(ContainerAlias.KAFKA, 2181);
    // Install the Kafka syslog and trap handlers on the Minion system
    installFeaturesOnMinion(minionSshAddr, kafkaAddress);
    // Install the Kafka and Elasticsearch features on the OpenNMS system
    installFeaturesOnOpenNMS(opennmsSshAddr, kafkaAddress, zookeeperAddress);
    final String sender = testEnvironment.getContainerInfo(ContainerAlias.SNMPD).networkSettings().ipAddress();
    // Wait for the minion to show up
    await().atMost(90, SECONDS).pollInterval(5, SECONDS).until(DaoUtils.countMatchingCallable(getDaoFactory().getDao(MinionDaoHibernate.class), new CriteriaBuilder(OnmsMinion.class).gt("lastUpdated", startOfTest).eq("location", "MINION").toCriteria()), is(1));
    LOG.info("Warming up syslog routes by sending 100 packets");
    // Warm up the routes
    sendMessage(ContainerAlias.MINION, sender, 100);
    for (int i = 0; i < 10; i++) {
        LOG.info("Slept for " + i + " seconds");
        Thread.sleep(1000);
    }
    LOG.info("Resetting statistics");
    resetRouteStatistics(opennmsSshAddr, minionSshAddr);
    for (int i = 0; i < 20; i++) {
        LOG.info("Slept for " + i + " seconds");
        Thread.sleep(1000);
    }
    // Make sure that this evenly divides into the numMessages
    final int chunk = 250;
    // Make sure that this is an even multiple of chunk
    final int logEvery = 1000;
    int count = 0;
    long start = System.currentTimeMillis();
    AtomicInteger restartCounter = new AtomicInteger();
    // Start a timer that occasionally restarts Elasticsearch
    Timer restarter = new Timer("Elasticsearch-Restarter", true);
    restarter.scheduleAtFixedRate(new TimerTask() {

        @Override
        public void run() {
            final DockerClient docker = ((AbstractTestEnvironment) testEnvironment).getDockerClient();
            final String id = testEnvironment.getContainerInfo(ContainerAlias.ELASTICSEARCH_5).id();
            try {
                LOG.info("Restarting container: {}", id);
                docker.restartContainer(id);
                restartCounter.incrementAndGet();
                LOG.info("Container restarted: {}", id);
            } catch (DockerException | InterruptedException e) {
                LOG.warn("Unexpected exception while restarting container {}", id, e);
            }
        }
    }, 0L, TimeUnit.SECONDS.toMillis(29));
    // Send ${numMessages} syslog messages
    RateLimiter limiter = RateLimiter.create(packetsPerSecond);
    for (int i = 0; i < (numMessages / chunk); i++) {
        limiter.acquire(chunk);
        sendMessage(ContainerAlias.MINION, sender, chunk);
        count += chunk;
        if (count % logEvery == 0) {
            long mid = System.currentTimeMillis();
            LOG.info(String.format("Sent %d packets in %d milliseconds", logEvery, mid - start));
            start = System.currentTimeMillis();
        }
    }
    // Stop restarting Elasticsearch
    restarter.cancel();
    // 100 warm-up messages plus ${numMessages} messages
    pollForElasticsearchEventsUsingJest(this::getEs5Address, 100 + numMessages);
    assertTrue("Elasticsearch was never restarted", restartCounter.get() > 0);
}
Also used : CriteriaBuilder(org.opennms.core.criteria.CriteriaBuilder) DockerClient(com.spotify.docker.client.DockerClient) OnmsMinion(org.opennms.netmgt.model.minion.OnmsMinion) InetSocketAddress(java.net.InetSocketAddress) Date(java.util.Date) RateLimiter(com.google.common.util.concurrent.RateLimiter) Timer(java.util.Timer) TimerTask(java.util.TimerTask) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Test(org.junit.Test)
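
Because this variant keeps sending while Elasticsearch is periodically restarted, it simply blocks in acquire(chunk) like the other tests. For callers that cannot afford to block, Guava's RateLimiter also exposes tryAcquire with an optional timeout. A hedged sketch of that non-blocking style, unrelated to the OpenNMS code above:

import com.google.common.util.concurrent.RateLimiter;

import java.util.concurrent.TimeUnit;

public class NonBlockingLimiterSketch {

    public static void main(String[] args) throws InterruptedException {
        RateLimiter limiter = RateLimiter.create(2); // 2 permits/second
        for (int i = 0; i < 10; i++) {
            // Wait at most 100 ms for a permit, then skip instead of blocking.
            if (limiter.tryAcquire(100, TimeUnit.MILLISECONDS)) {
                System.out.println("sent message " + i);
            } else {
                System.out.println("rate limit hit, skipped message " + i);
            }
            Thread.sleep(50);
        }
    }
}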

Example 5 with RateLimiter

Use of com.google.common.util.concurrent.RateLimiter in project pulsar by yahoo.

From the class PerformanceProducer, method main:

public static void main(String[] args) throws Exception {
    final Arguments arguments = new Arguments();
    JCommander jc = new JCommander(arguments);
    jc.setProgramName("pulsar-perf-producer");
    try {
        jc.parse(args);
    } catch (ParameterException e) {
        System.out.println(e.getMessage());
        jc.usage();
        System.exit(-1);
    }
    if (arguments.help) {
        jc.usage();
        System.exit(-1);
    }
    if (arguments.destinations.size() != 1) {
        System.out.println("Only one topic name is allowed");
        jc.usage();
        System.exit(-1);
    }
    if (arguments.confFile != null) {
        Properties prop = new Properties(System.getProperties());
        prop.load(new FileInputStream(arguments.confFile));
        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("brokerServiceUrl");
        }
        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("webServiceUrl");
        }
        // fallback to previous-version serviceUrl property to maintain backward-compatibility
        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("serviceUrl", "http://localhost:8080/");
        }
        if (arguments.authPluginClassName == null) {
            arguments.authPluginClassName = prop.getProperty("authPlugin", null);
        }
        if (arguments.authParams == null) {
            arguments.authParams = prop.getProperty("authParams", null);
        }
    }
    arguments.testTime = TimeUnit.SECONDS.toMillis(arguments.testTime);
    // Dump config variables
    ObjectMapper m = new ObjectMapper();
    ObjectWriter w = m.writerWithDefaultPrettyPrinter();
    log.info("Starting Pulsar perf producer with config: {}", w.writeValueAsString(arguments));
    // Read payload data from file if needed
    byte[] payloadData;
    if (arguments.payloadFilename != null) {
        payloadData = Files.readAllBytes(Paths.get(arguments.payloadFilename));
    } else {
        payloadData = new byte[arguments.msgSize];
    }
    // Now processing command line arguments
    String prefixTopicName = arguments.destinations.get(0);
    List<Future<Producer>> futures = Lists.newArrayList();
    EventLoopGroup eventLoopGroup;
    if (SystemUtils.IS_OS_LINUX) {
        eventLoopGroup = new EpollEventLoopGroup(Runtime.getRuntime().availableProcessors(), new DefaultThreadFactory("pulsar-perf-producer"));
    } else {
        eventLoopGroup = new NioEventLoopGroup(Runtime.getRuntime().availableProcessors(), new DefaultThreadFactory("pulsar-perf-producer"));
    }
    ClientConfiguration clientConf = new ClientConfiguration();
    clientConf.setConnectionsPerBroker(arguments.maxConnections);
    clientConf.setStatsInterval(arguments.statsIntervalSeconds, TimeUnit.SECONDS);
    if (isNotBlank(arguments.authPluginClassName)) {
        clientConf.setAuthentication(arguments.authPluginClassName, arguments.authParams);
    }
    PulsarClient client = new PulsarClientImpl(arguments.serviceURL, clientConf, eventLoopGroup);
    ProducerConfiguration producerConf = new ProducerConfiguration();
    producerConf.setSendTimeout(0, TimeUnit.SECONDS);
    producerConf.setCompressionType(arguments.compression);
    // enable round robin message routing if it is a partitioned topic
    producerConf.setMessageRoutingMode(MessageRoutingMode.RoundRobinPartition);
    if (arguments.batchTime > 0) {
        producerConf.setBatchingMaxPublishDelay(arguments.batchTime, TimeUnit.MILLISECONDS);
        producerConf.setBatchingEnabled(true);
        producerConf.setMaxPendingMessages(arguments.msgRate);
    }
    for (int i = 0; i < arguments.numTopics; i++) {
        String topic = (arguments.numTopics == 1) ? prefixTopicName : String.format("%s-%d", prefixTopicName, i);
        log.info("Adding {} publishers on destination {}", arguments.numProducers, topic);
        for (int j = 0; j < arguments.numProducers; j++) {
            futures.add(client.createProducerAsync(topic, producerConf));
        }
    }
    final List<Producer> producers = Lists.newArrayListWithCapacity(futures.size());
    for (Future<Producer> future : futures) {
        producers.add(future.get());
    }
    log.info("Created {} producers", producers.size());
    Runtime.getRuntime().addShutdownHook(new Thread() {

        public void run() {
            printAggregatedStats();
        }
    });
    Collections.shuffle(producers);
    AtomicBoolean isDone = new AtomicBoolean();
    executor.submit(() -> {
        try {
            RateLimiter rateLimiter = RateLimiter.create(arguments.msgRate);
            long startTime = System.currentTimeMillis();
            // Send messages on all topics/producers
            long totalSent = 0;
            while (true) {
                for (Producer producer : producers) {
                    if (arguments.testTime > 0) {
                        if (System.currentTimeMillis() - startTime > arguments.testTime) {
                            log.info("------------------- DONE -----------------------");
                            printAggregatedStats();
                            isDone.set(true);
                            Thread.sleep(5000);
                            System.exit(0);
                        }
                    }
                    if (arguments.numMessages > 0) {
                        if (totalSent++ >= arguments.numMessages) {
                            log.info("------------------- DONE -----------------------");
                            printAggregatedStats();
                            isDone.set(true);
                            Thread.sleep(5000);
                            System.exit(0);
                        }
                    }
                    rateLimiter.acquire();
                    final long sendTime = System.nanoTime();
                    producer.sendAsync(payloadData).thenRun(() -> {
                        messagesSent.increment();
                        bytesSent.add(payloadData.length);
                        long latencyMicros = NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                        recorder.recordValue(latencyMicros);
                        cumulativeRecorder.recordValue(latencyMicros);
                    }).exceptionally(ex -> {
                        log.warn("Write error on message", ex);
                        System.exit(-1);
                        return null;
                    });
                }
            }
        } catch (Throwable t) {
            log.error("Got error", t);
        }
    });
    // Print report stats
    long oldTime = System.nanoTime();
    Histogram reportHistogram = null;
    String statsFileName = "perf-producer-" + System.currentTimeMillis() + ".hgrm";
    log.info("Dumping latency stats to {}", statsFileName);
    PrintStream histogramLog = new PrintStream(new FileOutputStream(statsFileName), false);
    HistogramLogWriter histogramLogWriter = new HistogramLogWriter(histogramLog);
    // Some log header bits
    histogramLogWriter.outputLogFormatVersion();
    histogramLogWriter.outputLegend();
    while (true) {
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            break;
        }
        if (isDone.get()) {
            break;
        }
        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;
        double rate = messagesSent.sumThenReset() / elapsed;
        double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8;
        reportHistogram = recorder.getIntervalHistogram(reportHistogram);
        log.info("Throughput produced: {}  msg/s --- {} Mbit/s --- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}", throughputFormat.format(rate), throughputFormat.format(throughput), dec.format(reportHistogram.getMean() / 1000.0), dec.format(reportHistogram.getValueAtPercentile(50) / 1000.0), dec.format(reportHistogram.getValueAtPercentile(95) / 1000.0), dec.format(reportHistogram.getValueAtPercentile(99) / 1000.0), dec.format(reportHistogram.getValueAtPercentile(99.9) / 1000.0), dec.format(reportHistogram.getValueAtPercentile(99.99) / 1000.0), dec.format(reportHistogram.getMaxValue() / 1000.0));
        histogramLogWriter.outputIntervalHistogram(reportHistogram);
        reportHistogram.reset();
        oldTime = now;
    }
    client.close();
}
Also used : Histogram(org.HdrHistogram.Histogram) ProducerConfiguration(com.yahoo.pulsar.client.api.ProducerConfiguration) Properties(java.util.Properties) DefaultThreadFactory(io.netty.util.concurrent.DefaultThreadFactory) JCommander(com.beust.jcommander.JCommander) ParameterException(com.beust.jcommander.ParameterException) PulsarClient(com.yahoo.pulsar.client.api.PulsarClient) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) NioEventLoopGroup(io.netty.channel.nio.NioEventLoopGroup) HistogramLogWriter(org.HdrHistogram.HistogramLogWriter) PrintStream(java.io.PrintStream) ObjectWriter(com.fasterxml.jackson.databind.ObjectWriter) FileInputStream(java.io.FileInputStream) RateLimiter(com.google.common.util.concurrent.RateLimiter) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) EpollEventLoopGroup(io.netty.channel.epoll.EpollEventLoopGroup) EventLoopGroup(io.netty.channel.EventLoopGroup) Producer(com.yahoo.pulsar.client.api.Producer) FileOutputStream(java.io.FileOutputStream) Future(java.util.concurrent.Future) PulsarClientImpl(com.yahoo.pulsar.client.impl.PulsarClientImpl) ClientConfiguration(com.yahoo.pulsar.client.api.ClientConfiguration)
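
The producer thread above creates its limiter once from arguments.msgRate and calls acquire() before every sendAsync. If the publish rate needs to change while a benchmark is running, Guava's RateLimiter allows that through setRate; the sketch below only demonstrates the API and is not part of the Pulsar tool:

import com.google.common.util.concurrent.RateLimiter;

public class AdjustableRateSketch {

    public static void main(String[] args) {
        RateLimiter limiter = RateLimiter.create(1000); // initial rate: 1000 permits/second
        System.out.println("rate = " + limiter.getRate());

        // Back off to a quarter of the original rate, e.g. when the broker is under load.
        limiter.setRate(250);
        System.out.println("rate = " + limiter.getRate());

        limiter.acquire(); // subsequent acquires honor the new rate
    }
}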

Aggregations

RateLimiter (com.google.common.util.concurrent.RateLimiter) 64
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 20
BigInteger (java.math.BigInteger) 16
ThreadPoolTaskExecutor (org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor) 14
ParameterException (com.beust.jcommander.ParameterException) 12
Test (org.junit.Test) 12
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) 10
ApplicationContext (org.springframework.context.ApplicationContext) 10
ClassPathXmlApplicationContext (org.springframework.context.support.ClassPathXmlApplicationContext) 10
Random (java.util.Random) 9
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService) 9
Service (org.fisco.bcos.channel.client.Service) 9
Web3j (org.fisco.bcos.web3j.protocol.Web3j) 9
ChannelEthereumService (org.fisco.bcos.web3j.protocol.channel.ChannelEthereumService) 9
TransactionReceipt (org.fisco.bcos.web3j.protocol.core.methods.response.TransactionReceipt) 9
Credentials (org.fisco.bcos.web3j.crypto.Credentials) 8
ArrayList (java.util.ArrayList) 7
ExecutorService (java.util.concurrent.ExecutorService) 7
JCommander (com.beust.jcommander.JCommander) 6
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper) 6