Use of org.apache.pulsar.client.api.Producer in project incubator-pulsar by Apache.
The class MultiConsumersOneOutputTopicProducersTest, method createMockProducer.
private Producer createMockProducer(String topic) {
    Producer producer = mock(Producer.class);
    // When the mock is closed asynchronously, remove it from the shared map of mock producers
    when(producer.closeAsync()).thenAnswer(invocationOnMock -> {
        synchronized (mockProducers) {
            mockProducers.remove(topic);
        }
        return FutureUtils.Void();
    });
    return producer;
}
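For context, a minimal usage sketch of the mock above, assuming the surrounding test class keeps its mocks in a Map<String, Producer> field named mockProducers (how that map is actually populated is an assumption here; it is not shown in the snippet):

// Hypothetical sketch, not part of the original test class; imports omitted as in the excerpts above.
@Test
public void testMockProducerRemovesItselfOnClose() throws Exception {
    String topic = "persistent://sample/standalone/ns1/output";
    Producer producer = createMockProducer(topic);
    synchronized (mockProducers) {
        mockProducers.put(topic, producer);
    }
    // The thenAnswer(...) stub registered in createMockProducer removes the entry on close
    producer.closeAsync().get();
    assertFalse(mockProducers.containsKey(topic));
}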
Use of org.apache.pulsar.client.api.Producer in project incubator-pulsar by Apache.
The class StormExample, method main.
public static void main(String[] args) throws PulsarClientException {
    ClientConfiguration clientConf = new ClientConfiguration();
    // String authPluginClassName = "org.apache.pulsar.client.impl.auth.MyAuthentication";
    // String authParams = "key1:val1,key2:val2";
    // clientConf.setAuthentication(authPluginClassName, authParams);
    String topic1 = "persistent://my-property/use/my-ns/my-topic1";
    String topic2 = "persistent://my-property/use/my-ns/my-topic2";
    String subscriptionName1 = "my-subscriber-name1";
    String subscriptionName2 = "my-subscriber-name2";
    // create spout
    PulsarSpoutConfiguration spoutConf = new PulsarSpoutConfiguration();
    spoutConf.setServiceUrl(serviceUrl);
    spoutConf.setTopic(topic1);
    spoutConf.setSubscriptionName(subscriptionName1);
    spoutConf.setMessageToValuesMapper(messageToValuesMapper);
    PulsarSpout spout = new PulsarSpout(spoutConf, clientConf);
    // create bolt
    PulsarBoltConfiguration boltConf = new PulsarBoltConfiguration();
    boltConf.setServiceUrl(serviceUrl);
    boltConf.setTopic(topic2);
    boltConf.setTupleToMessageMapper(tupleToMessageMapper);
    PulsarBolt bolt = new PulsarBolt(boltConf, clientConf);
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("testSpout", spout);
    builder.setBolt("testBolt", bolt).shuffleGrouping("testSpout");
    Config conf = new Config();
    conf.setNumWorkers(2);
    conf.setDebug(true);
    conf.registerMetricsConsumer(PulsarMetricsConsumer.class);
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("test", conf, builder.createTopology());
    Utils.sleep(10000);
    PulsarClient pulsarClient = PulsarClient.create(serviceUrl, clientConf);
    // create a consumer on topic2 to receive messages from the bolt when the processing is done
    Consumer consumer = pulsarClient.subscribe(topic2, subscriptionName2);
    // create a producer on topic1 to send messages that will be received by the spout
    Producer producer = pulsarClient.createProducer(topic1);
    for (int i = 0; i < 10; i++) {
        String msg = "msg-" + i;
        producer.send(msg.getBytes());
        LOG.info("Message {} sent", msg);
    }
    Message msg = null;
    for (int i = 0; i < 10; i++) {
        msg = consumer.receive(1, TimeUnit.SECONDS);
        LOG.info("Message {} received", new String(msg.getData()));
    }
    cluster.killTopology("test");
    cluster.shutdown();
}
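The main method above references three static members of StormExample that are not shown: serviceUrl, messageToValuesMapper and tupleToMessageMapper. Below is a minimal sketch of what they might look like, assuming the pre-2.0 pulsar-storm mapper interfaces; the broker URL, field names and the trivial message handling are illustrative assumptions only.

static String serviceUrl = "pulsar://localhost:6650/";

// Turns each Pulsar message received by the spout into a Storm tuple
static MessageToValuesMapper messageToValuesMapper = new MessageToValuesMapper() {

    @Override
    public Values toValues(Message msg) {
        return new Values(new String(msg.getData()));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // declare the name of the single output field emitted by the spout
        declarer.declare(new Fields("string"));
    }
};

// Turns each Storm tuple handled by the bolt back into a Pulsar message
static TupleToMessageMapper tupleToMessageMapper = new TupleToMessageMapper() {

    @Override
    public Message toMessage(Tuple tuple) {
        String receivedMessage = tuple.getString(0);
        // trivial "processing" step for the sake of the example
        String processedMsg = receivedMessage + "-processed";
        return MessageBuilder.create().setContent(processedMsg.getBytes()).build();
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // declare the name of the single output field emitted by the bolt
        declarer.declare(new Fields("string"));
    }
};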
Use of org.apache.pulsar.client.api.Producer in project incubator-pulsar by Apache.
The class PerformanceProducer, method main.
public static void main(String[] args) throws Exception {
    final Arguments arguments = new Arguments();
    JCommander jc = new JCommander(arguments);
    jc.setProgramName("pulsar-perf-producer");
    try {
        jc.parse(args);
    } catch (ParameterException e) {
        System.out.println(e.getMessage());
        jc.usage();
        System.exit(-1);
    }
    if (arguments.help) {
        jc.usage();
        System.exit(-1);
    }
    if (arguments.topics.size() != 1) {
        System.out.println("Only one topic name is allowed");
        jc.usage();
        System.exit(-1);
    }
    if (arguments.confFile != null) {
        Properties prop = new Properties(System.getProperties());
        prop.load(new FileInputStream(arguments.confFile));
        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("brokerServiceUrl");
        }
        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("webServiceUrl");
        }
        // fallback to previous-version serviceUrl property to maintain backward-compatibility
        if (arguments.serviceURL == null) {
            arguments.serviceURL = prop.getProperty("serviceUrl", "http://localhost:8080/");
        }
        if (arguments.authPluginClassName == null) {
            arguments.authPluginClassName = prop.getProperty("authPlugin", null);
        }
        if (arguments.authParams == null) {
            arguments.authParams = prop.getProperty("authParams", null);
        }
        if (!arguments.useTls) {
            arguments.useTls = Boolean.parseBoolean(prop.getProperty("useTls"));
        }
        if (isBlank(arguments.tlsTrustCertsFilePath)) {
            arguments.tlsTrustCertsFilePath = prop.getProperty("tlsTrustCertsFilePath", "");
        }
    }
    arguments.testTime = TimeUnit.SECONDS.toMillis(arguments.testTime);
    // Dump config variables
    ObjectMapper m = new ObjectMapper();
    ObjectWriter w = m.writerWithDefaultPrettyPrinter();
    log.info("Starting Pulsar perf producer with config: {}", w.writeValueAsString(arguments));
    // Read payload data from file if needed
    byte[] payloadData;
    if (arguments.payloadFilename != null) {
        payloadData = Files.readAllBytes(Paths.get(arguments.payloadFilename));
    } else {
        payloadData = new byte[arguments.msgSize];
    }
    // Now processing command line arguments
    String prefixTopicName = arguments.topics.get(0);
    List<Future<Producer<byte[]>>> futures = Lists.newArrayList();
    ClientBuilder clientBuilder = PulsarClient.builder()
            .serviceUrl(arguments.serviceURL)
            .connectionsPerBroker(arguments.maxConnections)
            .ioThreads(Runtime.getRuntime().availableProcessors())
            .statsInterval(arguments.statsIntervalSeconds, TimeUnit.SECONDS)
            .enableTls(arguments.useTls)
            .tlsTrustCertsFilePath(arguments.tlsTrustCertsFilePath);
    if (isNotBlank(arguments.authPluginClassName)) {
        clientBuilder.authentication(arguments.authPluginClassName, arguments.authParams);
    }
    class EncKeyReader implements CryptoKeyReader {

        EncryptionKeyInfo keyInfo = new EncryptionKeyInfo();

        EncKeyReader(byte[] value) {
            keyInfo.setKey(value);
        }

        @Override
        public EncryptionKeyInfo getPublicKey(String keyName, Map<String, String> keyMeta) {
            if (keyName.equals(arguments.encKeyName)) {
                return keyInfo;
            }
            return null;
        }

        @Override
        public EncryptionKeyInfo getPrivateKey(String keyName, Map<String, String> keyMeta) {
            return null;
        }
    }
    PulsarClient client = clientBuilder.build();
    ProducerBuilder<byte[]> producerBuilder = client.newProducer()
            .sendTimeout(0, TimeUnit.SECONDS)
            .compressionType(arguments.compression)
            .maxPendingMessages(arguments.maxOutstanding)
            .messageRoutingMode(MessageRoutingMode.RoundRobinPartition);
    if (arguments.batchTime > 0) {
        producerBuilder.batchingMaxPublishDelay(arguments.batchTime, TimeUnit.MILLISECONDS).enableBatching(true);
    }
    // Block if queue is full else we will start seeing errors in sendAsync
    producerBuilder.blockIfQueueFull(true);
    if (arguments.encKeyName != null) {
        producerBuilder.addEncryptionKey(arguments.encKeyName);
        byte[] pKey = Files.readAllBytes(Paths.get(arguments.encKeyFile));
        EncKeyReader keyReader = new EncKeyReader(pKey);
        producerBuilder.cryptoKeyReader(keyReader);
    }
    for (int i = 0; i < arguments.numTopics; i++) {
        String topic = (arguments.numTopics == 1) ? prefixTopicName : String.format("%s-%d", prefixTopicName, i);
        log.info("Adding {} publishers on topic {}", arguments.numProducers, topic);
        for (int j = 0; j < arguments.numProducers; j++) {
            futures.add(producerBuilder.clone().topic(topic).createAsync());
        }
    }
    final List<Producer<byte[]>> producers = Lists.newArrayListWithCapacity(futures.size());
    for (Future<Producer<byte[]>> future : futures) {
        producers.add(future.get());
    }
    log.info("Created {} producers", producers.size());
    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            printAggregatedStats();
        }
    });
    Collections.shuffle(producers);
    AtomicBoolean isDone = new AtomicBoolean();
    executor.submit(() -> {
        try {
            RateLimiter rateLimiter = RateLimiter.create(arguments.msgRate);
            long startTime = System.currentTimeMillis();
            // Send messages on all topics/producers
            long totalSent = 0;
            while (true) {
                for (Producer<byte[]> producer : producers) {
                    if (arguments.testTime > 0) {
                        if (System.currentTimeMillis() - startTime > arguments.testTime) {
                            log.info("------------------- DONE -----------------------");
                            printAggregatedStats();
                            isDone.set(true);
                            Thread.sleep(5000);
                            System.exit(0);
                        }
                    }
                    if (arguments.numMessages > 0) {
                        if (totalSent++ >= arguments.numMessages) {
                            log.info("------------------- DONE -----------------------");
                            printAggregatedStats();
                            isDone.set(true);
                            Thread.sleep(5000);
                            System.exit(0);
                        }
                    }
                    rateLimiter.acquire();
                    final long sendTime = System.nanoTime();
                    producer.sendAsync(payloadData).thenRun(() -> {
                        messagesSent.increment();
                        bytesSent.add(payloadData.length);
                        long latencyMicros = NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                        recorder.recordValue(latencyMicros);
                        cumulativeRecorder.recordValue(latencyMicros);
                    }).exceptionally(ex -> {
                        log.warn("Write error on message", ex);
                        System.exit(-1);
                        return null;
                    });
                }
            }
        } catch (Throwable t) {
            log.error("Got error", t);
        }
    });
    // Print report stats
    long oldTime = System.nanoTime();
    Histogram reportHistogram = null;
    String statsFileName = "perf-producer-" + System.currentTimeMillis() + ".hgrm";
    log.info("Dumping latency stats to {}", statsFileName);
    PrintStream histogramLog = new PrintStream(new FileOutputStream(statsFileName), false);
    HistogramLogWriter histogramLogWriter = new HistogramLogWriter(histogramLog);
    // Some log header bits
    histogramLogWriter.outputLogFormatVersion();
    histogramLogWriter.outputLegend();
    while (true) {
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            break;
        }
        if (isDone.get()) {
            break;
        }
        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;
        double rate = messagesSent.sumThenReset() / elapsed;
        double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8;
        reportHistogram = recorder.getIntervalHistogram(reportHistogram);
        log.info(
                "Throughput produced: {} msg/s --- {} Mbit/s --- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}",
                throughputFormat.format(rate), throughputFormat.format(throughput),
                dec.format(reportHistogram.getMean() / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(50) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(95) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.9) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.99) / 1000.0),
                dec.format(reportHistogram.getMaxValue() / 1000.0));
        histogramLogWriter.outputIntervalHistogram(reportHistogram);
        reportHistogram.reset();
        oldTime = now;
    }
    client.close();
}
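The .hgrm interval log written by the loop above can be inspected offline with HdrHistogram's own tooling. Below is a small post-processing sketch, assuming HdrHistogram is on the classpath; the file name is illustrative (the real one carries the timestamp logged at startup) and the snippet is not part of PerformanceProducer itself.

// Hypothetical reader for a perf-producer-*.hgrm file
public static void main(String[] args) throws Exception {
    HistogramLogReader reader = new HistogramLogReader("perf-producer-1514764800000.hgrm");
    Histogram total = new Histogram(5);
    EncodableHistogram interval;
    // nextIntervalHistogram() returns null once the log is exhausted
    while ((interval = reader.nextIntervalHistogram()) != null) {
        total.add((Histogram) interval);
    }
    // Latencies were recorded in microseconds, so divide by 1000 to report milliseconds
    System.out.println("p99 latency (ms): " + total.getValueAtPercentile(99) / 1000.0);
}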
Use of org.apache.pulsar.client.api.Producer in project incubator-pulsar by Apache.
The class PersistentQueueE2ETest, method testRoundRobinBatchDistribution.
// This test is useful for observing the distribution, but every now and then the numbers come out slightly
// different from the expected ones. It is kept disabled so as not to break the build, but it still gives good
// insight into how the round-robin distribution algorithm behaves.
@Test(enabled = false)
public void testRoundRobinBatchDistribution() throws Exception {
    final String topicName = "persistent://prop/use/ns-abc/shared-topic5";
    final String subName = "sub5";
    final int numMsgs = 137; // some random number, different from the default batch size of 100
    final AtomicInteger counter1 = new AtomicInteger(0);
    final AtomicInteger counter2 = new AtomicInteger(0);
    final AtomicInteger counter3 = new AtomicInteger(0);
    final CountDownLatch latch = new CountDownLatch(numMsgs * 3);
    ConsumerBuilder<byte[]> consumerBuilder = pulsarClient.newConsumer()
            .topic(topicName)
            .subscriptionName(subName)
            .receiverQueueSize(10)
            .subscriptionType(SubscriptionType.Shared);
    Consumer<byte[]> consumer1 = consumerBuilder.clone().messageListener((consumer, msg) -> {
        try {
            counter1.incrementAndGet();
            consumer.acknowledge(msg);
            latch.countDown();
        } catch (Exception e) {
            fail("Should not fail");
        }
    }).subscribe();
    Consumer<byte[]> consumer2 = consumerBuilder.clone().messageListener((consumer, msg) -> {
        try {
            counter2.incrementAndGet();
            consumer.acknowledge(msg);
            latch.countDown();
        } catch (Exception e) {
            fail("Should not fail");
        }
    }).subscribe();
    Consumer<byte[]> consumer3 = consumerBuilder.clone().messageListener((consumer, msg) -> {
        try {
            counter3.incrementAndGet();
            consumer.acknowledge(msg);
            latch.countDown();
        } catch (Exception e) {
            fail("Should not fail");
        }
    }).subscribe();
    List<CompletableFuture<MessageId>> futures = Lists.newArrayListWithCapacity(numMsgs);
    Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName).create();
    for (int i = 0; i < numMsgs * 3; i++) {
        String message = "msg-" + i;
        futures.add(producer.sendAsync(message.getBytes()));
    }
    FutureUtil.waitForAll(futures).get();
    producer.close();
    latch.await(1, TimeUnit.SECONDS);
    /*
     * Total messages = 137 * 3 = 411. Each consumer has 10 permits, so there will be 411 / (3 * 10) = 13 full
     * rounds of distribution, i.e. each consumer gets 130 messages. In the 14th round the balance is
     * 411 - 130 * 3 = 21: two consumers get another batch of 10 messages (total: 140) and the third one gets
     * the remaining message (total: 131).
     */
    assertTrue(CollectionUtils.subtract(Lists.newArrayList(140, 140, 131),
            Lists.newArrayList(counter1.get(), counter2.get(), counter3.get())).isEmpty());
    consumer1.close();
    consumer2.close();
    consumer3.close();
    admin.persistentTopics().delete(topicName);
}
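The closing assertion is easy to misread: CollectionUtils.subtract removes each element of the second collection from the first while respecting multiplicity, so an empty result means the three observed counters are exactly a permutation of the expected {140, 140, 131}. A tiny illustration of that behaviour, with made-up values:

// Illustration only: subtract() honours cardinality, so the order of the counters does not matter
List<Integer> expected = Lists.newArrayList(140, 140, 131);
assertTrue(CollectionUtils.subtract(expected, Lists.newArrayList(131, 140, 140)).isEmpty());
// A skewed distribution leaves unmatched expected values behind (here 140 and 131)
assertFalse(CollectionUtils.subtract(expected, Lists.newArrayList(150, 140, 121)).isEmpty());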
Use of org.apache.pulsar.client.api.Producer in project incubator-pulsar by Apache.
The class PersistentQueueE2ETest, method testConsumersWithDifferentPermits.
@Test
public void testConsumersWithDifferentPermits() throws Exception {
    final String topicName = "persistent://prop/use/ns-abc/shared-topic4";
    final String subName = "sub4";
    final int numMsgs = 10000;
    final AtomicInteger msgCountConsumer1 = new AtomicInteger(0);
    final AtomicInteger msgCountConsumer2 = new AtomicInteger(0);
    final CountDownLatch latch = new CountDownLatch(numMsgs);
    int recvQ1 = 10;
    Consumer<byte[]> consumer1 = pulsarClient.newConsumer()
            .topic(topicName)
            .subscriptionName(subName)
            .subscriptionType(SubscriptionType.Shared)
            .receiverQueueSize(recvQ1)
            .messageListener((consumer, msg) -> {
                msgCountConsumer1.incrementAndGet();
                try {
                    consumer.acknowledge(msg);
                    latch.countDown();
                } catch (PulsarClientException e) {
                    fail("Should not fail");
                }
            }).subscribe();
    int recvQ2 = 1;
    Consumer<byte[]> consumer2 = pulsarClient.newConsumer()
            .topic(topicName)
            .subscriptionName(subName)
            .subscriptionType(SubscriptionType.Shared)
            .receiverQueueSize(recvQ2)
            .messageListener((consumer, msg) -> {
                msgCountConsumer2.incrementAndGet();
                try {
                    consumer.acknowledge(msg);
                    latch.countDown();
                } catch (PulsarClientException e) {
                    fail("Should not fail");
                }
            }).subscribe();
    List<CompletableFuture<MessageId>> futures = Lists.newArrayListWithCapacity(numMsgs);
    Producer<byte[]> producer = pulsarClient.newProducer()
            .topic(topicName)
            .maxPendingMessages(numMsgs + 1)
            .create();
    for (int i = 0; i < numMsgs; i++) {
        String message = "msg-" + i;
        futures.add(producer.sendAsync(message.getBytes()));
    }
    FutureUtil.waitForAll(futures).get();
    producer.close();
    latch.await(5, TimeUnit.SECONDS);
    assertEquals(msgCountConsumer1.get(), numMsgs - numMsgs / (recvQ1 + recvQ2), numMsgs * 0.1);
    assertEquals(msgCountConsumer2.get(), numMsgs / (recvQ1 + recvQ2), numMsgs * 0.1);
    consumer1.close();
    consumer2.close();
    admin.persistentTopics().delete(topicName);
}
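The two assertions encode the expected dispatch ratio: on a shared subscription this test expects messages to be handed out roughly in proportion to the consumers' receiver queue sizes, so consumer2 (queue size 1) should see about numMsgs / (recvQ1 + recvQ2) = 10000 / 11 = 909 messages and consumer1 the remaining 9091, each within a 10% (1000-message) tolerance. A quick check of those numbers under that proportionality assumption:

// Back-of-the-envelope check of the asserted values (assumes dispatch proportional to receiver queue size)
int numMsgs = 10000, recvQ1 = 10, recvQ2 = 1;
int expectedConsumer2 = numMsgs / (recvQ1 + recvQ2);   // 909
int expectedConsumer1 = numMsgs - expectedConsumer2;    // 9091
double delta = numMsgs * 0.1;                           // +/- 1000 messages
System.out.printf("consumer1 ~ %d, consumer2 ~ %d, tolerance %.0f%n",
        expectedConsumer1, expectedConsumer2, delta);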