Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
Example from class TopicsConsumerImplTest, method testAsyncConsumer.
@Test(timeOut = testTimeout)
public void testAsyncConsumer() throws Exception {
    String key = "TopicsConsumerAsyncTest";
    final String subscriptionName = "my-ex-subscription-" + key;
    final String messagePredicate = "my-message-" + key + "-";
    final int totalMessages = 30;

    final String topicName1 = "persistent://prop/use/ns-abc/topic-1-" + key;
    final String topicName2 = "persistent://prop/use/ns-abc/topic-2-" + key;
    final String topicName3 = "persistent://prop/use/ns-abc/topic-3-" + key;
    List<String> topicNames = Lists.newArrayList(topicName1, topicName2, topicName3);

    admin.properties().createProperty("prop", new PropertyAdmin());
    admin.persistentTopics().createPartitionedTopic(topicName2, 2);
    admin.persistentTopics().createPartitionedTopic(topicName3, 3);

    // 1. producer connect
    Producer<byte[]> producer1 = pulsarClient.newProducer().topic(topicName1)
            .create();
    Producer<byte[]> producer2 = pulsarClient.newProducer().topic(topicName2)
            .messageRoutingMode(org.apache.pulsar.client.api.MessageRoutingMode.RoundRobinPartition)
            .create();
    Producer<byte[]> producer3 = pulsarClient.newProducer().topic(topicName3)
            .messageRoutingMode(org.apache.pulsar.client.api.MessageRoutingMode.RoundRobinPartition)
            .create();

    // 2. Create consumer
    Consumer<byte[]> consumer = pulsarClient.newConsumer()
            .topics(topicNames)
            .subscriptionName(subscriptionName)
            .subscriptionType(SubscriptionType.Shared)
            .ackTimeout(ackTimeOutMillis, TimeUnit.MILLISECONDS)
            .receiverQueueSize(4)
            .subscribe();
    assertTrue(consumer instanceof TopicsConsumerImpl);

    // Asynchronously produce messages
    List<Future<MessageId>> futures = Lists.newArrayList();
    for (int i = 0; i < totalMessages / 3; i++) {
        futures.add(producer1.sendAsync((messagePredicate + "producer1-" + i).getBytes()));
        futures.add(producer2.sendAsync((messagePredicate + "producer2-" + i).getBytes()));
        futures.add(producer3.sendAsync((messagePredicate + "producer3-" + i).getBytes()));
    }
    log.info("Waiting for async publish to complete : {}", futures.size());
    for (Future<MessageId> future : futures) {
        future.get();
    }

    log.info("start async consume");
    CountDownLatch latch = new CountDownLatch(totalMessages);
    ExecutorService executor = Executors.newFixedThreadPool(1);
    executor.execute(() -> IntStream.range(0, totalMessages).forEach(index ->
            consumer.receiveAsync().thenAccept(msg -> {
                assertTrue(msg instanceof TopicMessageImpl);
                try {
                    consumer.acknowledge(msg);
                } catch (PulsarClientException e1) {
                    fail("message acknowledge failed", e1);
                }
                latch.countDown();
                log.info("receive index: {}, latch countDown: {}", index, latch.getCount());
            }).exceptionally(ex -> {
                log.warn("receive index: {}, failed receive message {}", index, ex.getMessage());
                ex.printStackTrace();
                return null;
            })));

    latch.await();
    log.info("success latch wait");

    consumer.unsubscribe();
    consumer.close();
    producer1.close();
    producer2.close();
    producer3.close();
}
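For reference, below is a minimal standalone sketch of the multi-topic, shared-subscription consume pattern this test exercises. It is an assumption-laden illustration, not part of the test: the service URL, topic names, subscription name, and timeouts are placeholders chosen for a local broker.

import java.util.Arrays;
import java.util.concurrent.TimeUnit;

import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.SubscriptionType;

public class MultiTopicConsumeSketch {
    public static void main(String[] args) throws Exception {
        // Assumed broker address; adjust for your environment.
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")
                .build();

        // One consumer bound to several topics, mirroring the test's topics(...) call.
        Consumer<byte[]> consumer = client.newConsumer()
                .topics(Arrays.asList(
                        "persistent://prop/use/ns-abc/topic-1",
                        "persistent://prop/use/ns-abc/topic-2"))
                .subscriptionName("my-shared-sub")       // placeholder name
                .subscriptionType(SubscriptionType.Shared)
                .ackTimeout(10, TimeUnit.SECONDS)
                .receiverQueueSize(4)
                .subscribe();

        // Non-blocking receive: acknowledge the message once the future completes.
        consumer.receiveAsync().thenAccept(msg -> {
            System.out.println("got: " + new String(msg.getData()));
            consumer.acknowledgeAsync(msg);
        }).get();

        consumer.close();
        client.close();
    }
}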
Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
Example from class CompactionTest, method testFirstMessageRetained.
@Test
public void testFirstMessageRetained() throws Exception {
    String topic = "persistent://my-property/use/my-ns/my-topic1";

    // subscribe before sending anything, so that we get all messages
    pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
            .readCompacted(true).subscribe().close();

    try (Producer producer = pulsarClient.createProducer(topic)) {
        producer.sendAsync(MessageBuilder.create().setKey("key1").setContent("my-message-1".getBytes()).build());
        producer.sendAsync(MessageBuilder.create().setKey("key2").setContent("my-message-2".getBytes()).build());
        producer.sendAsync(MessageBuilder.create().setKey("key2").setContent("my-message-3".getBytes()).build()).get();
    }

    // Read messages before compaction to get ids
    List<Message> messages = new ArrayList<>();
    try (Consumer consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
            .readCompacted(true).subscribe()) {
        messages.add(consumer.receive());
        messages.add(consumer.receive());
        messages.add(consumer.receive());
    }

    // compact the topic
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();

    // Check that messages after compaction have same ids
    try (Consumer consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
            .readCompacted(true).subscribe()) {
        Message message1 = consumer.receive();
        Assert.assertEquals(message1.getKey(), "key1");
        Assert.assertEquals(new String(message1.getData()), "my-message-1");
        Assert.assertEquals(message1.getMessageId(), messages.get(0).getMessageId());

        Message message2 = consumer.receive();
        Assert.assertEquals(message2.getKey(), "key2");
        Assert.assertEquals(new String(message2.getData()), "my-message-3");
        Assert.assertEquals(message2.getMessageId(), messages.get(2).getMessageId());
    }
}
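As a companion to the test above, here is a minimal sketch of consuming the compacted view of a topic via readCompacted(true). It assumes a locally reachable broker and a topic that has already been compacted; the URL, topic, and subscription name are illustrative.

import java.util.concurrent.TimeUnit;

import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.PulsarClient;

public class ReadCompactedSketch {
    public static void main(String[] args) throws Exception {
        // Assumed broker address.
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")
                .build();

        // readCompacted(true) makes the subscription read the compacted view of the
        // topic: the latest message per key for the compacted portion, plus the tail.
        try (Consumer<byte[]> consumer = client.newConsumer()
                .topic("persistent://my-property/use/my-ns/my-topic1")
                .subscriptionName("compacted-reader")   // placeholder name
                .readCompacted(true)
                .subscribe()) {
            Message<byte[]> msg;
            // Drain whatever is available, keyed value per message.
            while ((msg = consumer.receive(2, TimeUnit.SECONDS)) != null) {
                System.out.println(msg.getKey() + " -> " + new String(msg.getData()));
                consumer.acknowledge(msg);
            }
        }
        client.close();
    }
}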
Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
Example from class CompactionTest, method testBatchMessageIdsDontChange.
@Test
public void testBatchMessageIdsDontChange() throws Exception {
    String topic = "persistent://my-property/use/my-ns/my-topic1";

    // subscribe before sending anything, so that we get all messages
    pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
            .readCompacted(true).subscribe().close();

    try (Producer producer = pulsarClient.newProducer().topic(topic)
            .maxPendingMessages(3)
            .enableBatching(true)
            .batchingMaxMessages(3)
            .batchingMaxPublishDelay(1, TimeUnit.HOURS)
            .create()) {
        producer.sendAsync(MessageBuilder.create().setKey("key1").setContent("my-message-1".getBytes()).build());
        producer.sendAsync(MessageBuilder.create().setKey("key2").setContent("my-message-2".getBytes()).build());
        producer.sendAsync(MessageBuilder.create().setKey("key2").setContent("my-message-3".getBytes()).build()).get();
    }

    // Read messages before compaction to get ids
    List<Message> messages = new ArrayList<>();
    try (Consumer consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
            .readCompacted(true).subscribe()) {
        messages.add(consumer.receive());
        messages.add(consumer.receive());
        messages.add(consumer.receive());
    }

    // Ensure all messages are in same batch
    Assert.assertEquals(((BatchMessageIdImpl) messages.get(0).getMessageId()).getLedgerId(),
            ((BatchMessageIdImpl) messages.get(1).getMessageId()).getLedgerId());
    Assert.assertEquals(((BatchMessageIdImpl) messages.get(0).getMessageId()).getLedgerId(),
            ((BatchMessageIdImpl) messages.get(2).getMessageId()).getLedgerId());
    Assert.assertEquals(((BatchMessageIdImpl) messages.get(0).getMessageId()).getEntryId(),
            ((BatchMessageIdImpl) messages.get(1).getMessageId()).getEntryId());
    Assert.assertEquals(((BatchMessageIdImpl) messages.get(0).getMessageId()).getEntryId(),
            ((BatchMessageIdImpl) messages.get(2).getMessageId()).getEntryId());

    // compact the topic
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();

    // Check that messages after compaction have same ids
    try (Consumer consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
            .readCompacted(true).subscribe()) {
        Message message1 = consumer.receive();
        Assert.assertEquals(message1.getKey(), "key1");
        Assert.assertEquals(new String(message1.getData()), "my-message-1");
        Assert.assertEquals(message1.getMessageId(), messages.get(0).getMessageId());

        Message message2 = consumer.receive();
        Assert.assertEquals(message2.getKey(), "key2");
        Assert.assertEquals(new String(message2.getData()), "my-message-3");
        Assert.assertEquals(message2.getMessageId(), messages.get(2).getMessageId());
    }
}
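The test's batching setup is the key ingredient: three keyed messages land in a single batch. Below is a minimal sketch of that producer configuration using the newer TypedMessageBuilder API rather than the deprecated MessageBuilder; the broker URL and topic are placeholders, and the flushing comment reflects standard batching behavior (the batch is sent once batchingMaxMessages is reached), not something this sketch verifies.

import java.util.concurrent.TimeUnit;

import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.PulsarClient;

public class BatchedKeyedProducerSketch {
    public static void main(String[] args) throws Exception {
        // Assumed broker address; topic name is a placeholder.
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")
                .build();

        // A very long max publish delay plus batchingMaxMessages(3) means the three
        // keyed messages below are grouped into one batch (one ledger entry).
        try (Producer<byte[]> producer = client.newProducer()
                .topic("persistent://my-property/use/my-ns/my-topic1")
                .enableBatching(true)
                .batchingMaxMessages(3)
                .batchingMaxPublishDelay(1, TimeUnit.HOURS)
                .create()) {
            producer.newMessage().key("key1").value("my-message-1".getBytes()).sendAsync();
            producer.newMessage().key("key2").value("my-message-2".getBytes()).sendAsync();
            // The third message fills the batch; blocking on send() waits for the broker ack.
            producer.newMessage().key("key2").value("my-message-3".getBytes()).send();
        }
        client.close();
    }
}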
Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
Example from class PersistentTopicE2ETest, method testConcurrentConsumerThreads.
// some race conditions need to be handled
// disabling the test for now so it does not block commit jobs
@Test(enabled = false)
public void testConcurrentConsumerThreads() throws Exception {
    // test concurrent consumer threads on same consumerId
    final String topicName = "persistent://prop/use/ns-abc/topic3";
    final String subName = "sub3";

    final int recvQueueSize = 100;
    final int numConsumersThreads = 10;

    ExecutorService executor = Executors.newCachedThreadPool();
    final CyclicBarrier barrier = new CyclicBarrier(numConsumersThreads + 1);
    for (int i = 0; i < numConsumersThreads; i++) {
        executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                barrier.await();
                Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName)
                        .subscriptionName(subName).receiverQueueSize(recvQueueSize).subscribe();
                for (int i = 0; i < recvQueueSize / numConsumersThreads; i++) {
                    Message<byte[]> msg = consumer.receive();
                    consumer.acknowledge(msg);
                }
                return null;
            }
        });
    }

    Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName).create();
    for (int i = 0; i < recvQueueSize * numConsumersThreads; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }

    barrier.await();
    Thread.sleep(ASYNC_EVENT_COMPLETION_WAIT);

    PersistentTopic topicRef = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName);
    PersistentSubscription subRef = topicRef.getSubscription(subName);

    // 1. cumulatively all threads drain the backlog
    assertEquals(subRef.getNumberOfEntriesInBacklog(), 0);

    // 2. flow control works the same as single consumer single thread
    Thread.sleep(ASYNC_EVENT_COMPLETION_WAIT);
    assertEquals(getAvailablePermits(subRef), recvQueueSize);

    executor.shutdown();
}
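The disabled test above drains one subscription from many threads, which is exactly where its race conditions come from. For contrast, a more conventional concurrent-consumption sketch is shown here, assuming a Shared subscription with one Consumer per worker thread; this is not the test's setup, and the broker URL, topic, subscription name, and counts are illustrative placeholders.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.SubscriptionType;

public class SharedSubscriptionWorkersSketch {
    public static void main(String[] args) throws Exception {
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650") // assumed broker address
                .build();

        int workers = 4;
        ExecutorService pool = Executors.newFixedThreadPool(workers);
        for (int w = 0; w < workers; w++) {
            pool.submit(() -> {
                // Each worker opens its own Consumer on the same Shared subscription,
                // so the broker distributes messages across the workers.
                try (Consumer<byte[]> consumer = client.newConsumer()
                        .topic("persistent://prop/use/ns-abc/topic3")
                        .subscriptionName("sub3")
                        .subscriptionType(SubscriptionType.Shared)
                        .receiverQueueSize(100)
                        .subscribe()) {
                    Message<byte[]> msg;
                    // Drain until no message arrives within the timeout.
                    while ((msg = consumer.receive(2, TimeUnit.SECONDS)) != null) {
                        consumer.acknowledge(msg);
                    }
                }
                return null;
            });
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
        client.close();
    }
}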
Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
Example from class V1_ProducerConsumerTest, method testBlockUnackedConsumerRedeliverySpecificMessagesProduceWithPause.
/**
 * Verifies redelivery of specific messages: all of those messages are redelivered even when the
 * consumer gets blocked due to unacked messages.
 *
 * Use case: messages are produced at a 10 ms interval, so the consumer can consume only 10 messages
 * without acking.
 *
 * @throws Exception
 */
@Test
public void testBlockUnackedConsumerRedeliverySpecificMessagesProduceWithPause() throws Exception {
    log.info("-- Starting {} test --", methodName);
    int unAckedMessages = pulsar.getConfiguration().getMaxUnackedMessagesPerConsumer();
    try {
        final int unAckedMessagesBufferSize = 10;
        final int receiverQueueSize = 20;
        final int totalProducedMsgs = 20;
        pulsar.getConfiguration().setMaxUnackedMessagesPerConsumer(unAckedMessagesBufferSize);

        ConsumerConfiguration conf = new ConsumerConfiguration();
        conf.setReceiverQueueSize(receiverQueueSize);
        conf.setSubscriptionType(SubscriptionType.Shared);
        ConsumerImpl consumer = (ConsumerImpl) pulsarClient
                .subscribe("persistent://my-property/use/my-ns/unacked-topic", "subscriber-1", conf);

        ProducerConfiguration producerConf = new ProducerConfiguration();
        Producer producer = pulsarClient
                .createProducer("persistent://my-property/use/my-ns/unacked-topic", producerConf);

        // (1) Produce messages
        for (int i = 0; i < totalProducedMsgs; i++) {
            String message = "my-message-" + i;
            producer.send(message.getBytes());
            Thread.sleep(10);
        }

        // (2) Try to consume messages: only unAckedMessagesBufferSize messages can be consumed
        Message msg = null;
        List<Message> messages1 = Lists.newArrayList();
        for (int i = 0; i < totalProducedMsgs; i++) {
            msg = consumer.receive(1, TimeUnit.SECONDS);
            if (msg != null) {
                messages1.add(msg);
                log.info("Received message: " + new String(msg.getData()));
            } else {
                break;
            }
        }
        // The client should not receive all produced messages; it should be blocked due to unacked messages
        assertEquals(messages1.size(), unAckedMessagesBufferSize);
        Set<MessageIdImpl> redeliveryMessages = messages1.stream().map(m -> {
            return (MessageIdImpl) m.getMessageId();
        }).collect(Collectors.toSet());

        // (3) Redeliver all consumed messages
        consumer.redeliverUnacknowledgedMessages(Sets.newHashSet(redeliveryMessages));
        Thread.sleep(1000);

        Set<MessageIdImpl> messages2 = Sets.newHashSet();
        for (int i = 0; i < totalProducedMsgs; i++) {
            msg = consumer.receive(1, TimeUnit.SECONDS);
            if (msg != null) {
                messages2.add((MessageIdImpl) msg.getMessageId());
                log.info("Received message: " + new String(msg.getData()));
            } else {
                break;
            }
        }
        assertEquals(messages1.size(), messages2.size());

        // (4) Verify: all previously consumed but unacked messages were redelivered
        messages2.removeAll(redeliveryMessages);
        assertEquals(messages2.size(), 0);

        producer.close();
        consumer.close();
        log.info("-- Exiting {} test --", methodName);
    } catch (Exception e) {
        fail();
    } finally {
        pulsar.getConfiguration().setMaxUnackedMessagesPerConsumer(unAckedMessages);
    }
}
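Note that the Set-based redeliverUnacknowledgedMessages(...) call above lives on ConsumerImpl, not on the public Consumer interface, which is why the test casts. Below is a hedged sketch of the public, no-argument counterpart, which asks the broker to redeliver everything currently unacked for the consumer rather than a specific set of ids; the broker URL, topic, and subscription name are placeholders.

import java.util.concurrent.TimeUnit;

import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.SubscriptionType;

public class RedeliverUnackedSketch {
    public static void main(String[] args) throws Exception {
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650") // assumed broker address
                .build();

        Consumer<byte[]> consumer = client.newConsumer()
                .topic("persistent://my-property/use/my-ns/unacked-topic")
                .subscriptionName("subscriber-1")
                .subscriptionType(SubscriptionType.Shared)
                .receiverQueueSize(20)
                .subscribe();

        // Receive a few messages without acknowledging them ...
        for (int i = 0; i < 5; i++) {
            Message<byte[]> msg = consumer.receive(1, TimeUnit.SECONDS);
            if (msg == null) {
                break;
            }
        }

        // ... then ask the broker to redeliver all currently unacked messages for this
        // consumer. Unlike the internal call in the test, this public method takes no
        // message-id set and applies to everything outstanding.
        consumer.redeliverUnacknowledgedMessages();

        consumer.close();
        client.close();
    }
}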