Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
In class V1_ProducerConsumerTest, method testSyncProducerAndConsumer.
@Test(dataProvider = "batch")
public void testSyncProducerAndConsumer(int batchMessageDelayMs) throws Exception {
    log.info("-- Starting {} test --", methodName);
    ConsumerConfiguration conf = new ConsumerConfiguration();
    conf.setSubscriptionType(SubscriptionType.Exclusive);
    Consumer consumer = pulsarClient.subscribe("persistent://my-property/use/my-ns/my-topic1", "my-subscriber-name", conf);
    ProducerConfiguration producerConf = new ProducerConfiguration();
    // Enable batching only when a publish delay is configured by the data provider
    if (batchMessageDelayMs != 0) {
        producerConf.setBatchingEnabled(true);
        producerConf.setBatchingMaxPublishDelay(batchMessageDelayMs, TimeUnit.MILLISECONDS);
        producerConf.setBatchingMaxMessages(5);
    }
    Producer producer = pulsarClient.createProducer("persistent://my-property/use/my-ns/my-topic1", producerConf);
    // Produce 10 messages synchronously
    for (int i = 0; i < 10; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }
    Message msg = null;
    Set<String> messageSet = Sets.newHashSet();
    // Consume the messages and verify ordering and absence of duplicates
    for (int i = 0; i < 10; i++) {
        msg = consumer.receive(5, TimeUnit.SECONDS);
        String receivedMessage = new String(msg.getData());
        log.debug("Received message: [{}]", receivedMessage);
        String expectedMessage = "my-message-" + i;
        testMessageOrderAndDuplicates(messageSet, receivedMessage, expectedMessage);
    }
    // Acknowledge the consumption of all messages at once
    consumer.acknowledgeCumulative(msg);
    consumer.close();
    log.info("-- Exiting {} test --", methodName);
}
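This example uses the legacy V1 configuration objects (ConsumerConfiguration, ProducerConfiguration). For comparison, a minimal sketch of the same setup with the builder-style API used by the later examples on this page (same topic and subscription names; this sketch is not taken from the test class itself) might look like:

// Sketch only: equivalent subscribe/producer setup with the builder API.
// Batching is shown enabled here; the test above only enables it when batchMessageDelayMs != 0.
Consumer<byte[]> consumer = pulsarClient.newConsumer()
        .topic("persistent://my-property/use/my-ns/my-topic1")
        .subscriptionName("my-subscriber-name")
        .subscriptionType(SubscriptionType.Exclusive)
        .subscribe();
Producer<byte[]> producer = pulsarClient.newProducer()
        .topic("persistent://my-property/use/my-ns/my-topic1")
        .enableBatching(true)
        .batchingMaxMessages(5)
        .batchingMaxPublishDelay(batchMessageDelayMs, TimeUnit.MILLISECONDS)
        .create();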
Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
In class V1_ProducerConsumerTest, method testFailReceiveAsyncOnConsumerClose.
@Test(timeOut = 5000)
public void testFailReceiveAsyncOnConsumerClose() throws Exception {
    log.info("-- Starting {} test --", methodName);
    // (1) simple consumer
    Consumer consumer = pulsarClient.subscribe("persistent://my-property/use/my-ns/failAsyncReceive", "my-subscriber-name", new ConsumerConfiguration());
    consumer.close();
    // receive messages: must fail because the consumer is already closed
    try {
        consumer.receiveAsync().get(1, TimeUnit.SECONDS);
        fail("it should have failed because the consumer is already closed");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof PulsarClientException.AlreadyClosedException);
    }
    // (2) partitioned consumer
    int numPartitions = 4;
    TopicName topicName = TopicName.get("persistent://my-property/use/my-ns/failAsyncReceive");
    admin.persistentTopics().createPartitionedTopic(topicName.toString(), numPartitions);
    Consumer partitionedConsumer = pulsarClient.subscribe(topicName.toString(), "my-partitioned-subscriber", new ConsumerConfiguration());
    partitionedConsumer.close();
    // receive messages: same expectation for the partitioned consumer
    try {
        partitionedConsumer.receiveAsync().get(1, TimeUnit.SECONDS);
        fail("it should have failed because the consumer is already closed");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof PulsarClientException.AlreadyClosedException);
    }
    log.info("-- Exiting {} test --", methodName);
}
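A brief note on the assertion pattern: receiveAsync() returns a CompletableFuture, so the failure is delivered asynchronously; calling get() rethrows it wrapped in an ExecutionException, which is why the test inspects getCause(). A non-blocking variant of the same check (a sketch only, not part of the original test) could use the future's completion callback:

// Sketch: observe the async failure without blocking on get().
consumer.receiveAsync().whenComplete((message, throwable) -> {
    if (throwable != null) {
        // The future fails because the consumer was already closed.
        log.info("receiveAsync failed as expected: {}", throwable.getMessage());
    }
});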
Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
In class V1_ProducerConsumerTest, method testMessageListener.
@Test(dataProvider = "batch", timeOut = 100000)
public void testMessageListener(int batchMessageDelayMs) throws Exception {
    log.info("-- Starting {} test --", methodName);
    ConsumerConfiguration conf = new ConsumerConfiguration();
    conf.setSubscriptionType(SubscriptionType.Exclusive);
    int numMessages = 100;
    final CountDownLatch latch = new CountDownLatch(numMessages);
    // The listener acknowledges each message asynchronously and counts it towards the latch
    conf.setMessageListener((consumer, msg) -> {
        Assert.assertNotNull(msg, "Message cannot be null");
        String receivedMessage = new String(msg.getData());
        log.debug("Received message [{}] in the listener", receivedMessage);
        consumer.acknowledgeAsync(msg);
        latch.countDown();
    });
    Consumer consumer = pulsarClient.subscribe("persistent://my-property/use/my-ns/my-topic3", "my-subscriber-name", conf);
    ProducerConfiguration producerConf = new ProducerConfiguration();
    if (batchMessageDelayMs != 0) {
        producerConf.setBatchingMaxPublishDelay(batchMessageDelayMs, TimeUnit.MILLISECONDS);
        producerConf.setBatchingMaxMessages(5);
        producerConf.setBatchingEnabled(true);
    }
    Producer producer = pulsarClient.createProducer("persistent://my-property/use/my-ns/my-topic3", producerConf);
    List<Future<MessageId>> futures = Lists.newArrayList();
    // Asynchronously produce messages
    for (int i = 0; i < numMessages; i++) {
        final String message = "my-message-" + i;
        Future<MessageId> future = producer.sendAsync(message.getBytes());
        futures.add(future);
    }
    log.info("Waiting for async publish to complete");
    for (Future<MessageId> future : futures) {
        future.get();
    }
    log.info("Waiting for message listener to ack all messages");
    assertTrue(latch.await(numMessages, TimeUnit.SECONDS), "Timed out waiting for message listener acks");
    consumer.close();
    log.info("-- Exiting {} test --", methodName);
}
Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
In class BrokerServiceThrottlingTest, method testLookupThrottlingForClientByBrokerInternalRetry.
/**
 * This test case makes sure that once a consumer loses its connection to the broker, it always reconnects,
 * retrying even when it hits throttling-error exceptions.
 *
 * <pre>
 * 1. all consumers get connected
 * 2. broker restarts with maxConcurrentLookupRequest = 1
 * 3. consumers reconnect; some get TooManyRequestsException and retry again
 * 4. eventually all consumers successfully connect to the broker
 * </pre>
 *
 * @throws Exception
 */
@Test
public void testLookupThrottlingForClientByBrokerInternalRetry() throws Exception {
    final String topicName = "persistent://prop/usw/my-ns/newTopic";
    String lookupUrl = new URI("pulsar://localhost:" + BROKER_PORT).toString();
    PulsarClient pulsarClient = PulsarClient.builder().serviceUrl(lookupUrl).statsInterval(0, TimeUnit.SECONDS)
            .ioThreads(20).connectionsPerBroker(20).build();
    upsertLookupPermits(100);
    List<Consumer<byte[]>> consumers = Collections.synchronizedList(Lists.newArrayList());
    ExecutorService executor = Executors.newFixedThreadPool(10);
    final int totalConsumers = 8;
    CountDownLatch latch = new CountDownLatch(totalConsumers);
    // Subscribe all consumers concurrently; a throttled subscribe attempt is tolerated here
    for (int i = 0; i < totalConsumers; i++) {
        executor.execute(() -> {
            try {
                consumers.add(pulsarClient.newConsumer().topic(topicName).subscriptionName("mysub")
                        .subscriptionType(SubscriptionType.Shared).subscribe());
            } catch (PulsarClientException.TooManyRequestsException e) {
                // ok
            } catch (Exception e) {
                fail("it shouldn't have failed");
            }
            latch.countDown();
        });
    }
    latch.await();
    // Restart the broker with a single concurrent lookup permit to force throttling on reconnect
    stopBroker();
    conf.setMaxConcurrentLookupRequest(1);
    startBroker();
    // wait strategically for all consumers to reconnect
    retryStrategically((test) -> areAllConsumersConnected(consumers), 5, 500);
    int totalConnectedConsumers = 0;
    for (int i = 0; i < consumers.size(); i++) {
        if (((ConsumerImpl<?>) consumers.get(i)).isConnected()) {
            totalConnectedConsumers++;
        }
        consumers.get(i).close();
    }
    assertEquals(totalConnectedConsumers, totalConsumers);
    executor.shutdown();
    pulsarClient.close();
}
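The test relies on test-harness helpers (upsertLookupPermits, retryStrategically, areAllConsumersConnected) whose bodies are not shown in this snippet. A minimal sketch of the connectivity check, assuming it simply inspects each consumer via the same ConsumerImpl.isConnected() call the test uses in its final loop, might be:

// Sketch of the helper used above; the actual implementation lives in the test class.
private static boolean areAllConsumersConnected(List<Consumer<byte[]>> consumers) {
    for (Consumer<byte[]> consumer : consumers) {
        if (!((ConsumerImpl<?>) consumer).isConnected()) {
            return false;
        }
    }
    return true;
}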
Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
In class CompactionTest, method testWholeBatchCompactedOut.
@Test
public void testWholeBatchCompactedOut() throws Exception {
    String topic = "persistent://my-property/use/my-ns/my-topic1";
    // subscribe before sending anything, so that we get all messages
    pulsarClient.newConsumer().topic(topic).subscriptionName("sub1").readCompacted(true).subscribe().close();
    try (Producer producerNormal = pulsarClient.newProducer().topic(topic).create();
         Producer producerBatch = pulsarClient.newProducer().topic(topic).maxPendingMessages(3)
                 .enableBatching(true).batchingMaxMessages(3).batchingMaxPublishDelay(1, TimeUnit.HOURS).create()) {
        // The first three messages share the same key and are published as a single batch
        producerBatch.sendAsync(MessageBuilder.create().setKey("key1").setContent("my-message-1".getBytes()).build());
        producerBatch.sendAsync(MessageBuilder.create().setKey("key1").setContent("my-message-2".getBytes()).build());
        producerBatch.sendAsync(MessageBuilder.create().setKey("key1").setContent("my-message-3".getBytes()).build()).get();
        // A later non-batched message with the same key supersedes the whole batch
        producerNormal.sendAsync(MessageBuilder.create().setKey("key1").setContent("my-message-4".getBytes()).build()).get();
    }
    // compact the topic
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();
    try (Consumer consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1").readCompacted(true).subscribe()) {
        Message message = consumer.receive();
        Assert.assertEquals(message.getKey(), "key1");
        Assert.assertEquals(new String(message.getData()), "my-message-4");
    }
}
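Why the consumer sees only my-message-4: all four messages share the key "key1", and compaction retains only the latest message per key, so the entire earlier batch (my-message-1 through my-message-3) is compacted out. A consumer reading with readCompacted(true) therefore skips the compacted-out batch completely and receives just the surviving message, which is exactly what the final assertions verify.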