use of org.apache.pulsar.client.api.Message in project incubator-pulsar by apache.
the class V1_ProducerConsumerTest method testAsyncProducerAndAsyncAck.
@Test(dataProvider = "batch")
public void testAsyncProducerAndAsyncAck(int batchMessageDelayMs) throws Exception {
    log.info("-- Starting {} test --", methodName);
    ConsumerConfiguration conf = new ConsumerConfiguration();
    conf.setSubscriptionType(SubscriptionType.Exclusive);
    Consumer consumer = pulsarClient.subscribe("persistent://my-property/use/my-ns/my-topic2", "my-subscriber-name", conf);
    ProducerConfiguration producerConf = new ProducerConfiguration();
    if (batchMessageDelayMs != 0) {
        producerConf.setBatchingMaxPublishDelay(batchMessageDelayMs, TimeUnit.MILLISECONDS);
        producerConf.setBatchingMaxMessages(5);
        producerConf.setBatchingEnabled(true);
    }
    Producer producer = pulsarClient.createProducer("persistent://my-property/use/my-ns/my-topic2", producerConf);
    List<Future<MessageId>> futures = Lists.newArrayList();
    // Asynchronously produce messages
    for (int i = 0; i < 10; i++) {
        final String message = "my-message-" + i;
        Future<MessageId> future = producer.sendAsync(message.getBytes());
        futures.add(future);
    }
    log.info("Waiting for async publish to complete");
    for (Future<MessageId> future : futures) {
        future.get();
    }
    Message msg = null;
    Set<String> messageSet = Sets.newHashSet();
    for (int i = 0; i < 10; i++) {
        msg = consumer.receive(5, TimeUnit.SECONDS);
        String receivedMessage = new String(msg.getData());
        log.info("Received message: [{}]", receivedMessage);
        String expectedMessage = "my-message-" + i;
        testMessageOrderAndDuplicates(messageSet, receivedMessage, expectedMessage);
    }
    // Asynchronously acknowledge up to and including the last message
    Future<Void> ackFuture = consumer.acknowledgeCumulativeAsync(msg);
    log.info("Waiting for async ack to complete");
    ackFuture.get();
    consumer.close();
    log.info("-- Exiting {} test --", methodName);
}
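The helper testMessageOrderAndDuplicates is defined in the shared test base class rather than in this snippet. A minimal sketch of what it is assumed to do, based on how it is called above (assert in-order delivery and reject duplicates via the tracking set), could look like this:

// Sketch of the ordering/duplicate check assumed by the tests above (not part of this snippet).
protected void testMessageOrderAndDuplicates(Set<String> messagesReceived, String receivedMessage, String expectedMessage) {
    // Messages must arrive in publish order
    Assert.assertEquals(receivedMessage, expectedMessage,
            "Received message " + receivedMessage + " did not match the expected message " + expectedMessage);
    // Set.add() returns false if the message was already seen, i.e. a duplicate
    Assert.assertTrue(messagesReceived.add(receivedMessage), "Received duplicate message " + receivedMessage);
}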
use of org.apache.pulsar.client.api.Message in project incubator-pulsar by apache.
the class V1_ProducerConsumerTest method testSyncProducerAndConsumer.
@Test(dataProvider = "batch")
public void testSyncProducerAndConsumer(int batchMessageDelayMs) throws Exception {
    log.info("-- Starting {} test --", methodName);
    ConsumerConfiguration conf = new ConsumerConfiguration();
    conf.setSubscriptionType(SubscriptionType.Exclusive);
    Consumer consumer = pulsarClient.subscribe("persistent://my-property/use/my-ns/my-topic1", "my-subscriber-name", conf);
    ProducerConfiguration producerConf = new ProducerConfiguration();
    if (batchMessageDelayMs != 0) {
        producerConf.setBatchingEnabled(true);
        producerConf.setBatchingMaxPublishDelay(batchMessageDelayMs, TimeUnit.MILLISECONDS);
        producerConf.setBatchingMaxMessages(5);
    }
    Producer producer = pulsarClient.createProducer("persistent://my-property/use/my-ns/my-topic1", producerConf);
    for (int i = 0; i < 10; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }
    Message msg = null;
    Set<String> messageSet = Sets.newHashSet();
    for (int i = 0; i < 10; i++) {
        msg = consumer.receive(5, TimeUnit.SECONDS);
        String receivedMessage = new String(msg.getData());
        log.debug("Received message: [{}]", receivedMessage);
        String expectedMessage = "my-message-" + i;
        testMessageOrderAndDuplicates(messageSet, receivedMessage, expectedMessage);
    }
    // Acknowledge the consumption of all messages at once
    consumer.acknowledgeCumulative(msg);
    consumer.close();
    log.info("-- Exiting {} test --", methodName);
}
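The V1_ prefix marks tests for the legacy client API (ConsumerConfiguration/ProducerConfiguration with pulsarClient.subscribe and createProducer). A rough equivalent of the same sync produce/consume/cumulative-ack flow using the builder-based API of later Pulsar client versions might look like the sketch below; topic and subscription names are reused from the test, and the builder methods are assumed to match the 2.x client:

// Sketch only: the same flow with the builder-based client API (assumed 2.x-style methods).
Consumer<byte[]> consumer = pulsarClient.newConsumer()
        .topic("persistent://my-property/use/my-ns/my-topic1")
        .subscriptionName("my-subscriber-name")
        .subscriptionType(SubscriptionType.Exclusive)
        .subscribe();
Producer<byte[]> producer = pulsarClient.newProducer()
        .topic("persistent://my-property/use/my-ns/my-topic1")
        .enableBatching(true)
        .batchingMaxPublishDelay(10, TimeUnit.MILLISECONDS)
        .batchingMaxMessages(5)
        .create();
for (int i = 0; i < 10; i++) {
    producer.send(("my-message-" + i).getBytes());
}
Message<byte[]> last = null;
for (int i = 0; i < 10; i++) {
    last = consumer.receive(5, TimeUnit.SECONDS);
}
// Cumulative ack covers every message up to and including the last one received
consumer.acknowledgeCumulative(last);
consumer.close();
producer.close();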
use of org.apache.pulsar.client.api.Message in project incubator-pulsar by apache.
the class BrokerClientIntegrationTest method testResetCursor.
@Test(timeOut = 10000, dataProvider = "subType")
public void testResetCursor(SubscriptionType subType) throws Exception {
    final RetentionPolicies policy = new RetentionPolicies(60, 52 * 1024);
    final TopicName topicName = TopicName.get("persistent://my-property/use/my-ns/unacked-topic");
    final int warmup = 20;
    final int testSize = 150;
    final List<Message<byte[]>> received = new ArrayList<>();
    final String subsId = "sub";
    final NavigableMap<Long, TimestampEntryCount> publishTimeIdMap = new ConcurrentSkipListMap<>();
    // set delay time to start dispatching messages to active consumer in order to avoid message duplication
    conf.setActiveConsumerFailoverDelayTimeMillis(500);
    restartBroker();
    admin.namespaces().setRetention(topicName.getNamespace(), policy);
    ConsumerBuilder<byte[]> consumerBuilder = pulsarClient.newConsumer()
            .topic(topicName.toString())
            .subscriptionName(subsId)
            .subscriptionType(subType)
            .messageListener((consumer, msg) -> {
                try {
                    synchronized (received) {
                        received.add(msg);
                    }
                    consumer.acknowledge(msg);
                    long publishTime = ((MessageImpl<?>) msg).getPublishTime();
                    log.info(" publish time is " + publishTime + "," + msg.getMessageId());
                    TimestampEntryCount timestampEntryCount = publishTimeIdMap.computeIfAbsent(publishTime,
                            (k) -> new TimestampEntryCount(publishTime));
                    timestampEntryCount.incrementAndGet();
                } catch (final PulsarClientException e) {
                    log.warn("Failed to ack!");
                }
            });
    Consumer<byte[]> consumer1 = consumerBuilder.subscribe();
    Consumer<byte[]> consumer2 = consumerBuilder.subscribe();
    final Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName.toString()).create();
    log.info("warm up started for " + topicName.toString());
    // send warmup msgs
    byte[] msgBytes = new byte[1000];
    for (Integer i = 0; i < warmup; i++) {
        producer.send(msgBytes);
    }
    log.info("warm up finished.");
    // sleep to ensure receiving of msgs
    for (int n = 0; n < 10 && received.size() < warmup; n++) {
        Thread.sleep(200);
    }
    // validate received msgs
    Assert.assertEquals(received.size(), warmup);
    received.clear();
    // publish testSize num of msgs
    log.info("Sending more messages.");
    for (Integer n = 0; n < testSize; n++) {
        producer.send(msgBytes);
        Thread.sleep(1);
    }
    log.info("Sending more messages done.");
    Thread.sleep(3000);
    long begints = publishTimeIdMap.firstEntry().getKey();
    long endts = publishTimeIdMap.lastEntry().getKey();
    // find reset timestamp
    long timestamp = (endts - begints) / 2 + begints;
    timestamp = publishTimeIdMap.floorKey(timestamp);
    NavigableMap<Long, TimestampEntryCount> expectedMessages = new ConcurrentSkipListMap<>();
    expectedMessages.putAll(publishTimeIdMap.tailMap(timestamp, true));
    received.clear();
    log.info("reset cursor to " + timestamp + " for topic " + topicName.toString() + " for subs " + subsId);
    log.info("issuing admin operation on " + admin.getServiceUrl().toString());
    List<String> subList = admin.persistentTopics().getSubscriptions(topicName.toString());
    for (String subs : subList) {
        log.info("got sub " + subs);
    }
    publishTimeIdMap.clear();
    // reset the cursor to this timestamp
    Assert.assertTrue(subList.contains(subsId));
    admin.persistentTopics().resetCursor(topicName.toString(), subsId, timestamp);
    Thread.sleep(3000);
    int totalExpected = 0;
    for (TimestampEntryCount tec : expectedMessages.values()) {
        totalExpected += tec.numMessages;
    }
    // validate that replay happens after the timestamp
    Assert.assertTrue(publishTimeIdMap.firstEntry().getKey() >= timestamp);
    consumer1.close();
    consumer2.close();
    producer.close();
    // validate that expected and received counts match
    int totalReceived = 0;
    for (TimestampEntryCount tec : publishTimeIdMap.values()) {
        totalReceived += tec.numMessages;
    }
    Assert.assertEquals(totalReceived, totalExpected, "did not receive all messages on replay after reset");
    resetConfig();
    restartBroker();
}
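TimestampEntryCount is a small counting helper used by the message listener above but not shown in this snippet. A minimal sketch of what it is assumed to hold, based on how the test uses it (the publish timestamp plus a running message count read via the numMessages field), is:

// Sketch of the per-publish-time counter assumed by testResetCursor (not shown in the snippet).
private static class TimestampEntryCount {
    private final long timestamp;
    private int numMessages;

    public TimestampEntryCount(long timestamp) {
        this.timestamp = timestamp;
    }

    public int incrementAndGet() {
        return ++numMessages;
    }

    public long getTimestamp() {
        return timestamp;
    }
}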
use of org.apache.pulsar.client.api.Message in project incubator-pulsar by apache.
the class CompactionTest method testWholeBatchCompactedOut.
@Test
public void testWholeBatchCompactedOut() throws Exception {
    String topic = "persistent://my-property/use/my-ns/my-topic1";
    // subscribe before sending anything, so that we get all messages
    pulsarClient.newConsumer().topic(topic).subscriptionName("sub1").readCompacted(true).subscribe().close();
    try (Producer producerNormal = pulsarClient.newProducer().topic(topic).create();
         Producer producerBatch = pulsarClient.newProducer().topic(topic)
                 .maxPendingMessages(3)
                 .enableBatching(true)
                 .batchingMaxMessages(3)
                 .batchingMaxPublishDelay(1, TimeUnit.HOURS)
                 .create()) {
        producerBatch.sendAsync(MessageBuilder.create().setKey("key1").setContent("my-message-1".getBytes()).build());
        producerBatch.sendAsync(MessageBuilder.create().setKey("key1").setContent("my-message-2".getBytes()).build());
        producerBatch.sendAsync(MessageBuilder.create().setKey("key1").setContent("my-message-3".getBytes()).build()).get();
        producerNormal.sendAsync(MessageBuilder.create().setKey("key1").setContent("my-message-4".getBytes()).build()).get();
    }
    // compact the topic
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();
    try (Consumer consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1").readCompacted(true).subscribe()) {
        Message message = consumer.receive();
        Assert.assertEquals(message.getKey(), "key1");
        Assert.assertEquals(new String(message.getData()), "my-message-4");
    }
}
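After compaction, a readCompacted subscriber only sees the latest message for each key, which is why the single receive() above returns my-message-4 even though four messages were published for "key1". A reader-based sketch of the same check (Reader API methods assumed as in the 2.x client) could be:

// Sketch only: reading the compacted view of the topic with a Reader (assumed 2.x-style API).
try (Reader<byte[]> reader = pulsarClient.newReader()
        .topic(topic)
        .startMessageId(MessageId.earliest)
        .readCompacted(true)
        .create()) {
    while (reader.hasMessageAvailable()) {
        Message<byte[]> msg = reader.readNext();
        // With a single retained key ("key1"), only the last value should appear here
        System.out.println(msg.getKey() + " -> " + new String(msg.getData()));
    }
}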
use of org.apache.pulsar.client.api.Message in project incubator-pulsar by apache.
the class CompactedOutBatchMessageTest method testCompactedOutMessages.
@Test
public void testCompactedOutMessages() throws Exception {
    final String ns1 = "my-property/use/con-ns1";
    admin.namespaces().createNamespace(ns1);
    final String topic1 = "persistent://" + ns1 + "/my-topic";
    MessageMetadata metadata = MessageMetadata.newBuilder()
            .setProducerName("foobar")
            .setSequenceId(1)
            .setPublishTime(1)
            .setNumMessagesInBatch(3)
            .build();
    // build a batch buffer in which only the third entry ("key3") is not compacted out
    ByteBuf batchBuffer = Unpooled.buffer(1000);
    Commands.serializeSingleMessageInBatchWithPayload(
            SingleMessageMetadata.newBuilder().setCompactedOut(true).setPartitionKey("key1"), Unpooled.EMPTY_BUFFER, batchBuffer);
    Commands.serializeSingleMessageInBatchWithPayload(
            SingleMessageMetadata.newBuilder().setCompactedOut(true).setPartitionKey("key2"), Unpooled.EMPTY_BUFFER, batchBuffer);
    Commands.serializeSingleMessageInBatchWithPayload(
            SingleMessageMetadata.newBuilder().setCompactedOut(false).setPartitionKey("key3"), Unpooled.EMPTY_BUFFER, batchBuffer);
    Commands.serializeSingleMessageInBatchWithPayload(
            SingleMessageMetadata.newBuilder().setCompactedOut(true).setPartitionKey("key4"), Unpooled.EMPTY_BUFFER, batchBuffer);
    try (ConsumerImpl<byte[]> consumer = (ConsumerImpl<byte[]>) pulsarClient.newConsumer()
            .topic(topic1).subscriptionName("my-subscriber-name").subscribe()) {
        // shove it in the sideways
        consumer.receiveIndividualMessagesFromBatch(metadata, batchBuffer,
                MessageIdData.newBuilder().setLedgerId(1234).setEntryId(567).build(), consumer.cnx());
        Message m = consumer.receive();
        assertEquals(((BatchMessageIdImpl) m.getMessageId()).getLedgerId(), 1234);
        assertEquals(((BatchMessageIdImpl) m.getMessageId()).getEntryId(), 567);
        assertEquals(((BatchMessageIdImpl) m.getMessageId()).getBatchIndex(), 2);
        assertEquals(m.getKey(), "key3");
        assertEquals(consumer.numMessagesInQueue(), 0);
    }
}