Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
The class V1_ProducerConsumerTest, method testDeactivatingBacklogConsumer.
@Test
public void testDeactivatingBacklogConsumer() throws Exception {
    log.info("-- Starting {} test --", methodName);
    final long batchMessageDelayMs = 100;
    final int receiverSize = 10;
    final String topicName = "cache-topic";
    final String topic = "persistent://my-property/use/my-ns/" + topicName;
    final String sub1 = "faster-sub1";
    final String sub2 = "slower-sub2";
    ConsumerConfiguration conf = new ConsumerConfiguration();
    conf.setSubscriptionType(SubscriptionType.Shared);
    conf.setReceiverQueueSize(receiverSize);
    ProducerConfiguration producerConf = new ProducerConfiguration();
    if (batchMessageDelayMs != 0) {
        producerConf.setBatchingEnabled(true);
        producerConf.setBatchingMaxPublishDelay(batchMessageDelayMs, TimeUnit.MILLISECONDS);
        producerConf.setBatchingMaxMessages(5);
    }
    // 1. Faster subscriber: consumes all messages immediately
    Consumer subscriber1 = pulsarClient.subscribe(topic, sub1, conf);
    // 1.b. Slower subscriber: builds up a backlog
    conf.setReceiverQueueSize(receiverSize);
    Consumer subscriber2 = pulsarClient.subscribe(topic, sub2, conf);
    Producer producer = pulsarClient.createProducer(topic, producerConf);
    PersistentTopic topicRef = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topic);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) topicRef.getManagedLedger();
    // Use reflection to read/override the cache-backlog fields of the managed ledger
    final long maxMessageCacheRetentionTimeMillis = 100;
    Field backlogThresholdField = ManagedLedgerImpl.class.getDeclaredField("maxActiveCursorBacklogEntries");
    backlogThresholdField.setAccessible(true);
    Field field = ManagedLedgerImpl.class.getDeclaredField("maxMessageCacheRetentionTimeMillis");
    field.setAccessible(true);
    // strip the final modifier so the retention time can be overridden
    Field modifiersField = Field.class.getDeclaredField("modifiers");
    modifiersField.setAccessible(true);
    modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
    field.set(ledger, maxMessageCacheRetentionTimeMillis);
    final long maxActiveCursorBacklogEntries = (long) backlogThresholdField.get(ledger);
    Message msg = null;
    final int totalMsgs = (int) maxActiveCursorBacklogEntries + receiverSize + 1;
    // 2. Produce messages
    for (int i = 0; i < totalMsgs; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }
    // 3. Consume messages at the faster subscriber
    for (int i = 0; i < totalMsgs; i++) {
        msg = subscriber1.receive(100, TimeUnit.MILLISECONDS);
        subscriber1.acknowledge(msg);
    }
    // Wait so the messages become eligible for eviction from the cache
    Thread.sleep(maxMessageCacheRetentionTimeMillis);
    // 4. Deactivate the subscriber that has built up a backlog
    ledger.checkBackloggedCursors();
    Thread.sleep(100);
    // 5. Verify the active subscribers
    Set<String> activeSubscriber = Sets.newHashSet();
    ledger.getActiveCursors().forEach(c -> activeSubscriber.add(c.getName()));
    assertTrue(activeSubscriber.contains(sub1));
    assertFalse(activeSubscriber.contains(sub2));
    // 6. Consume messages at the slower subscriber
    for (int i = 0; i < totalMsgs; i++) {
        msg = subscriber2.receive(100, TimeUnit.MILLISECONDS);
        subscriber2.acknowledge(msg);
    }
    ledger.checkBackloggedCursors();
    activeSubscriber.clear();
    ledger.getActiveCursors().forEach(c -> activeSubscriber.add(c.getName()));
    assertTrue(activeSubscriber.contains(sub1));
    assertTrue(activeSubscriber.contains(sub2));
}
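For comparison, the same consumer/producer setup could be written against the builder-style client API introduced in Pulsar 2.x instead of the V1 ConsumerConfiguration/ProducerConfiguration shown above. This is a minimal sketch, not part of the test; the service URL is a placeholder and the topic/subscription names simply mirror the test values.

// Sketch of the equivalent setup with the Pulsar 2.x builder API
// (the service URL below is a placeholder, not taken from the test).
PulsarClient client = PulsarClient.builder()
        .serviceUrl("pulsar://localhost:6650")
        .build();

Consumer<byte[]> fasterSub = client.newConsumer()
        .topic("persistent://my-property/use/my-ns/cache-topic")
        .subscriptionName("faster-sub1")
        .subscriptionType(SubscriptionType.Shared)
        .receiverQueueSize(10)
        .subscribe();

Producer<byte[]> producer = client.newProducer()
        .topic("persistent://my-property/use/my-ns/cache-topic")
        .enableBatching(true)
        .batchingMaxPublishDelay(100, TimeUnit.MILLISECONDS)
        .batchingMaxMessages(5)
        .create();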
Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
The class V1_ProducerConsumerTest, method testSendTimeout.
@Test(dataProvider = "batch")
public void testSendTimeout(int batchMessageDelayMs) throws Exception {
    log.info("-- Starting {} test --", methodName);
    ConsumerConfiguration consumerConf = new ConsumerConfiguration();
    consumerConf.setSubscriptionType(SubscriptionType.Exclusive);
    Consumer consumer = pulsarClient.subscribe("persistent://my-property/use/my-ns/my-topic5", "my-subscriber-name", consumerConf);
    ProducerConfiguration producerConf = new ProducerConfiguration();
    if (batchMessageDelayMs != 0) {
        producerConf.setBatchingMaxPublishDelay(2 * batchMessageDelayMs, TimeUnit.MILLISECONDS);
        producerConf.setBatchingMaxMessages(5);
        producerConf.setBatchingEnabled(true);
    }
    producerConf.setSendTimeout(1, TimeUnit.SECONDS);
    Producer producer = pulsarClient.createProducer("persistent://my-property/use/my-ns/my-topic5", producerConf);
    final String message = "my-message";
    // Trigger the send timeout
    stopBroker();
    Future<MessageId> future = producer.sendAsync(message.getBytes());
    try {
        future.get();
        Assert.fail("Send operation should have failed");
    } catch (ExecutionException e) {
        // Expected
    }
    startBroker();
    // We should not have received any message
    Message msg = consumer.receive(3, TimeUnit.SECONDS);
    Assert.assertNull(msg);
    consumer.close();
    log.info("-- Exiting {} test --", methodName);
}
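If a stricter check on the failure is wanted, the cause of the ExecutionException can also be inspected. A minimal sketch, under the assumption that the client surfaces the send timeout as PulsarClientException.TimeoutException (confirm against the client version in use):

// Hypothetical stricter assertion on the send-timeout failure; the exact cause
// type is an assumption and should be verified against the client version in use.
try {
    future.get();
    Assert.fail("Send operation should have failed");
} catch (ExecutionException e) {
    // Unwrap the async failure and check that it is a client-side timeout
    Assert.assertTrue(e.getCause() instanceof PulsarClientException.TimeoutException,
            "unexpected cause: " + e.getCause());
}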
Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
The class V1_ProducerConsumerTest, method testInvalidSequence.
@Test
public void testInvalidSequence() throws Exception {
    log.info("-- Starting {} test --", methodName);
    // Create a client and close it immediately: every subsequent call on it should fail
    PulsarClient client1 = PulsarClient.create("http://127.0.0.1:" + BROKER_WEBSERVICE_PORT);
    client1.close();
    ConsumerConfiguration consumerConf = new ConsumerConfiguration();
    consumerConf.setSubscriptionType(SubscriptionType.Exclusive);
    try {
        client1.subscribe("persistent://my-property/use/my-ns/my-topic6", "my-subscriber-name", consumerConf);
        Assert.fail("Should fail");
    } catch (PulsarClientException e) {
        Assert.assertTrue(e instanceof PulsarClientException.AlreadyClosedException);
    }
    try {
        client1.createProducer("persistent://my-property/use/my-ns/my-topic6");
        Assert.fail("Should fail");
    } catch (PulsarClientException e) {
        Assert.assertTrue(e instanceof PulsarClientException.AlreadyClosedException);
    }
    // Acknowledging a message that was not received through this consumer is invalid
    Consumer consumer = pulsarClient.subscribe("persistent://my-property/use/my-ns/my-topic6", "my-subscriber-name", consumerConf);
    try {
        Message msg = MessageBuilder.create().setContent("InvalidMessage".getBytes()).build();
        consumer.acknowledge(msg);
    } catch (PulsarClientException.InvalidMessageException e) {
        // ok
    }
    // Operations on a closed consumer should fail
    consumer.close();
    try {
        consumer.receive();
        Assert.fail("Should fail");
    } catch (PulsarClientException.AlreadyClosedException e) {
        // ok
    }
    try {
        consumer.unsubscribe();
        Assert.fail("Should fail");
    } catch (PulsarClientException.AlreadyClosedException e) {
        // ok
    }
    // Operations on a closed producer should fail
    Producer producer = pulsarClient.createProducer("persistent://my-property/use/my-ns/my-topic6");
    producer.close();
    try {
        producer.send("message".getBytes());
        Assert.fail("Should fail");
    } catch (PulsarClientException.AlreadyClosedException e) {
        // ok
    }
}
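The repeated try/fail/catch blocks could be collapsed into a small helper. The sketch below is hypothetical (assertAlreadyClosed and ThrowingAction are not part of the Pulsar test suite); it only illustrates the pattern the test exercises.

// Hypothetical helper, not from the original test class: runs an action and
// asserts that it fails with PulsarClientException.AlreadyClosedException.
private static void assertAlreadyClosed(ThrowingAction action) {
    try {
        action.run();
        Assert.fail("Operation on a closed client object should have failed");
    } catch (PulsarClientException.AlreadyClosedException e) {
        // expected
    } catch (Exception e) {
        Assert.fail("Expected AlreadyClosedException but got: " + e);
    }
}

// Minimal functional interface so the helper can wrap calls that throw checked exceptions.
interface ThrowingAction {
    void run() throws Exception;
}

// Usage:
// assertAlreadyClosed(() -> consumer.receive());
// assertAlreadyClosed(() -> producer.send("message".getBytes()));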
Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
The class V1_ProducerConsumerTest, method testActiveAndInActiveConsumerEntryCacheBehavior.
/**
 * Usecase 1: Only one active subscription - one subscriber - produce messages - EntryCache should cache the
 * messages - EntryCache should be cleaned once the active subscription has consumed them.
 *
 * Usecase 2: Two active subscriptions (faster and slower) and the slower one gets closed - two subscribers -
 * produce messages - the faster subscriber consumes all messages while the slower one consumes none - EntryCache
 * should keep the messages cached because the slower subscriber has not consumed them yet - close the slower
 * subscriber - EntryCache should be cleared.
 *
 * @throws Exception
 */
@Test
public void testActiveAndInActiveConsumerEntryCacheBehavior() throws Exception {
    log.info("-- Starting {} test --", methodName);
    final long batchMessageDelayMs = 100;
    final int receiverSize = 10;
    final String topicName = "cache-topic";
    final String sub1 = "faster-sub1";
    final String sub2 = "slower-sub2";
    ConsumerConfiguration conf = new ConsumerConfiguration();
    conf.setSubscriptionType(SubscriptionType.Shared);
    conf.setReceiverQueueSize(receiverSize);
    ProducerConfiguration producerConf = new ProducerConfiguration();
    if (batchMessageDelayMs != 0) {
        producerConf.setBatchingEnabled(true);
        producerConf.setBatchingMaxPublishDelay(batchMessageDelayMs, TimeUnit.MILLISECONDS);
        producerConf.setBatchingMaxMessages(5);
    }
    /**
     ********** usecase-1: ************
     */
    // 1. Faster subscriber
    Consumer subscriber1 = pulsarClient.subscribe("persistent://my-property/use/my-ns/" + topicName, sub1, conf);
    final String topic = "persistent://my-property/use/my-ns/" + topicName;
    Producer producer = pulsarClient.createProducer(topic, producerConf);
    PersistentTopic topicRef = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topic);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) topicRef.getManagedLedger();
    // Replace the ledger's entry cache with a spy so invalidations can be verified
    Field cacheField = ManagedLedgerImpl.class.getDeclaredField("entryCache");
    cacheField.setAccessible(true);
    Field modifiersField = Field.class.getDeclaredField("modifiers");
    modifiersField.setAccessible(true);
    modifiersField.setInt(cacheField, cacheField.getModifiers() & ~Modifier.FINAL);
    EntryCacheImpl entryCache = spy((EntryCacheImpl) cacheField.get(ledger));
    cacheField.set(ledger, entryCache);
    Message msg = null;
    // 2. Produce messages
    for (int i = 0; i < 30; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }
    // 3. Consume messages
    for (int i = 0; i < 30; i++) {
        msg = subscriber1.receive(5, TimeUnit.SECONDS);
        subscriber1.acknowledge(msg);
    }
    // Verify: EntryCache has been invalidated
    verify(entryCache, atLeastOnce()).invalidateEntries(any());
    // Sleep for a second: the RateLimiter in ledger.updateCursorRateLimit only allows a cursor update
    // once per second
    Thread.sleep(1000);
    // Produce and consume one more message to trigger ledger.internalReadFromLedger(..), which updates the
    // cursor and the EntryCache
    producer.send("message".getBytes());
    msg = subscriber1.receive(5, TimeUnit.SECONDS);
    // Verify: the cache should be cleared, as the active subscriber has no messages left to consume
    assertEquals(entryCache.getSize(), 0, 1);
    /**
     ********** usecase-2: ************
     */
    // 1.b Slower subscriber
    Consumer subscriber2 = pulsarClient.subscribe("persistent://my-property/use/my-ns/" + topicName, sub2, conf);
    // Produce messages
    final int moreMessages = 10;
    for (int i = 0; i < receiverSize + moreMessages; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }
    // Consume messages (only at the faster subscriber)
    for (int i = 0; i < receiverSize + moreMessages; i++) {
        msg = subscriber1.receive(5, TimeUnit.SECONDS);
        subscriber1.acknowledge(msg);
    }
    // Sleep for a second: the RateLimiter in ledger.updateCursorRateLimit only allows a cursor update
    // once per second
    Thread.sleep(1000);
    // Produce and consume one more message to trigger ledger.internalReadFromLedger(..), which updates the
    // cursor and the EntryCache
    producer.send("message".getBytes());
    msg = subscriber1.receive(5, TimeUnit.SECONDS);
    // Verify: since subscriber2 has not consumed its messages, the EntryCache must still hold those entries
    assertTrue(entryCache.getSize() != 0);
    // 3.b Close subscriber2, which should trigger invalidation of the cached entries
    subscriber2.close();
    // Retry until the broker has cleaned up the closed subscriber and invalidated all cache entries
    retryStrategically((test) -> entryCache.getSize() == 0, 5, 100);
    // Verify: the EntryCache should be empty
    assertTrue(entryCache.getSize() == 0);
    subscriber1.close();
    log.info("-- Exiting {} test --", methodName);
}
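retryStrategically is a helper from the Pulsar test base class. Where that helper is not available, the same wait-for-condition can be sketched as a plain polling loop; the (predicate, retry count, sleep interval) contract below is inferred from how it is called in this test and is an assumption, not the actual implementation.

// A minimal stand-in for retryStrategically, assuming the (predicate, retries, sleep-ms)
// contract implied by the call above; not the actual helper from the Pulsar test base class.
static void retryUntil(java.util.function.Predicate<Void> condition, int retries, long sleepMillis)
        throws InterruptedException {
    for (int i = 0; i < retries; i++) {
        if (condition.test(null)) {
            return; // condition met, stop polling
        }
        Thread.sleep(sleepMillis);
    }
    // Fall through: the assertion following the call reports the failure
}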
Use of org.apache.pulsar.client.api.Consumer in project incubator-pulsar by apache.
The class V1_ProducerConsumerTest, method testSendBigMessageSizeButCompressed.
/**
 * Verifies that a non-batch message's size is validated after compression, while batch messaging validates the
 * size before compression.
 *
 * <pre>
 * send msg with size > MAX_SIZE (5 MB)
 * a. non-batch with compression: pass
 * b. batch-msg with compression: fail
 * c. non-batch w/o compression: fail
 * d. non-batch with compression, consumer consume: pass
 * </pre>
 *
 * @throws Exception
 */
@Test
public void testSendBigMessageSizeButCompressed() throws Exception {
    log.info("-- Starting {} test --", methodName);
    final String topic = "persistent://my-property/use/my-ns/bigMsg";
    // (a) non-batch msg with compression
    ProducerConfiguration producerConf = new ProducerConfiguration();
    producerConf.setCompressionType(CompressionType.LZ4);
    Producer producer = pulsarClient.createProducer(topic, producerConf);
    Message message = MessageBuilder.create().setContent(new byte[PulsarDecoder.MaxMessageSize + 1]).build();
    producer.send(message);
    producer.close();
    // (b) batch-msg
    producerConf = new ProducerConfiguration();
    producerConf.setBatchingEnabled(true);
    producerConf.setCompressionType(CompressionType.LZ4);
    producer = pulsarClient.createProducer(topic, producerConf);
    message = MessageBuilder.create().setContent(new byte[PulsarDecoder.MaxMessageSize + 1]).build();
    try {
        producer.send(message);
        fail("Should have thrown exception");
    } catch (PulsarClientException.InvalidMessageException e) {
        // OK
    }
    producer.close();
    // (c) non-batch msg without compression
    producerConf = new ProducerConfiguration();
    producerConf.setCompressionType(CompressionType.NONE);
    producer = pulsarClient.createProducer(topic, producerConf);
    message = MessageBuilder.create().setContent(new byte[PulsarDecoder.MaxMessageSize + 1]).build();
    try {
        producer.send(message);
        fail("Should have thrown exception");
    } catch (PulsarClientException.InvalidMessageException e) {
        // OK
    }
    producer.close();
    // (d) non-batch msg with compression and try to consume message
    producerConf = new ProducerConfiguration();
    producerConf.setCompressionType(CompressionType.LZ4);
    producer = pulsarClient.createProducer(topic, producerConf);
    Consumer consumer = pulsarClient.subscribe(topic, "sub1");
    byte[] content = new byte[PulsarDecoder.MaxMessageSize + 10];
    message = MessageBuilder.create().setContent(content).build();
    producer.send(message);
    assertEquals(consumer.receive().getData(), content);
    producer.close();
    consumer.close();
}
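With the builder-style API in Pulsar 2.x, compression is configured directly on the producer builder rather than on a ProducerConfiguration. A minimal sketch of case (a), assuming the 2.x client; the service URL is a placeholder and the payload size mirrors the 5 MB limit used by the test above.

// Sketch of case (a) with the Pulsar 2.x builder API; topic and payload size
// mirror the test above, the service URL is a placeholder.
PulsarClient client = PulsarClient.builder()
        .serviceUrl("pulsar://localhost:6650")
        .build();

Producer<byte[]> compressedProducer = client.newProducer()
        .topic("persistent://my-property/use/my-ns/bigMsg")
        .compressionType(CompressionType.LZ4)
        .enableBatching(false) // non-batch: size is checked after compression
        .create();

// An over-sized payload that compresses well should be accepted
compressedProducer.send(new byte[5 * 1024 * 1024 + 1]);
compressedProducer.close();
client.close();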