Example 61 with PersistentTopic

Use of org.apache.pulsar.broker.service.persistent.PersistentTopic in project incubator-pulsar by apache.

In class BatchMessageTest, method testConcurrentBatchMessageAck.

/**
 * Verifies batch-message acking is thread-safe
 *
 * @throws Exception
 */
@Test(timeOut = 3000)
public void testConcurrentBatchMessageAck() throws Exception {
    int numMsgs = 10;
    final String topicName = "persistent://prop/use/ns-abc/testConcurrentAck";
    final String subscriptionName = "sub-1";
    Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subscriptionName).subscriptionType(SubscriptionType.Shared).subscribe();
    consumer.close();
    Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName).batchingMaxPublishDelay(5, TimeUnit.SECONDS).batchingMaxMessages(numMsgs).enableBatching(true).create();
    List<CompletableFuture<MessageId>> sendFutureList = Lists.newArrayList();
    for (int i = 0; i < numMsgs; i++) {
        byte[] message = ("my-message-" + i).getBytes();
        Message<byte[]> msg = MessageBuilder.create().setContent(message).build();
        sendFutureList.add(producer.sendAsync(msg));
    }
    FutureUtil.waitForAll(sendFutureList).get();
    PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName);
    final Consumer<byte[]> myConsumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subscriptionName).subscriptionType(SubscriptionType.Shared).subscribe();
    // assertEquals(dispatcher.getTotalUnackedMessages(), 1);
    ExecutorService executor = Executors.newFixedThreadPool(10);
    final CountDownLatch latch = new CountDownLatch(numMsgs);
    final AtomicBoolean failed = new AtomicBoolean(false);
    for (int i = 0; i < numMsgs; i++) {
        executor.submit(() -> {
            try {
                Message<byte[]> msg = myConsumer.receive(1, TimeUnit.SECONDS);
                myConsumer.acknowledge(msg);
            } catch (Exception e) {
                // record the failure so it can be checked after all tasks complete
                failed.set(true);
            }
            latch.countDown();
        });
    }
    latch.await();
    // none of the worker threads should have failed to receive or ack a message
    assertEquals(failed.get(), false);
    PersistentDispatcherMultipleConsumers dispatcher = (PersistentDispatcherMultipleConsumers) topic.getSubscription(subscriptionName).getDispatcher();
    // poll until the broker has processed the acks sent by the consumer threads
    retryStrategically((test) -> dispatcher.getConsumers().get(0).getUnackedMessages() == 0, 5, 150);
    assertEquals(dispatcher.getConsumers().get(0).getUnackedMessages(), 0);
    executor.shutdown();
    myConsumer.close();
    producer.close();
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CompletableFuture(java.util.concurrent.CompletableFuture) PersistentDispatcherMultipleConsumers(org.apache.pulsar.broker.service.persistent.PersistentDispatcherMultipleConsumers) PersistentTopic(org.apache.pulsar.broker.service.persistent.PersistentTopic) ExecutorService(java.util.concurrent.ExecutorService) Test(org.testng.annotations.Test)
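
Several tests on this page call retryStrategically, a polling helper that comes from Pulsar's test support code and is not shown in these snippets. A minimal sketch of such a helper is given below; the signature and back-off are inferred from the call sites on this page and are assumptions, not the exact Pulsar implementation.

import java.util.function.Predicate;

// Minimal polling helper sketch: re-evaluate the predicate up to retryCount times,
// sleeping a little longer after each failed attempt, and stop after the last try.
// The real helper lives in Pulsar's test base classes and may differ in detail.
static void retryStrategically(Predicate<Void> predicate, int retryCount, long intSleepTimeInMillis)
        throws InterruptedException {
    for (int i = 0; i < retryCount; i++) {
        if (predicate.test(null) || i == (retryCount - 1)) {
            break;
        }
        Thread.sleep(intSleepTimeInMillis + (intSleepTimeInMillis * i));
    }
}

With this shape, the call above simply re-checks getUnackedMessages() up to 5 times, waiting 150 ms (growing linearly per attempt in this sketch) between checks.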

Example 62 with PersistentTopic

Use of org.apache.pulsar.broker.service.persistent.PersistentTopic in project incubator-pulsar by apache.

In class BatchMessageTest, method testBatchProducerWithLargeMessage.

@Test(dataProvider = "codec")
public void testBatchProducerWithLargeMessage(CompressionType compressionType) throws Exception {
    int numMsgs = 50;
    int numMsgsInBatch = numMsgs / 2;
    final String topicName = "persistent://prop/use/finance/testBatchProducerWithLargeMessage";
    final String subscriptionName = "large-message-sub-1" + compressionType.toString();
    Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subscriptionName).subscribe();
    consumer.close();
    Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName).compressionType(compressionType).batchingMaxPublishDelay(5, TimeUnit.SECONDS).batchingMaxMessages(numMsgsInBatch).enableBatching(true).create();
    List<CompletableFuture<MessageId>> sendFutureList = Lists.newArrayList();
    for (int i = 0; i < numMsgs; i++) {
        if (i == 25) {
            // send a large message
            byte[] largeMessage = new byte[128 * 1024 + 4];
            Message<byte[]> msg = MessageBuilder.create().setContent(largeMessage).build();
            sendFutureList.add(producer.sendAsync(msg));
        } else {
            byte[] message = ("msg-" + i).getBytes();
            Message<byte[]> msg = MessageBuilder.create().setContent(message).build();
            sendFutureList.add(producer.sendAsync(msg));
        }
    }
    byte[] message = ("msg-" + "last").getBytes();
    Message<byte[]> lastMsg = MessageBuilder.create().setContent(message).build();
    sendFutureList.add(producer.sendAsync(lastMsg));
    FutureUtil.waitForAll(sendFutureList).get();
    PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName);
    rolloverPerIntervalStats();
    assertTrue(topic.getProducers().values().iterator().next().getStats().msgRateIn > 0.0);
    // we expect 3 messages in the backlog since the large message in the middle should
    // close out the batch and be sent in a batch of its own
    assertEquals(topic.getSubscription(subscriptionName).getNumberOfEntriesInBacklog(), 3);
    consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subscriptionName).subscribe();
    for (int i = 0; i <= numMsgs; i++) {
        Message<byte[]> msg = consumer.receive(5, TimeUnit.SECONDS);
        assertNotNull(msg);
        LOG.info("received msg - {}", msg.getData().toString());
        consumer.acknowledge(msg);
    }
    Thread.sleep(100);
    assertEquals(topic.getSubscription(subscriptionName).getNumberOfEntriesInBacklog(), 0);
    consumer.close();
    producer.close();
}
Also used : CompletableFuture(java.util.concurrent.CompletableFuture) PersistentTopic(org.apache.pulsar.broker.service.persistent.PersistentTopic) Test(org.testng.annotations.Test)
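
Why the backlog is exactly 3 entries: with batchingMaxMessages set to numMsgsInBatch = 25, messages 0 through 24 fill and flush the first batch; the 128 KB message at i == 25 then, per the comment in the test, closes out the batch and is sent in a batch of its own, giving the second entry; messages 26 through 49 plus the trailing "last" message form the third batch of 25. That is also why the receive loop runs with i <= numMsgs: the consumer drains 51 individual messages even though they occupy only 3 broker entries.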

Example 63 with PersistentTopic

Use of org.apache.pulsar.broker.service.persistent.PersistentTopic in project incubator-pulsar by apache.

In class BatchMessageTest, method testSimpleBatchProducerWithFixedBatchSizeAndTime.

@Test(dataProvider = "codec")
public void testSimpleBatchProducerWithFixedBatchSizeAndTime(CompressionType compressionType) throws Exception {
    int numMsgs = 100;
    final String topicName = "persistent://prop/use/ns-abc/testSimpleBatchProducerWithFixedBatchSizeAndTime";
    final String subscriptionName = "time-size-sub-1" + compressionType.toString();
    Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subscriptionName).subscribe();
    consumer.close();
    Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName).batchingMaxPublishDelay(10, TimeUnit.MILLISECONDS).batchingMaxMessages(5).compressionType(compressionType).enableBatching(true).create();
    Random random = new Random();
    List<CompletableFuture<MessageId>> sendFutureList = Lists.newArrayList();
    for (int i = 0; i < numMsgs; i++) {
        // put a random sleep from 0 to 3 ms
        Thread.sleep(random.nextInt(4));
        byte[] message = ("msg-" + i).getBytes();
        Message<byte[]> msg = MessageBuilder.create().setContent(message).build();
        sendFutureList.add(producer.sendAsync(msg));
    }
    FutureUtil.waitForAll(sendFutureList).get();
    PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName);
    rolloverPerIntervalStats();
    assertTrue(topic.getProducers().values().iterator().next().getStats().msgRateIn > 0.0);
    LOG.info("Sent {} messages, backlog is {} messages", numMsgs, topic.getSubscription(subscriptionName).getNumberOfEntriesInBacklog());
    assertTrue(topic.getSubscription(subscriptionName).getNumberOfEntriesInBacklog() < numMsgs);
    producer.close();
}
Also used : CompletableFuture(java.util.concurrent.CompletableFuture) Random(java.util.Random) PersistentTopic(org.apache.pulsar.broker.service.persistent.PersistentTopic) Test(org.testng.annotations.Test)
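
With batchingMaxMessages(5) and a 10 ms publish delay, each batch is flushed by whichever trigger fires first (5 accumulated messages or the 10 ms timer), so depending on how the random 0 to 3 ms sleeps line up, each broker entry carries between 1 and 5 messages and the backlog can land anywhere from numMsgs / 5 = 20 entries up to numMsgs in the degenerate case where no batching happens. The assertion therefore only checks the weak property that at least some batching occurred, i.e. that the backlog entry count is strictly smaller than the number of messages published.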

Example 64 with PersistentTopic

Use of org.apache.pulsar.broker.service.persistent.PersistentTopic in project incubator-pulsar by apache.

In class BatchMessageTest, method testBatchAndNonBatchCumulativeAcks.

@Test
public void testBatchAndNonBatchCumulativeAcks() throws Exception {
    int numMsgs = 50;
    int numMsgsInBatch = numMsgs / 10;
    final String topicName = "persistent://prop/use/ns-abc/testBatchAndNonBatchCumulativeAcks";
    final String subscriptionName = "bnb-sub-1";
    Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subscriptionName).subscribe();
    consumer.close();
    Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName).batchingMaxPublishDelay(5, TimeUnit.SECONDS).batchingMaxMessages(numMsgsInBatch).enableBatching(true).create();
    // create producer to publish non batch messages
    Producer<byte[]> noBatchProducer = pulsarClient.newProducer().topic(topicName).create();
    List<CompletableFuture<MessageId>> sendFutureList = Lists.newArrayList();
    for (int i = 0; i < numMsgs / 2; i++) {
        byte[] message = ("msg-" + i).getBytes();
        Message<byte[]> msg = MessageBuilder.create().setContent(message).build();
        sendFutureList.add(producer.sendAsync(msg));
        byte[] nobatchmsg = ("nobatch-" + i).getBytes();
        msg = MessageBuilder.create().setContent(nobatchmsg).build();
        sendFutureList.add(noBatchProducer.sendAsync(msg));
    }
    FutureUtil.waitForAll(sendFutureList).get();
    PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName);
    rolloverPerIntervalStats();
    assertTrue(topic.getProducers().values().iterator().next().getStats().msgRateIn > 0.0);
    assertEquals(topic.getSubscription(subscriptionName).getNumberOfEntriesInBacklog(), (numMsgs / 2) / numMsgsInBatch + numMsgs / 2);
    consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subscriptionName).subscribe();
    Message<byte[]> lastunackedMsg = null;
    for (int i = 0; i < numMsgs; i++) {
        Message<byte[]> msg = consumer.receive(5, TimeUnit.SECONDS);
        assertNotNull(msg);
        LOG.info("[{}] got message position{} data {}", subscriptionName, msg.getMessageId(), String.valueOf(msg.getData()));
        if (i % 2 == 0) {
            lastunackedMsg = msg;
        } else {
            consumer.acknowledgeCumulative(msg);
            LOG.info("[{}] did cumulative ack on position{} ", subscriptionName, msg.getMessageId());
        }
    }
    if (lastunackedMsg != null) {
        consumer.acknowledgeCumulative(lastunackedMsg);
    }
    Thread.sleep(100);
    assertEquals(topic.getSubscription(subscriptionName).getNumberOfEntriesInBacklog(), 0);
    consumer.close();
    producer.close();
    noBatchProducer.close();
}
Also used : CompletableFuture(java.util.concurrent.CompletableFuture) PersistentTopic(org.apache.pulsar.broker.service.persistent.PersistentTopic) Test(org.testng.annotations.Test)
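
The expected backlog in the assertion works out as follows: 25 messages go through the batching producer with batchingMaxMessages(numMsgsInBatch) = 5, producing (numMsgs / 2) / numMsgsInBatch = 25 / 5 = 5 batch entries, while the other 25 messages go through noBatchProducer as one entry each. The subscription backlog is therefore 5 + 25 = 30 entries holding 50 individual messages, which the consumer then drains in the numMsgs-iteration receive loop.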

Example 65 with PersistentTopic

Use of org.apache.pulsar.broker.service.persistent.PersistentTopic in project incubator-pulsar by apache.

In class BrokerBkEnsemblesTests, method testSkipCorruptDataLedger.

/**
 * Verifies the broker configuration that allows the broker to skip non-recoverable data ledgers.
 *
 * <pre>
 * 1. publish messages in 5 data-ledgers each with 20 entries under managed-ledger
 * 2. delete first 4 data-ledgers
 * 3. consumer will fail to consume any message as first data-ledger is non-recoverable
 * 4. enable dynamic config to skip non-recoverable data-ledgers
 * 5. consumer will be able to consume 20 messages from last non-deleted ledger
 *
 * </pre>
 *
 * @throws Exception
 */
@Test(timeOut = 6000)
public void testSkipCorruptDataLedger() throws Exception {
    PulsarClient client = PulsarClient.builder().serviceUrl(adminUrl.toString()).statsInterval(0, TimeUnit.SECONDS).build();
    final String ns1 = "prop/usc/crash-broker";
    final int totalMessages = 100;
    final int totalDataLedgers = 5;
    final int entriesPerLedger = totalMessages / totalDataLedgers;
    admin.namespaces().createNamespace(ns1);
    final String topic1 = "persistent://" + ns1 + "/my-topic";
    // Create subscription
    Consumer<byte[]> consumer = client.newConsumer().topic(topic1).subscriptionName("my-subscriber-name").receiverQueueSize(5).subscribe();
    PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopic(topic1).get();
    ManagedLedgerImpl ml = (ManagedLedgerImpl) topic.getManagedLedger();
    ManagedCursorImpl cursor = (ManagedCursorImpl) ml.getCursors().iterator().next();
    Field configField = ManagedCursorImpl.class.getDeclaredField("config");
    configField.setAccessible(true);
    // Configure small ledgers so that publishing rolls over into multiple data ledgers
    ManagedLedgerConfig config = (ManagedLedgerConfig) configField.get(cursor);
    config.setMaxEntriesPerLedger(entriesPerLedger);
    config.setMinimumRolloverTime(1, TimeUnit.MILLISECONDS);
    // Get the BookKeeper client used by the managed ledger
    Field bookKeeperField = ManagedLedgerImpl.class.getDeclaredField("bookKeeper");
    bookKeeperField.setAccessible(true);
    BookKeeper bookKeeper = (BookKeeper) bookKeeperField.get(ml);
    // (1) publish messages in 5 data-ledgers each with 20 entries under managed-ledger
    Producer<byte[]> producer = client.newProducer().topic(topic1).create();
    for (int i = 0; i < totalMessages; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }
    // validate: consumer is able to consume msg and close consumer after reading 1 entry
    Assert.assertNotNull(consumer.receive(1, TimeUnit.SECONDS));
    consumer.close();
    NavigableMap<Long, LedgerInfo> ledgerInfo = ml.getLedgersInfo();
    Assert.assertEquals(ledgerInfo.size(), totalDataLedgers);
    Entry<Long, LedgerInfo> lastLedger = ledgerInfo.lastEntry();
    // (2) delete first 4 data-ledgers
    ledgerInfo.entrySet().forEach(entry -> {
        if (!entry.equals(lastLedger)) {
            try {
                bookKeeper.deleteLedger(entry.getKey());
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });
    // clean managed-ledger and recreate topic to clean any data from the cache
    producer.close();
    pulsar.getBrokerService().removeTopicFromCache(topic1);
    ManagedLedgerFactoryImpl factory = (ManagedLedgerFactoryImpl) pulsar.getManagedLedgerFactory();
    Field field = ManagedLedgerFactoryImpl.class.getDeclaredField("ledgers");
    field.setAccessible(true);
    @SuppressWarnings("unchecked") ConcurrentHashMap<String, CompletableFuture<ManagedLedgerImpl>> ledgers = (ConcurrentHashMap<String, CompletableFuture<ManagedLedgerImpl>>) field.get(factory);
    ledgers.clear();
    // (3) consumer will fail to consume any message as first data-ledger is non-recoverable
    Message<byte[]> msg = null;
    // start consuming message
    consumer = client.newConsumer().topic(topic1).subscriptionName("my-subscriber-name").subscribe();
    msg = consumer.receive(1, TimeUnit.SECONDS);
    Assert.assertNull(msg);
    consumer.close();
    // (4) enable dynamic config to skip non-recoverable data-ledgers
    admin.brokers().updateDynamicConfiguration("autoSkipNonRecoverableData", "true");
    retryStrategically((test) -> config.isAutoSkipNonRecoverableData(), 5, 100);
    // (5) consumer will be able to consume 20 messages from last non-deleted ledger
    consumer = client.newConsumer().topic(topic1).subscriptionName("my-subscriber-name").subscribe();
    for (int i = 0; i < entriesPerLedger; i++) {
        msg = consumer.receive(5, TimeUnit.SECONDS);
        Assert.assertNotNull(msg);
        consumer.acknowledge(msg);
    }
    producer.close();
    consumer.close();
    client.close();
}
Also used : ManagedCursorImpl(org.apache.bookkeeper.mledger.impl.ManagedCursorImpl) BookKeeper(org.apache.bookkeeper.client.BookKeeper) LedgerInfo(org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo.LedgerInfo) ManagedLedgerImpl(org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl) Field(java.lang.reflect.Field) CompletableFuture(java.util.concurrent.CompletableFuture) PersistentTopic(org.apache.pulsar.broker.service.persistent.PersistentTopic) PulsarClient(org.apache.pulsar.client.api.PulsarClient) ManagedLedgerConfig(org.apache.bookkeeper.mledger.ManagedLedgerConfig) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ManagedLedgerFactoryImpl(org.apache.bookkeeper.mledger.impl.ManagedLedgerFactoryImpl) Test(org.testng.annotations.Test)
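
The arithmetic behind the scenario: totalMessages = 100 with maxEntriesPerLedger = entriesPerLedger = 20 yields totalDataLedgers = 5 data ledgers of 20 entries each. Deleting every ledger except lastEntry() removes the first four, so after the ledger cache is cleared the re-created consumer reads nothing because the first data ledger is non-recoverable (step 3 of the scenario). Once the autoSkipNonRecoverableData dynamic configuration takes effect, the cursor skips the missing ledgers and the final loop reads exactly entriesPerLedger = 20 messages from the one surviving ledger (step 5). Since dynamic configuration keys map to broker ServiceConfiguration fields, the same name can also be set statically in the broker configuration.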

Aggregations

PersistentTopic (org.apache.pulsar.broker.service.persistent.PersistentTopic): 126 usages
Test (org.testng.annotations.Test): 100 usages
PersistentSubscription (org.apache.pulsar.broker.service.persistent.PersistentSubscription): 34 usages
Field (java.lang.reflect.Field): 23 usages
CompletableFuture (java.util.concurrent.CompletableFuture): 22 usages
ManagedLedgerException (org.apache.bookkeeper.mledger.ManagedLedgerException): 22 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 20 usages
PersistentDispatcherSingleActiveConsumer (org.apache.pulsar.broker.service.persistent.PersistentDispatcherSingleActiveConsumer): 20 usages
PulsarClientException (org.apache.pulsar.client.api.PulsarClientException): 19 usages
ManagedLedgerImpl (org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl): 17 usages
ExecutionException (java.util.concurrent.ExecutionException): 16 usages
NotAllowedException (org.apache.pulsar.broker.service.BrokerServiceException.NotAllowedException): 13 usages
KeeperException (org.apache.zookeeper.KeeperException): 13 usages
IOException (java.io.IOException): 12 usages
PulsarAdminException (org.apache.pulsar.client.admin.PulsarAdminException): 12 usages
PersistentReplicator (org.apache.pulsar.broker.service.persistent.PersistentReplicator): 11 usages
TopicName (org.apache.pulsar.common.naming.TopicName): 11 usages
DispatchRate (org.apache.pulsar.common.policies.data.DispatchRate): 11 usages
ByteBuf (io.netty.buffer.ByteBuf): 10 usages
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 10 usages