Example usage of org.apache.pulsar.common.policies.data.PartitionedTopicStats in the incubator-pulsar project by Apache.
Taken from the class AdminApiTest2, method testIncrementPartitionsOfTopic.
/**
 * <pre>
 * It verifies increasing partitions for partitioned-topic.
 * 1. create a partitioned-topic
 * 2. update partitions with larger number of partitions
 * 3. verify: getPartitionedMetadata and check number of partitions
 * 4. verify: this api creates existing subscription to new partitioned-topics
 *    so, message will not be lost in new partitions
 *    a. start producer and produce messages
 *    b. check existing subscription for new topics and it should have backlog msgs
 * </pre>
 *
 * @throws Exception if any admin or client operation fails
 */
@Test
public void testIncrementPartitionsOfTopic() throws Exception {
    final String topicName = "increment-partitionedTopic";
    final String subName1 = topicName + "-my-sub-1";
    final String subName2 = topicName + "-my-sub-2";
    final int startPartitions = 4;
    final int newPartitions = 8;
    final String partitionedTopicName = "persistent://prop-xyz/use/ns1/" + topicName;

    URL pulsarUrl = new URL("http://127.0.0.1" + ":" + BROKER_WEBSERVICE_PORT);

    admin.persistentTopics().createPartitionedTopic(partitionedTopicName, startPartitions);
    // validate partition topic is created
    assertEquals(admin.persistentTopics().getPartitionedTopicMetadata(partitionedTopicName).partitions,
            startPartitions);

    // create consumer and subscriptions : check subscriptions
    PulsarClient client = PulsarClient.builder().serviceUrl(pulsarUrl.toString()).build();
    Consumer<byte[]> consumer1 = client.newConsumer().topic(partitionedTopicName).subscriptionName(subName1)
            .subscriptionType(SubscriptionType.Shared).subscribe();
    assertEquals(admin.persistentTopics().getSubscriptions(partitionedTopicName), Lists.newArrayList(subName1));
    Consumer<byte[]> consumer2 = client.newConsumer().topic(partitionedTopicName).subscriptionName(subName2)
            .subscriptionType(SubscriptionType.Shared).subscribe();
    assertEquals(Sets.newHashSet(admin.persistentTopics().getSubscriptions(partitionedTopicName)),
            Sets.newHashSet(subName1, subName2));

    // (1) update partitions
    admin.persistentTopics().updatePartitionedTopic(partitionedTopicName, newPartitions);
    // invalidate global-cache to make sure that mock-zk-cache reads fresh data
    pulsar.getGlobalZkCache().invalidateAll();
    // verify new partitions have been created
    assertEquals(admin.persistentTopics().getPartitionedTopicMetadata(partitionedTopicName).partitions,
            newPartitions);

    // (2) No Msg loss: verify new partitions have the same existing subscription names
    final String newPartitionTopicName = TopicName.get(partitionedTopicName).getPartition(startPartitions + 1)
            .toString();

    // (3) produce messages to all partitions including newly created partitions (RoundRobin)
    Producer<byte[]> producer = client.newProducer().topic(partitionedTopicName)
            .messageRoutingMode(MessageRoutingMode.RoundRobinPartition).create();
    final int totalMessages = newPartitions * 2;
    for (int i = 0; i < totalMessages; i++) {
        String message = "message-" + i;
        producer.send(message.getBytes());
    }

    // (4) verify existing subscription has not lost any message: create new consumer with sub-2: it will load all
    // newly created partition topics
    consumer2.close();
    consumer2 = client.newConsumer().topic(partitionedTopicName).subscriptionName(subName2)
            .subscriptionType(SubscriptionType.Shared).subscribe();
    // sometime: mockZk fails to refresh ml-cache: so, invalidate the cache to get fresh data
    pulsar.getLocalZkCacheService().managedLedgerListCache().clearTree();
    assertEquals(Sets.newHashSet(admin.persistentTopics().getSubscriptions(newPartitionTopicName)),
            Sets.newHashSet(subName1, subName2));
    assertEquals(Sets.newHashSet(admin.persistentTopics().getList("prop-xyz/use/ns1")).size(), newPartitions);

    // test cumulative stats for partitioned topic
    PartitionedTopicStats topicStats = admin.persistentTopics().getPartitionedStats(partitionedTopicName, false);
    assertEquals(topicStats.subscriptions.keySet(), Sets.newTreeSet(Lists.newArrayList(subName1, subName2)));
    assertEquals(topicStats.subscriptions.get(subName2).consumers.size(), 1);
    assertEquals(topicStats.subscriptions.get(subName2).msgBacklog, totalMessages);
    assertEquals(topicStats.publishers.size(), 1);
    // non-per-partition stats must leave the per-partition map empty
    assertEquals(topicStats.partitions, Maps.newHashMap());

    // (5) verify: each partition should have backlog
    topicStats = admin.persistentTopics().getPartitionedStats(partitionedTopicName, true);
    assertEquals(topicStats.metadata.partitions, newPartitions);
    Set<String> partitionSet = Sets.newHashSet();
    for (int i = 0; i < newPartitions; i++) {
        partitionSet.add(partitionedTopicName + "-partition-" + i);
    }
    assertEquals(topicStats.partitions.keySet(), partitionSet);
    for (int i = 0; i < newPartitions; i++) {
        PersistentTopicStats partitionStats = topicStats.partitions
                .get(TopicName.get(partitionedTopicName).getPartition(i).toString());
        assertEquals(partitionStats.publishers.size(), 1);
        assertEquals(partitionStats.subscriptions.get(subName2).consumers.size(), 1);
        // round-robin routing puts ~2 msgs on each partition; allow a tolerance of 1
        assertEquals(partitionStats.subscriptions.get(subName2).msgBacklog, 2, 1);
    }

    // clean up: close each resource exactly once (original closed consumer2 twice
    // and leaked the PulsarClient)
    producer.close();
    consumer1.close();
    consumer2.close();
    client.close();
}
Aggregations