Search in sources:

Example 1 with KopTopic

use of io.streamnative.pulsar.handlers.kop.utils.KopTopic in project starlight-for-kafka by datastax.

The following example is taken from the class GroupMetadataManagerTest, method testCommitOffset.

@Test
public void testCommitOffset() throws Exception {
    // Subscribe to the group metadata topic from the earliest position so the
    // committed-offset message written below can be observed.
    @Cleanup Consumer<ByteBuffer> metadataConsumer = pulsarClient.newConsumer(Schema.BYTEBUFFER)
            .topic(groupMetadataManager.getTopicPartitionName())
            .subscriptionName("test-sub")
            .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
            .subscribe();
    final String memberId = "fakeMemberId";
    final TopicPartition committedPartition = new TopicPartition("foo", 0);
    groupMetadataManager.addPartitionOwnership(groupPartitionId);
    final long committedOffset = 37L;
    GroupMetadata groupMetadata = new GroupMetadata(groupId, Empty);
    groupMetadataManager.addGroup(groupMetadata);
    // Commit a single offset for the partition and wait for the store to complete.
    Map<TopicPartition, OffsetAndMetadata> offsetsToCommit =
            ImmutableMap.<TopicPartition, OffsetAndMetadata>builder()
                    .put(committedPartition, OffsetAndMetadata.apply(committedOffset))
                    .build();
    Map<TopicPartition, Errors> storeResult =
            groupMetadataManager.storeOffsets(groupMetadata, memberId, offsetsToCommit).get();
    assertTrue(groupMetadata.hasOffsets());
    assertFalse(storeResult.isEmpty());
    Errors storeError = storeResult.get(committedPartition);
    assertEquals(Errors.NONE, storeError);
    assertTrue(groupMetadata.hasOffsets());
    // The committed offset must be visible through the manager's offsets cache.
    Map<TopicPartition, PartitionData> cachedOffsets =
            groupMetadataManager.getOffsets(groupId, Optional.of(Lists.newArrayList(committedPartition)));
    PartitionData cachedPartitionData = cachedOffsets.get(committedPartition);
    assertNotNull(cachedPartitionData);
    assertEquals(Errors.NONE, cachedPartitionData.error);
    assertEquals(committedOffset, cachedPartitionData.offset);
    // Skip placeholder messages (empty payloads) until the real offset record arrives.
    Message<ByteBuffer> offsetMessage = metadataConsumer.receive();
    while (offsetMessage.getValue().array().length == 0) {
        offsetMessage = metadataConsumer.receive();
    }
    assertTrue(offsetMessage.getEventTime() > 0L);
    assertTrue(offsetMessage.hasKey());
    byte[] keyBytes = offsetMessage.getKeyBytes();
    BaseKey messageKey = GroupMetadataConstants.readMessageKey(ByteBuffer.wrap(keyBytes));
    assertTrue(messageKey instanceof OffsetKey);
    ByteBuffer payload = offsetMessage.getValue();
    MemoryRecords storedRecords = MemoryRecords.readableRecords(payload);
    AtomicBoolean seenRecord = new AtomicBoolean(false);
    storedRecords.batches().forEach(batch -> {
        for (Record record : batch) {
            // Exactly one offset record is expected in the message.
            assertFalse(seenRecord.get());
            BaseKey recordKey = GroupMetadataConstants.readMessageKey(record.key());
            assertTrue(recordKey instanceof OffsetKey);
            GroupTopicPartition groupTopicPartition = ((OffsetKey) recordKey).key();
            assertEquals(groupId, groupTopicPartition.group());
            // The stored partition uses the full Pulsar topic name, not the short Kafka name.
            assertEquals(
                    new TopicPartition(
                            new KopTopic(committedPartition.topic(), NAMESPACE_PREFIX).getFullName(),
                            committedPartition.partition()),
                    groupTopicPartition.topicPartition());
            OffsetAndMetadata storedOffset = GroupMetadataConstants.readOffsetMessageValue(record.value());
            assertEquals(committedOffset, storedOffset.offset());
            seenRecord.set(true);
        }
    });
    assertTrue(seenRecord.get());
}
Also used : BaseKey(io.streamnative.pulsar.handlers.kop.coordinator.group.GroupMetadataManager.BaseKey) Cleanup(lombok.Cleanup) ByteBuffer(java.nio.ByteBuffer) GroupTopicPartition(io.streamnative.pulsar.handlers.kop.coordinator.group.GroupMetadataManager.GroupTopicPartition) Errors(org.apache.kafka.common.protocol.Errors) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) PartitionData(org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData) TopicPartition(org.apache.kafka.common.TopicPartition) GroupTopicPartition(io.streamnative.pulsar.handlers.kop.coordinator.group.GroupMetadataManager.GroupTopicPartition) OffsetAndMetadata(io.streamnative.pulsar.handlers.kop.offset.OffsetAndMetadata) OffsetKey(io.streamnative.pulsar.handlers.kop.coordinator.group.GroupMetadataManager.OffsetKey) Record(org.apache.kafka.common.record.Record) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) KopTopic(io.streamnative.pulsar.handlers.kop.utils.KopTopic) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.testng.annotations.Test)

Example 2 with KopTopic

use of io.streamnative.pulsar.handlers.kop.utils.KopTopic in project starlight-for-kafka by datastax.

The following example is taken from the class KafkaTopicConsumerManagerTest, method testCursorCountForMultiGroups.

@Test(timeOut = 20000)
public void testCursorCountForMultiGroups() throws Exception {
    final String topic = "test-cursor-count-for-multi-groups";
    final String partitionName = new KopTopic(topic, "public/default").getPartitionName(0);
    final int numMessages = 100;
    final int numConsumers = 5;
    // Produce all messages up front so each consumer group reads the same data set.
    final KafkaProducer<String, String> producer = new KafkaProducer<>(newKafkaProducerProperties());
    for (int i = 0; i < numMessages; i++) {
        producer.send(new ProducerRecord<>(topic, "msg-" + i)).get();
    }
    producer.close();
    // Create one consumer per distinct group, all subscribed to the same topic.
    final List<KafkaConsumer<String, String>> consumers = IntStream.range(0, numConsumers).mapToObj(i -> {
        final Properties props = newKafkaConsumerProperties();
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group-" + i);
        final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singleton(topic));
        return consumer;
    }).collect(Collectors.toList());
    final CountDownLatch latch = new CountDownLatch(numConsumers);
    final ExecutorService executor = Executors.newFixedThreadPool(numConsumers);
    try {
        for (int i = 0; i < numConsumers; i++) {
            final int index = i;
            final KafkaConsumer<String, String> consumer = consumers.get(i);
            executor.execute(() -> {
                int numReceived = 0;
                while (numReceived < numMessages) {
                    final ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                    records.forEach(record -> {
                        if (log.isDebugEnabled()) {
                            log.debug("Group {} received message {}", index, record.value());
                        }
                    });
                    numReceived += records.count();
                }
                latch.countDown();
            });
        }
        // FIX: the result of await() was previously ignored, so on timeout the test
        // continued into the assertions (and closed consumers that worker threads were
        // still polling — KafkaConsumer is not thread-safe). Fail fast instead.
        assertTrue(latch.await(10, TimeUnit.SECONDS),
                "All consumer groups should drain the topic within 10 seconds");
    } finally {
        // FIX: the executor was previously leaked; stop its threads so they do not
        // outlive the test method.
        executor.shutdownNow();
    }
    final List<KafkaTopicConsumerManager> tcmList = kafkaRequestHandler.getKafkaTopicManagerSharedState().getKafkaTopicConsumerManagerCache().getTopicConsumerManagers(partitionName);
    assertEquals(tcmList.size(), numConsumers);
    // All TCMs share the same topic, so each internal PersistentTopic of TCM has `numConsumers` cursors.
    for (int i = 0; i < numConsumers; i++) {
        assertEquals(tcmList.get(i).getNumCreatedCursors(), numConsumers);
    }
    // Since consumer close will make connection disconnected and all TCMs will be cleared, we should call it after
    // the test is verified.
    consumers.forEach(KafkaConsumer::close);
    Awaitility.await().atMost(Duration.ofSeconds(3)).until(() -> tcmList.get(0).getNumCreatedCursors() == 0);
    for (int i = 0; i < numConsumers; i++) {
        assertEquals(tcmList.get(i).getNumCreatedCursors(), 0);
    }
}
Also used : KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) Cleanup(lombok.Cleanup) Test(org.testng.annotations.Test) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) AfterMethod(org.testng.annotations.AfterMethod) ManagedCursor(org.apache.bookkeeper.mledger.ManagedCursor) Pair(org.apache.commons.lang3.tuple.Pair) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MetadataUtils(io.streamnative.pulsar.handlers.kop.utils.MetadataUtils) Duration(java.time.Duration) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Mockito.doReturn(org.mockito.Mockito.doReturn) Assert.assertFalse(org.testng.Assert.assertFalse) Assert.assertSame(org.testng.Assert.assertSame) TopicPartition(org.apache.kafka.common.TopicPartition) Assert.assertNotEquals(org.testng.Assert.assertNotEquals) BeforeMethod(org.testng.annotations.BeforeMethod) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) Assert.assertNotNull(org.testng.Assert.assertNotNull) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) Slf4j(lombok.extern.slf4j.Slf4j) PersistentTopic(org.apache.pulsar.broker.service.persistent.PersistentTopic) Awaitility(org.awaitility.Awaitility) Mockito.mock(org.mockito.Mockito.mock) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) IntStream(java.util.stream.IntStream) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) TopicName(org.apache.pulsar.common.naming.TopicName) Assert.assertNull(org.testng.Assert.assertNull) TopicStats(org.apache.pulsar.common.policies.data.TopicStats) Assert.assertEquals(org.testng.Assert.assertEquals) CompletableFuture(java.util.concurrent.CompletableFuture) Function(java.util.function.Function) ArrayList(java.util.ArrayList) ChannelHandlerContext(io.netty.channel.ChannelHandlerContext) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) 
IntegerSerializer(org.apache.kafka.common.serialization.IntegerSerializer) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) ExecutorService(java.util.concurrent.ExecutorService) KopTopic(io.streamnative.pulsar.handlers.kop.utils.KopTopic) Properties(java.util.Properties) PulsarAdminException(org.apache.pulsar.client.admin.PulsarAdminException) Assert.fail(org.testng.Assert.fail) Channel(io.netty.channel.Channel) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) AtomicLong(java.util.concurrent.atomic.AtomicLong) Assert.assertTrue(org.testng.Assert.assertTrue) Collections(java.util.Collections) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) Properties(java.util.Properties) CountDownLatch(java.util.concurrent.CountDownLatch) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) ExecutorService(java.util.concurrent.ExecutorService) KopTopic(io.streamnative.pulsar.handlers.kop.utils.KopTopic) Test(org.testng.annotations.Test)

Example 3 with KopTopic

use of io.streamnative.pulsar.handlers.kop.utils.KopTopic in project starlight-for-kafka by datastax.

The following example is taken from the class KafkaTopicConsumerManagerTest, method testOnlyOneCursorCreated.

@Test(timeOut = 20000)
public void testOnlyOneCursorCreated() throws Exception {
    final String topic = "testOnlyOneCursorCreated";
    final String partitionName = new KopTopic(topic, "public/default").getPartitionName(0);
    admin.topics().createPartitionedTopic(topic, 1);
    final int messageCount = 100;
    // Produce a batch of messages into the single-partition topic.
    @Cleanup final KafkaProducer<String, String> producer = new KafkaProducer<>(newKafkaProducerProperties());
    for (int seq = 0; seq < messageCount; seq++) {
        producer.send(new ProducerRecord<>(topic, "msg-" + seq)).get();
    }
    // Drain the topic with a single consumer; this issues many FETCH requests.
    @Cleanup final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(newKafkaConsumerProperties());
    consumer.subscribe(Collections.singleton(topic));
    int totalReceived = 0;
    do {
        totalReceived += consumer.poll(Duration.ofSeconds(1)).count();
    } while (totalReceived < messageCount);
    final List<KafkaTopicConsumerManager> managers = kafkaRequestHandler
            .getKafkaTopicManagerSharedState()
            .getKafkaTopicConsumerManagerCache()
            .getTopicConsumerManagers(partitionName);
    assertFalse(managers.isEmpty());
    // Only 1 cursor should be created for a consumer even if there were a lot of FETCH requests.
    // This check is to ensure that KafkaTopicConsumerManager#add is called in FETCH request handler.
    assertEquals(managers.get(0).getCreatedCursors().size(), 1);
    assertEquals(managers.get(0).getNumCreatedCursors(), 1);
}
Also used : KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) KopTopic(io.streamnative.pulsar.handlers.kop.utils.KopTopic) Cleanup(lombok.Cleanup) Test(org.testng.annotations.Test)

Example 4 with KopTopic

use of io.streamnative.pulsar.handlers.kop.utils.KopTopic in project starlight-for-kafka by datastax.

The following example is taken from the class KafkaRequestHandlerWithAuthorizationTest, method testHandleTxnOffsetCommitPartAuthorizationFailed.

@Test(timeOut = 20000)
public void testHandleTxnOffsetCommitPartAuthorizationFailed() throws ExecutionException, InterruptedException {
    final String group = "test-failed-groupId";
    final TopicPartition partition1 = new TopicPartition("test1", 1);
    final TopicPartition partition2 = new TopicPartition("test2", 1);
    final TopicPartition partition3 = new TopicPartition("test3", 1);
    // Commit the same offset for three different topics within one transaction request.
    Map<TopicPartition, TxnOffsetCommitRequest.CommittedOffset> offsetData = Maps.newHashMap();
    for (TopicPartition partition : new TopicPartition[]{partition1, partition2, partition3}) {
        offsetData.put(partition, KafkaCommonTestUtils.newTxnOffsetCommitRequestCommittedOffset(1L, ""));
    }
    TxnOffsetCommitRequest.Builder builder =
            new TxnOffsetCommitRequest.Builder("1", group, 1, (short) 1, offsetData);
    KafkaCommandDecoder.KafkaHeaderAndRequest headerAndRequest = buildRequest(builder);
    // Grant READ permission on `test1` only; the other two topics remain unauthorized.
    KafkaRequestHandler spyHandler = spy(handler);
    doReturn(CompletableFuture.completedFuture(true))
            .when(spyHandler)
            .authorize(eq(AclOperation.READ),
                    eq(Resource.of(ResourceType.TOPIC,
                            new KopTopic(partition1.topic(), handler.currentNamespacePrefix()).getFullName())));
    // Dispatch the request and wait for the response.
    CompletableFuture<AbstractResponse> responseFuture = new CompletableFuture<>();
    spyHandler.handleTxnOffsetCommit(headerAndRequest, responseFuture);
    AbstractResponse response = responseFuture.get();
    assertTrue(response instanceof TxnOffsetCommitResponse);
    TxnOffsetCommitResponse commitResponse = (TxnOffsetCommitResponse) response;
    // Two distinct error codes are expected: NONE and TOPIC_AUTHORIZATION_FAILED.
    assertEquals(commitResponse.errorCounts().size(), 2);
    assertEquals(commitResponse.errors().get(partition1), Errors.NONE);
    assertEquals(commitResponse.errors().get(partition2), Errors.TOPIC_AUTHORIZATION_FAILED);
    assertEquals(commitResponse.errors().get(partition3), Errors.TOPIC_AUTHORIZATION_FAILED);
}
Also used : AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) TxnOffsetCommitResponse(org.apache.kafka.common.requests.TxnOffsetCommitResponse) TxnOffsetCommitRequest(org.apache.kafka.common.requests.TxnOffsetCommitRequest) CompletableFuture(java.util.concurrent.CompletableFuture) TopicPartition(org.apache.kafka.common.TopicPartition) KopTopic(io.streamnative.pulsar.handlers.kop.utils.KopTopic) Test(org.testng.annotations.Test)

Example 5 with KopTopic

use of io.streamnative.pulsar.handlers.kop.utils.KopTopic in project starlight-for-kafka by datastax.

The following example is taken from the class KafkaRequestHandlerWithAuthorizationTest, method testHandleOffsetFetchRequestAuthorizationFailed.

@Test(timeOut = 20000)
public void testHandleOffsetFetchRequestAuthorizationFailed() throws PulsarAdminException, ExecutionException, InterruptedException {
    KafkaRequestHandler spyHandler = spy(handler);
    final String topicName = "persistent://" + TENANT + "/" + NAMESPACE + "/"
            + "testHandleOffsetFetchRequestAuthorizationFailed";
    final String groupId = "DemoKafkaOnPulsarConsumer";
    // Create the partitioned topic that the OFFSET_FETCH request will target.
    admin.topics().createPartitionedTopic(topicName, 1);
    final TopicPartition requestedPartition = new TopicPartition(
            new KopTopic(topicName, handler.currentNamespacePrefix()).getFullName(), 0);
    OffsetFetchRequest.Builder builder =
            new OffsetFetchRequest.Builder(groupId, Collections.singletonList(requestedPartition));
    KafkaCommandDecoder.KafkaHeaderAndRequest request = buildRequest(builder);
    // Dispatch the request and wait for the response.
    CompletableFuture<AbstractResponse> responseFuture = new CompletableFuture<>();
    spyHandler.handleOffsetFetchRequest(request, responseFuture);
    AbstractResponse response = responseFuture.get();
    assertTrue(response instanceof OffsetFetchResponse);
    OffsetFetchResponse offsetFetchResponse = (OffsetFetchResponse) response;
    assertEquals(offsetFetchResponse.responseData().size(), 1);
    assertEquals(offsetFetchResponse.error(), Errors.NONE);
    // No authorization was granted, so every partition must report the auth failure.
    offsetFetchResponse.responseData().forEach((partition, partitionData) ->
            assertEquals(partitionData.error, Errors.TOPIC_AUTHORIZATION_FAILED));
}
Also used : OffsetFetchResponse(org.apache.kafka.common.requests.OffsetFetchResponse) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) OffsetFetchRequest(org.apache.kafka.common.requests.OffsetFetchRequest) CompletableFuture(java.util.concurrent.CompletableFuture) TopicPartition(org.apache.kafka.common.TopicPartition) KopTopic(io.streamnative.pulsar.handlers.kop.utils.KopTopic) Test(org.testng.annotations.Test)

Aggregations

KopTopic (io.streamnative.pulsar.handlers.kop.utils.KopTopic)45 CompletableFuture (java.util.concurrent.CompletableFuture)37 TopicPartition (org.apache.kafka.common.TopicPartition)37 ArrayList (java.util.ArrayList)29 List (java.util.List)29 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)29 Errors (org.apache.kafka.common.protocol.Errors)29 KoPTopicException (io.streamnative.pulsar.handlers.kop.exceptions.KoPTopicException)27 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)27 Collections (java.util.Collections)25 Map (java.util.Map)25 Optional (java.util.Optional)25 Collectors (java.util.stream.Collectors)25 Slf4j (lombok.extern.slf4j.Slf4j)25 AbstractResponse (org.apache.kafka.common.requests.AbstractResponse)25 PulsarAdminException (org.apache.pulsar.client.admin.PulsarAdminException)25 ByteBuffer (java.nio.ByteBuffer)23 Collection (java.util.Collection)23 Set (java.util.Set)23 Node (org.apache.kafka.common.Node)23