Use of io.streamnative.pulsar.handlers.kop.utils.KopTopic in the starlight-for-kafka project by DataStax.
The class GroupMetadataManagerTest, method testCommitOffset.
// Verifies that storeOffsets() both updates the in-memory group metadata cache and
// persists a matching offset-commit record to the group metadata topic.
@Test
public void testCommitOffset() throws Exception {
// Read the group metadata topic from the earliest position so the commit record
// written below can be observed by this consumer.
@Cleanup Consumer<ByteBuffer> consumer = pulsarClient.newConsumer(Schema.BYTEBUFFER).topic(groupMetadataManager.getTopicPartitionName()).subscriptionName("test-sub").subscriptionInitialPosition(SubscriptionInitialPosition.Earliest).subscribe();
String memberId = "fakeMemberId";
TopicPartition topicPartition = new TopicPartition("foo", 0);
groupMetadataManager.addPartitionOwnership(groupPartitionId);
long offset = 37L;
// Register an empty group to commit the offset against.
GroupMetadata group = new GroupMetadata(groupId, Empty);
groupMetadataManager.addGroup(group);
Map<TopicPartition, OffsetAndMetadata> offsets = ImmutableMap.<TopicPartition, OffsetAndMetadata>builder().put(topicPartition, OffsetAndMetadata.apply(offset)).build();
// Store the offset and block until the commit future completes.
Map<TopicPartition, Errors> commitErrors = groupMetadataManager.storeOffsets(group, memberId, offsets).get();
assertTrue(group.hasOffsets());
// storeOffsets() reports a per-partition result; a successful commit is Errors.NONE.
assertFalse(commitErrors.isEmpty());
Errors maybeError = commitErrors.get(topicPartition);
assertEquals(Errors.NONE, maybeError);
assertTrue(group.hasOffsets());
// The committed offset must also be visible through the manager's offset cache.
Map<TopicPartition, PartitionData> cachedOffsets = groupMetadataManager.getOffsets(groupId, Optional.of(Lists.newArrayList(topicPartition)));
PartitionData maybePartitionResponse = cachedOffsets.get(topicPartition);
assertNotNull(maybePartitionResponse);
assertEquals(Errors.NONE, maybePartitionResponse.error);
assertEquals(offset, maybePartitionResponse.offset);
// Now verify the record that was persisted to the metadata topic itself.
Message<ByteBuffer> message = consumer.receive();
while (message.getValue().array().length == 0) {
// bypass above place holder message.
message = consumer.receive();
}
assertTrue(message.getEventTime() > 0L);
assertTrue(message.hasKey());
byte[] key = message.getKeyBytes();
// The message key identifies which group/topic-partition the record belongs to.
BaseKey groupKey = GroupMetadataConstants.readMessageKey(ByteBuffer.wrap(key));
assertTrue(groupKey instanceof OffsetKey);
ByteBuffer value = message.getValue();
// The payload is a Kafka MemoryRecords buffer; decode it and check the single
// offset-commit record it should contain.
MemoryRecords memRecords = MemoryRecords.readableRecords(value);
AtomicBoolean verified = new AtomicBoolean(false);
memRecords.batches().forEach(batch -> {
for (Record record : batch) {
// Exactly one record is expected; fail if a second one shows up.
assertFalse(verified.get());
BaseKey bk = GroupMetadataConstants.readMessageKey(record.key());
assertTrue(bk instanceof OffsetKey);
OffsetKey ok = (OffsetKey) bk;
GroupTopicPartition gtp = ok.key();
assertEquals(groupId, gtp.group());
// The stored partition uses the full Pulsar topic name, hence the KopTopic mapping.
assertEquals(new TopicPartition(new KopTopic(topicPartition.topic(), NAMESPACE_PREFIX).getFullName(), topicPartition.partition()), gtp.topicPartition());
OffsetAndMetadata gm = GroupMetadataConstants.readOffsetMessageValue(record.value());
assertEquals(offset, gm.offset());
verified.set(true);
}
});
assertTrue(verified.get());
}
Use of io.streamnative.pulsar.handlers.kop.utils.KopTopic in the starlight-for-kafka project by DataStax.
The class KafkaTopicConsumerManagerTest, method testCursorCountForMultiGroups.
// Verifies that when N consumers in N distinct groups read the same topic, each
// KafkaTopicConsumerManager for that topic partition ends up with N cursors, and
// that all cursors are cleared once the consumers disconnect.
@Test(timeOut = 20000)
public void testCursorCountForMultiGroups() throws Exception {
    final String topic = "test-cursor-count-for-multi-groups";
    final String partitionName = new KopTopic(topic, "public/default").getPartitionName(0);
    final int numMessages = 100;
    final int numConsumers = 5;

    // Produce synchronously so every message is available before the consumers start.
    final KafkaProducer<String, String> producer = new KafkaProducer<>(newKafkaProducerProperties());
    for (int i = 0; i < numMessages; i++) {
        producer.send(new ProducerRecord<>(topic, "msg-" + i)).get();
    }
    producer.close();

    // One consumer per distinct group, all subscribed to the same topic.
    final List<KafkaConsumer<String, String>> consumers = IntStream.range(0, numConsumers).mapToObj(i -> {
        final Properties props = newKafkaConsumerProperties();
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group-" + i);
        final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singleton(topic));
        return consumer;
    }).collect(Collectors.toList());

    final CountDownLatch latch = new CountDownLatch(numConsumers);
    final ExecutorService executor = Executors.newFixedThreadPool(numConsumers);
    try {
        for (int i = 0; i < numConsumers; i++) {
            final int index = i;
            final KafkaConsumer<String, String> consumer = consumers.get(i);
            executor.execute(() -> {
                int numReceived = 0;
                while (numReceived < numMessages) {
                    final ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                    records.forEach(record -> {
                        if (log.isDebugEnabled()) {
                            log.debug("Group {} received message {}", index, record.value());
                        }
                    });
                    numReceived += records.count();
                }
                latch.countDown();
            });
        }
        // FIX: the original ignored await()'s boolean result, so a timeout silently fell
        // through into the assertions below; fail fast with a clear message instead.
        assertTrue(latch.await(10, TimeUnit.SECONDS), "consumers did not receive all messages in time");
    } finally {
        // FIX: the original never shut the pool down, leaking its worker threads.
        executor.shutdownNow();
    }

    final List<KafkaTopicConsumerManager> tcmList = kafkaRequestHandler.getKafkaTopicManagerSharedState()
            .getKafkaTopicConsumerManagerCache().getTopicConsumerManagers(partitionName);
    assertEquals(tcmList.size(), numConsumers);
    // All TCMs share the same topic, so each internal PersistentTopic of TCM has `numConsumers` cursors.
    for (int i = 0; i < numConsumers; i++) {
        assertEquals(tcmList.get(i).getNumCreatedCursors(), numConsumers);
    }
    // Since consumer close will make connection disconnected and all TCMs will be cleared, we should call it after
    // the test is verified.
    consumers.forEach(KafkaConsumer::close);
    Awaitility.await().atMost(Duration.ofSeconds(3)).until(() -> tcmList.get(0).getNumCreatedCursors() == 0);
    for (int i = 0; i < numConsumers; i++) {
        assertEquals(tcmList.get(i).getNumCreatedCursors(), 0);
    }
}
Use of io.streamnative.pulsar.handlers.kop.utils.KopTopic in the starlight-for-kafka project by DataStax.
The class KafkaTopicConsumerManagerTest, method testOnlyOneCursorCreated.
// Ensures that a single consumer produces exactly one cursor on the topic's
// KafkaTopicConsumerManager, regardless of how many FETCH requests it issues.
@Test(timeOut = 20000)
public void testOnlyOneCursorCreated() throws Exception {
    // Topic under test and the full partition name of its only partition.
    final String topicName = "testOnlyOneCursorCreated";
    final String partitionZero = new KopTopic(topicName, "public/default").getPartitionName(0);
    admin.topics().createPartitionedTopic(topicName, 1);

    // Produce synchronously so every message is on the topic before consuming.
    final int totalMessages = 100;
    @Cleanup final KafkaProducer<String, String> producer = new KafkaProducer<>(newKafkaProducerProperties());
    for (int i = 0; i < totalMessages; i++) {
        producer.send(new ProducerRecord<>(topicName, "msg-" + i)).get();
    }

    // Poll repeatedly until the consumer has seen every produced message.
    @Cleanup final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(newKafkaConsumerProperties());
    consumer.subscribe(Collections.singleton(topicName));
    int received = 0;
    while (received < totalMessages) {
        received += consumer.poll(Duration.ofSeconds(1)).count();
    }

    final List<KafkaTopicConsumerManager> managers = kafkaRequestHandler.getKafkaTopicManagerSharedState()
            .getKafkaTopicConsumerManagerCache().getTopicConsumerManagers(partitionZero);
    assertFalse(managers.isEmpty());
    // Only 1 cursor should be created for a consumer even if there were a lot of FETCH requests.
    // This check is to ensure that KafkaTopicConsumerManager#add is called in FETCH request handler.
    assertEquals(managers.get(0).getCreatedCursors().size(), 1);
    assertEquals(managers.get(0).getNumCreatedCursors(), 1);
}
Use of io.streamnative.pulsar.handlers.kop.utils.KopTopic in the starlight-for-kafka project by DataStax.
The class KafkaRequestHandlerWithAuthorizationTest, method testHandleTxnOffsetCommitPartAuthorizationFailed.
// Commits transactional offsets for three topics where only `test1` is authorized,
// and expects the other two partitions to fail with TOPIC_AUTHORIZATION_FAILED.
@Test(timeOut = 20000)
public void testHandleTxnOffsetCommitPartAuthorizationFailed() throws ExecutionException, InterruptedException {
    String group = "test-failed-groupId";
    TopicPartition topicPartition1 = new TopicPartition("test1", 1);
    TopicPartition topicPartition2 = new TopicPartition("test2", 1);
    TopicPartition topicPartition3 = new TopicPartition("test3", 1);

    // Commit offset 1 for each of the three partitions in a single transactional request.
    Map<TopicPartition, TxnOffsetCommitRequest.CommittedOffset> committedOffsets = Maps.newHashMap();
    for (TopicPartition tp : new TopicPartition[]{topicPartition1, topicPartition2, topicPartition3}) {
        committedOffsets.put(tp, KafkaCommonTestUtils.newTxnOffsetCommitRequestCommittedOffset(1L, ""));
    }
    TxnOffsetCommitRequest.Builder requestBuilder =
            new TxnOffsetCommitRequest.Builder("1", group, 1, (short) 1, committedOffsets);
    KafkaCommandDecoder.KafkaHeaderAndRequest headerAndRequest = buildRequest(requestBuilder);

    // Stub authorization so that only topic `test1` passes the READ check.
    KafkaRequestHandler spyHandler = spy(handler);
    doReturn(CompletableFuture.completedFuture(true)).when(spyHandler).authorize(
            eq(AclOperation.READ),
            eq(Resource.of(ResourceType.TOPIC,
                    new KopTopic(topicPartition1.topic(), handler.currentNamespacePrefix()).getFullName())));

    // Dispatch the request and wait for the asynchronous response.
    CompletableFuture<AbstractResponse> responseFuture = new CompletableFuture<>();
    spyHandler.handleTxnOffsetCommit(headerAndRequest, responseFuture);
    AbstractResponse abstractResponse = responseFuture.get();
    assertTrue(abstractResponse instanceof TxnOffsetCommitResponse);
    TxnOffsetCommitResponse commitResponse = (TxnOffsetCommitResponse) abstractResponse;

    // Only the two unauthorized partitions contribute to the error counts.
    assertEquals(commitResponse.errorCounts().size(), 2);
    assertEquals(commitResponse.errors().get(topicPartition1), Errors.NONE);
    assertEquals(commitResponse.errors().get(topicPartition2), Errors.TOPIC_AUTHORIZATION_FAILED);
    assertEquals(commitResponse.errors().get(topicPartition3), Errors.TOPIC_AUTHORIZATION_FAILED);
}
Use of io.streamnative.pulsar.handlers.kop.utils.KopTopic in the starlight-for-kafka project by DataStax.
The class KafkaRequestHandlerWithAuthorizationTest, method testHandleOffsetFetchRequestAuthorizationFailed.
// Fetches committed offsets without granting the topic any permission, and expects
// every partition entry to report TOPIC_AUTHORIZATION_FAILED while the top-level
// response error stays NONE.
@Test(timeOut = 20000)
public void testHandleOffsetFetchRequestAuthorizationFailed() throws PulsarAdminException, ExecutionException, InterruptedException {
    KafkaRequestHandler stubbedHandler = spy(handler);
    String topicName = "persistent://" + TENANT + "/" + NAMESPACE + "/" + "testHandleOffsetFetchRequestAuthorizationFailed";
    String groupId = "DemoKafkaOnPulsarConsumer";

    // create partitioned topic.
    admin.topics().createPartitionedTopic(topicName, 1);

    // Build an OFFSET_FETCH request for partition 0 using the full Pulsar topic name.
    TopicPartition partition = new TopicPartition(
            new KopTopic(topicName, handler.currentNamespacePrefix()).getFullName(), 0);
    OffsetFetchRequest.Builder requestBuilder =
            new OffsetFetchRequest.Builder(groupId, Collections.singletonList(partition));
    KafkaCommandDecoder.KafkaHeaderAndRequest fetchRequest = buildRequest(requestBuilder);

    // Dispatch the request and wait for the asynchronous response.
    CompletableFuture<AbstractResponse> responseFuture = new CompletableFuture<>();
    stubbedHandler.handleOffsetFetchRequest(fetchRequest, responseFuture);
    AbstractResponse abstractResponse = responseFuture.get();
    assertTrue(abstractResponse instanceof OffsetFetchResponse);
    OffsetFetchResponse fetchResponse = (OffsetFetchResponse) abstractResponse;

    // The group-level error is NONE; the authorization failure surfaces per partition.
    assertEquals(fetchResponse.responseData().size(), 1);
    assertEquals(fetchResponse.error(), Errors.NONE);
    fetchResponse.responseData().forEach((topicPartition, partitionData) -> {
        assertEquals(partitionData.error, Errors.TOPIC_AUTHORIZATION_FAILED);
    });
}
Aggregations