Use of org.apache.pulsar.client.kafka.compat.PulsarKafkaSchema in project pulsar-adapters by apache.

From class KafkaApiTest, method testProducerConsumerMixedSchemaWithPulsarKafkaClient:
@Test
public void testProducerConsumerMixedSchemaWithPulsarKafkaClient() throws Exception {
    String topic = "testProducerConsumerMixedSchemaWithPulsarKafkaClient";

    // Explicit Pulsar schemas: a Kafka String serializer/deserializer pair wrapped as a
    // Pulsar schema for the key, and a Pulsar JSON schema for the value POJO.
    Schema<String> keySchema = new PulsarKafkaSchema<>(new StringSerializer(), new StringDeserializer());
    JSONSchema<Foo> valueSchema = JSONSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).build());

    Properties props = new Properties();
    props.put("bootstrap.servers", getPlainTextServiceUrl());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    // These (de)serializer properties are superseded by the explicit schemas
    // passed to the client constructors below.
    props.put("key.serializer", IntegerSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @Cleanup
    Consumer<String, Foo> consumer = new KafkaConsumer<>(props, keySchema, valueSchema);
    consumer.subscribe(Arrays.asList(topic));

    Producer<String, Foo> producer = new KafkaProducer<>(props, keySchema, valueSchema);

    for (int i = 0; i < 10; i++) {
        Foo foo = new Foo();
        foo.setField1("field1");
        foo.setField2("field2");
        foo.setField3(i);
        producer.send(new ProducerRecord<>(topic, "hello" + i, foo));
    }

    producer.flush();
    producer.close();

    // Poll until all 10 messages arrive, verifying that key and value survive the round trip.
    AtomicInteger received = new AtomicInteger();
    while (received.get() < 10) {
        ConsumerRecords<String, Foo> records = consumer.poll(100);
        if (!records.isEmpty()) {
            records.forEach(record -> {
                String key = record.key();
                Assert.assertEquals(key, "hello" + received.get());
                Foo value = record.value();
                Assert.assertEquals(value.getField1(), "field1");
                Assert.assertEquals(value.getField2(), "field2");
                Assert.assertEquals(value.getField3(), received.get());
                received.incrementAndGet();
            });
            consumer.commitSync();
        }
    }
}
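
The test relies on a Foo POJO that this page does not show. Below is a minimal sketch consistent with the accessors and assertions above; field names and types are inferred, and the real class in pulsar-adapters may differ (for example, by using Lombok):

// Hypothetical sketch of the Foo POJO assumed by the test above. JSONSchema
// only needs a default constructor plus bean-style accessors.
public class Foo {
    private String field1;
    private String field2;
    private int field3;

    public String getField1() { return field1; }
    public void setField1(String field1) { this.field1 = field1; }
    public String getField2() { return field2; }
    public void setField2(String field2) { this.field2 = field2; }
    public int getField3() { return field3; }
    public void setField3(int field3) { this.field3 = field3; }
}

Note that the key/value (de)serializer class-name properties in the test are effectively ignored: when schemas are passed explicitly to the KafkaProducer and KafkaConsumer wrappers, those schemas take precedence for (de)serialization.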
Use of org.apache.pulsar.client.kafka.compat.PulsarKafkaSchema in project pulsar-adapters by apache.

From class PulsarKafkaConsumer, method poll:
@Override
public ConsumerRecords<K, V> poll(long timeoutMillis) {
    try {
        // Block up to the requested timeout for the first message.
        QueueItem item = receivedMessages.poll(timeoutMillis, TimeUnit.MILLISECONDS);
        if (item == null) {
            return (ConsumerRecords<K, V>) ConsumerRecords.EMPTY;
        }

        Map<TopicPartition, List<ConsumerRecord<K, V>>> records = new HashMap<>();

        int numberOfRecords = 0;
        while (item != null) {
            TopicName topicName = TopicName.get(item.consumer.getTopic());
            String topic = topicName.getPartitionedTopicName();
            int partition = topicName.isPartitioned() ? topicName.getPartitionIndex() : 0;
            Message<byte[]> msg = item.message;
            MessageIdImpl msgId = (MessageIdImpl) msg.getMessageId();
            // Map the Pulsar message id to a Kafka-style long offset.
            long offset = MessageIdUtils.getOffset(msgId);

            TopicPartition tp = new TopicPartition(topic, partition);
            if (lastReceivedOffset.get(tp) == null && !unpolledPartitions.contains(tp)) {
                log.info("When polling offsets, invalid offsets were detected. Resetting topic partition {}", tp);
                resetOffsets(tp);
            }

            K key = getKey(topic, msg);
            // PulsarKafkaSchema must know the topic name, since Kafka deserializers
            // take the topic as their first argument.
            if (valueSchema instanceof PulsarKafkaSchema) {
                ((PulsarKafkaSchema<V>) valueSchema).setTopic(topic);
            }
            V value = valueSchema.decode(msg.getData());

            ConsumerRecord<K, V> consumerRecord = new ConsumerRecord<>(topic, partition, offset, key, value);
            records.computeIfAbsent(tp, k -> new ArrayList<>()).add(consumerRecord);

            // Update the last offset seen by the application
            lastReceivedOffset.put(tp, offset);
            unpolledPartitions.remove(tp);

            if (++numberOfRecords >= maxRecordsInSinglePoll) {
                break;
            }

            // Check if another item is already available, without blocking.
            item = receivedMessages.poll(0, TimeUnit.MILLISECONDS);
        }

        if (isAutoCommit && !records.isEmpty()) {
            // Commit the offsets of the previously dequeued messages
            commitAsync();
        }

        return new ConsumerRecords<>(records);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}
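
The setTopic call in poll() exists because PulsarKafkaSchema adapts a Kafka Serializer/Deserializer pair to Pulsar's Schema interface, and Kafka (de)serializers take the topic name as their first argument. A simplified sketch of that bridge follows; the real class carries additional argument checks and implements more of the Schema contract, and the getSchemaInfo body here is an assumption:

import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.pulsar.client.api.Schema;
import org.apache.pulsar.common.schema.SchemaInfo;

// Simplified sketch of the PulsarKafkaSchema bridge, not the exact upstream source.
public class PulsarKafkaSchema<T> implements Schema<T> {
    private final Serializer<T> kafkaSerializer;     // used on the produce path
    private final Deserializer<T> kafkaDeserializer; // used on the consume path
    private String topic;                            // Kafka (de)serializers need the topic name

    public PulsarKafkaSchema(Serializer<T> serializer, Deserializer<T> deserializer) {
        this.kafkaSerializer = serializer;
        this.kafkaDeserializer = deserializer;
    }

    public void setTopic(String topic) {
        this.topic = topic;
    }

    @Override
    public byte[] encode(T message) {
        // Delegate to the Kafka serializer, passing the topic set by the caller.
        return kafkaSerializer.serialize(topic, message);
    }

    @Override
    public T decode(byte[] data) {
        // Delegate to the Kafka deserializer; this is what poll() invokes above.
        return kafkaDeserializer.deserialize(topic, data);
    }

    @Override
    public SchemaInfo getSchemaInfo() {
        // Assumption: the payload is treated as raw bytes on the wire.
        return Schema.BYTES.getSchemaInfo();
    }
}

This explains why poll() sets the topic on the value schema before calling decode: without it, the wrapped Kafka deserializer would receive a null topic.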