Example use of io.micronaut.configuration.kafka.KafkaMessage in the micronaut-projects/micronaut-kafka project: the handleResultFlux method of the KafkaConsumerProcessor class. The method forwards the values emitted by a reactive listener method to the configured destination topics, optionally committing the consumed offsets within a Kafka producer transaction.
@SuppressWarnings({ "SubscriberImplementation", "unchecked" })
private void handleResultFlux(ConsumerState consumerState, ExecutableMethod<?, ?> method, ConsumerRecord<?, ?> consumerRecord,
                              Flux<?> resultFlowable, boolean isBlocking, ConsumerRecords<?, ?> consumerRecords) {
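    // Map every value emitted by the listener's reactive result to the RecordMetadata
    // of the corresponding forwarded send, so the pipeline only completes once each
    // forwarded record has been acknowledged by the broker.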
    Flux<RecordMetadata> recordMetadataProducer = resultFlowable.flatMap((Function<Object, Publisher<RecordMetadata>>) value -> {
        if (consumerState.sendToDestinationTopics != null) {
            Object key = consumerRecord.key();
            if (value != null) {
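                // Choose the producer: a transactional one when consumed offsets are
                // committed through the transaction, otherwise a regular producer keyed
                // by the configured client id (falling back to the consumer group id).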
                Producer kafkaProducer;
                if (consumerState.useSendOffsetsToTransaction) {
                    kafkaProducer = transactionalProducerRegistry.getTransactionalProducer(consumerState.producerClientId, consumerState.producerTransactionalId, Argument.of(byte[].class), Argument.of(Object.class));
                } else {
                    kafkaProducer = producerRegistry.getProducer(consumerState.producerClientId == null ? consumerState.groupId : consumerState.producerClientId, Argument.of((Class) (key != null ? key.getClass() : byte[].class)), Argument.of(value.getClass()));
                }
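                // Bridge the callback-based Producer#send API into a reactive stream that
                // emits one RecordMetadata per acknowledged record.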
                return Flux.create(emitter -> {
                    try {
                        if (consumerState.useSendOffsetsToTransaction) {
                            try {
                                LOG.trace("Beginning transaction for producer: {}", consumerState.producerTransactionalId);
                                kafkaProducer.beginTransaction();
                            } catch (ProducerFencedException e) {
                                handleProducerFencedException(kafkaProducer, e);
                            }
                        }
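                        // Forward the listener's return value to every configured destination topic.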
                        for (String destinationTopic : consumerState.sendToDestinationTopics) {
                            if (consumerState.isMessagesIterableReturnType) {
                                Iterable<KafkaMessage> messages = (Iterable<KafkaMessage>) value;
                                for (KafkaMessage message : messages) {
                                    ProducerRecord record = createFromMessage(destinationTopic, message);
                                    kafkaProducer.send(record, (metadata, exception) -> {
                                        if (exception != null) {
                                            emitter.error(exception);
                                        } else {
                                            emitter.next(metadata);
                                        }
                                    });
                                }
                            } else {
                                ProducerRecord record;
                                if (consumerState.isMessageReturnType) {
                                    record = createFromMessage(destinationTopic, (KafkaMessage) value);
                                } else {
                                    record = new ProducerRecord(destinationTopic, null, key, value, consumerRecord.headers());
                                }
                                LOG.trace("Sending record: {} for producer: {} {}", record, kafkaProducer, consumerState.producerTransactionalId);
                                kafkaProducer.send(record, (metadata, exception) -> {
                                    if (exception != null) {
                                        emitter.error(exception);
                                    } else {
                                        emitter.next(metadata);
                                    }
                                });
                            }
                        }
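                        // For exactly-once semantics, commit the consumed offsets (last offset
                        // per partition plus one) within the same transaction as the sends.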
                        if (consumerState.useSendOffsetsToTransaction) {
                            Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>();
                            for (TopicPartition partition : consumerRecords.partitions()) {
                                List<? extends ConsumerRecord<?, ?>> partitionedRecords = consumerRecords.records(partition);
                                long offset = partitionedRecords.get(partitionedRecords.size() - 1).offset();
                                offsetsToCommit.put(partition, new OffsetAndMetadata(offset + 1));
                            }
                            try {
                                LOG.trace("Sending offsets: {} to transaction for producer: {} and consumer group id: {}", offsetsToCommit, consumerState.producerTransactionalId, consumerState.groupId);
                                kafkaProducer.sendOffsetsToTransaction(offsetsToCommit, consumerState.groupId);
                                LOG.trace("Committing transaction for producer: {}", consumerState.producerTransactionalId);
                                kafkaProducer.commitTransaction();
                                LOG.trace("Committed transaction for producer: {}", consumerState.producerTransactionalId);
                            } catch (ProducerFencedException e) {
                                handleProducerFencedException(kafkaProducer, e);
                            }
                        }
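                        // All sends are dispatched; complete the stream. Any exception below
                        // aborts an in-flight transaction before the error is propagated.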
                        emitter.complete();
                    } catch (Exception e) {
                        if (consumerState.useSendOffsetsToTransaction) {
                            try {
                                LOG.trace("Aborting transaction for producer: {} because of error: {}", consumerState.producerTransactionalId, e.getMessage());
                                kafkaProducer.abortTransaction();
                            } catch (ProducerFencedException ex) {
                                handleProducerFencedException(kafkaProducer, ex);
                            }
                        }
                        emitter.error(e);
                    }
                });
            }
            return Flux.empty();
        }
        return Flux.empty();
    });
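    // Failures are routed to the listener's exception handler; when redelivery is
    // enabled, the original record is re-sent to its source topic and partition.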
    recordMetadataProducer = recordMetadataProducer.onErrorResume((Function<Throwable, Publisher<RecordMetadata>>) throwable -> {
        handleException(consumerState.consumerBean, new KafkaListenerException("Error occurred processing record [" + consumerRecord + "] with Kafka reactive consumer [" + method + "]: " + throwable.getMessage(), throwable, consumerState.consumerBean, consumerState.kafkaConsumer, consumerRecord));
        if (consumerState.redelivery) {
            LOG.debug("Attempting redelivery of record [{}] following error", consumerRecord);
            Object key = consumerRecord.key();
            Object value = consumerRecord.value();
            if (key != null && value != null) {
                Producer kafkaProducer = producerRegistry.getProducer(consumerState.producerClientId == null ? consumerState.groupId : consumerState.producerClientId, Argument.of(key.getClass()), Argument.of(value.getClass()));
                ProducerRecord record = new ProducerRecord(consumerRecord.topic(), consumerRecord.partition(), key, value, consumerRecord.headers());
                return producerSend(consumerState, kafkaProducer, record).doOnError(ex -> {
                    handleException(consumerState.consumerBean, new KafkaListenerException("Redelivery failed for record [" + consumerRecord + "] with Kafka reactive consumer [" + method + "]: " + ex.getMessage(), ex, consumerState.consumerBean, consumerState.kafkaConsumer, consumerRecord));
                });
            }
        }
        return Flux.empty();
    });
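    // Blocking listeners wait on the consumer thread for every send to complete;
    // otherwise the pipeline is subscribed asynchronously and results are only traced.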
    if (isBlocking) {
        List<RecordMetadata> listRecords = recordMetadataProducer.collectList().block();
        LOG.trace("Method [{}] produced record metadata: {}", method, listRecords);
    } else {
        recordMetadataProducer.subscribe(recordMetadata -> LOG.trace("Method [{}] produced record metadata: {}", logMethod(method), recordMetadata));
    }
}
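For context, a minimal sketch of a listener that would exercise this path is shown below. The class name, topics, and group id are illustrative assumptions rather than part of the micronaut-kafka source: a reactive method annotated with @SendTo returns a KafkaMessage, and handleResultFlux converts it into a ProducerRecord (via createFromMessage) and forwards it to the destination topic.

import io.micronaut.configuration.kafka.KafkaMessage;
import io.micronaut.configuration.kafka.annotation.KafkaKey;
import io.micronaut.configuration.kafka.annotation.KafkaListener;
import io.micronaut.configuration.kafka.annotation.Topic;
import io.micronaut.messaging.annotation.SendTo;
import reactor.core.publisher.Mono;

// Hypothetical listener; names, topics, and group id are illustrative only.
@KafkaListener(groupId = "orders-group")
public class OrderForwardingListener {

    @Topic("orders")
    @SendTo("audited-orders") // handleResultFlux sends the returned message here
    public Mono<KafkaMessage<String, String>> audit(@KafkaKey String key, String order) {
        // Returning a KafkaMessage lets the forwarded record carry an explicit key.
        return Mono.just(KafkaMessage.Builder.<String, String>withBody(order).key(key).build());
    }
}

If such a listener is additionally configured for transactional sends (a producerTransactionalId together with the SEND_TO_TRANSACTION offset strategy), the transactional branch above is taken instead and the consumed offsets are committed atomically with the forwarded records.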