
Example 1 with KafkaAcknowledgement

Use of io.micronaut.configuration.kafka.KafkaAcknowledgement in the project micronaut-kafka by micronaut-projects.

From the class KafkaConsumerProcessor, method processConsumerRecords:

private boolean processConsumerRecords(
        final ConsumerState consumerState,
        final ExecutableMethod<?, ?> method,
        final Map<Argument<?>, Object> boundArguments,
        final boolean trackPartitions,
        final Optional<Argument<?>> ackArg,
        final ConsumerRecords<?, ?> consumerRecords) {
    final ExecutableBinder<ConsumerRecord<?, ?>> executableBinder = new DefaultExecutableBinder<>(boundArguments);
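    // Only allocate the offset map when per-record offsets actually need to be tracked for commits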
    final Map<TopicPartition, OffsetAndMetadata> currentOffsets = trackPartitions ? new HashMap<>() : null;
    for (final ConsumerRecord<?, ?> consumerRecord : consumerRecords) {
        LOG.trace("Kafka consumer [{}] received record: {}", method, consumerRecord);
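        // Remember the offset of the next record to consume for this partition; that is the value committed later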
        if (trackPartitions) {
            final TopicPartition topicPartition = new TopicPartition(consumerRecord.topic(), consumerRecord.partition());
            final OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(consumerRecord.offset() + 1, null);
            currentOffsets.put(topicPartition, offsetAndMetadata);
        }
        Consumer<?, ?> kafkaConsumer = consumerState.kafkaConsumer;
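        // Bind a KafkaAcknowledgement whose ack() commits the tracked offsets synchronously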
        ackArg.ifPresent(argument -> boundArguments.put(argument, (KafkaAcknowledgement) () -> kafkaConsumer.commitSync(currentOffsets)));
        try {
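            // Bind the consumer record to the listener method's arguments and invoke the listener bean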
            final BoundExecutable boundExecutable = executableBinder.bind(method, binderRegistry, consumerRecord);
            final Object result = boundExecutable.invoke(consumerState.consumerBean);
            if (result != null) {
                final Flux<?> resultFlowable;
                final boolean isBlocking;
                if (Publishers.isConvertibleToPublisher(result)) {
                    resultFlowable = Flux.from(Publishers.convertPublisher(result, Publisher.class));
                    isBlocking = method.hasAnnotation(Blocking.class);
                } else {
                    resultFlowable = Flux.just(result);
                    isBlocking = true;
                }
                handleResultFlux(consumerState, method, consumerRecord, resultFlowable, isBlocking, consumerRecords);
            }
        } catch (Throwable e) {
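            // Let the error strategy decide; a true result aborts processing of the remaining records in this batch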
            if (resolveWithErrorStrategy(consumerState, consumerRecord, e)) {
                return false;
            }
        }
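        // Per-record offset strategies commit immediately after each record is handled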
        if (consumerState.offsetStrategy == OffsetStrategy.SYNC_PER_RECORD) {
            try {
                kafkaConsumer.commitSync(currentOffsets);
            } catch (CommitFailedException e) {
                handleException(consumerState, consumerRecord, e);
            }
        } else if (consumerState.offsetStrategy == OffsetStrategy.ASYNC_PER_RECORD) {
            kafkaConsumer.commitAsync(currentOffsets, resolveCommitCallback(consumerState.consumerBean));
        }
    }
    return true;
}
Also used:

- io.micronaut.core.bind.DefaultExecutableBinder
- io.micronaut.core.annotation.Blocking
- org.apache.kafka.clients.consumer.ConsumerRecord
- org.apache.kafka.common.TopicPartition
- io.micronaut.core.bind.BoundExecutable
- org.apache.kafka.clients.consumer.OffsetAndMetadata
- io.micronaut.configuration.kafka.KafkaAcknowledgement
- org.apache.kafka.clients.consumer.CommitFailedException
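
For context, the acknowledgement lambda bound above is what backs an Acknowledgement parameter on a @KafkaListener method. The following is a minimal sketch, not code from the project: the listener class, topic name, and payload type are hypothetical, and it assumes the documented io.micronaut.messaging.Acknowledgement binding with OffsetStrategy.DISABLED so that nothing is committed until the listener calls ack().

import io.micronaut.configuration.kafka.annotation.KafkaListener;
import io.micronaut.configuration.kafka.annotation.OffsetReset;
import io.micronaut.configuration.kafka.annotation.OffsetStrategy;
import io.micronaut.configuration.kafka.annotation.Topic;
import io.micronaut.messaging.Acknowledgement;

// Hypothetical listener: automatic commits are disabled, so the offset tracked in
// processConsumerRecords is only committed when ack() invokes the bound KafkaAcknowledgement.
@KafkaListener(offsetReset = OffsetReset.EARLIEST, offsetStrategy = OffsetStrategy.DISABLED)
public class ProductListener {

    @Topic("awesome-products")
    public void receive(String product, Acknowledgement acknowledgement) {
        // ... handle the record ...
        acknowledgement.ack(); // commits consumerRecord.offset() + 1 for this partition via commitSync
    }
}

Because the strategy is DISABLED here, the framework itself does not commit on its own; the commitSync inside the bound lambda should be the only commit path for these records.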
