Use of io.micronaut.configuration.kafka.KafkaAcknowledgement in the micronaut-kafka project by micronaut-projects.
From the class KafkaConsumerProcessor, the method processConsumerRecords:
/**
 * Dispatches every record of the polled batch to the annotated consumer method and applies
 * the configured per-record offset-commit strategy.
 *
 * @param consumerState   state holder carrying the Kafka {@code Consumer}, the consumer bean
 *                        and the configured {@code OffsetStrategy}
 * @param method          the {@code @Topic}-annotated method to invoke per record
 * @param boundArguments  pre-bound argument map, mutated here to inject the acknowledgement
 * @param trackPartitions whether to accumulate per-partition commit offsets for this batch
 * @param ackArg          optional method argument receiving a {@code KafkaAcknowledgement}
 * @param consumerRecords the batch returned by the last poll
 * @return {@code true} if the whole batch was processed; {@code false} if the error strategy
 *         asked to stop processing the remainder of the batch
 */
private boolean processConsumerRecords(final ConsumerState consumerState, final ExecutableMethod<?, ?> method, final Map<Argument<?>, Object> boundArguments, final boolean trackPartitions, final Optional<Argument<?>> ackArg, final ConsumerRecords<?, ?> consumerRecords) {
    final ExecutableBinder<ConsumerRecord<?, ?>> binder = new DefaultExecutableBinder<>(boundArguments);
    // Offsets are accumulated only when tracking is requested; otherwise this stays null.
    final Map<TopicPartition, OffsetAndMetadata> trackedOffsets = trackPartitions ? new HashMap<>() : null;
    for (final ConsumerRecord<?, ?> kafkaRecord : consumerRecords) {
        LOG.trace("Kafka consumer [{}] received record: {}", method, kafkaRecord);
        if (trackPartitions) {
            // Kafka commit semantics: commit the offset of the NEXT record to consume, hence +1.
            trackedOffsets.put(
                    new TopicPartition(kafkaRecord.topic(), kafkaRecord.partition()),
                    new OffsetAndMetadata(kafkaRecord.offset() + 1, null));
        }
        final Consumer<?, ?> consumer = consumerState.kafkaConsumer;
        // Inject a manual acknowledgement that synchronously commits the offsets tracked so far.
        ackArg.ifPresent(argument ->
                boundArguments.put(argument, (KafkaAcknowledgement) () -> consumer.commitSync(trackedOffsets)));
        try {
            final BoundExecutable bound = binder.bind(method, binderRegistry, kafkaRecord);
            final Object outcome = bound.invoke(consumerState.consumerBean);
            if (outcome != null) {
                final Flux<?> outcomeFlux;
                final boolean blocking;
                if (!Publishers.isConvertibleToPublisher(outcome)) {
                    // Plain (non-reactive) return value: wrap it and handle it on the calling thread.
                    outcomeFlux = Flux.just(outcome);
                    blocking = true;
                } else {
                    outcomeFlux = Flux.from(Publishers.convertPublisher(outcome, Publisher.class));
                    blocking = method.hasAnnotation(Blocking.class);
                }
                handleResultFlux(consumerState, method, kafkaRecord, outcomeFlux, blocking, consumerRecords);
            }
        } catch (Throwable e) {
            // The error strategy decides whether the rest of the batch should be abandoned.
            if (resolveWithErrorStrategy(consumerState, kafkaRecord, e)) {
                return false;
            }
        }
        // NOTE(review): with a per-record strategy but trackPartitions == false this would commit a
        // null offsets map — presumably callers always enable tracking for these strategies; confirm.
        switch (consumerState.offsetStrategy) {
            case SYNC_PER_RECORD:
                try {
                    consumer.commitSync(trackedOffsets);
                } catch (CommitFailedException e) {
                    handleException(consumerState, kafkaRecord, e);
                }
                break;
            case ASYNC_PER_RECORD:
                consumer.commitAsync(trackedOffsets, resolveCommitCallback(consumerState.consumerBean));
                break;
            default:
                // Batch-level or no-commit strategies are handled outside this method.
                break;
        }
    }
    return true;
}
Aggregations