Use of org.apache.kafka.streams.internals.StreamsConfigUtils.ProcessingMode.EXACTLY_ONCE_ALPHA in project kafka by apache.
The listing below shows the method commitOffsetsOrTransaction from the class TaskExecutor; a configuration sketch that selects this processing mode follows the listing.
/**
* Caution: do not invoke this directly if it's possible a rebalance is occurring, as the commit will fail. If
* this is a possibility, prefer {@link #commitTasksAndMaybeUpdateCommittableOffsets} instead.
*
* @throws TaskMigratedException if committing offsets failed due to CommitFailedException (non-EOS)
* @throws TimeoutException if committing offsets failed due to TimeoutException (non-EOS)
* @throws TaskCorruptedException if committing offsets failed due to TimeoutException (EOS)
*/
void commitOffsetsOrTransaction(final Map<Task, Map<TopicPartition, OffsetAndMetadata>> offsetsPerTask) {
    // avoid logging actual Task objects
    log.debug("Committing task offsets {}",
        offsetsPerTask.entrySet().stream()
            .collect(Collectors.toMap(t -> t.getKey().id(), Entry::getValue)));

    final Set<TaskId> corruptedTasks = new HashSet<>();

    if (!offsetsPerTask.isEmpty()) {
        if (processingMode == EXACTLY_ONCE_ALPHA) {
            // eos-alpha: every task owns a dedicated producer, so each task's
            // offsets are committed in that task's own transaction
            for (final Map.Entry<Task, Map<TopicPartition, OffsetAndMetadata>> taskToCommit : offsetsPerTask.entrySet()) {
                final Task task = taskToCommit.getKey();
                try {
                    tasks.streamsProducerForTask(task.id())
                        .commitTransaction(taskToCommit.getValue(), tasks.mainConsumer().groupMetadata());
                    updateTaskCommitMetadata(taskToCommit.getValue());
                } catch (final TimeoutException timeoutException) {
                    // a timed-out transactional commit leaves the task's state in doubt,
                    // so the task is flagged as corrupted rather than retried
                    log.error(String.format("Committing task %s failed.", task.id()), timeoutException);
                    corruptedTasks.add(task.id());
                }
            }
        } else {
            // merge all tasks' offsets, since they are committed in a single call below
            final Map<TopicPartition, OffsetAndMetadata> allOffsets = offsetsPerTask.values().stream()
                .flatMap(e -> e.entrySet().stream())
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

            if (processingMode == EXACTLY_ONCE_V2) {
                // eos-v2: a single thread-level producer commits all tasks' offsets in one transaction
                try {
                    tasks.threadProducer().commitTransaction(allOffsets, tasks.mainConsumer().groupMetadata());
                    updateTaskCommitMetadata(allOffsets);
                } catch (final TimeoutException timeoutException) {
                    log.error(String.format("Committing task(s) %s failed.",
                        offsetsPerTask.keySet().stream()
                            .map(t -> t.id().toString())
                            .collect(Collectors.joining(", "))),
                        timeoutException);
                    // the shared transaction covers every task, so all of them are suspect
                    offsetsPerTask.keySet().forEach(task -> corruptedTasks.add(task.id()));
                }
            } else {
                // at-least-once: plain synchronous offset commit through the main consumer
                try {
                    tasks.mainConsumer().commitSync(allOffsets);
                    updateTaskCommitMetadata(allOffsets);
                } catch (final CommitFailedException error) {
                    throw new TaskMigratedException("Consumer committing offsets failed, " +
                        "indicating the corresponding thread is no longer part of the group", error);
                } catch (final TimeoutException timeoutException) {
                    log.error(String.format("Committing task(s) %s failed.",
                        offsetsPerTask.keySet().stream()
                            .map(t -> t.id().toString())
                            .collect(Collectors.joining(", "))),
                        timeoutException);
                    throw timeoutException;
                } catch (final KafkaException error) {
                    throw new StreamsException("Error encountered committing offsets via consumer", error);
                }
            }
        }

        if (!corruptedTasks.isEmpty()) {
            throw new TaskCorruptedException(corruptedTasks);
        }
    }
}
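For context, the processingMode branched on above is derived from the application's processing.guarantee configuration. Below is a minimal sketch, assuming a Kafka Streams version in which the deprecated "exactly_once" guarantee (eos-alpha) is still supported, of a configuration that would resolve to ProcessingMode.EXACTLY_ONCE_ALPHA; the application id and bootstrap server are placeholder values.

import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;

public class EosAlphaConfigSketch {
    public static Properties eosAlphaConfig() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "eos-alpha-demo");    // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        // StreamsConfig.EXACTLY_ONCE ("exactly_once") is the deprecated eos-alpha
        // guarantee; StreamsConfig.EXACTLY_ONCE_V2 ("exactly_once_v2") maps to
        // ProcessingMode.EXACTLY_ONCE_V2 and is the recommended replacement.
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
        return props;
    }
}

With eos-alpha each task gets its own transactional producer (the streamsProducerForTask branch above), whereas eos-v2 shares one producer per thread (the threadProducer branch), which is why a commit timeout corrupts a single task in the first case but every committing task in the second.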
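The exception contract in the javadoc also shapes the call site. Here is a hedged sketch of how a caller in the same package might react; the method name commitAndHandleFailures and the surrounding wiring are hypothetical stand-ins, not Kafka's actual call site.

// Hypothetical call site illustrating the exception contract documented above.
void commitAndHandleFailures(final TaskExecutor taskExecutor,
                             final Map<Task, Map<TopicPartition, OffsetAndMetadata>> offsetsPerTask) {
    try {
        taskExecutor.commitOffsetsOrTransaction(offsetsPerTask);
    } catch (final TaskCorruptedException corrupted) {
        // EOS path: a commit timeout left the transaction in doubt, so the affected
        // tasks must be closed dirty and their state restored from the changelogs.
        log.warn("Tasks {} flagged corrupted during commit", corrupted.corruptedTasks());
        throw corrupted;
    } catch (final TaskMigratedException migrated) {
        // Non-EOS path: the thread is no longer part of the consumer group; the
        // usual reaction is to rejoin and rebalance rather than retry the commit.
        log.warn("Commit failed because the thread lost its group membership", migrated);
        throw migrated;
    }
}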