Use of org.apache.kafka.common.errors.ProducerFencedException in project apache-kafka-on-k8s by banzaicloud.
The class RecordCollectorImpl, method send.
@Override
public <K, V> void send(final String topic,
                        final K key,
                        final V value,
                        final Integer partition,
                        final Long timestamp,
                        final Serializer<K> keySerializer,
                        final Serializer<V> valueSerializer) {
    checkForException();
    final byte[] keyBytes = keySerializer.serialize(topic, key);
    final byte[] valBytes = valueSerializer.serialize(topic, value);
    final ProducerRecord<byte[], byte[]> serializedRecord = new ProducerRecord<>(topic, partition, timestamp, keyBytes, valBytes);
    try {
        producer.send(serializedRecord, new Callback() {
            @Override
            public void onCompletion(final RecordMetadata metadata, final Exception exception) {
                if (exception == null) {
                    if (sendException != null) {
                        return;
                    }
                    final TopicPartition tp = new TopicPartition(metadata.topic(), metadata.partition());
                    offsets.put(tp, metadata.offset());
                } else {
                    if (sendException == null) {
                        if (exception instanceof ProducerFencedException) {
                            log.warn(LOG_MESSAGE, key, value, timestamp, topic, exception.getMessage());
                            sendException = new ProducerFencedException(
                                String.format(EXCEPTION_MESSAGE, logPrefix, "producer got fenced", key, value, timestamp, topic, exception.getMessage()));
                        } else {
                            if (productionExceptionIsFatal(exception)) {
                                recordSendError(key, value, timestamp, topic, exception);
                            } else if (productionExceptionHandler.handle(serializedRecord, exception) == ProductionExceptionHandlerResponse.FAIL) {
                                recordSendError(key, value, timestamp, topic, exception);
                            } else {
                                log.debug(HANDLER_CONTINUED_MESSAGE, key, value, timestamp, topic, exception);
                            }
                        }
                    }
                }
            }
        });
    } catch (final TimeoutException e) {
        log.error("Timeout exception caught when sending record to topic {}. " +
            "This might happen if the producer cannot send data to the Kafka cluster and thus, " +
            "its internal buffer fills up. " +
            "You can increase producer parameter `max.block.ms` to increase this timeout.", topic);
        throw new StreamsException(String.format("%sFailed to send record to topic %s due to timeout.", logPrefix, topic));
    } catch (final Exception uncaughtException) {
        throw new StreamsException(
            String.format(EXCEPTION_MESSAGE, logPrefix, "an error caught", key, value, timestamp, topic, uncaughtException.getMessage()),
            uncaughtException);
    }
}
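For context, the productionExceptionHandler consulted above is the user-configurable ProductionExceptionHandler, registered in Kafka Streams via the "default.production.exception.handler" config. Below is a minimal sketch of a custom handler that skips records rejected as too large but fails on every other non-fatal send error; the class name and the choice of RecordTooLargeException are illustrative, not part of the snippet above.

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

// Hypothetical handler, registered via StreamsConfig:
// props.put("default.production.exception.handler", IgnoreRecordTooLargeHandler.class);
public class IgnoreRecordTooLargeHandler implements ProductionExceptionHandler {

    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        // CONTINUE makes RecordCollectorImpl log and drop the record (the log.debug branch above);
        // FAIL makes it call recordSendError and surface a StreamsException.
        return exception instanceof RecordTooLargeException
            ? ProductionExceptionHandlerResponse.CONTINUE
            : ProductionExceptionHandlerResponse.FAIL;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // no configuration needed for this sketch
    }
}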
Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.
The class ExactlyOnceMessageProcessor, method run.
@Override
public void run() {
    // Init transactions call should always happen first in order to clear zombie transactions from previous generation.
    producer.initTransactions();
    final AtomicLong messageRemaining = new AtomicLong(Long.MAX_VALUE);
    consumer.subscribe(Collections.singleton(inputTopic), new ConsumerRebalanceListener() {
        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            printWithTxnId("Revoked partition assignment to kick-off rebalancing: " + partitions);
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            printWithTxnId("Received partition assignment after rebalancing: " + partitions);
            messageRemaining.set(messagesRemaining(consumer));
        }
    });
    int messageProcessed = 0;
    while (messageRemaining.get() > 0) {
        try {
            ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofMillis(200));
            if (records.count() > 0) {
                // Begin a new transaction session.
                producer.beginTransaction();
                for (ConsumerRecord<Integer, String> record : records) {
                    // Process the record and send to downstream.
                    ProducerRecord<Integer, String> customizedRecord = transform(record);
                    producer.send(customizedRecord);
                }
                Map<TopicPartition, OffsetAndMetadata> offsets = consumerOffsets();
                // Checkpoint the progress by sending offsets to group coordinator broker.
                // Note that this API is only available for broker >= 2.5.
                producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
                // Finish the transaction. All sent records should be visible for consumption now.
                producer.commitTransaction();
                messageProcessed += records.count();
            }
        } catch (ProducerFencedException e) {
            throw new KafkaException(String.format("The transactional.id %s has been claimed by another process", transactionalId));
        } catch (FencedInstanceIdException e) {
            throw new KafkaException(String.format("The group.instance.id %s has been claimed by another process", groupInstanceId));
        } catch (KafkaException e) {
            // If we have not been fenced, try to abort the transaction and continue. This will raise immediately
            // if the producer has hit a fatal error.
            producer.abortTransaction();
            // The consumer fetch position needs to be restored to the committed offset
            // before the transaction started.
            resetToLastCommittedPositions(consumer);
        }
        messageRemaining.set(messagesRemaining(consumer));
        printWithTxnId("Message remaining: " + messageRemaining);
    }
    printWithTxnId("Finished processing " + messageProcessed + " records");
    latch.countDown();
}
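This loop only provides exactly-once semantics if the clients are configured accordingly: the producer needs a stable transactional.id (which is what causes a superseded zombie instance to receive ProducerFencedException), and the consumer must read only committed data and leave offset commits to producer.sendOffsetsToTransaction. The following is a minimal configuration sketch under those assumptions; the bootstrap address, ids, and helper class are placeholders, not taken from the example above.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;

// Hypothetical client configuration for the read-process-write loop above.
final class EosClientConfigs {

    static Properties producerConfig(final String transactionalId) {
        final Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // A stable transactional.id per logical instance; a second instance reusing the
        // same id fences this one, which then observes ProducerFencedException.
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionalId);
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
        return props;
    }

    static Properties consumerConfig(final String groupId) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        // Only read messages from committed transactions.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        // Offsets are committed through producer.sendOffsetsToTransaction, not by the consumer.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        return props;
    }
}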
Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.
The class StreamsProducer, method commitTransaction.
/**
 * @throws IllegalStateException if EOS is disabled
 * @throws TaskMigratedException
 */
protected void commitTransaction(final Map<TopicPartition, OffsetAndMetadata> offsets, final ConsumerGroupMetadata consumerGroupMetadata) {
    if (!eosEnabled()) {
        throw new IllegalStateException(formatException("Exactly-once is not enabled"));
    }
    maybeBeginTransaction();
    try {
        // EOS-v2 assumes brokers are on version 2.5+ and thus can understand the full set of consumer group metadata.
        // Thus if we are using EOS-v1 and can't make this assumption, we must downgrade the request to include only the group id metadata.
        final ConsumerGroupMetadata maybeDowngradedGroupMetadata = processingMode == EXACTLY_ONCE_V2 ? consumerGroupMetadata : new ConsumerGroupMetadata(consumerGroupMetadata.groupId());
        producer.sendOffsetsToTransaction(offsets, maybeDowngradedGroupMetadata);
        producer.commitTransaction();
        transactionInFlight = false;
    } catch (final ProducerFencedException | InvalidProducerEpochException | CommitFailedException error) {
        throw new TaskMigratedException(formatException("Producer got fenced trying to commit a transaction"), error);
    } catch (final TimeoutException timeoutException) {
        // re-throw to trigger `task.timeout.ms`
        throw timeoutException;
    } catch (final KafkaException error) {
        throw new StreamsException(formatException("Error encountered trying to commit a transaction"), error);
    }
}
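Here Streams translates ProducerFencedException into TaskMigratedException so that the task is reassigned rather than the thread killed. Outside of Streams, the same distinction applies to a plain transactional KafkaProducer: fencing (like the other fatal producer errors) means the only safe reaction is to stop using and close that producer instance, while other KafkaExceptions can be handled by aborting the transaction and retrying, as in the KafkaProducer documentation's transactional example. A rough sketch of that pattern follows; the helper class and variable names are placeholders.

import java.util.List;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.AuthorizationException;
import org.apache.kafka.common.errors.OutOfOrderSequenceException;
import org.apache.kafka.common.errors.ProducerFencedException;

final class TransactionalSendSketch {

    // Hypothetical helper: send one batch in a transaction, treating fencing as fatal.
    static void sendInTransaction(final Producer<String, String> producer,
                                  final List<ProducerRecord<String, String>> records) {
        try {
            producer.beginTransaction();
            for (final ProducerRecord<String, String> record : records) {
                producer.send(record);
            }
            producer.commitTransaction();
        } catch (final ProducerFencedException | OutOfOrderSequenceException | AuthorizationException fatal) {
            // Another producer with the same transactional.id took over (or we lack permission);
            // this instance cannot continue and must be closed.
            producer.close();
            throw fatal;
        } catch (final KafkaException recoverable) {
            // Any other error: abort the open transaction so the batch can be retried.
            producer.abortTransaction();
        }
    }
}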
Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.
The class StreamThreadTest, method shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerGotFencedInCommitTransactionWhenCommitting.
@Test
public void shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerGotFencedInCommitTransactionWhenCommitting() {
    // only have source but no sink so that we would not get fenced in producer.send
    internalTopologyBuilder.addSource(null, "source", null, null, null, topic1);
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(configProps(true)), true);
    final MockConsumer<byte[], byte[]> consumer = clientSupplier.consumer;
    consumer.updatePartitions(topic1, Collections.singletonList(new PartitionInfo(topic1, 1, null, null, null)));
    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign single partition
    assignedPartitions.add(t1p1);
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();
    assertThat(thread.activeTasks().size(), equalTo(1));
    final MockProducer<byte[], byte[]> producer = clientSupplier.producers.get(0);
    producer.commitTransactionException = new ProducerFencedException("Producer is fenced");
    mockTime.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG) + 1L);
    consumer.addRecord(new ConsumerRecord<>(topic1, 1, 1, new byte[0], new byte[0]));
    try {
        thread.runOnce();
        fail("Should have thrown TaskMigratedException");
    } catch (final KafkaException expected) {
        assertTrue(expected instanceof TaskMigratedException);
        assertTrue("StreamsThread removed the fenced zombie task already, should wait for rebalance to close all zombies together.",
            thread.activeTasks().stream().anyMatch(task -> task.id().equals(task1)));
    }
    assertThat(producer.commitCount(), equalTo(0L));
    assertTrue(clientSupplier.producers.get(0).transactionInFlight());
    assertFalse(clientSupplier.producers.get(0).transactionCommitted());
    assertFalse(clientSupplier.producers.get(0).closed());
    assertEquals(1, thread.activeTasks().size());
}
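The fencing in this test is injected through MockProducer's public commitTransactionException field, which the mock rethrows from commitTransaction() before marking the transaction committed. The sketch below shows the same injection technique in isolation, outside of StreamThreadTest; the test class, serializers, and the assumption that the transaction stays in flight after the throw follow the behavior the test above relies on, but are otherwise illustrative.

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.junit.Test;

// Hypothetical standalone test demonstrating MockProducer exception injection.
public class MockProducerFencingSketchTest {

    @Test
    public void commitShouldThrowInjectedFencedException() {
        final MockProducer<byte[], byte[]> producer =
            new MockProducer<>(true, new ByteArraySerializer(), new ByteArraySerializer());
        producer.initTransactions();
        producer.beginTransaction();
        // Inject the same exception StreamThreadTest uses; the mock rethrows it on commit.
        producer.commitTransactionException = new ProducerFencedException("Producer is fenced");
        try {
            producer.commitTransaction();
            fail("Should have thrown ProducerFencedException");
        } catch (final ProducerFencedException expected) {
            // expected: the injected exception surfaces from commitTransaction()
        }
        // The transaction is still open, mirroring the transactionInFlight() assertion above.
        assertTrue(producer.transactionInFlight());
    }
}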