Use of org.apache.kafka.common.errors.InvalidProducerEpochException in project kafka by apache.
Class RecordCollectorImpl, method recordSendError.
private void recordSendError(final String topic,
                             final Exception exception,
                             final ProducerRecord<byte[], byte[]> serializedRecord) {
    String errorMessage = String.format(SEND_EXCEPTION_MESSAGE, topic, taskId, exception.toString());

    if (isFatalException(exception)) {
        errorMessage += "\nWritten offsets would not be recorded and no more records would be sent since this is a fatal error.";
        sendException.set(new StreamsException(errorMessage, exception));
    } else if (exception instanceof ProducerFencedException ||
               exception instanceof InvalidProducerEpochException ||
               exception instanceof OutOfOrderSequenceException) {
        // Fencing-related errors: another instance took over, so the task may be migrated out.
        errorMessage += "\nWritten offsets would not be recorded and no more records would be sent since the producer is fenced, " +
            "indicating the task may be migrated out";
        sendException.set(new TaskMigratedException(errorMessage, exception));
    } else {
        if (exception instanceof RetriableException) {
            errorMessage += "\nThe broker is either slow or in bad state (like not having enough replicas) in responding the request, " +
                "or the connection to broker was interrupted sending the request or receiving the response. " +
                "\nConsider overwriting `max.block.ms` and /or " +
                "`delivery.timeout.ms` to a larger value to wait longer for such scenarios and avoid timeout errors";
            sendException.set(new TaskCorruptedException(Collections.singleton(taskId)));
        } else {
            // Non-fatal, non-retriable: let the user-configured handler decide.
            if (productionExceptionHandler.handle(serializedRecord, exception) == ProductionExceptionHandlerResponse.FAIL) {
                errorMessage += "\nException handler choose to FAIL the processing, no more records would be sent.";
                sendException.set(new StreamsException(errorMessage, exception));
            } else {
                errorMessage += "\nException handler choose to CONTINUE processing in spite of this error but written offsets would not be recorded.";
                droppedRecordsSensor.record();
            }
        }
    }

    log.error(errorMessage, exception);
}
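The final branch above delegates to the application-supplied ProductionExceptionHandler, which decides per record whether a non-fatal, non-retriable send error should stop processing or drop the record. A minimal sketch of such a handler (the class name and the choice to skip RecordTooLargeException are illustrative assumptions, not part of the excerpt above):

import java.util.Map;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

// Hypothetical handler: drop oversized records, fail on anything else.
public class DropOversizedRecordsHandler implements ProductionExceptionHandler {

    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        // CONTINUE makes recordSendError() only bump droppedRecordsSensor;
        // FAIL surfaces a StreamsException and halts further sends.
        return exception instanceof RecordTooLargeException
            ? ProductionExceptionHandlerResponse.CONTINUE
            : ProductionExceptionHandlerResponse.FAIL;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // no configuration needed for this sketch
    }
}

Such a handler would be registered via the default.production.exception.handler config (StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG).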
Use of org.apache.kafka.common.errors.InvalidProducerEpochException in project kafka by apache.
Class StreamsProducer, method maybeBeginTransaction.
private void maybeBeginTransaction() {
    if (eosEnabled() && !transactionInFlight) {
        try {
            producer.beginTransaction();
            transactionInFlight = true;
        } catch (final ProducerFencedException | InvalidProducerEpochException error) {
            // The producer epoch advanced on the broker: another instance owns this
            // transactional id, so treat the task as migrated.
            throw new TaskMigratedException(
                formatException("Producer got fenced trying to begin a new transaction"),
                error);
        } catch (final KafkaException error) {
            throw new StreamsException(
                formatException("Error encountered trying to begin a new transaction"),
                error);
        }
    }
}
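For context, the fencing behavior that maybeBeginTransaction() translates into a TaskMigratedException comes from the plain transactional producer API. A minimal standalone sketch (broker address, transactional id, and topic are assumptions for illustration):

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.InvalidProducerEpochException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public final class BeginTransactionSketch {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "demo-txn-id");     // assumed id
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);

        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            producer.initTransactions(); // claims the transactional id, fencing older instances
            try {
                producer.beginTransaction();
                producer.send(new ProducerRecord<>("demo-topic", new byte[0])); // assumed topic
                producer.commitTransaction();
            } catch (final ProducerFencedException | InvalidProducerEpochException fenced) {
                // Another producer with the same transactional id took over; this
                // producer is permanently unusable and is closed by try-with-resources.
                // StreamsProducer maps the same condition to a TaskMigratedException.
            }
        }
    }
}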
Use of org.apache.kafka.common.errors.InvalidProducerEpochException in project kafka by apache.
Class StreamsProducer, method commitTransaction.
/**
 * @throws IllegalStateException if EOS is disabled
 * @throws TaskMigratedException if the producer got fenced or the commit failed,
 *                               indicating that the task may be migrated out
 */
protected void commitTransaction(final Map<TopicPartition, OffsetAndMetadata> offsets,
                                 final ConsumerGroupMetadata consumerGroupMetadata) {
    if (!eosEnabled()) {
        throw new IllegalStateException(formatException("Exactly-once is not enabled"));
    }

    maybeBeginTransaction();
    try {
        // EOS-v2 assumes brokers are on version 2.5+ and thus can understand the full set of
        // consumer group metadata. Thus, if we are using EOS-v1 and can't make this assumption,
        // we must downgrade the request to include only the group id metadata.
        final ConsumerGroupMetadata maybeDowngradedGroupMetadata =
            processingMode == EXACTLY_ONCE_V2
                ? consumerGroupMetadata
                : new ConsumerGroupMetadata(consumerGroupMetadata.groupId());
        producer.sendOffsetsToTransaction(offsets, maybeDowngradedGroupMetadata);
        producer.commitTransaction();
        transactionInFlight = false;
    } catch (final ProducerFencedException | InvalidProducerEpochException | CommitFailedException error) {
        throw new TaskMigratedException(
            formatException("Producer got fenced trying to commit a transaction"),
            error);
    } catch (final TimeoutException timeoutException) {
        // re-throw to trigger `task.timeout.ms`
        throw timeoutException;
    } catch (final KafkaException error) {
        throw new StreamsException(
            formatException("Error encountered trying to commit a transaction"),
            error);
    }
}
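The same sendOffsetsToTransaction-then-commitTransaction sequence can be exercised directly against the producer API. A minimal sketch of the happy path, assuming a transaction is already open and that the caller wires in the real producer and consumer (topic, partition, and offset values are illustrative assumptions):

import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.InvalidProducerEpochException;
import org.apache.kafka.common.errors.ProducerFencedException;

public final class CommitTransactionSketch {

    // Atomically commits consumed offsets together with any records sent in the
    // open transaction, mirroring StreamsProducer#commitTransaction above.
    static void commitAtomically(final Producer<byte[], byte[]> producer,
                                 final Consumer<byte[], byte[]> consumer) {
        final Map<TopicPartition, OffsetAndMetadata> offsets = Map.of(
            new TopicPartition("demo-input", 0), new OffsetAndMetadata(42L)); // assumed values

        try {
            // consumer.groupMetadata() supplies the full metadata (member id, generation)
            // that brokers on 2.5+ use for the stricter EOS-v2 fencing.
            producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
            producer.commitTransaction();
        } catch (final ProducerFencedException | InvalidProducerEpochException fenced) {
            // A newer instance owns the transactional id; the producer must be closed.
            // Streams converts this condition into a TaskMigratedException instead.
            producer.close();
            throw fenced;
        }
    }
}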