
Example 16 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project apache-kafka-on-k8s by banzaicloud.

The class RecordCollectorImpl, method send:

@Override
public <K, V> void send(final String topic, final K key, final V value, final Integer partition, final Long timestamp, final Serializer<K> keySerializer, final Serializer<V> valueSerializer) {
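    // Rethrow any exception recorded by a previous record's callback before serializing new data.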
    checkForException();
    final byte[] keyBytes = keySerializer.serialize(topic, key);
    final byte[] valBytes = valueSerializer.serialize(topic, value);
    final ProducerRecord<byte[], byte[]> serializedRecord = new ProducerRecord<>(topic, partition, timestamp, keyBytes, valBytes);
    try {
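        // Send asynchronously; delivery failures surface in the callback below.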
        producer.send(serializedRecord, new Callback() {

            @Override
            public void onCompletion(final RecordMetadata metadata, final Exception exception) {
                if (exception == null) {
                    if (sendException != null) {
                        return;
                    }
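                    // Track the latest acknowledged offset per partition so it can be committed later.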
                    final TopicPartition tp = new TopicPartition(metadata.topic(), metadata.partition());
                    offsets.put(tp, metadata.offset());
                } else {
                    if (sendException == null) {
                        if (exception instanceof ProducerFencedException) {
                            log.warn(LOG_MESSAGE, key, value, timestamp, topic, exception.getMessage());
                            sendException = new ProducerFencedException(String.format(EXCEPTION_MESSAGE, logPrefix, "producer got fenced", key, value, timestamp, topic, exception.getMessage()));
                        } else {
                            if (productionExceptionIsFatal(exception)) {
                                recordSendError(key, value, timestamp, topic, exception);
                            } else if (productionExceptionHandler.handle(serializedRecord, exception) == ProductionExceptionHandlerResponse.FAIL) {
                                recordSendError(key, value, timestamp, topic, exception);
                            } else {
                                log.debug(HANDLER_CONTINUED_MESSAGE, key, value, timestamp, topic, exception);
                            }
                        }
                    }
                }
            }
        });
    } catch (final TimeoutException e) {
        log.error("Timeout exception caught when sending record to topic {}. " + "This might happen if the producer cannot send data to the Kafka cluster and thus, " + "its internal buffer fills up. " + "You can increase producer parameter `max.block.ms` to increase this timeout.", topic);
        throw new StreamsException(String.format("%sFailed to send record to topic %s due to timeout.", logPrefix, topic));
    } catch (final Exception uncaughtException) {
        throw new StreamsException(String.format(EXCEPTION_MESSAGE, logPrefix, "an error caught", key, value, timestamp, topic, uncaughtException.getMessage()), uncaughtException);
    }
}
Also used: RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Callback (org.apache.kafka.clients.producer.Callback), TopicPartition (org.apache.kafka.common.TopicPartition), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), StreamsException (org.apache.kafka.streams.errors.StreamsException), SerializationException (org.apache.kafka.common.errors.SerializationException), KafkaException (org.apache.kafka.common.KafkaException), RetriableException (org.apache.kafka.common.errors.RetriableException), UnknownServerException (org.apache.kafka.common.errors.UnknownServerException), SecurityDisabledException (org.apache.kafka.common.errors.SecurityDisabledException), TimeoutException (org.apache.kafka.common.errors.TimeoutException), AuthorizationException (org.apache.kafka.common.errors.AuthorizationException), InvalidTopicException (org.apache.kafka.common.errors.InvalidTopicException), ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), AuthenticationException (org.apache.kafka.common.errors.AuthenticationException)
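
The productionExceptionHandler used above is the Kafka Streams ProductionExceptionHandler interface. A minimal sketch of a custom implementation follows; the class name RetriableContinueHandler and the continue-on-retriable policy are illustrative assumptions, not the library default.

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

// Hypothetical handler: keep processing on retriable broker errors, fail on everything else.
public class RetriableContinueHandler implements ProductionExceptionHandler {

    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        return exception instanceof RetriableException
            ? ProductionExceptionHandlerResponse.CONTINUE
            : ProductionExceptionHandlerResponse.FAIL;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // No configuration needed for this sketch.
    }
}

Such a handler would be registered under the default.production.exception.handler entry in the Streams configuration.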

Example 17 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.

The class ExactlyOnceMessageProcessor, method run:

@Override
public void run() {
    // Init transactions call should always happen first in order to clear zombie transactions from previous generation.
    producer.initTransactions();
    final AtomicLong messageRemaining = new AtomicLong(Long.MAX_VALUE);
    consumer.subscribe(Collections.singleton(inputTopic), new ConsumerRebalanceListener() {

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            printWithTxnId("Revoked partition assignment to kick-off rebalancing: " + partitions);
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            printWithTxnId("Received partition assignment after rebalancing: " + partitions);
            messageRemaining.set(messagesRemaining(consumer));
        }
    });
    int messageProcessed = 0;
    while (messageRemaining.get() > 0) {
        try {
            ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofMillis(200));
            if (records.count() > 0) {
                // Begin a new transaction session.
                producer.beginTransaction();
                for (ConsumerRecord<Integer, String> record : records) {
                    // Process the record and send to downstream.
                    ProducerRecord<Integer, String> customizedRecord = transform(record);
                    producer.send(customizedRecord);
                }
                Map<TopicPartition, OffsetAndMetadata> offsets = consumerOffsets();
                // Checkpoint the progress by sending offsets to group coordinator broker.
                // Note that this API is only available for broker >= 2.5.
                producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
                // Finish the transaction. All sent records should be visible for consumption now.
                producer.commitTransaction();
                messageProcessed += records.count();
            }
        } catch (ProducerFencedException e) {
            throw new KafkaException(String.format("The transactional.id %s has been claimed by another process", transactionalId));
        } catch (FencedInstanceIdException e) {
            throw new KafkaException(String.format("The group.instance.id %s has been claimed by another process", groupInstanceId));
        } catch (KafkaException e) {
            // If we have not been fenced, try to abort the transaction and continue. This will raise immediately
            // if the producer has hit a fatal error.
            producer.abortTransaction();
            // The consumer fetch position needs to be restored to the committed offset
            // before the transaction started.
            resetToLastCommittedPositions(consumer);
        }
        messageRemaining.set(messagesRemaining(consumer));
        printWithTxnId("Message remaining: " + messageRemaining);
    }
    printWithTxnId("Finished processing " + messageProcessed + " records");
    latch.countDown();
}
Also used: ConsumerRebalanceListener (org.apache.kafka.clients.consumer.ConsumerRebalanceListener), ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), AtomicLong (java.util.concurrent.atomic.AtomicLong), TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), KafkaException (org.apache.kafka.common.KafkaException), FencedInstanceIdException (org.apache.kafka.common.errors.FencedInstanceIdException)
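
The loop above presumes a transactional producer and a read_committed consumer. A minimal configuration sketch under those assumptions follows; the bootstrap address and the processor-0 identifiers are placeholders, not values from the example.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

final Properties producerProps = new Properties();
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
// A unique, stable transactional.id; a second producer starting with the same id fences this one.
producerProps.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "processor-0");
// Idempotence is required for transactions.
producerProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

final Properties consumerProps = new Properties();
consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "eos-group");
// Static membership; a second member with the same id triggers FencedInstanceIdException.
consumerProps.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, "processor-0");
// Read only committed transactional data.
consumerProps.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
// Offsets are committed via sendOffsetsToTransaction, not auto-commit.
consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());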

Example 18 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.

The class StreamsProducer, method commitTransaction:

/**
 * @throws IllegalStateException if EOS is disabled
 * @throws TaskMigratedException if the producer got fenced while committing, indicating the task has migrated to another client
 */
protected void commitTransaction(final Map<TopicPartition, OffsetAndMetadata> offsets, final ConsumerGroupMetadata consumerGroupMetadata) {
    if (!eosEnabled()) {
        throw new IllegalStateException(formatException("Exactly-once is not enabled"));
    }
    maybeBeginTransaction();
    try {
        // EOS-v2 assumes brokers are on version 2.5+ and thus can understand the full set of consumer group metadata
        // Thus if we are using EOS-v1 and can't make this assumption, we must downgrade the request to include only the group id metadata
        final ConsumerGroupMetadata maybeDowngradedGroupMetadata = processingMode == EXACTLY_ONCE_V2 ? consumerGroupMetadata : new ConsumerGroupMetadata(consumerGroupMetadata.groupId());
        producer.sendOffsetsToTransaction(offsets, maybeDowngradedGroupMetadata);
        producer.commitTransaction();
        transactionInFlight = false;
    } catch (final ProducerFencedException | InvalidProducerEpochException | CommitFailedException error) {
        throw new TaskMigratedException(formatException("Producer got fenced trying to commit a transaction"), error);
    } catch (final TimeoutException timeoutException) {
        // re-throw to trigger `task.timeout.ms`
        throw timeoutException;
    } catch (final KafkaException error) {
        throw new StreamsException(formatException("Error encountered trying to commit a transaction"), error);
    }
}
Also used: InvalidProducerEpochException (org.apache.kafka.common.errors.InvalidProducerEpochException), ConsumerGroupMetadata (org.apache.kafka.clients.consumer.ConsumerGroupMetadata), StreamsException (org.apache.kafka.streams.errors.StreamsException), KafkaException (org.apache.kafka.common.KafkaException), CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException), ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException), TimeoutException (org.apache.kafka.common.errors.TimeoutException)
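
The processingMode check above distinguishes the two exactly-once modes a Streams application can be configured with. A minimal sketch of enabling EOS-v2 follows; the application id and bootstrap address are placeholders.

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-eos-app");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
// EXACTLY_ONCE_V2 requires brokers on 2.5+; the older EXACTLY_ONCE setting works with older brokers
// but, as the code above shows, sends only the group id in the offset-commit metadata.
props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);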

Example 19 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.

The class StreamThreadTest, method shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerGotFencedInCommitTransactionWhenCommitting:

@Test
public void shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerGotFencedInCommitTransactionWhenCommitting() {
    // Only have a source but no sink, so that we do not get fenced in producer.send().
    internalTopologyBuilder.addSource(null, "source", null, null, null, topic1);
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(configProps(true)), true);
    final MockConsumer<byte[], byte[]> consumer = clientSupplier.consumer;
    consumer.updatePartitions(topic1, Collections.singletonList(new PartitionInfo(topic1, 1, null, null, null)));
    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign single partition
    assignedPartitions.add(t1p1);
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();
    assertThat(thread.activeTasks().size(), equalTo(1));
    final MockProducer<byte[], byte[]> producer = clientSupplier.producers.get(0);
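    // Arrange for the next commitTransaction() call on the mock to throw, simulating a fenced producer.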
    producer.commitTransactionException = new ProducerFencedException("Producer is fenced");
    mockTime.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG) + 1L);
    consumer.addRecord(new ConsumerRecord<>(topic1, 1, 1, new byte[0], new byte[0]));
    try {
        thread.runOnce();
        fail("Should have thrown TaskMigratedException");
    } catch (final KafkaException expected) {
        assertTrue(expected instanceof TaskMigratedException);
        assertTrue("StreamsThread removed the fenced zombie task already, should wait for rebalance to close all zombies together.", thread.activeTasks().stream().anyMatch(task -> task.id().equals(task1)));
    }
    assertThat(producer.commitCount(), equalTo(0L));
    assertTrue(clientSupplier.producers.get(0).transactionInFlight());
    assertFalse(clientSupplier.producers.get(0).transactionCommitted());
    assertFalse(clientSupplier.producers.get(0).closed());
    assertEquals(1, thread.activeTasks().size());
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), Utils.mkSet (org.apache.kafka.common.utils.Utils.mkSet), Set (java.util.Set), HashSet (java.util.HashSet), Collections.emptySet (java.util.Collections.emptySet), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), TopicPartition (org.apache.kafka.common.TopicPartition), KafkaException (org.apache.kafka.common.KafkaException), PartitionInfo (org.apache.kafka.common.PartitionInfo), MockConsumer (org.apache.kafka.clients.consumer.MockConsumer), StreamsConfig (org.apache.kafka.streams.StreamsConfig), TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException), Test (org.junit.Test)
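
The fencing above is injected through MockProducer's public exception fields, which make the next matching call throw. A standalone sketch of the same mechanism, independent of the Streams test harness:

import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.ByteArraySerializer;

final MockProducer<byte[], byte[]> producer =
    new MockProducer<>(true, new ByteArraySerializer(), new ByteArraySerializer());
producer.initTransactions();
producer.beginTransaction();
// Inject the failure: the next commitTransaction() throws this exception.
producer.commitTransactionException = new ProducerFencedException("Producer is fenced");
try {
    producer.commitTransaction();
} catch (final ProducerFencedException expected) {
    // The transaction is still in flight and uncommitted, matching the assertions in the test above.
}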

Aggregations

ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException): 19 uses
KafkaException (org.apache.kafka.common.KafkaException): 11 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 9 uses
TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException): 8 uses
HashMap (java.util.HashMap): 6 uses
StreamsException (org.apache.kafka.streams.errors.StreamsException): 5 uses
Map (java.util.Map): 4 uses
OutOfOrderSequenceException (org.apache.kafka.common.errors.OutOfOrderSequenceException): 4 uses
HashSet (java.util.HashSet): 3 uses
Properties (java.util.Properties): 3 uses
Set (java.util.Set): 3 uses
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 3 uses
IOException (java.io.IOException): 2 uses
ArrayList (java.util.ArrayList): 2 uses
Random (java.util.Random): 2 uses
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2 uses
AtomicLong (java.util.concurrent.atomic.AtomicLong): 2 uses
Namespace (net.sourceforge.argparse4j.inf.Namespace): 2 uses
CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException): 2 uses
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 2 uses