Example 6 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.

From the class MockProducer, method send.

/**
 * Adds the record to the list of sent records. When autoComplete is enabled the
 * returned future is satisfied immediately; otherwise the send stays pending until
 * it is completed through completeNext() or errorNext().
 *
 * @see #history()
 */
@Override
public synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) {
    if (this.closed) {
        throw new IllegalStateException("MockProducer is already closed.");
    }
    if (this.producerFenced) {
        throw new KafkaException("MockProducer is fenced.", new ProducerFencedException("Fenced"));
    }
    if (this.sendException != null) {
        throw this.sendException;
    }
    int partition = 0;
    if (!this.cluster.partitionsForTopic(record.topic()).isEmpty()) {
        partition = partition(record, this.cluster);
    } else {
        // Serialize anyway so that a ClassCastException surfaces if the configured
        // serializers cannot handle the key/value types.
        keySerializer.serialize(record.topic(), record.key());
        valueSerializer.serialize(record.topic(), record.value());
    }
    TopicPartition topicPartition = new TopicPartition(record.topic(), partition);
    ProduceRequestResult result = new ProduceRequestResult(topicPartition);
    FutureRecordMetadata future = new FutureRecordMetadata(result, 0, RecordBatch.NO_TIMESTAMP, 0, 0, Time.SYSTEM);
    long offset = nextOffset(topicPartition);
    // RecordMetadata takes a long base offset plus an int relative index, so split
    // the absolute offset into baseOffset + batchIndex (their sum equals offset).
    long baseOffset = Math.max(0, offset - Integer.MAX_VALUE);
    int batchIndex = (int) Math.min(Integer.MAX_VALUE, offset);
    Completion completion = new Completion(offset, new RecordMetadata(topicPartition, baseOffset, batchIndex, RecordBatch.NO_TIMESTAMP, 0, 0), result, callback, topicPartition);
    if (!this.transactionInFlight) {
        this.sent.add(record);
    } else {
        this.uncommittedSends.add(record);
    }
    if (autoComplete) {
        completion.complete(null);
    } else {
        this.completions.addLast(completion);
    }
    return future;
}
Also used : FutureRecordMetadata(org.apache.kafka.clients.producer.internals.FutureRecordMetadata) TopicPartition(org.apache.kafka.common.TopicPartition) KafkaException(org.apache.kafka.common.KafkaException) ProduceRequestResult(org.apache.kafka.clients.producer.internals.ProduceRequestResult) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException)
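
A minimal sketch of how a test might drive this fencing path, assuming MockProducer's fenceProducer() test hook and the standard StringSerializer (the wrapper class and topic name are illustrative):

import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.StringSerializer;

public class MockProducerFencingSketch {
    public static void main(String[] args) {
        // autoComplete = true: sends complete immediately instead of queueing completions
        MockProducer<String, String> producer =
            new MockProducer<>(true, new StringSerializer(), new StringSerializer());
        producer.initTransactions();
        // fenceProducer() sets the producerFenced flag checked at the top of send()
        producer.fenceProducer();
        try {
            producer.send(new ProducerRecord<>("topic", "key", "value"));
        } catch (KafkaException e) {
            // send() wraps ProducerFencedException as the cause rather than throwing it directly
            System.out.println("fenced: " + (e.getCause() instanceof ProducerFencedException));
        }
    }
}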

Example 7 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.

From the class TransactionalMessageCopier, method runEventLoop.

public static void runEventLoop(Namespace parsedArgs) {
    final String transactionalId = parsedArgs.getString("transactionalId");
    final String outputTopic = parsedArgs.getString("outputTopic");
    String consumerGroup = parsedArgs.getString("consumerGroup");
    final KafkaProducer<String, String> producer = createProducer(parsedArgs);
    final KafkaConsumer<String, String> consumer = createConsumer(parsedArgs);
    final AtomicLong remainingMessages = new AtomicLong(parsedArgs.getInt("maxMessages") == -1 ? Long.MAX_VALUE : parsedArgs.getInt("maxMessages"));
    boolean groupMode = parsedArgs.getBoolean("groupMode");
    String topicName = parsedArgs.getString("inputTopic");
    final AtomicLong numMessagesProcessedSinceLastRebalance = new AtomicLong(0);
    final AtomicLong totalMessageProcessed = new AtomicLong(0);
    if (groupMode) {
        consumer.subscribe(Collections.singleton(topicName), new ConsumerRebalanceListener() {

            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                remainingMessages.set(partitions.stream().mapToLong(partition -> messagesRemaining(consumer, partition)).sum());
                numMessagesProcessedSinceLastRebalance.set(0);
                // We use the message cap for remaining here, as remainingMessages is not set yet.
                System.out.println(statusAsJson(totalMessageProcessed.get(), numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId, "RebalanceComplete"));
            }
        });
    } else {
        TopicPartition inputPartition = new TopicPartition(topicName, parsedArgs.getInt("inputPartition"));
        consumer.assign(singleton(inputPartition));
        remainingMessages.set(Math.min(messagesRemaining(consumer, inputPartition), remainingMessages.get()));
    }
    final boolean enableRandomAborts = parsedArgs.getBoolean("enableRandomAborts");
    producer.initTransactions();
    final AtomicBoolean isShuttingDown = new AtomicBoolean(false);
    Exit.addShutdownHook("transactional-message-copier-shutdown-hook", () -> {
        isShuttingDown.set(true);
        consumer.wakeup();
        System.out.println(shutDownString(totalMessageProcessed.get(), numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId));
    });
    final boolean useGroupMetadata = parsedArgs.getBoolean("useGroupMetadata");
    try {
        Random random = new Random();
        while (!isShuttingDown.get() && remainingMessages.get() > 0) {
            System.out.println(statusAsJson(totalMessageProcessed.get(), numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId, "ProcessLoop"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
            if (records.count() > 0) {
                try {
                    producer.beginTransaction();
                    for (ConsumerRecord<String, String> record : records) {
                        producer.send(producerRecordFromConsumerRecord(outputTopic, record));
                    }
                    long messagesSentWithinCurrentTxn = records.count();
                    ConsumerGroupMetadata groupMetadata = useGroupMetadata ? consumer.groupMetadata() : new ConsumerGroupMetadata(consumerGroup);
                    producer.sendOffsetsToTransaction(consumerPositions(consumer), groupMetadata);
                    if (enableRandomAborts && random.nextInt() % 3 == 0) {
                        abortTransactionAndResetPosition(producer, consumer);
                    } else {
                        producer.commitTransaction();
                        remainingMessages.getAndAdd(-messagesSentWithinCurrentTxn);
                        numMessagesProcessedSinceLastRebalance.getAndAdd(messagesSentWithinCurrentTxn);
                        totalMessageProcessed.getAndAdd(messagesSentWithinCurrentTxn);
                    }
                } catch (ProducerFencedException e) {
                    throw new KafkaException(String.format("The transactional.id %s has been claimed by another process", transactionalId), e);
                } catch (KafkaException e) {
                    log.debug("Aborting transaction after catching exception", e);
                    abortTransactionAndResetPosition(producer, consumer);
                }
            }
        }
    } catch (WakeupException e) {
        if (!isShuttingDown.get()) {
            // Let the exception propagate if the wakeup was not raised
            // as part of shutdown.
            throw e;
        }
    } finally {
        Utils.closeQuietly(producer, "producer");
        Utils.closeQuietly(consumer, "consumer");
    }
}
Also used : ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Exit(org.apache.kafka.common.utils.Exit) ConsumerGroupMetadata(org.apache.kafka.clients.consumer.ConsumerGroupMetadata) Date(java.util.Date) LoggerFactory(org.slf4j.LoggerFactory) KafkaException(org.apache.kafka.common.KafkaException) SimpleDateFormat(java.text.SimpleDateFormat) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) Random(java.util.Random) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) Arguments.store(net.sourceforge.argparse4j.impl.Arguments.store) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) Collections.singleton(java.util.Collections.singleton) ArgumentParser(net.sourceforge.argparse4j.inf.ArgumentParser) Namespace(net.sourceforge.argparse4j.inf.Namespace) Duration(java.time.Duration) Map(java.util.Map) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) DateFormat(java.text.DateFormat) Utils(org.apache.kafka.common.utils.Utils) TopicPartition(org.apache.kafka.common.TopicPartition) Logger(org.slf4j.Logger) Properties(java.util.Properties) Arguments.storeTrue(net.sourceforge.argparse4j.impl.Arguments.storeTrue) WakeupException(org.apache.kafka.common.errors.WakeupException) ArgumentParsers(net.sourceforge.argparse4j.ArgumentParsers) Collection(java.util.Collection) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) AtomicLong(java.util.concurrent.atomic.AtomicLong) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) Collections(java.util.Collections) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer)
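
The abortTransactionAndResetPosition helper is elided above. A plausible sketch of what it does, assuming the rewind strategy implied by the surrounding loop (abort, then seek every assigned partition back to its last committed offset so the aborted batch is re-consumed):

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.TopicPartition;

private static void abortTransactionAndResetPosition(KafkaProducer<String, String> producer,
                                                     KafkaConsumer<String, String> consumer) {
    // Discard the in-flight transaction, including the offsets sent via
    // sendOffsetsToTransaction().
    producer.abortTransaction();
    // Rewind to the last committed positions so the aborted records are
    // re-read by the next poll() and copied in a fresh transaction.
    Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(consumer.assignment());
    for (TopicPartition tp : consumer.assignment()) {
        OffsetAndMetadata offset = committed.get(tp);
        if (offset != null)
            consumer.seek(tp, offset.offset());
        else
            consumer.seekToBeginning(Collections.singleton(tp));
    }
}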

Example 8 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.

From the class StreamThreadTest, method shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerGotFencedInCommitTransactionWhenSuspendingTasks.

@Test
public void shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerGotFencedInCommitTransactionWhenSuspendingTasks() {
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(configProps(true)), true);
    internalTopologyBuilder.addSource(null, "name", null, null, null, topic1);
    internalTopologyBuilder.addSink("out", "output", null, null, null, "name");
    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign single partition
    assignedPartitions.add(t1p1);
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();
    assertThat(thread.activeTasks().size(), equalTo(1));
    // need to process a record to enable committing
    addRecord(mockConsumer, 0L);
    thread.runOnce();
    clientSupplier.producers.get(0).commitTransactionException = new ProducerFencedException("Producer is fenced");
    assertThrows(TaskMigratedException.class, () -> thread.rebalanceListener().onPartitionsRevoked(assignedPartitions));
    assertFalse(clientSupplier.producers.get(0).transactionCommitted());
    assertFalse(clientSupplier.producers.get(0).closed());
    assertEquals(1, thread.activeTasks().size());
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) Set(java.util.Set) HashSet(java.util.HashSet) Collections.emptySet(java.util.Collections.emptySet) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) StreamsConfig(org.apache.kafka.streams.StreamsConfig) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) Test(org.junit.Test)
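
The failure above is injected through MockProducer's public exception hooks: assigning commitTransactionException makes the next commitTransaction() call throw it. A minimal sketch of that mechanism in isolation (test name illustrative, JUnit 4 as in the example above):

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThrows;

import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.junit.Test;

@Test
public void commitTransactionExceptionHookSketch() {
    MockProducer<byte[], byte[]> producer =
        new MockProducer<>(true, new ByteArraySerializer(), new ByteArraySerializer());
    producer.initTransactions();
    producer.beginTransaction();
    // The exception stored in the public hook is rethrown by the next commit attempt.
    producer.commitTransactionException = new ProducerFencedException("Producer is fenced");
    assertThrows(ProducerFencedException.class, producer::commitTransaction);
    // The transaction is not marked committed, matching the assertion in the test above.
    assertFalse(producer.transactionCommitted());
}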

Example 9 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.

From the class RecordCollectorImpl, method recordSendError.

private void recordSendError(final String topic, final Exception exception, final ProducerRecord<byte[], byte[]> serializedRecord) {
    String errorMessage = String.format(SEND_EXCEPTION_MESSAGE, topic, taskId, exception.toString());
    if (isFatalException(exception)) {
        errorMessage += "\nWritten offsets would not be recorded and no more records would be sent since this is a fatal error.";
        sendException.set(new StreamsException(errorMessage, exception));
    } else if (exception instanceof ProducerFencedException || exception instanceof InvalidProducerEpochException || exception instanceof OutOfOrderSequenceException) {
        errorMessage += "\nWritten offsets would not be recorded and no more records would be sent since the producer is fenced, " + "indicating the task may be migrated out";
        sendException.set(new TaskMigratedException(errorMessage, exception));
    } else {
        if (exception instanceof RetriableException) {
            errorMessage += "\nThe broker is either slow or in bad state (like not having enough replicas) in responding the request, " + "or the connection to broker was interrupted sending the request or receiving the response. " + "\nConsider overwriting `max.block.ms` and /or " + "`delivery.timeout.ms` to a larger value to wait longer for such scenarios and avoid timeout errors";
            sendException.set(new TaskCorruptedException(Collections.singleton(taskId)));
        } else {
            if (productionExceptionHandler.handle(serializedRecord, exception) == ProductionExceptionHandlerResponse.FAIL) {
                errorMessage += "\nException handler choose to FAIL the processing, no more records would be sent.";
                sendException.set(new StreamsException(errorMessage, exception));
            } else {
                errorMessage += "\nException handler choose to CONTINUE processing in spite of this error but written offsets would not be recorded.";
                droppedRecordsSensor.record();
            }
        }
    }
    log.error(errorMessage, exception);
}
Also used : InvalidProducerEpochException(org.apache.kafka.common.errors.InvalidProducerEpochException) TaskCorruptedException(org.apache.kafka.streams.errors.TaskCorruptedException) StreamsException(org.apache.kafka.streams.errors.StreamsException) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) OutOfOrderSequenceException(org.apache.kafka.common.errors.OutOfOrderSequenceException) TaskMigratedException(org.apache.kafka.streams.errors.TaskMigratedException) RetriableException(org.apache.kafka.common.errors.RetriableException)
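
In the non-fatal branch, the decision is delegated to the pluggable ProductionExceptionHandler. A minimal sketch of a custom handler, assuming the standard Kafka Streams interface; the class name is illustrative:

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.RecordTooLargeException;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

// Illustrative handler: drop records that are too large, fail on everything else.
public class DropOversizedRecordsHandler implements ProductionExceptionHandler {

    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        if (exception instanceof RecordTooLargeException) {
            // recordSendError() above will bump the dropped-records sensor and continue
            return ProductionExceptionHandlerResponse.CONTINUE;
        }
        // Anything else surfaces as a StreamsException and stops processing
        return ProductionExceptionHandlerResponse.FAIL;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // nothing to configure
    }
}

It would be registered via props.put(StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG, DropOversizedRecordsHandler.class).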

Example 10 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.

From the class StreamsProducer, method maybeBeginTransaction.

private void maybeBeginTransaction() {
    if (eosEnabled() && !transactionInFlight) {
        try {
            producer.beginTransaction();
            transactionInFlight = true;
        } catch (final ProducerFencedException | InvalidProducerEpochException error) {
            throw new TaskMigratedException(formatException("Producer got fenced trying to begin a new transaction"), error);
        } catch (final KafkaException error) {
            throw new StreamsException(formatException("Error encountered trying to begin a new transaction"), error);
        }
    }
}
Also used : InvalidProducerEpochException(org.apache.kafka.common.errors.InvalidProducerEpochException) StreamsException(org.apache.kafka.streams.errors.StreamsException) KafkaException(org.apache.kafka.common.KafkaException) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) TaskMigratedException(org.apache.kafka.streams.errors.TaskMigratedException)
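
The same fenced-to-migrated translation recurs at send, commit, and abort time. A condensed sketch of the underlying producer lifecycle that StreamsProducer wraps, assuming a plain KafkaProducer configured with a transactional.id (this follows the standard KafkaProducer transactional pattern, not Streams internals):

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.ProducerFencedException;

void sendTransactionally(KafkaProducer<byte[], byte[]> producer,
                         ProducerRecord<byte[], byte[]> record) {
    try {
        producer.beginTransaction();
        producer.send(record);
        producer.commitTransaction();
    } catch (ProducerFencedException fatal) {
        // A producer with the same transactional.id and a newer epoch exists;
        // this producer can never be used again. Streams rethrows this as
        // TaskMigratedException so the thread rejoins the group.
        producer.close();
        throw fatal;
    } catch (KafkaException recoverable) {
        // Transient send/commit failure: abort so the batch can be retried.
        producer.abortTransaction();
        throw recoverable;
    }
}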

Aggregations

ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException): 19 usages
KafkaException (org.apache.kafka.common.KafkaException): 11 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 9 usages
TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException): 8 usages
HashMap (java.util.HashMap): 6 usages
StreamsException (org.apache.kafka.streams.errors.StreamsException): 5 usages
Map (java.util.Map): 4 usages
OutOfOrderSequenceException (org.apache.kafka.common.errors.OutOfOrderSequenceException): 4 usages
HashSet (java.util.HashSet): 3 usages
Properties (java.util.Properties): 3 usages
Set (java.util.Set): 3 usages
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 3 usages
IOException (java.io.IOException): 2 usages
ArrayList (java.util.ArrayList): 2 usages
Random (java.util.Random): 2 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2 usages
AtomicLong (java.util.concurrent.atomic.AtomicLong): 2 usages
Namespace (net.sourceforge.argparse4j.inf.Namespace): 2 usages
CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException): 2 usages
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 2 usages