
Example 1 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project apache-kafka-on-k8s by banzaicloud.

The class TransactionalMessageCopier, method main.

public static void main(String[] args) throws IOException {
    Namespace parsedArgs = argParser().parseArgsOrFail(args);
    Integer numMessagesPerTransaction = parsedArgs.getInt("messagesPerTransaction");
    final String transactionalId = parsedArgs.getString("transactionalId");
    final String outputTopic = parsedArgs.getString("outputTopic");
    String consumerGroup = parsedArgs.getString("consumerGroup");
    TopicPartition inputPartition = new TopicPartition(parsedArgs.getString("inputTopic"), parsedArgs.getInt("inputPartition"));
    final KafkaProducer<String, String> producer = createProducer(parsedArgs);
    final KafkaConsumer<String, String> consumer = createConsumer(parsedArgs);
    consumer.assign(singleton(inputPartition));
    long maxMessages = parsedArgs.getInt("maxMessages") == -1 ? Long.MAX_VALUE : parsedArgs.getInt("maxMessages");
    maxMessages = Math.min(messagesRemaining(consumer, inputPartition), maxMessages);
    final boolean enableRandomAborts = parsedArgs.getBoolean("enableRandomAborts");
    producer.initTransactions();
    final AtomicBoolean isShuttingDown = new AtomicBoolean(false);
    final AtomicLong remainingMessages = new AtomicLong(maxMessages);
    final AtomicLong numMessagesProcessed = new AtomicLong(0);
    Runtime.getRuntime().addShutdownHook(new Thread() {

        @Override
        public void run() {
            isShuttingDown.set(true);
            // Flush any remaining messages
            producer.close();
            synchronized (consumer) {
                consumer.close();
            }
            System.out.println(shutDownString(numMessagesProcessed.get(), remainingMessages.get(), transactionalId));
        }
    });
    try {
        Random random = new Random();
        while (0 < remainingMessages.get()) {
            System.out.println(statusAsJson(numMessagesProcessed.get(), remainingMessages.get(), transactionalId));
            if (isShuttingDown.get())
                break;
            int messagesInCurrentTransaction = 0;
            long numMessagesForNextTransaction = Math.min(numMessagesPerTransaction, remainingMessages.get());
            try {
                producer.beginTransaction();
                while (messagesInCurrentTransaction < numMessagesForNextTransaction) {
                    ConsumerRecords<String, String> records = consumer.poll(200L);
                    for (ConsumerRecord<String, String> record : records) {
                        producer.send(producerRecordFromConsumerRecord(outputTopic, record));
                        messagesInCurrentTransaction++;
                    }
                }
                producer.sendOffsetsToTransaction(consumerPositions(consumer), consumerGroup);
                if (enableRandomAborts && random.nextInt() % 3 == 0) {
                    throw new KafkaException("Aborting transaction");
                } else {
                    producer.commitTransaction();
                    remainingMessages.set(maxMessages - numMessagesProcessed.addAndGet(messagesInCurrentTransaction));
                }
            } catch (ProducerFencedException | OutOfOrderSequenceException e) {
                // We cannot recover from these errors, so just rethrow them and let the process fail
                throw e;
            } catch (KafkaException e) {
                producer.abortTransaction();
                resetToLastCommittedPositions(consumer);
            }
        }
    } finally {
        producer.close();
        synchronized (consumer) {
            consumer.close();
        }
    }
    System.exit(0);
}
Also used: Namespace (net.sourceforge.argparse4j.inf.Namespace), ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), OutOfOrderSequenceException (org.apache.kafka.common.errors.OutOfOrderSequenceException), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), AtomicLong (java.util.concurrent.atomic.AtomicLong), Random (java.util.Random), TopicPartition (org.apache.kafka.common.TopicPartition), KafkaException (org.apache.kafka.common.KafkaException)
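
This example treats ProducerFencedException as fatal: it is rethrown, while any other KafkaException aborts the transaction and rewinds the consumer. How fencing arises can be shown with the plain producer API: a second producer calling initTransactions() with the same transactional.id bumps the producer epoch and fences the first instance. A minimal, hypothetical sketch (the broker address, topic, and transactional id are placeholders, not taken from the example above):

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.ProducerFencedException;

public class FencingDemo {

    private static KafkaProducer<String, String> newProducer() {
        Properties props = new Properties();
        // placeholder broker address
        props.put("bootstrap.servers", "localhost:9092");
        // two producers sharing this id cannot both stay active
        props.put("transactional.id", "fencing-demo");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        return new KafkaProducer<>(props);
    }

    public static void main(String[] args) {
        KafkaProducer<String, String> first = newProducer();
        first.initTransactions();
        first.beginTransaction();
        first.send(new ProducerRecord<>("demo-topic", "key", "value"));

        // The second initTransactions() call fences the first producer.
        KafkaProducer<String, String> second = newProducer();
        second.initTransactions();

        try {
            first.commitTransaction();
        } catch (ProducerFencedException fatal) {
            // Unrecoverable for this instance: close without calling abortTransaction().
            first.close();
        }
        second.close();
    }
}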

Example 2 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project apache-kafka-on-k8s by banzaicloud.

The class StreamTask, method initializeTopology.

/**
 * <pre>
 * - (re-)initialize the topology of the task
 * </pre>
 * @throws TaskMigratedException if the task producer got fenced (EOS only)
 */
@Override
public void initializeTopology() {
    initTopology();
    if (eosEnabled) {
        try {
            this.producer.beginTransaction();
        } catch (final ProducerFencedException fatal) {
            throw new TaskMigratedException(this, fatal);
        }
        transactionInFlight = true;
    }
    processorContext.initialized();
    taskInitialized = true;
}
Also used: ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException)
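
Examples 2 through 4 repeat one convention: every transactional producer call inside StreamTask is wrapped so that a fencing event resurfaces as TaskMigratedException, because a fenced producer means another instance now owns the task's transactional.id and the stream thread should rejoin the group rather than crash. A hypothetical, self-contained sketch of that convention (the helper and the stand-in exception class are illustrative, not part of the Kafka Streams API):

import org.apache.kafka.common.errors.ProducerFencedException;

public final class TransactionalCalls {

    // Stand-in for org.apache.kafka.streams.errors.TaskMigratedException.
    static class TaskMigratedException extends RuntimeException {
        TaskMigratedException(Throwable cause) {
            super("task migrated: producer got fenced", cause);
        }
    }

    // Wraps a transactional producer call with the fenced-to-migrated translation.
    static void rethrowFencedAsMigrated(Runnable producerCall) {
        try {
            producerCall.run();
        } catch (final ProducerFencedException fatal) {
            throw new TaskMigratedException(fatal);
        }
    }
}

Under this sketch, Example 2 would read rethrowFencedAsMigrated(producer::beginTransaction) and Example 4 would read rethrowFencedAsMigrated(recordCollector::flush).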

Example 3 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project apache-kafka-on-k8s by banzaicloud.

The class StreamTask, method process.

/**
 * Process one record.
 *
 * @return true if this method processes a record, false if it does not process a record.
 * @throws TaskMigratedException if the task producer got fenced (EOS only)
 */
@SuppressWarnings("unchecked")
public boolean process() {
    // get the next record to process
    final StampedRecord record = partitionGroup.nextRecord(recordInfo);
    // if there is no record to process, return immediately
    if (record == null) {
        return false;
    }
    try {
        // process the record by passing to the source node of the topology
        final ProcessorNode currNode = recordInfo.node();
        final TopicPartition partition = recordInfo.partition();
        log.trace("Start processing one record [{}]", record);
        updateProcessorContext(record, currNode);
        currNode.process(record.key(), record.value());
        log.trace("Completed processing one record [{}]", record);
        // update the consumed offset map after processing is done
        consumedOffsets.put(partition, record.offset());
        commitOffsetNeeded = true;
        // after processing this record, if its partition queue's buffered size has
        // decreased to the threshold, we can then resume the consumption on this partition
        if (recordInfo.queue().size() == maxBufferedSize) {
            consumer.resume(singleton(partition));
        }
    } catch (final ProducerFencedException fatal) {
        throw new TaskMigratedException(this, fatal);
    } catch (final KafkaException e) {
        throw new StreamsException(format("Exception caught in process. taskId=%s, processor=%s, topic=%s, partition=%d, offset=%d", id(), processorContext.currentNode().name(), record.topic(), record.partition(), record.offset()), e);
    } finally {
        processorContext.setCurrentNode(null);
    }
    return true;
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), StreamsException (org.apache.kafka.streams.errors.StreamsException), KafkaException (org.apache.kafka.common.KafkaException), ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException)
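
The resume call in this example is the second half of a back-pressure idiom: the partition is paused elsewhere when its buffered queue grows past maxBufferedSize, and resumed here once processing drains the queue back to that threshold. A hypothetical sketch of the full idiom using the consumer's pause/resume API (names like MAX_BUFFERED and pumpOnce are illustrative; the Duration-based poll assumes a recent client):

import java.time.Duration;
import java.util.Collections;
import java.util.Deque;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public final class FlowControl {

    // Threshold playing the role of maxBufferedSize in StreamTask.
    static final int MAX_BUFFERED = 1000;

    static void pumpOnce(KafkaConsumer<String, String> consumer,
                         TopicPartition partition,
                         Deque<ConsumerRecord<String, String>> buffer) {
        // Fill phase: stop fetching once the buffer exceeds the threshold.
        for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofMillis(100))) {
            buffer.addLast(record);
        }
        if (buffer.size() > MAX_BUFFERED) {
            consumer.pause(Collections.singleton(partition));
        }
        // Drain phase: resume once the buffer shrinks back to the threshold.
        ConsumerRecord<String, String> next = buffer.pollFirst();
        if (next != null) {
            process(next);
            if (buffer.size() == MAX_BUFFERED) {
                consumer.resume(Collections.singleton(partition));
            }
        }
    }

    static void process(ConsumerRecord<String, String> record) {
        System.out.println(record.value());
    }
}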

Example 4 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project apache-kafka-on-k8s by banzaicloud.

The class StreamTask, method flushState.

@Override
protected void flushState() {
    log.trace("Flushing state and producer");
    super.flushState();
    try {
        recordCollector.flush();
    } catch (final ProducerFencedException fatal) {
        throw new TaskMigratedException(this, fatal);
    }
}
Also used: ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException)

Example 5 with ProducerFencedException

Use of org.apache.kafka.common.errors.ProducerFencedException in project flink by apache.

The class FlinkKafkaProducerITCase, method testFailBeforeNotifyAndResumeWorkAfterwards.

/**
 * This test checks whether FlinkKafkaProducer correctly aborts lingering transactions after a
 * failure. If such transactions were left lingering, consumers would be unable to read
 * committed records created after the lingering transaction.
 */
@Test
public void testFailBeforeNotifyAndResumeWorkAfterwards() throws Exception {
    String topic = "flink-kafka-producer-fail-before-notify";
    OneInputStreamOperatorTestHarness<Integer, Object> testHarness1 = createTestHarness(topic);
    checkProducerLeak();
    testHarness1.setup();
    testHarness1.open();
    testHarness1.processElement(42, 0);
    testHarness1.snapshot(0, 1);
    testHarness1.processElement(43, 2);
    OperatorSubtaskState snapshot1 = testHarness1.snapshot(1, 3);
    testHarness1.processElement(44, 4);
    testHarness1.snapshot(2, 5);
    testHarness1.processElement(45, 6);
    // do not close the previous testHarness, to make sure that closing does not clean up
    // something (in case of failure there might not be any close at all)
    OneInputStreamOperatorTestHarness<Integer, Object> testHarness2 = createTestHarness(topic);
    testHarness2.setup();
    // restore from snapshot1, transactions with records 44 and 45 should be aborted
    testHarness2.initializeState(snapshot1);
    testHarness2.open();
    // write and commit more records, after potentially lingering transactions
    testHarness2.processElement(46, 7);
    testHarness2.snapshot(4, 8);
    testHarness2.processElement(47, 9);
    testHarness2.notifyOfCompletedCheckpoint(4);
    // now we should have:
    // - records 42 and 43 in committed transactions
    // - aborted transactions with records 44 and 45
    // - committed transaction with record 46
    // - pending transaction with record 47
    assertExactlyOnceForTopic(createProperties(), topic, Arrays.asList(42, 43, 46));
    try {
        testHarness1.close();
    } catch (Exception e) {
        // The only acceptable exception is ProducerFencedException, because testHarness2
        // reuses the same transactional ID.
        if (!(e.getCause() instanceof ProducerFencedException)) {
            fail("Received unexpected exception " + e);
        }
    }
    testHarness2.close();
    deleteTestTopic(topic);
    checkProducerLeak();
}
Also used: OperatorSubtaskState (org.apache.flink.runtime.checkpoint.OperatorSubtaskState), ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), Test (org.junit.Test)
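
The "unable to read committed records" failure mode described in the Javadoc applies to consumers running with read_committed isolation: such a consumer only advances to the last stable offset, so a single lingering open transaction blocks every record behind it, committed or not. A minimal consumer sketch with that setting (broker address and group id are placeholders; the topic name is the one used in the test above):

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ReadCommittedDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder
        props.put("group.id", "read-committed-demo"); // placeholder
        // Only return records from committed transactions; an open
        // transaction holds back the last stable offset.
        props.put("isolation.level", "read_committed");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singleton("flink-kafka-producer-fail-before-notify"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("%s=%s%n", record.key(), record.value());
            }
        }
    }
}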

Aggregations

ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException): 19 usages
KafkaException (org.apache.kafka.common.KafkaException): 11 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 9 usages
TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException): 8 usages
HashMap (java.util.HashMap): 6 usages
StreamsException (org.apache.kafka.streams.errors.StreamsException): 5 usages
Map (java.util.Map): 4 usages
OutOfOrderSequenceException (org.apache.kafka.common.errors.OutOfOrderSequenceException): 4 usages
HashSet (java.util.HashSet): 3 usages
Properties (java.util.Properties): 3 usages
Set (java.util.Set): 3 usages
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 3 usages
IOException (java.io.IOException): 2 usages
ArrayList (java.util.ArrayList): 2 usages
Random (java.util.Random): 2 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2 usages
AtomicLong (java.util.concurrent.atomic.AtomicLong): 2 usages
Namespace (net.sourceforge.argparse4j.inf.Namespace): 2 usages
CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException): 2 usages
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 2 usages