
Example 91 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project nifi by apache.

the class PublisherLease method publish.

protected void publish(final FlowFile flowFile, final Map<String, String> additionalAttributes, final byte[] messageKey, final byte[] messageContent, final String topic, final InFlightMessageTracker tracker) {
    final ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(topic, null, messageKey, messageContent);
    addHeaders(flowFile, additionalAttributes, record);
    // Asynchronous send: the callback runs on the producer's I/O thread once the broker responds.
    producer.send(record, new Callback() {

        @Override
        public void onCompletion(final RecordMetadata metadata, final Exception exception) {
            if (exception == null) {
                tracker.incrementAcknowledgedCount(flowFile);
            } else {
                // Record the failure and poison the lease so it is not reused for further publishing.
                tracker.fail(flowFile, exception);
                poison();
            }
        }
    });
    messagesSent.incrementAndGet();
    tracker.incrementSentCount(flowFile);
}
Also used: RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Callback(org.apache.kafka.clients.producer.Callback) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) TimeoutException(java.util.concurrent.TimeoutException) SchemaNotFoundException(org.apache.nifi.schema.access.SchemaNotFoundException) TokenTooLargeException(org.apache.nifi.stream.io.exception.TokenTooLargeException) IOException(java.io.IOException)
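
For readers outside NiFi, here is a minimal self-contained sketch of the same send-with-callback pattern; the broker address, topic, and class name are placeholders, not taken from the NiFi code above.

import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class CallbackSendSketch {

    public static void main(String[] args) {
        final Properties props = new Properties();
        // Placeholder broker address and topic; replace with real values.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            final ProducerRecord<byte[], byte[]> record =
                new ProducerRecord<>("demo-topic", "key".getBytes(), "value".getBytes());
            // The callback fires on the producer's I/O thread once the broker responds.
            producer.send(record, new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception == null) {
                        // RecordMetadata reports where the record landed.
                        System.out.printf("written to %s-%d at offset %d%n",
                            metadata.topic(), metadata.partition(), metadata.offset());
                    } else {
                        exception.printStackTrace();
                    }
                }
            });
            // Block until buffered records are actually sent before closing.
            producer.flush();
        }
    }
}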

Example 92 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project nifi by apache.

the class PublisherLease method publish.

private void publish(final FlowFile flowFile, final byte[] messageKey, final byte[] messageContent, final String topic, final InFlightMessageTracker tracker) {
    final ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(topic, null, messageKey, messageContent);
    producer.send(record, new Callback() {

        @Override
        public void onCompletion(final RecordMetadata metadata, final Exception exception) {
            if (exception == null) {
                tracker.incrementAcknowledgedCount(flowFile);
            } else {
                tracker.fail(flowFile, exception);
                poison();
            }
        }
    });
    tracker.incrementSentCount(flowFile);
}
Also used: RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Callback(org.apache.kafka.clients.producer.Callback) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) TimeoutException(java.util.concurrent.TimeoutException) IOException(java.io.IOException) TokenTooLargeException(org.apache.nifi.stream.io.exception.TokenTooLargeException)
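
The callback style in examples 91 and 92 is the non-blocking option; send() also returns a Future<RecordMetadata> that can be awaited directly. A minimal sketch of that blocking variant follows, assuming a producer and record set up as in the sketch under Example 91; the 30-second timeout is an arbitrary choice.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class BlockingSendSketch {

    // send() returns immediately; get() blocks until the record is acknowledged,
    // throws ExecutionException on a send failure, or TimeoutException after 30s.
    static RecordMetadata sendAndWait(Producer<byte[], byte[]> producer,
            ProducerRecord<byte[], byte[]> record)
            throws InterruptedException, ExecutionException, TimeoutException {
        return producer.send(record).get(30, TimeUnit.SECONDS);
    }
}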

Example 93 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project streamsx.kafka by IBMStreams.

the class AtLeastOnceKafkaProducerClient method tryCancelOutstandingSendRequests.

/**
 * Tries to cancel all send requests that are not yet done.
 */
@Override
public void tryCancelOutstandingSendRequests(boolean mayInterruptIfRunning) {
    if (logger.isDebugEnabled())
        logger.debug("AtLeastOnceKafkaProducerClient -- trying to cancel requests");
    int nCancelled = 0;
    for (Future<RecordMetadata> future : futuresList) {
        // cancel() succeeds only for requests that have not already completed.
        if (!future.isDone() && future.cancel(mayInterruptIfRunning))
            ++nCancelled;
    }
    if (logger.isDebugEnabled())
        logger.debug("AtLeastOnceKafkaProducerClient -- number of cancelled send requests: " + nCancelled); // $NON-NLS-1$
    futuresList.clear();
}
Also used: RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Checkpoint(com.ibm.streams.operator.state.Checkpoint)
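
The bookkeeping here reduces to one list of futures and one loop; a stripped-down sketch follows, where SendTracker is a hypothetical name for illustration, not part of the streamsx.kafka API.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

// Collects the futures returned by send() so that outstanding
// requests can be cancelled as a batch later.
class SendTracker {

    private final List<Future<RecordMetadata>> futuresList = new ArrayList<>();

    void send(Producer<byte[], byte[]> producer, ProducerRecord<byte[], byte[]> record) {
        futuresList.add(producer.send(record));
    }

    // Returns how many not-yet-done requests were successfully cancelled.
    int tryCancelOutstanding(boolean mayInterruptIfRunning) {
        int nCancelled = 0;
        for (Future<RecordMetadata> future : futuresList) {
            if (!future.isDone() && future.cancel(mayInterruptIfRunning))
                ++nCancelled;
        }
        futuresList.clear();
        return nCancelled;
    }
}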

Example 94 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project streamsx.kafka by IBMStreams.

the class TransactionalKafkaProducerClient method tryCancelOutstandingSendRequests.

/**
 * Tries to cancel all send requests that are not yet done.
 */
@Override
public void tryCancelOutstandingSendRequests(boolean mayInterruptIfRunning) {
    if (logger.isDebugEnabled())
        logger.debug("TransactionalKafkaProducerClient -- trying to cancel requests");
    int nCancelled = 0;
    for (Future<RecordMetadata> future : futuresList) {
        if (!future.isDone() && future.cancel(mayInterruptIfRunning))
            ++nCancelled;
    }
    if (logger.isDebugEnabled())
        logger.debug("TransactionalKafkaProducerClient -- number of cancelled send requests: " + nCancelled); // $NON-NLS-1$
    futuresList.clear();
}
Also used: RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Checkpoint(com.ibm.streams.operator.state.Checkpoint)
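
A caveat on both variants: how much future.cancel() actually prevents is client-dependent. Once a record has been handed to the producer's internal sender thread it generally cannot be aborted, and in some Kafka client versions the future returned by send() implements cancel() as a no-op that returns false, so nCancelled can legitimately stay at zero; clearing futuresList is the part that always takes effect.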

Example 95 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project camel by apache.

the class KafkaProducerFullTest method producedBytesMessageIsReceivedByKafka.

@Test
public void producedBytesMessageIsReceivedByKafka() throws InterruptedException, IOException {
    int messageInTopic = 10;
    int messageInOtherTopic = 5;
    CountDownLatch messagesLatch = new CountDownLatch(messageInTopic + messageInOtherTopic);
    Map<String, Object> inTopicHeaders = new HashMap<String, Object>();
    inTopicHeaders.put(KafkaConstants.PARTITION_KEY, "1".getBytes());
    sendMessagesInRoute(messageInTopic, bytesTemplate, "IT test message".getBytes(), inTopicHeaders);
    Map<String, Object> otherTopicHeaders = new HashMap<String, Object>();
    otherTopicHeaders.put(KafkaConstants.PARTITION_KEY, "1".getBytes());
    otherTopicHeaders.put(KafkaConstants.TOPIC, TOPIC_BYTES_IN_HEADER);
    sendMessagesInRoute(messageInOtherTopic, bytesTemplate, "IT test message in other topic".getBytes(), otherTopicHeaders);
    createKafkaBytesMessageConsumer(bytesConsumerConn, TOPIC_BYTES, TOPIC_BYTES_IN_HEADER, messagesLatch);
    boolean allMessagesReceived = messagesLatch.await(200, TimeUnit.MILLISECONDS);
    assertTrue("Not all messages were published to the kafka topics. Not received: " + messagesLatch.getCount(), allMessagesReceived);
    List<Exchange> exchangeList = mockEndpoint.getExchanges();
    assertEquals("Fifteen Exchanges are expected", exchangeList.size(), 15);
    for (Exchange exchange : exchangeList) {
        @SuppressWarnings("unchecked") List<RecordMetadata> recordMetaData1 = (List<RecordMetadata>) (exchange.getIn().getHeader(KafkaConstants.KAFKA_RECORDMETA));
        assertEquals("One RecordMetadata is expected.", recordMetaData1.size(), 1);
        assertTrue("Offset is positive", recordMetaData1.get(0).offset() >= 0);
        assertTrue("Topic Name start with 'test'", recordMetaData1.get(0).topic().startsWith("test"));
    }
}
Also used: HashMap(java.util.HashMap) CountDownLatch(java.util.concurrent.CountDownLatch) Endpoint(org.apache.camel.Endpoint) MockEndpoint(org.apache.camel.component.mock.MockEndpoint) Exchange(org.apache.camel.Exchange) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) ArrayList(java.util.ArrayList) List(java.util.List) Test(org.junit.Test)
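
Outside a test, the same KAFKA_RECORDMETA header can be inspected in a route after the Kafka endpoint; a minimal sketch follows, with RecordMetadataLogger as a hypothetical processor name and its placement in a route assumed, not taken from the Camel code above.

import java.util.List;
import org.apache.camel.Exchange;
import org.apache.camel.Processor;
import org.apache.camel.component.kafka.KafkaConstants;
import org.apache.kafka.clients.producer.RecordMetadata;

// Hypothetical processor, placed after a kafka: producer endpoint in a route.
public class RecordMetadataLogger implements Processor {

    @Override
    @SuppressWarnings("unchecked")
    public void process(Exchange exchange) throws Exception {
        // The Camel Kafka producer stores one RecordMetadata per record it sent.
        final List<RecordMetadata> metadataList =
            (List<RecordMetadata>) exchange.getIn().getHeader(KafkaConstants.KAFKA_RECORDMETA);
        if (metadataList != null) {
            for (RecordMetadata metadata : metadataList) {
                System.out.printf("%s-%d offset %d%n",
                    metadata.topic(), metadata.partition(), metadata.offset());
            }
        }
    }
}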

Aggregations

RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata) 191
Test (org.junit.Test) 64
Node (org.apache.kafka.common.Node) 50
Test (org.junit.jupiter.api.Test) 50
TopicPartition (org.apache.kafka.common.TopicPartition) 48
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord) 46
ExecutionException (java.util.concurrent.ExecutionException) 35
Callback (org.apache.kafka.clients.producer.Callback) 33
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer) 31
Properties (java.util.Properties) 30
HashMap (java.util.HashMap) 24
TimeoutException (org.apache.kafka.common.errors.TimeoutException) 23
ArrayList (java.util.ArrayList) 21
KafkaException (org.apache.kafka.common.KafkaException) 19
List (java.util.List) 15
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 15
Metrics (org.apache.kafka.common.metrics.Metrics) 15
LinkedHashMap (java.util.LinkedHashMap) 13
Future (java.util.concurrent.Future) 13
Map (java.util.Map) 12