Use of org.apache.kafka.clients.producer.RecordMetadata in the project "nifi" by apache.
Example taken from the class PublisherLease, method publish.
/**
 * Publishes a single message to the given topic and registers the outcome with the tracker.
 * The record is sent with a {@code null} partition, so partition selection is left to the
 * producer (key-based when a key is present).
 *
 * @param flowFile             the FlowFile the message originates from
 * @param additionalAttributes extra attributes to emit as record headers
 * @param messageKey           the Kafka record key, may be {@code null}
 * @param messageContent       the Kafka record value
 * @param topic                the destination topic
 * @param tracker              tracker that accumulates sent/acknowledged/failed counts
 */
protected void publish(final FlowFile flowFile, final Map<String, String> additionalAttributes, final byte[] messageKey, final byte[] messageContent, final String topic, final InFlightMessageTracker tracker) {
    final ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(topic, null, messageKey, messageContent);
    addHeaders(flowFile, additionalAttributes, record);

    // Asynchronous completion: acknowledge on success; on failure, record the error
    // against the FlowFile and poison this lease so it is not reused.
    producer.send(record, (metadata, exception) -> {
        if (exception != null) {
            tracker.fail(flowFile, exception);
            poison();
        } else {
            tracker.incrementAcknowledgedCount(flowFile);
        }
    });

    messagesSent.incrementAndGet();
    tracker.incrementSentCount(flowFile);
}
Use of org.apache.kafka.clients.producer.RecordMetadata in the project "nifi" by apache.
Example taken from the class PublisherLease, method publish (overload without additional attributes).
/**
 * Publishes a single message to the given topic and registers the outcome with the tracker.
 * No record headers are added by this overload; partition selection is delegated to the
 * producer since the partition argument is {@code null}.
 *
 * @param flowFile       the FlowFile the message originates from
 * @param messageKey     the Kafka record key, may be {@code null}
 * @param messageContent the Kafka record value
 * @param topic          the destination topic
 * @param tracker        tracker that accumulates sent/acknowledged/failed counts
 */
private void publish(final FlowFile flowFile, final byte[] messageKey, final byte[] messageContent, final String topic, final InFlightMessageTracker tracker) {
    final ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(topic, null, messageKey, messageContent);

    // Asynchronous completion: acknowledge on success; on failure, record the error
    // against the FlowFile and poison this lease so it is not reused.
    producer.send(record, (metadata, exception) -> {
        if (exception != null) {
            tracker.fail(flowFile, exception);
            poison();
        } else {
            tracker.incrementAcknowledgedCount(flowFile);
        }
    });

    tracker.incrementSentCount(flowFile);
}
Use of org.apache.kafka.clients.producer.RecordMetadata in the project "streamsx.kafka" by IBMStreams.
Example taken from the class AtLeastOnceKafkaProducerClient, method tryCancelOutstandingSendRequests.
/**
 * Tries to cancel all send requests that are not yet done.
 * Futures that are already complete are left untouched; the futures list is cleared afterwards.
 *
 * @param mayInterruptIfRunning whether a thread currently executing a send may be interrupted
 */
@Override
public void tryCancelOutstandingSendRequests(boolean mayInterruptIfRunning) {
    // Fixed: log messages previously said "TransactionalKafkaProducerClient" — a copy-paste
    // from the sibling class — which made debug logs misattribute the activity.
    if (logger.isDebugEnabled()) {
        logger.debug("AtLeastOnceKafkaProducerClient -- trying to cancel requests"); //$NON-NLS-1$
    }
    int nCancelled = 0;
    for (Future<RecordMetadata> future : futuresList) {
        // cancel() returns true only when the request was actually cancelled.
        if (!future.isDone() && future.cancel(mayInterruptIfRunning)) {
            ++nCancelled;
        }
    }
    if (logger.isDebugEnabled()) {
        logger.debug("AtLeastOnceKafkaProducerClient -- number of cancelled send requests: " + nCancelled); //$NON-NLS-1$
    }
    futuresList.clear();
}
Use of org.apache.kafka.clients.producer.RecordMetadata in the project "streamsx.kafka" by IBMStreams.
Example taken from the class TransactionalKafkaProducerClient, method tryCancelOutstandingSendRequests.
/**
 * Tries to cancel all send requests that are not yet done.
 * Already-completed futures are skipped; the futures list is cleared afterwards.
 *
 * @param mayInterruptIfRunning whether a thread currently executing a send may be interrupted
 */
@Override
public void tryCancelOutstandingSendRequests(boolean mayInterruptIfRunning) {
    if (logger.isDebugEnabled()) {
        logger.debug("TransactionalKafkaProducerClient -- trying to cancel requests"); //$NON-NLS-1$
    }
    int cancelledCount = 0;
    for (final Future<RecordMetadata> pending : futuresList) {
        // cancel() reports true only when the request was actually cancelled.
        final boolean cancelled = !pending.isDone() && pending.cancel(mayInterruptIfRunning);
        if (cancelled) {
            cancelledCount++;
        }
    }
    if (logger.isDebugEnabled()) {
        logger.debug("TransactionalKafkaProducerClient -- number of cancelled send requests: " + cancelledCount); //$NON-NLS-1$
    }
    futuresList.clear();
}
Use of org.apache.kafka.clients.producer.RecordMetadata in the project "camel" by apache.
Example taken from the class KafkaProducerFullTest, method producedBytesMessageIsReceivedByKafka.
@Test
public void producedBytesMessageIsReceivedByKafka() throws InterruptedException, IOException {
    // Produce to two topics and verify every message is consumed and that the
    // producer attached RecordMetadata to each exchange.
    int messageInTopic = 10;
    int messageInOtherTopic = 5;

    CountDownLatch messagesLatch = new CountDownLatch(messageInTopic + messageInOtherTopic);

    Map<String, Object> inTopicHeaders = new HashMap<>();
    inTopicHeaders.put(KafkaConstants.PARTITION_KEY, "1".getBytes());
    sendMessagesInRoute(messageInTopic, bytesTemplate, "IT test message".getBytes(), inTopicHeaders);

    Map<String, Object> otherTopicHeaders = new HashMap<>();
    otherTopicHeaders.put(KafkaConstants.PARTITION_KEY, "1".getBytes());
    otherTopicHeaders.put(KafkaConstants.TOPIC, TOPIC_BYTES_IN_HEADER);
    sendMessagesInRoute(messageInOtherTopic, bytesTemplate, "IT test message in other topic".getBytes(), otherTopicHeaders);

    createKafkaBytesMessageConsumer(bytesConsumerConn, TOPIC_BYTES, TOPIC_BYTES_IN_HEADER, messagesLatch);

    boolean allMessagesReceived = messagesLatch.await(200, TimeUnit.MILLISECONDS);
    assertTrue("Not all messages were published to the kafka topics. Not received: " + messagesLatch.getCount(), allMessagesReceived);

    List<Exchange> exchangeList = mockEndpoint.getExchanges();
    // Fixed: JUnit assertEquals takes (message, expected, actual); the original passed the
    // actual value in the expected slot, producing misleading failure messages.
    assertEquals("Fifteen Exchanges are expected", 15, exchangeList.size());
    for (Exchange exchange : exchangeList) {
        @SuppressWarnings("unchecked")
        List<RecordMetadata> recordMetaData1 = (List<RecordMetadata>) (exchange.getIn().getHeader(KafkaConstants.KAFKA_RECORDMETA));
        assertEquals("One RecordMetadata is expected.", 1, recordMetaData1.size());
        assertTrue("Offset is positive", recordMetaData1.get(0).offset() >= 0);
        assertTrue("Topic Name start with 'test'", recordMetaData1.get(0).topic().startsWith("test"));
    }
}
Aggregations