
Example 96 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project camel by apache.

the class KafkaProducerFullTest method producedStringCollectionMessageIsReceivedByKafka.

@Test
public void producedStringCollectionMessageIsReceivedByKafka() throws InterruptedException, IOException {
    int messageInTopic = 10;
    int messageInOtherTopic = 5;
    CountDownLatch messagesLatch = new CountDownLatch(messageInTopic + messageInOtherTopic);
    List<String> msgs = new ArrayList<String>();
    for (int x = 0; x < messageInTopic; x++) {
        msgs.add("Message " + x);
    }
    sendMessagesInRoute(1, stringsTemplate, msgs, KafkaConstants.PARTITION_KEY, "1");
    msgs = new ArrayList<String>();
    for (int x = 0; x < messageInOtherTopic; x++) {
        msgs.add("Other Message " + x);
    }
    sendMessagesInRoute(1, stringsTemplate, msgs, KafkaConstants.PARTITION_KEY, "1", KafkaConstants.TOPIC, TOPIC_STRINGS_IN_HEADER);
    createKafkaMessageConsumer(stringsConsumerConn, TOPIC_STRINGS, TOPIC_STRINGS_IN_HEADER, messagesLatch);
    boolean allMessagesReceived = messagesLatch.await(200, TimeUnit.MILLISECONDS);
    assertTrue("Not all messages were published to the kafka topics. Not received: " + messagesLatch.getCount(), allMessagesReceived);
    List<Exchange> exchangeList = mockEndpoint.getExchanges();
    assertEquals("Two Exchanges are expected", exchangeList.size(), 2);
    Exchange e1 = exchangeList.get(0);
    @SuppressWarnings("unchecked") List<RecordMetadata> recordMetaData1 = (List<RecordMetadata>) (e1.getIn().getHeader(KafkaConstants.KAFKA_RECORDMETA));
    assertEquals("Ten RecordMetadata is expected.", recordMetaData1.size(), 10);
    for (RecordMetadata recordMeta : recordMetaData1) {
        assertTrue("Offset is positive", recordMeta.offset() >= 0);
        assertTrue("Topic Name start with 'test'", recordMeta.topic().startsWith("test"));
    }
    Exchange e2 = exchangeList.get(1);
    @SuppressWarnings("unchecked") List<RecordMetadata> recordMetaData2 = (List<RecordMetadata>) (e2.getIn().getHeader(KafkaConstants.KAFKA_RECORDMETA));
    assertEquals("Five RecordMetadata is expected.", recordMetaData2.size(), 5);
    for (RecordMetadata recordMeta : recordMetaData2) {
        assertTrue("Offset is positive", recordMeta.offset() >= 0);
        assertTrue("Topic Name start with 'test'", recordMeta.topic().startsWith("test"));
    }
}
Also used : ArrayList(java.util.ArrayList) CountDownLatch(java.util.concurrent.CountDownLatch) Endpoint(org.apache.camel.Endpoint) MockEndpoint(org.apache.camel.component.mock.MockEndpoint) Exchange(org.apache.camel.Exchange) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) ArrayList(java.util.ArrayList) List(java.util.List) Test(org.junit.Test)

Example 97 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project camel by apache.

the class KafkaProducerTest method processAsyncSendsMessage.

@Test
public void processAsyncSendsMessage() throws Exception {
    endpoint.getConfiguration().setTopic("sometopic");
    Mockito.when(exchange.getIn()).thenReturn(in);
    Mockito.when(exchange.getOut()).thenReturn(out);
    in.setHeader(KafkaConstants.PARTITION_KEY, 4);
    producer.process(exchange, callback);
    ArgumentCaptor<Callback> callBackCaptor = ArgumentCaptor.forClass(Callback.class);
    Mockito.verify(producer.getKafkaProducer()).send(Matchers.any(ProducerRecord.class), callBackCaptor.capture());
    Callback kafkaCallback = callBackCaptor.getValue();
    // Simulate the broker acknowledging the send; this 3-arg RecordMetadata
    // constructor exists only in the older kafka-clients versions this test targets.
    kafkaCallback.onCompletion(new RecordMetadata(null, 1, 1), null);
    assertRecordMetadataExists();
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) AsyncCallback(org.apache.camel.AsyncCallback) Callback(org.apache.kafka.clients.producer.Callback) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Test(org.junit.Test)

Example 98 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project opennms by OpenNMS.

the class KafkaRemoteMessageDispatcherFactory method dispatch.

@Override
public <S extends Message, T extends Message> void dispatch(SinkModule<S, T> module, String topic, T message) {
    try (MDCCloseable mdc = Logging.withPrefixCloseable(MessageConsumerManager.LOG_PREFIX)) {
        LOG.trace("dispatch({}): sending message {}", topic, message);
        final ProducerRecord<String, byte[]> record = new ProducerRecord<>(topic, module.marshal(message));
        try {
            // From KafkaProducer's JavaDoc: The producer is thread safe and should generally be shared among all threads for best performance.
            final Future<RecordMetadata> future = producer.send(record);
            // The call to dispatch() is synchronous, so we block until the message has been sent
            future.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag for callers
            LOG.warn("Interrupted while sending message to topic {}.", topic, e);
        } catch (ExecutionException e) {
            LOG.error("Error occured while sending message to topic {}.", topic, e);
        }
    }
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) MDCCloseable(org.opennms.core.logging.Logging.MDCCloseable) ExecutionException(java.util.concurrent.ExecutionException)

Example 99 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project divolte-collector by divolte.

the class KafkaFlusher method sendBatch.

@Override
protected ImmutableList<ProducerRecord<DivolteIdentifier, AvroRecordBuffer>> sendBatch(final List<ProducerRecord<DivolteIdentifier, AvroRecordBuffer>> batch) throws InterruptedException {
    // First start sending the messages.
    // (This will serialize them, determine the partition and then assign them to a per-partition buffer.)
    final int batchSize = batch.size();
    final List<Future<RecordMetadata>> sendResults = batch.stream().map(producer::send).collect(Collectors.toCollection(() -> new ArrayList<>(batchSize)));
    // Force a flush so we can check the results without blocking unnecessarily due to
    // a user-configured flushing policy.
    producer.flush();
    // When finished, each message can be in one of several states.
    // - Completed.
    // - An error occurred, but a retry may succeed.
    // - A fatal error occurred.
    // (In addition, we can be interrupted due to shutdown.)
    final ImmutableList.Builder<ProducerRecord<DivolteIdentifier, AvroRecordBuffer>> remaining = ImmutableList.builder();
    for (int i = 0; i < batchSize; ++i) {
        final Future<RecordMetadata> result = sendResults.get(i);
        try {
            final RecordMetadata metadata = result.get();
            if (logger.isDebugEnabled()) {
                final ProducerRecord<DivolteIdentifier, AvroRecordBuffer> record = batch.get(i);
                logger.debug("Finished sending event (partyId={}) to Kafka: topic/partition/offset = {}/{}/{}", record.key(), metadata.topic(), metadata.partition(), metadata.offset());
            }
        } catch (final ExecutionException e) {
            final Throwable cause = e.getCause();
            final ProducerRecord<DivolteIdentifier, AvroRecordBuffer> record = batch.get(i);
            if (cause instanceof RetriableException) {
                // A retry may succeed.
                if (logger.isDebugEnabled()) {
                    logger.debug("Transient error sending event (partyId=" + record.key() + ") to Kafka. Will retry.", cause);
                }
                remaining.add(record);
            } else {
                // Fatal error.
                logger.error("Error sending event (partyId=" + record.key() + ") to Kafka; abandoning.", cause);
            }
        }
    }
    return remaining.build();
}
Also used : ImmutableList(com.google.common.collect.ImmutableList) ArrayList(java.util.ArrayList) AvroRecordBuffer(io.divolte.server.AvroRecordBuffer) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) DivolteIdentifier(io.divolte.server.DivolteIdentifier) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Future(java.util.concurrent.Future) ExecutionException(java.util.concurrent.ExecutionException) RetriableException(org.apache.kafka.common.errors.RetriableException)

Example 100 with RecordMetadata

use of org.apache.kafka.clients.producer.RecordMetadata in project jeesuite-libs by vakinge.

the class DefaultTopicProducer method doSyncSend.

private boolean doSyncSend(String topicName, String messageKey, DefaultMessage message) {
    try {
        Future<RecordMetadata> future = kafkaProducer.send(new ProducerRecord<String, Object>(topicName, messageKey, message.sendBodyOnly() ? message.getBody() : message));
        RecordMetadata metadata = future.get();
        for (ProducerEventHandler handler : eventHanlders) {
            try {
                handler.onSuccessed(topicName, metadata);
            } catch (Exception e) {
                // deliberately swallowed: a failing event handler must not break the send itself
            }
        }
        if (log.isDebugEnabled()) {
            log.debug("kafka_send_success,topic=" + topicName + ", messageId=" + messageKey + ", partition=" + metadata.partition() + ", offset=" + metadata.offset());
        }
        return true;
    } catch (Exception ex) {
        log.error("kafka_send_fail,topic=" + topicName + ",messageId=" + messageKey, ex);
        // synchronous send: propagate the exception directly to the caller
        throw new RuntimeException(ex);
    }
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) ProducerEventHandler(com.jeesuite.kafka.producer.handler.ProducerEventHandler)

Aggregations

RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata)191 Test (org.junit.Test)64 Node (org.apache.kafka.common.Node)50 Test (org.junit.jupiter.api.Test)50 TopicPartition (org.apache.kafka.common.TopicPartition)48 ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord)46 ExecutionException (java.util.concurrent.ExecutionException)35 Callback (org.apache.kafka.clients.producer.Callback)33 KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer)31 Properties (java.util.Properties)30 HashMap (java.util.HashMap)24 TimeoutException (org.apache.kafka.common.errors.TimeoutException)23 ArrayList (java.util.ArrayList)21 KafkaException (org.apache.kafka.common.KafkaException)19 List (java.util.List)15 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)15 Metrics (org.apache.kafka.common.metrics.Metrics)15 LinkedHashMap (java.util.LinkedHashMap)13 Future (java.util.concurrent.Future)13 Map (java.util.Map)12