
Example 21 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project logging-log4j2 by apache.

From the class KafkaAppenderTest, method testAppend.

@Test
public void testAppend() throws Exception {
    final Appender appender = ctx.getRequiredAppender("KafkaAppender");
    appender.append(createLogEvent());
    // history() captures every record the test's producer was asked to send.
    final List<ProducerRecord<byte[], byte[]>> history = kafka.history();
    assertEquals(1, history.size());
    final ProducerRecord<byte[], byte[]> item = history.get(0);
    assertNotNull(item);
    assertEquals(TOPIC_NAME, item.topic());
    assertNull(item.key());
    assertEquals(LOG_MESSAGE, new String(item.value(), StandardCharsets.UTF_8));
}
Also used: Appender(org.apache.logging.log4j.core.Appender), ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord), Test(org.junit.Test)
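
The kafka object above, with its history() method, matches the capture API of Kafka's own MockProducer, which the test appears to wire in as the appender's producer. A minimal standalone sketch of that pattern (class name, topic, and message are placeholders, not the test's actual fixtures):

import java.nio.charset.StandardCharsets;
import java.util.List;

import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class MockProducerHistorySketch {
    public static void main(String[] args) {
        // autoComplete = true completes every send future immediately.
        MockProducer<byte[], byte[]> producer = new MockProducer<>(
                true, new ByteArraySerializer(), new ByteArraySerializer());

        // Stand-in for what the appender does internally.
        producer.send(new ProducerRecord<>("kafka-appender-topic",
                "Hello, world!".getBytes(StandardCharsets.UTF_8)));

        // history() returns every record the mock has "sent", in order,
        // which is what the assertions above walk over.
        List<ProducerRecord<byte[], byte[]>> history = producer.history();
        System.out.println(history.size());          // 1
        System.out.println(history.get(0).topic());  // kafka-appender-topic
        System.out.println(history.get(0).key());    // null
        System.out.println(new String(history.get(0).value(), StandardCharsets.UTF_8));
    }
}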

Example 22 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project logging-log4j2 by apache.

From the class KafkaManager, method send.

public void send(final byte[] msg) throws ExecutionException, InterruptedException, TimeoutException {
    if (producer != null) {
        ProducerRecord<byte[], byte[]> newRecord = new ProducerRecord<>(topic, msg);
        if (syncSend) {
            Future<RecordMetadata> response = producer.send(newRecord);
            response.get(timeoutMillis, TimeUnit.MILLISECONDS);
        } else {
            producer.send(newRecord, new Callback() {

                @Override
                public void onCompletion(RecordMetadata metadata, Exception e) {
                    if (e != null) {
                        LOGGER.error("Unable to write to Kafka in appender [" + getName() + "]", e);
                    }
                }
            });
        }
    }
}
Also used: RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata), Callback(org.apache.kafka.clients.producer.Callback), ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord), TimeoutException(java.util.concurrent.TimeoutException), ExecutionException(java.util.concurrent.ExecutionException)
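
The syncSend flag above toggles between blocking on the send future and registering a callback. A minimal self-contained sketch of the same two modes against a plain producer (broker address, topic name, and timeout are assumptions for illustration):

import java.util.Properties;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class SyncVsAsyncSendSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");

        try (Producer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            ProducerRecord<byte[], byte[]> record =
                    new ProducerRecord<>("log-events", "msg".getBytes());

            // Synchronous path: block until the broker acknowledges, or time out.
            Future<RecordMetadata> response = producer.send(record);
            response.get(30, TimeUnit.SECONDS);

            // Asynchronous path: return immediately; failures surface in the callback.
            producer.send(record, (metadata, e) -> {
                if (e != null) {
                    System.err.println("Unable to write to Kafka: " + e);
                }
            });
        }
    }
}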

Example 23 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project samza by apache.

From the class TestZkStreamProcessorBase, method produceMessages.

/**
   * Produces the provided number of messages to the topic.
   */
protected void produceMessages(final int start, String topic, int numMessages) {
    KafkaProducer<String, byte[]> producer = getKafkaProducer();
    for (int i = start; i < numMessages + start; i++) {
        try {
            LOG.info("producing " + i);
            // i % 2 alternates records between partitions 0 and 1; the counter is both key and value.
            producer.send(new ProducerRecord<>(topic, i % 2, String.valueOf(i), String.valueOf(i).getBytes())).get();
        } catch (InterruptedException | ExecutionException e) {
            LOG.error("Failed to produce message " + i, e);
        }
    }
}
Also used: KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer), ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord), ExecutionException(java.util.concurrent.ExecutionException)
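
The four-argument ProducerRecord constructor used above pins each record to an explicit partition. A short sketch contrasting the common constructors (topic and payloads are placeholders):

import org.apache.kafka.clients.producer.ProducerRecord;

public class ProducerRecordConstructorsSketch {
    public static void main(String[] args) {
        byte[] value = "payload".getBytes();

        // Topic + value: key is null, partition is chosen by the partitioner.
        ProducerRecord<String, byte[]> r1 = new ProducerRecord<>("events", value);

        // Topic + key + value: equal keys hash to the same partition.
        ProducerRecord<String, byte[]> r2 = new ProducerRecord<>("events", "key-1", value);

        // Topic + explicit partition + key + value: the form used above,
        // where i % 2 alternates records between partitions 0 and 1.
        ProducerRecord<String, byte[]> r3 = new ProducerRecord<>("events", 0, "key-1", value);

        System.out.println(r1.partition()); // null: assigned later by the partitioner
        System.out.println(r2.partition()); // null: assigned later, by hashing the key
        System.out.println(r3.partition()); // 0: fixed by the caller
    }
}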

Example 24 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.

From the class WorkerSourceTask, method sendRecords.

/**
     * Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
     * be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
     * @return true if all messages were sent, false if some need to be retried
     */
private boolean sendRecords() {
    int processed = 0;
    for (final SourceRecord preTransformRecord : toSend) {
        final SourceRecord record = transformationChain.apply(preTransformRecord);
        if (record == null) {
            commitTaskRecord(preTransformRecord);
            continue;
        }
        byte[] key = keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value());
        final ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(record.topic(), record.kafkaPartition(), ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value);
        log.trace("Appending record with key {}, value {}", record.key(), record.value());
        // Track the outstanding message and update the offsets before sending, since the
        // send callback below can fire immediately and must observe this state.
        synchronized (this) {
            if (!lastSendFailed) {
                if (!flushing) {
                    outstandingMessages.put(producerRecord, producerRecord);
                } else {
                    outstandingMessagesBacklog.put(producerRecord, producerRecord);
                }
                // Offsets are converted & serialized in the OffsetWriter
                offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
            }
        }
        try {
            final String topic = producerRecord.topic();
            producer.send(producerRecord, new Callback() {

                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Given the default settings for zero data loss, this should basically never happen --
                        // between "infinite" retries, indefinite blocking on full buffers, and "infinite" request
                        // timeouts, callbacks with exceptions should never be invoked in practice. If the
                        // user overrode these settings, the best we can do is notify them of the failure via
                        // logging.
                        log.error("{} failed to send record to {}: {}", id, topic, e);
                        log.debug("Failed record: {}", preTransformRecord);
                    } else {
                        log.trace("Wrote record successfully: topic {} partition {} offset {}", recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        commitTaskRecord(preTransformRecord);
                    }
                    recordSent(producerRecord);
                }
            });
            lastSendFailed = false;
        } catch (RetriableException e) {
            log.warn("Failed to send {}, backing off before retrying:", producerRecord, e);
            toSend = toSend.subList(processed, toSend.size());
            lastSendFailed = true;
            return false;
        } catch (KafkaException e) {
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }
        processed++;
    }
    toSend = null;
    return true;
}
Also used: SourceRecord(org.apache.kafka.connect.source.SourceRecord), KafkaException(org.apache.kafka.common.KafkaException), TimeoutException(java.util.concurrent.TimeoutException), RetriableException(org.apache.kafka.common.errors.RetriableException), ExecutionException(java.util.concurrent.ExecutionException), ConnectException(org.apache.kafka.connect.errors.ConnectException), RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata), Callback(org.apache.kafka.clients.producer.Callback), ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord)
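
The catch clauses above carry the pattern described in the javadoc: a RetriableException saves the unsent remainder of the batch for a later retry, while any other KafkaException is fatal. A condensed, hypothetical sketch of that control flow outside Connect (names are illustrative, and ConnectException is swapped for a plain IllegalStateException):

import java.util.Collections;
import java.util.List;

import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.RetriableException;

public class RetriableSendSketch {

    // Sends records in order; on a retriable failure, returns the unsent tail so
    // the caller can back off and retry it, mirroring the subList logic above.
    static List<ProducerRecord<byte[], byte[]>> sendBatch(
            Producer<byte[], byte[]> producer,
            List<ProducerRecord<byte[], byte[]>> batch) {
        int processed = 0;
        for (ProducerRecord<byte[], byte[]> record : batch) {
            try {
                producer.send(record); // callbacks omitted for brevity
            } catch (RetriableException e) {
                // Transient failure: hand the remainder back for a later retry.
                return batch.subList(processed, batch.size());
            } catch (KafkaException e) {
                // Anything else is unrecoverable for this batch.
                throw new IllegalStateException("Unrecoverable exception trying to send", e);
            }
            processed++;
        }
        return Collections.emptyList(); // everything was sent
    }
}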

Example 25 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.

From the class WorkerSourceTaskTest, method testSendRecordsConvertsData.

@Test
public void testSendRecordsConvertsData() throws Exception {
    createWorkerTask();
    List<SourceRecord> records = new ArrayList<>();
    // Can just use the same record for key and value
    records.add(new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD));
    Capture<ProducerRecord<byte[], byte[]>> sent = expectSendRecordAnyTimes();
    PowerMock.replayAll();
    Whitebox.setInternalState(workerTask, "toSend", records);
    Whitebox.invokeMethod(workerTask, "sendRecords");
    assertEquals(SERIALIZED_KEY, sent.getValue().key());
    assertEquals(SERIALIZED_RECORD, sent.getValue().value());
    PowerMock.verifyAll();
}
Also used: ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord), ArrayList(java.util.ArrayList), SourceRecord(org.apache.kafka.connect.source.SourceRecord), ThreadedTest(org.apache.kafka.connect.util.ThreadedTest), Test(org.junit.Test)
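
The conversion this test verifies, Connect data serialized into the byte[] key and value of a ProducerRecord, can also be exercised directly against a Converter. A small sketch using JsonConverter as a stand-in for the worker's configured converter (topic and payload are placeholders, not the test's fixtures):

import java.util.Collections;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.json.JsonConverter;

public class ConverterSketch {
    public static void main(String[] args) {
        // JsonConverter stands in for whatever Converter the worker is configured with.
        JsonConverter converter = new JsonConverter();
        converter.configure(Collections.singletonMap("schemas.enable", "false"), false);

        // fromConnectData turns Connect data into the byte[] a ProducerRecord carries,
        // which is the conversion the assertions above check.
        byte[] value = converter.fromConnectData("topic", Schema.STRING_SCHEMA, "hello");
        ProducerRecord<byte[], byte[]> record = new ProducerRecord<>("topic", value);

        System.out.println(new String(record.value())); // "hello" (JSON-encoded, so quoted)
    }
}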

Aggregations

ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 51
Test (org.junit.Test): 29
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 14
Future (java.util.concurrent.Future): 11
ArrayList (java.util.ArrayList): 10
Properties (java.util.Properties): 10
Callback (org.apache.kafka.clients.producer.Callback): 10
TopicPartition (org.apache.kafka.common.TopicPartition): 8
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 7
ExecutionException (java.util.concurrent.ExecutionException): 6
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 6
Serializer (org.apache.kafka.common.serialization.Serializer): 6
MockProcessorContext (org.apache.kafka.test.MockProcessorContext): 6
NoOpRecordCollector (org.apache.kafka.test.NoOpRecordCollector): 6
MockProducer (org.apache.kafka.clients.producer.MockProducer): 5
DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner): 5
SourceRecord (org.apache.kafka.connect.source.SourceRecord): 5
HashMap (java.util.HashMap): 4
Endpoint (org.apache.camel.Endpoint): 4
MockEndpoint (org.apache.camel.component.mock.MockEndpoint): 4