Use of org.apache.kafka.clients.producer.ProducerRecord in project logging-log4j2 by apache: class KafkaAppenderTest, method testAppend.
@Test
public void testAppend() throws Exception {
    final Appender appender = ctx.getRequiredAppender("KafkaAppender");
    appender.append(createLogEvent());
    final List<ProducerRecord<byte[], byte[]>> history = kafka.history();
    assertEquals(1, history.size());
    final ProducerRecord<byte[], byte[]> item = history.get(0);
    assertNotNull(item);
    assertEquals(TOPIC_NAME, item.topic());
    assertNull(item.key());
    assertEquals(LOG_MESSAGE, new String(item.value(), StandardCharsets.UTF_8));
}
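The kafka.history() call suggests this test is backed by Kafka's MockProducer, which records every sent ProducerRecord in memory instead of contacting a broker. A minimal sketch of that pattern, assuming a MockProducer stands in for the real producer (the topic name and payload here are illustrative):

import java.nio.charset.StandardCharsets;
import java.util.List;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class MockProducerSketch {
    public static void main(String[] args) {
        // autoComplete=true makes send() succeed immediately without a broker.
        MockProducer<byte[], byte[]> producer = new MockProducer<>(
                true, new ByteArraySerializer(), new ByteArraySerializer());

        producer.send(new ProducerRecord<>("log-topic",
                "Test message".getBytes(StandardCharsets.UTF_8)));

        // history() returns every record the mock has "sent", in order,
        // which is what the assertions in the test above inspect.
        List<ProducerRecord<byte[], byte[]>> history = producer.history();
        System.out.println(history.size() + " record(s) sent to "
                + history.get(0).topic());
    }
}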
Use of org.apache.kafka.clients.producer.ProducerRecord in project logging-log4j2 by apache: class KafkaManager, method send.
public void send(final byte[] msg) throws ExecutionException, InterruptedException, TimeoutException {
    if (producer != null) {
        final ProducerRecord<byte[], byte[]> newRecord = new ProducerRecord<>(topic, msg);
        if (syncSend) {
            final Future<RecordMetadata> response = producer.send(newRecord);
            response.get(timeoutMillis, TimeUnit.MILLISECONDS);
        } else {
            producer.send(newRecord, new Callback() {
                @Override
                public void onCompletion(final RecordMetadata metadata, final Exception e) {
                    if (e != null) {
                        LOGGER.error("Unable to write to Kafka in appender [" + getName() + "]", e);
                    }
                }
            });
        }
    }
}
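This method shows the two send modes the producer API offers: blocking on the returned Future (synchronous, with a timeout) versus registering a Callback (asynchronous, fire-and-forget with error reporting). A minimal standalone sketch of both modes, assuming a broker at localhost:9092 and an illustrative topic name:

import java.nio.charset.StandardCharsets;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class SendModes {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");

        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(
                    "app-log", "hello".getBytes(StandardCharsets.UTF_8));

            // Synchronous: block until the broker acknowledges, or time out.
            RecordMetadata meta = producer.send(record).get(30, TimeUnit.SECONDS);
            System.out.println("acked at offset " + meta.offset());

            // Asynchronous: return immediately; the callback reports the outcome.
            producer.send(record, (metadata, e) -> {
                if (e != null) {
                    System.err.println("send failed: " + e);
                }
            });
        }
    }
}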
Use of org.apache.kafka.clients.producer.ProducerRecord in project samza by apache: class TestZkStreamProcessorBase, method produceMessages.
/**
 * Produces the provided number of messages to the topic.
 */
protected void produceMessages(final int start, String topic, int numMessages) {
    KafkaProducer<String, byte[]> producer = getKafkaProducer();
    for (int i = start; i < numMessages + start; i++) {
        try {
            LOG.info("producing " + i);
            // Partition i % 2 alternates the messages across two partitions;
            // get() blocks so each message is acknowledged before the next is sent.
            producer.send(new ProducerRecord<>(topic, i % 2, String.valueOf(i),
                    String.valueOf(i).getBytes(StandardCharsets.UTF_8))).get();
        } catch (InterruptedException | ExecutionException e) {
            LOG.error("Failed to produce message " + i, e);
        }
    }
}
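This snippet uses the four-argument ProducerRecord constructor, where the second argument is an explicit partition number rather than a key. A short sketch contrasting the common constructor overloads (the topic and values are illustrative):

import org.apache.kafka.clients.producer.ProducerRecord;

public class RecordConstructors {
    public static void main(String[] args) {
        // Partition chosen later by the partitioner (hash of the key).
        ProducerRecord<String, String> byKey =
                new ProducerRecord<>("events", "user-42", "payload");

        // Explicit partition (here 0); the key is still stored with the record.
        ProducerRecord<String, String> byPartition =
                new ProducerRecord<>("events", 0, "user-42", "payload");

        // No key at all: records are spread across partitions.
        ProducerRecord<String, String> noKey =
                new ProducerRecord<>("events", "payload");

        System.out.println(byKey.partition());       // null: not yet assigned
        System.out.println(byPartition.partition()); // 0
        System.out.println(noKey.key());             // null
    }
}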
Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache: class WorkerSourceTask, method sendRecords.
/**
 * Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
 * be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
 * @return true if all messages were sent, false if some need to be retried
 */
private boolean sendRecords() {
    int processed = 0;
    for (final SourceRecord preTransformRecord : toSend) {
        final SourceRecord record = transformationChain.apply(preTransformRecord);
        if (record == null) {
            commitTaskRecord(preTransformRecord);
            continue;
        }
        byte[] key = keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value());
        final ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(record.topic(), record.kafkaPartition(),
                ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value);
        log.trace("Appending record with key {}, value {}", record.key(), record.value());
        // Register the outstanding message and its source offsets before sending, so the
        // commit logic sees the record even if the producer callback fires immediately.
        synchronized (this) {
            if (!lastSendFailed) {
                if (!flushing) {
                    outstandingMessages.put(producerRecord, producerRecord);
                } else {
                    outstandingMessagesBacklog.put(producerRecord, producerRecord);
                }
                // Offsets are converted & serialized in the OffsetWriter
                offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
            }
        }
        try {
            final String topic = producerRecord.topic();
            producer.send(producerRecord, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Given the default settings for zero data loss, this should basically never happen --
                        // between "infinite" retries, indefinite blocking on full buffers, and "infinite" request
                        // timeouts, callbacks with exceptions should never be invoked in practice. If the
                        // user overrode these settings, the best we can do is notify them of the failure via
                        // logging.
                        log.error("{} failed to send record to {}: {}", id, topic, e);
                        log.debug("Failed record: {}", preTransformRecord);
                    } else {
                        log.trace("Wrote record successfully: topic {} partition {} offset {}",
                                recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        commitTaskRecord(preTransformRecord);
                    }
                    recordSent(producerRecord);
                }
            });
            lastSendFailed = false;
        } catch (RetriableException e) {
            log.warn("Failed to send {}, backing off before retrying:", producerRecord, e);
            toSend = toSend.subList(processed, toSend.size());
            lastSendFailed = true;
            return false;
        } catch (KafkaException e) {
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }
        processed++;
    }
    toSend = null;
    return true;
}
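The worker builds each ProducerRecord with the five-argument constructor so the Connect record's timestamp and partition are carried through to Kafka. A small sketch of that overload, with illustrative values:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.producer.ProducerRecord;

public class TimestampedRecord {
    public static void main(String[] args) {
        byte[] key = "k".getBytes(StandardCharsets.UTF_8);
        byte[] value = "v".getBytes(StandardCharsets.UTF_8);

        // Arguments: topic, partition (nullable), timestamp (nullable, epoch millis), key, value.
        // A null partition defers to the partitioner; a null timestamp lets the
        // producer or broker assign one.
        ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(
                "connect-topic", null, System.currentTimeMillis(), key, value);

        System.out.println(record.timestamp());
    }
}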
Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache: class WorkerSourceTaskTest, method testSendRecordsConvertsData.
@Test
public void testSendRecordsConvertsData() throws Exception {
    createWorkerTask();
    List<SourceRecord> records = new ArrayList<>();
    // Can just use the same record for key and value
    records.add(new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD));
    Capture<ProducerRecord<byte[], byte[]>> sent = expectSendRecordAnyTimes();
    PowerMock.replayAll();
    Whitebox.setInternalState(workerTask, "toSend", records);
    Whitebox.invokeMethod(workerTask, "sendRecords");
    assertEquals(SERIALIZED_KEY, sent.getValue().key());
    assertEquals(SERIALIZED_RECORD, sent.getValue().value());
    PowerMock.verifyAll();
}
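The expectSendRecordAnyTimes() helper is not shown here; its essential job is to capture the ProducerRecord that sendRecords() hands to the mocked producer. A hedged sketch of how such a capture could look with EasyMock (the helper name, the mocked producer parameter, and the null Future return are assumptions, not the actual test code):

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.easymock.Capture;
import org.easymock.EasyMock;

public class CaptureSketch {
    static Capture<ProducerRecord<byte[], byte[]>> expectSend(
            KafkaProducer<byte[], byte[]> mockProducer) {
        // Capture whatever record the code under test passes to send(),
        // accept any callback, and stub out the returned Future (unused here).
        Capture<ProducerRecord<byte[], byte[]>> sent = EasyMock.newCapture();
        EasyMock.expect(mockProducer.send(EasyMock.capture(sent),
                EasyMock.anyObject(Callback.class))).andStubReturn(null);
        return sent;
    }
}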