Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.
The class WorkerSourceTask, method convertTransformedRecord.
/**
 * Convert the source record into a producer record.
 *
 * @param record the transformed record
 * @return the producer record which can be sent over to Kafka. Null is returned if the input is null or
 *         if an error was encountered during any of the converter stages.
 */
private ProducerRecord<byte[], byte[]> convertTransformedRecord(SourceRecord record) {
    if (record == null) {
        return null;
    }
    RecordHeaders headers = retryWithToleranceOperator.execute(() -> convertHeaderFor(record), Stage.HEADER_CONVERTER, headerConverter.getClass());
    byte[] key = retryWithToleranceOperator.execute(() -> keyConverter.fromConnectData(record.topic(), headers, record.keySchema(), record.key()), Stage.KEY_CONVERTER, keyConverter.getClass());
    byte[] value = retryWithToleranceOperator.execute(() -> valueConverter.fromConnectData(record.topic(), headers, record.valueSchema(), record.value()), Stage.VALUE_CONVERTER, valueConverter.getClass());
    if (retryWithToleranceOperator.failed()) {
        return null;
    }
    return new ProducerRecord<>(record.topic(), record.kafkaPartition(), ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value, headers);
}
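For reference, a minimal self-contained sketch of the same six-argument ProducerRecord constructor used on the last line above (topic, partition, timestamp, key, value, headers). The topic name, header key, and payloads are invented for illustration:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class ProducerRecordSketch {
    public static void main(String[] args) {
        RecordHeaders headers = new RecordHeaders();
        // Header values are raw bytes, just like the converted key and value.
        headers.add("origin", "worker-1".getBytes(StandardCharsets.UTF_8));
        // Partition and timestamp are boxed types, so either may be null;
        // a null partition defers to the partitioner, a null timestamp to the producer.
        ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(
                "example-topic",               // topic (hypothetical)
                null,                          // partition: null lets the partitioner choose
                System.currentTimeMillis(),    // timestamp
                "key".getBytes(StandardCharsets.UTF_8),
                "value".getBytes(StandardCharsets.UTF_8),
                headers);
        System.out.println(record);
    }
}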
Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.
The class ProducerInterceptorsTest, method testOnAcknowledgementWithErrorChain.
@Test
public void testOnAcknowledgementWithErrorChain() {
    List<ProducerInterceptor<Integer, String>> interceptorList = new ArrayList<>();
    AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One");
    interceptorList.add(interceptor1);
    ProducerInterceptors<Integer, String> interceptors = new ProducerInterceptors<>(interceptorList);
    // verify that metadata contains both topic and partition
    interceptors.onSendError(producerRecord, new TopicPartition(producerRecord.topic(), producerRecord.partition()), new KafkaException("Test"));
    assertEquals(1, onErrorAckCount);
    assertEquals(1, onErrorAckWithTopicPartitionSetCount);
    // verify that metadata contains both topic and partition (because record already contains partition)
    interceptors.onSendError(producerRecord, null, new KafkaException("Test"));
    assertEquals(2, onErrorAckCount);
    assertEquals(2, onErrorAckWithTopicPartitionSetCount);
    // if producer record does not contain partition, interceptor should get partition == -1
    ProducerRecord<Integer, String> record2 = new ProducerRecord<>("test2", null, 1, "value");
    interceptors.onSendError(record2, null, new KafkaException("Test"));
    assertEquals(3, onErrorAckCount);
    assertEquals(3, onErrorAckWithTopicSetCount);
    assertEquals(2, onErrorAckWithTopicPartitionSetCount);
    // if producer record does not contain partition, but topic/partition is passed to
    // onSendError, then interceptor should get valid partition
    int reassignedPartition = producerRecord.partition() + 1;
    interceptors.onSendError(record2, new TopicPartition(record2.topic(), reassignedPartition), new KafkaException("Test"));
    assertEquals(4, onErrorAckCount);
    assertEquals(4, onErrorAckWithTopicSetCount);
    assertEquals(3, onErrorAckWithTopicPartitionSetCount);
    // if both record and topic/partition are null, interceptor should not receive metadata
    interceptors.onSendError(null, null, new KafkaException("Test"));
    assertEquals(5, onErrorAckCount);
    assertEquals(4, onErrorAckWithTopicSetCount);
    assertEquals(3, onErrorAckWithTopicPartitionSetCount);
    interceptors.close();
}
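ProducerInterceptors here is Kafka's internal wrapper that fans onSendError out to each registered interceptor's onAcknowledgement callback. A minimal sketch of a user-side interceptor that would observe those callbacks; the class name and log output are illustrative, not part of the test above:

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

// Hypothetical interceptor; it would be registered via the
// "interceptor.classes" producer property.
public class LoggingProducerInterceptor implements ProducerInterceptor<Integer, String> {
    @Override
    public ProducerRecord<Integer, String> onSend(ProducerRecord<Integer, String> record) {
        return record; // may return a modified record
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        // On a send error, metadata may be null (neither record nor topic/partition known)
        // or carry partition -1 when only the topic is known, which is the behavior
        // the counters in the test above track.
        if (exception != null) {
            System.err.println("send failed for " + metadata + ": " + exception);
        }
    }

    @Override
    public void close() { }

    @Override
    public void configure(Map<String, ?> configs) { }
}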
Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.
The class WorkerSourceTaskTest, method testSendRecordsPropagatesTimestamp.
@Test
public void testSendRecordsPropagatesTimestamp() throws Exception {
    final Long timestamp = System.currentTimeMillis();
    createWorkerTask();
    List<SourceRecord> records = Collections.singletonList(new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp));
    Capture<ProducerRecord<byte[], byte[]>> sent = expectSendRecordAnyTimes();
    expectTopicCreation(TOPIC);
    PowerMock.replayAll();
    Whitebox.setInternalState(workerTask, "toSend", records);
    Whitebox.invokeMethod(workerTask, "sendRecords");
    assertEquals(timestamp, sent.getValue().timestamp());
    PowerMock.verifyAll();
}
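The test drives the full Connect send path, but the property being verified is simply that a ProducerRecord hands back the timestamp it was constructed with. A standalone sketch of that behavior; the topic and payloads are made up:

import org.apache.kafka.clients.producer.ProducerRecord;

public class TimestampSketch {
    public static void main(String[] args) {
        Long timestamp = System.currentTimeMillis();
        // Five-argument overload: topic, partition (null here), timestamp, key, value.
        ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(
                "topic", null, timestamp, "key".getBytes(), "value".getBytes());
        // timestamp() returns exactly what was passed in (or null if none was given).
        System.out.println(timestamp.equals(record.timestamp())); // true
    }
}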
Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.
The class ErrorReporterTest, method testDlqHeaderIsAppended.
@Test
public void testDlqHeaderIsAppended() {
    Map<String, String> props = new HashMap<>();
    props.put(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC);
    props.put(SinkConnectorConfig.DLQ_CONTEXT_HEADERS_ENABLE_CONFIG, "true");
    DeadLetterQueueReporter deadLetterQueueReporter = new DeadLetterQueueReporter(producer, config(props), TASK_ID, errorHandlingMetrics);
    ProcessingContext context = new ProcessingContext();
    context.consumerRecord(new ConsumerRecord<>("source-topic", 7, 10, "source-key".getBytes(), "source-value".getBytes()));
    context.currentContext(Stage.TRANSFORMATION, Transformation.class);
    context.error(new ConnectException("Test Exception"));
    ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(DLQ_TOPIC, "source-key".getBytes(), "source-value".getBytes());
    producerRecord.headers().add(ERROR_HEADER_ORIG_TOPIC, "dummy".getBytes());
    deadLetterQueueReporter.populateContextHeaders(producerRecord, context);
    int appearances = 0;
    for (Header header : producerRecord.headers()) {
        if (ERROR_HEADER_ORIG_TOPIC.equalsIgnoreCase(header.key())) {
            appearances++;
        }
    }
    assertEquals("source-topic", headerValue(producerRecord, ERROR_HEADER_ORIG_TOPIC));
    assertEquals(2, appearances);
}
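The test leans on Kafka headers being multi-valued: adding the same key twice yields two entries when iterating, which is why two appearances are expected. A standalone sketch of that behavior; the header key below is a hypothetical stand-in for ERROR_HEADER_ORIG_TOPIC:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;

public class HeaderSketch {
    public static void main(String[] args) {
        ProducerRecord<byte[], byte[]> record =
                new ProducerRecord<>("dlq-topic", "key".getBytes(), "value".getBytes());
        // Adding the same header key twice keeps both entries; headers are a list, not a map.
        record.headers().add("errors.origin.topic", "dummy".getBytes(StandardCharsets.UTF_8));
        record.headers().add("errors.origin.topic", "source-topic".getBytes(StandardCharsets.UTF_8));
        int appearances = 0;
        for (Header header : record.headers()) {
            if ("errors.origin.topic".equals(header.key())) {
                appearances++;
            }
        }
        System.out.println(appearances); // 2
    }
}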
Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.
The class SmokeTestDriver, method generatePerpetually.
static void generatePerpetually(final String kafka, final int numKeys, final int maxRecordsPerKey) {
    final Properties producerProps = generatorProperties(kafka);
    int numRecordsProduced = 0;
    final ValueList[] data = new ValueList[numKeys];
    for (int i = 0; i < numKeys; i++) {
        data[i] = new ValueList(i, i + maxRecordsPerKey - 1);
    }
    final Random rand = new Random();
    try (final KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(producerProps)) {
        while (true) {
            final int index = rand.nextInt(numKeys);
            final String key = data[index].key;
            final int value = data[index].next();
            final ProducerRecord<byte[], byte[]> record = new ProducerRecord<>("data", stringSerde.serializer().serialize("", key), intSerde.serializer().serialize("", value));
            producer.send(record);
            numRecordsProduced++;
            if (numRecordsProduced % 100 == 0) {
                System.out.println(Instant.now() + " " + numRecordsProduced + " records produced");
            }
            Utils.sleep(2);
        }
    }
}
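generatorProperties is not shown in this snippet. A plausible minimal configuration for a byte-array producer like the one above might look like the following sketch; this is an assumption for illustration, not the actual SmokeTestDriver code:

import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArraySerializer;

// Hypothetical stand-in for generatorProperties(kafka); the real method may set more.
static Properties generatorProperties(String kafka) {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    return props;
}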