Use of org.apache.kafka.clients.producer.ProducerRecord in project nakadi by zalando.
From the class KafkaTopicRepository, method publishItem:
private static CompletableFuture<Exception> publishItem(
        final Producer<String, String> producer,
        final String topicId,
        final BatchItem item,
        final HystrixKafkaCircuitBreaker circuitBreaker) throws EventPublishingException {
    try {
        final CompletableFuture<Exception> result = new CompletableFuture<>();
        // The record targets an explicit partition; the partition id doubles as the record key.
        final ProducerRecord<String, String> kafkaRecord = new ProducerRecord<>(
                topicId,
                KafkaCursor.toKafkaPartition(item.getPartition()),
                item.getPartition(),
                item.dumpEventToString());
        circuitBreaker.markStart();
        producer.send(kafkaRecord, (metadata, exception) -> {
            if (null != exception) {
                LOG.warn("Failed to publish to kafka topic {}", topicId, exception);
                item.updateStatusAndDetail(EventPublishingStatus.FAILED, "internal error");
                // Only connection-level failures should trip the circuit breaker.
                if (hasKafkaConnectionException(exception)) {
                    circuitBreaker.markFailure();
                } else {
                    circuitBreaker.markSuccessfully();
                }
                // The future completes with the exception as a value, not exceptionally.
                result.complete(exception);
            } else {
                item.updateStatusAndDetail(EventPublishingStatus.SUBMITTED, "");
                circuitBreaker.markSuccessfully();
                result.complete(null);
            }
        });
        return result;
    } catch (final InterruptException e) {
        Thread.currentThread().interrupt();
        circuitBreaker.markSuccessfully();
        item.updateStatusAndDetail(EventPublishingStatus.FAILED, "internal error");
        throw new EventPublishingException("Error publishing message to kafka", e);
    } catch (final RuntimeException e) {
        circuitBreaker.markSuccessfully();
        item.updateStatusAndDetail(EventPublishingStatus.FAILED, "internal error");
        throw new EventPublishingException("Error publishing message to kafka", e);
    }
}
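Note the convention here: the future completes with the exception as a value (null on success) rather than completing exceptionally, so a caller can await an entire batch without one failure short-circuiting the rest. A minimal sketch of consuming that convention, assuming a hypothetical `batch` collection alongside the fields above (the timeout is illustrative; imports from java.util and java.util.concurrent are elided as in the snippets on this page):

    // Fire off every item, then wait for all send callbacks to run.
    final List<CompletableFuture<Exception>> sends = new ArrayList<>();
    for (final BatchItem item : batch) {
        sends.add(publishItem(producer, topicId, item, circuitBreaker));
    }
    // get() throws on interrupt/timeout; assumed to run in a method declaring throws Exception.
    CompletableFuture.allOf(sends.toArray(new CompletableFuture[0])).get(30, TimeUnit.SECONDS);
    // A non-null value marks the corresponding item as failed.
    final boolean anyFailed = sends.stream().map(CompletableFuture::join).anyMatch(Objects::nonNull);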
Use of org.apache.kafka.clients.producer.ProducerRecord in project hazelcast-jet by hazelcast.
From the class KafkaSinkTest, method testWriteToSpecificPartitions:
@Test
public void testWriteToSpecificPartitions() throws Exception {
    String localTopic = topic;
    Pipeline p = Pipeline.create();
    p.drawFrom(Sources.<String, String>map(SOURCE_IMAP_NAME))
     .drainTo(KafkaSinks.kafka(properties,
             e -> new ProducerRecord<>(localTopic, Integer.valueOf(e.getKey()), e.getKey(), e.getValue())));
    instance.newJob(p).join();
    assertTopicContentsEventually(sourceIMap, true);
}
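The extractor relies on the four-argument ProducerRecord constructor, (topic, partition, key, value), so each map entry is routed to the partition named by its key rather than through the default partitioner. The same constructor in isolation, outside any Jet pipeline (broker address and topic name are placeholders, and the snippet assumes a method declaring throws Exception):

    final Properties props = new Properties();
    props.setProperty("bootstrap.servers", "localhost:9092"); // placeholder
    props.setProperty("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
        // Partition 2 is chosen explicitly; the configured partitioner is bypassed.
        producer.send(new ProducerRecord<>("test-topic", 2, "2", "two")).get();
    }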
Use of org.apache.kafka.clients.producer.ProducerRecord in project debezium by debezium.
From the class KafkaDatabaseHistoryTest, method shouldIgnoreUnparseableMessages:
@Test
public void shouldIgnoreUnparseableMessages() throws Exception {
    // Create the empty topic ...
    kafka.createTopic(topicName, 1, 1);
    // Create invalid records
    final ProducerRecord<String, String> nullRecord = new ProducerRecord<>(topicName, PARTITION_NO, null, null);
    final ProducerRecord<String, String> emptyRecord = new ProducerRecord<>(topicName, PARTITION_NO, null, "");
    final ProducerRecord<String, String> noSourceRecord = new ProducerRecord<>(topicName, PARTITION_NO, null,
            "{\"position\":{\"filename\":\"my-txn-file.log\",\"position\":39},\"databaseName\":\"db1\",\"ddl\":\"DROP TABLE foo;\"}");
    final ProducerRecord<String, String> noPositionRecord = new ProducerRecord<>(topicName, PARTITION_NO, null,
            "{\"source\":{\"server\":\"my-server\"},\"databaseName\":\"db1\",\"ddl\":\"DROP TABLE foo;\"}");
    final ProducerRecord<String, String> invalidJSONRecord1 = new ProducerRecord<>(topicName, PARTITION_NO, null,
            "{\"source\":{\"server\":\"my-server\"},\"position\":{\"filename\":\"my-txn-file.log\",\"position\":39},\"databaseName\":\"db1\",\"ddl\":\"DROP TABLE foo;\"");
    final ProducerRecord<String, String> invalidJSONRecord2 = new ProducerRecord<>(topicName, PARTITION_NO, null,
            "\"source\":{\"server\":\"my-server\"},\"position\":{\"filename\":\"my-txn-file.log\",\"position\":39},\"databaseName\":\"db1\",\"ddl\":\"DROP TABLE foo;\"}");
    final ProducerRecord<String, String> invalidSQL = new ProducerRecord<>(topicName, PARTITION_NO, null,
            "{\"source\":{\"server\":\"my-server\"},\"position\":{\"filename\":\"my-txn-file.log\",\"position\":39},\"databaseName\":\"db1\",\"ddl\":\"xxxDROP TABLE foo;\"}");
    final Configuration intruderConfig = Configuration.create()
            .withDefault(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.brokerList())
            .withDefault(ProducerConfig.CLIENT_ID_CONFIG, "intruder")
            .withDefault(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class)
            .withDefault(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class)
            .build();
    try (final KafkaProducer<String, String> producer = new KafkaProducer<>(intruderConfig.asProperties())) {
        producer.send(nullRecord).get();
        producer.send(emptyRecord).get();
        producer.send(noSourceRecord).get();
        producer.send(noPositionRecord).get();
        producer.send(invalidJSONRecord1).get();
        producer.send(invalidJSONRecord2).get();
        producer.send(invalidSQL).get();
    }
    testHistoryTopicContent(true);
}
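All of the intruder records use the (topic, partition, key, value) constructor with a null key, so they land on the single history partition the consumer reads. What the test then asserts is essentially consumer-side parse-and-skip; a sketch of that pattern with Jackson and a modern KafkaConsumer (an illustration only, not Debezium's actual recovery code; `apply` is a hypothetical handler):

    final ObjectMapper mapper = new ObjectMapper();
    for (final ConsumerRecord<String, String> rec : consumer.poll(Duration.ofSeconds(1))) {
        final String value = rec.value();
        if (value == null || value.isEmpty()) {
            continue; // covers nullRecord and emptyRecord
        }
        try {
            final JsonNode node = mapper.readTree(value);
            if (!node.has("source") || !node.has("position")) {
                continue; // covers noSourceRecord and noPositionRecord
            }
            apply(node); // hypothetical handler for well-formed entries
        } catch (final IOException e) {
            // covers invalidJSONRecord1 and invalidJSONRecord2: log and skip
        }
    }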
Use of org.apache.kafka.clients.producer.ProducerRecord in project debezium by debezium.
From the class KafkaDatabaseHistoryTest, method shouldStopOnUnparseableSQL:
@Test(expected = ParsingException.class)
public void shouldStopOnUnparseableSQL() throws Exception {
    // Create the empty topic ...
    kafka.createTopic(topicName, 1, 1);
    // Create an invalid record
    final ProducerRecord<String, String> invalidSQL = new ProducerRecord<>(topicName, PARTITION_NO, null,
            "{\"source\":{\"server\":\"my-server\"},\"position\":{\"filename\":\"my-txn-file.log\",\"position\":39},\"databaseName\":\"db1\",\"ddl\":\"xxxDROP TABLE foo;\"}");
    final Configuration intruderConfig = Configuration.create()
            .withDefault(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.brokerList())
            .withDefault(ProducerConfig.CLIENT_ID_CONFIG, "intruder")
            .withDefault(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class)
            .withDefault(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class)
            .build();
    try (final KafkaProducer<String, String> producer = new KafkaProducer<>(intruderConfig.asProperties())) {
        producer.send(invalidSQL).get();
    }
    testHistoryTopicContent(false);
}
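As in the previous test, send(...).get() blocks until the broker acknowledges the write, so the bad entry is guaranteed to be in the topic before the history is read back. The same synchronous-send idiom with its two failure cases unwrapped (a sketch; the error handling is illustrative, not from the test):

    try {
        final RecordMetadata meta = producer.send(invalidSQL).get();
        // meta.offset() is where the record was appended
    } catch (final ExecutionException e) {
        throw new RuntimeException("send failed on the broker side", e.getCause());
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
    }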
Use of org.apache.kafka.clients.producer.ProducerRecord in project mist by snuspl.
From the class KafkaSourceTest, method testKafkaDataGenerator:
/**
 * Test whether TextKafkaDataGenerator fetches the input stream
 * from the Kafka server and generates data correctly.
 * @throws Exception
 */
@Test(timeout = 30000L)
public void testKafkaDataGenerator() throws Exception {
    final Map<String, String> inputStream = new HashMap<>();
    inputStream.put("0", "Lorem ipsum dolor sit amet, consectetur adipiscing elit.");
    inputStream.put("1", "In in leo nec erat fringilla mattis eu non massa.");
    inputStream.put("2", "Cras quis diam suscipit, commodo enim id, pulvinar nunc.");
    final CountDownLatch dataCountDownLatch = new CountDownLatch(inputStream.size());
    final Map<String, String> result = new HashMap<>();
    // create local kafka broker
    KafkaLocalBroker kafkaLocalBroker = new KafkaLocalBroker(KAFKA_PORT, KAFKA_ADDRESS, ZK_PORT, ZK_ADDRESS);
    kafkaLocalBroker.start();
    // define kafka consumer configuration
    final HashMap<String, Object> kafkaConsumerConf = new HashMap<>();
    kafkaConsumerConf.put("bootstrap.servers", kafkaLocalBroker.getLocalhostBroker());
    kafkaConsumerConf.put("group.id", "SourceTestGroup");
    kafkaConsumerConf.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    kafkaConsumerConf.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    kafkaConsumerConf.put("auto.offset.reset", "earliest");
    // define kafka producer configuration
    final HashMap<String, Object> kafkaProducerConf = new HashMap<>();
    kafkaProducerConf.put("bootstrap.servers", kafkaLocalBroker.getLocalhostBroker());
    kafkaProducerConf.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    kafkaProducerConf.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    // create kafka source
    final KafkaDataGenerator<Integer, String> kafkaDataGenerator =
            new KafkaDataGenerator<>(KAFKA_TOPIC, kafkaConsumerConf, kafkaSharedResource);
    final SourceTestEventGenerator<String, String> eventGenerator =
            new SourceTestEventGenerator<>(result, dataCountDownLatch);
    kafkaDataGenerator.setEventGenerator(eventGenerator);
    kafkaDataGenerator.start();
    // create kafka producer
    final KafkaProducer<String, String> producer = new KafkaProducer<>(kafkaProducerConf);
    final ProducerRecord<String, String> record1 = new ProducerRecord<>(KAFKA_TOPIC, "0", inputStream.get("0"));
    final ProducerRecord<String, String> record2 = new ProducerRecord<>(KAFKA_TOPIC, "1", inputStream.get("1"));
    final ProducerRecord<String, String> record3 = new ProducerRecord<>(KAFKA_TOPIC, "2", inputStream.get("2"));
    producer.send(record1);
    producer.send(record2);
    producer.send(record3);
    producer.close();
    // wait for the consumer to receive the inputs.
    dataCountDownLatch.await();
    kafkaDataGenerator.close();
    // KafkaDataGenerator will wait until its pollTimeout before it is closed.
    // Therefore, we need to wait a bit for KafkaDataGenerator.
    // TODO: [MIST-369] Removing sleep in the `KafkaSourceTest`
    sleep(2000);
    kafkaLocalBroker.stop();
    Assert.assertEquals(inputStream, result);
}
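One way to make the unbounded dataCountDownLatch.await() safer is to use the overload with a timeout, which returns false instead of blocking forever; the @Test timeout would still fire, but the failure message becomes specific. A sketch (the 20-second value is illustrative, and this does not itself remove the sleep flagged in MIST-369):

    // Fail fast with a clear message instead of relying on the @Test timeout alone.
    if (!dataCountDownLatch.await(20, TimeUnit.SECONDS)) {
        Assert.fail("Timed out waiting for records from the local Kafka broker");
    }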