Use of org.apache.kafka.clients.producer.Callback in project registry by hortonworks.
The class KafkaAvroSerDesApp, method sendMessages:
public void sendMessages(String payloadJsonFile) throws Exception {
    Properties props = new Properties();
    try (FileInputStream fileInputStream = new FileInputStream(this.producerProps)) {
        props.load(fileInputStream);
    }
    int limit = Integer.parseInt(props.getProperty(MSGS_LIMIT_OLD, props.getProperty(MSGS_LIMIT, DEFAULT_MSGS_LIMIT + "")));
    boolean ignoreInvalidMsgs = Boolean.parseBoolean(props.getProperty(IGNORE_INVALID_MSGS, "false"));
    int current = 0;
    Schema schema = new Schema.Parser().parse(new File(this.schemaFile));
    String topicName = props.getProperty(TOPIC);
    // set the serdes protocol version to the earlier one
    props.put(SERDES_PROTOCOL_VERSION, METADATA_ID_VERSION_PROTOCOL);
    final Producer<String, Object> producer = new KafkaProducer<>(props);
    final Callback callback = new MyProducerCallback();
    try (BufferedReader bufferedReader = new BufferedReader(new FileReader(payloadJsonFile))) {
        String line;
        while (current++ < limit && (line = bufferedReader.readLine()) != null) {
            // convert the JSON line to an Avro record
            Object avroMsg = null;
            try {
                avroMsg = jsonToAvro(line, schema);
            } catch (Exception ex) {
                LOG.warn("Error encountered while converting json to avro of message [{}]", line, ex);
                if (ignoreInvalidMsgs) {
                    continue;
                } else {
                    throw ex;
                }
            }
            // send Avro messages to the given topic using KafkaAvroSerializer, which registers the payload schema if it does not exist,
            // with schema name "<topic-name>:v", type "avro" and schemaGroup "kafka".
            // The schema registry should be running so that KafkaAvroSerializer can register the schema.
            LOG.info("Sending message: [{}] to topic: [{}]", avroMsg, topicName);
            ProducerRecord<String, Object> producerRecord = new ProducerRecord<>(topicName, avroMsg);
            try {
                producer.send(producerRecord, callback);
            } catch (SerDesException ex) {
                LOG.warn("Error encountered while sending message [{}]", line, ex);
                if (!ignoreInvalidMsgs) {
                    throw ex;
                }
            }
        }
    } finally {
        producer.flush();
        LOG.info("All messages are successfully sent to topic: [{}]", topicName);
        producer.close(5, TimeUnit.SECONDS);
    }
}
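MyProducerCallback is referenced above but not shown in this snippet. A minimal sketch of such a Callback, assuming it only logs the outcome of each send (the actual class in the registry project may differ), could look like this:

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical sketch; the real MyProducerCallback in the registry project may differ.
class MyProducerCallback implements Callback {

    private static final Logger LOG = LoggerFactory.getLogger(MyProducerCallback.class);

    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            LOG.error("Failed to send message", exception);
        } else {
            LOG.info("Message sent to topic [{}], partition [{}], offset [{}]",
                    metadata.topic(), metadata.partition(), metadata.offset());
        }
    }
}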
Use of org.apache.kafka.clients.producer.Callback in project registry by hortonworks.
The class KafkaAvroSerDesWithKafkaServerTest, method produceMessage:
private String produceMessage(String topicName, Object msg) {
    String bootstrapServers = CLUSTER.bootstrapServers();
    Map<String, Object> config = new HashMap<>();
    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    config.putAll(SCHEMA_REGISTRY_TEST_SERVER_CLIENT_WRAPPER.exportClientConf(true));
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class.getName());
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class.getName());
    final Producer<String, Object> producer = new KafkaProducer<>(config);
    final Callback callback = new ProducerCallback();
    LOG.info("Sending message: [{}] to topic: [{}]", msg, topicName);
    ProducerRecord<String, Object> producerRecord = new ProducerRecord<>(topicName, getKey(msg), msg);
    producer.send(producerRecord, callback);
    producer.flush();
    LOG.info("Message successfully sent to topic: [{}]", topicName);
    producer.close(5, TimeUnit.SECONDS);
    return bootstrapServers;
}
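Since produceMessage returns the bootstrap servers, the surrounding test can read the message back with a matching consumer. A hedged sketch of such a read-back, reusing bootstrapServers, topicName and LOG from the snippet, is shown below; the use of the registry's KafkaAvroDeserializer as the counterpart of the serializer above, the group id and the poll timeout are assumptions, not the test's actual code.

// Hedged sketch, not part of the original test. Assumes org.apache.kafka.clients.consumer.*
// imports and the registry's KafkaAvroDeserializer on the classpath.
Map<String, Object> consumerConfig = new HashMap<>();
consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
consumerConfig.putAll(SCHEMA_REGISTRY_TEST_SERVER_CLIENT_WRAPPER.exportClientConf(true));
consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "serdes-readback-test");
consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());
consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());
try (Consumer<Object, Object> consumer = new KafkaConsumer<>(consumerConfig)) {
    consumer.subscribe(Collections.singleton(topicName));
    ConsumerRecords<Object, Object> records = consumer.poll(5000);
    records.forEach(record -> LOG.info("Read back message: [{}]", record.value()));
}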
Use of org.apache.kafka.clients.producer.Callback in project luntan by caoawei.
The class KafkaPublisher, method publish:
public static void publish(TopicType topicType, Object message) {
    if (topicType == null) {
        throw new NullPointerException("the topicType can not be null");
    }
    if (message == null) {
        throw new NullPointerException("the message can not be null");
    }
    if (!isEnableKafka()) {
        logger.error("[kafka is not enabled in the current system]");
        return;
    }
    if (kafkaProducer == null) {
        synchronized (logger) {
            if (kafkaProducer == null) {
                kafkaProducer = new KafkaProducer<>(KafkaConfig.producerConfig());
            }
        }
    }
    try {
        // current environment (different deployments may run different versions)
        String env = ConfigUtil.getConfig("mq.kafka.env", "");
        String topic = env + "-" + topicType.getTopic();
        String data = Utils.toJson(message);
        ProducerRecord<String, byte[]> record = new ProducerRecord<String, byte[]>(topic, data.getBytes("utf-8"));
        logger.info("[kafka publishing message...]");
        Future<RecordMetadata> future = kafkaProducer.send(record, new Callback() {

            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception != null) {
                    logger.error("[kafka failed to publish message] cause:{}", exception.getMessage(), exception);
                    return;
                }
                logger.info("[kafka publish succeeded]: topic:{}, partition:{}, offset:{}", metadata.topic(), metadata.partition(), metadata.offset());
            }
        });
    } catch (Exception e) {
        logger.error("[kafka publish failed]: cause:{}", e.getMessage(), e);
        kafkaProducer.close();
        synchronized (logger) {
            kafkaProducer = null;
        }
    }
}
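KafkaPublisher lazily builds its producer from KafkaConfig.producerConfig(), which is not shown. Since the record type is ProducerRecord<String, byte[]>, the configuration needs a String key serializer and a byte-array value serializer. A hedged sketch of such a configuration follows; the "mq.kafka.servers" key and the acks/retries values are assumptions, not the luntan project's actual settings.

// Hedged sketch of a producer configuration compatible with ProducerRecord<String, byte[]>;
// the actual KafkaConfig.producerConfig() may differ. Serializers are from
// org.apache.kafka.common.serialization.
public static Properties producerConfig() {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, ConfigUtil.getConfig("mq.kafka.servers", "localhost:9092")); // hypothetical config key
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.ACKS_CONFIG, "1");
    props.put(ProducerConfig.RETRIES_CONFIG, "3");
    return props;
}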
Use of org.apache.kafka.clients.producer.Callback in project cruise-control by linkedin.
The class CruiseControlMetricsReporterTest, method setUp:
@Before
public void setUp() {
    super.setUp();
    Properties props = new Properties();
    props.setProperty(ProducerConfig.ACKS_CONFIG, "-1");
    AtomicInteger failed = new AtomicInteger(0);
    try (Producer<String, String> producer = createProducer(props)) {
        for (int i = 0; i < 10; i++) {
            producer.send(new ProducerRecord<>("TestTopic", Integer.toString(i)), new Callback() {

                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        failed.incrementAndGet();
                    }
                }
            });
        }
    }
    assertEquals(0, failed.get());
}
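createProducer(Properties) comes from the test's base harness and is not shown here. A hedged sketch of what such a helper typically looks like for this test follows; bootstrapServers() and the default serializers are assumptions rather than Cruise Control's actual code.

// Hedged sketch of a createProducer(Properties) helper; the real test harness may differ,
// and bootstrapServers() is an assumed accessor for the embedded cluster.
protected Producer<String, String> createProducer(Properties overrides) {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers());
    props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    // caller-supplied settings (here acks=-1) take precedence over the defaults above
    props.putAll(overrides);
    return new KafkaProducer<>(props);
}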
Use of org.apache.kafka.clients.producer.Callback in project nakadi by zalando.
The class KafkaTopicRepositoryTest, method whenKafkaPublishTimeoutThenCircuitIsOpened:
@Test
public void whenKafkaPublishTimeoutThenCircuitIsOpened() throws Exception {
    when(nakadiSettings.getKafkaSendTimeoutMs()).thenReturn(1000L);
    when(kafkaProducer.partitionsFor(EXPECTED_PRODUCER_RECORD.topic())).thenReturn(ImmutableList.of(new PartitionInfo(EXPECTED_PRODUCER_RECORD.topic(), 1, new Node(1, "host", 9091), null, null)));
    when(kafkaProducer.send(any(), any())).thenAnswer(invocation -> {
        final Callback callback = (Callback) invocation.getArguments()[1];
        callback.onCompletion(null, new TimeoutException());
        return null;
    });
    final List<BatchItem> batches = new LinkedList<>();
    for (int i = 0; i < 1000; i++) {
        try {
            final BatchItem batchItem = new BatchItem("{}", BatchItem.EmptyInjectionConfiguration.build(1, true), new BatchItem.InjectionConfiguration[BatchItem.Injection.values().length], Collections.emptyList());
            batchItem.setPartition("1");
            batches.add(batchItem);
            kafkaTopicRepository.syncPostBatch(EXPECTED_PRODUCER_RECORD.topic(), ImmutableList.of(batchItem));
            fail();
        } catch (final EventPublishingException e) {
            // expected: each post fails, either on the injected send timeout or once the circuit is open
        }
    }
    Assert.assertTrue(batches.stream().filter(item -> item.getResponse().getPublishingStatus() == EventPublishingStatus.FAILED && item.getResponse().getDetail().equals("short circuited")).count() >= 1);
}
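The mocked send(..) completes the Callback immediately with a TimeoutException, which the repository's synchronous publish path then surfaces as a failure. As a hedged illustration of that general pattern (not Nakadi's actual KafkaTopicRepository code), a synchronous send can bridge the Callback into a future and wait on it with the configured timeout:

// Illustrative sketch only: bridge the async Callback into a synchronous send with a timeout.
// Method and variable names here are assumptions, not Nakadi's implementation.
private RecordMetadata sendSync(final Producer<String, String> producer,
                                final ProducerRecord<String, String> record,
                                final long timeoutMs) throws Exception {
    final CompletableFuture<RecordMetadata> result = new CompletableFuture<>();
    producer.send(record, (metadata, exception) -> {
        if (exception != null) {
            // e.g. the TimeoutException injected by the mocked producer above
            result.completeExceptionally(exception);
        } else {
            result.complete(metadata);
        }
    });
    // cf. nakadiSettings.getKafkaSendTimeoutMs(), stubbed to 1000 ms in the test
    return result.get(timeoutMs, TimeUnit.MILLISECONDS);
}

A caller of such a method would catch the resulting ExecutionException or java.util.concurrent.TimeoutException, mark the batch items FAILED and rethrow an EventPublishingException, which is roughly the behavior the assertions above rely on.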