Use of com.alibaba.otter.canal.protocol.Message in project canal by alibaba.
Class CanalKafkaClientExample, method process:
private void process() {
    // wait until the client has been started
    while (!running) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // ignore
        }
    }
    while (running) {
        try {
            connector.connect();
            connector.subscribe();
            while (running) {
                try {
                    // fetch messages without acknowledging them yet
                    List<Message> messages = connector.getListWithoutAck(100L, TimeUnit.MILLISECONDS);
                    if (messages == null) {
                        continue;
                    }
                    for (Message message : messages) {
                        long batchId = message.getId();
                        int size = message.getEntries().size();
                        if (batchId == -1 || size == 0) {
                            // try {
                            //     Thread.sleep(1000);
                            // } catch (InterruptedException e) {
                            // }
                        } else {
                            // printSummary(message, batchId, size);
                            // printEntry(message.getEntries());
                            logger.info(message.toString());
                        }
                    }
                    // acknowledge the batch
                    connector.ack();
                } catch (Exception e) {
                    logger.error(e.getMessage(), e);
                }
            }
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }
    connector.unsubscribe();
    connector.disconnect();
}
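For reference, CanalKafkaClientExample only logs message.toString() here; the commented-out printEntry call is usually expanded along the lines of the sketch below, which follows the standard canal client example rather than this class's actual implementation, decoding each entry of a Message into a RowChange so the changed columns can be printed.

// Sketch of decoding Message entries into row-level changes (standard canal client pattern);
// uses com.alibaba.otter.canal.protocol.CanalEntry.
private static void printEntrySketch(List<CanalEntry.Entry> entries) throws Exception {
    for (CanalEntry.Entry entry : entries) {
        // skip transaction begin/end markers, they carry no row data
        if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN
            || entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONEND) {
            continue;
        }
        CanalEntry.RowChange rowChange = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
        String table = entry.getHeader().getSchemaName() + "." + entry.getHeader().getTableName();
        for (CanalEntry.RowData rowData : rowChange.getRowDatasList()) {
            System.out.println(table + " " + rowChange.getEventType());
            // for DELETE events the old values are in getBeforeColumnsList() instead
            for (CanalEntry.Column column : rowData.getAfterColumnsList()) {
                System.out.println("  " + column.getName() + " = " + column.getValue());
            }
        }
    }
}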
Use of com.alibaba.otter.canal.protocol.Message in project canal by alibaba.
Class AbstractCanalClientTest, method process:
protected void process() {
    int batchSize = 5 * 1024;
    while (running) {
        try {
            MDC.put("destination", destination);
            connector.connect();
            connector.subscribe();
            while (running) {
                // fetch up to batchSize entries without acknowledging them yet
                Message message = connector.getWithoutAck(batchSize);
                long batchId = message.getId();
                int size = message.getEntries().size();
                if (batchId == -1 || size == 0) {
                    // try {
                    //     Thread.sleep(1000);
                    // } catch (InterruptedException e) {
                    // }
                } else {
                    printSummary(message, batchId, size);
                    printEntry(message.getEntries());
                }
                if (batchId != -1) {
                    // acknowledge the batch
                    connector.ack(batchId);
                }
            }
        } catch (Throwable e) {
            logger.error("process error!", e);
            try {
                Thread.sleep(1000L);
            } catch (InterruptedException e1) {
                // ignore
            }
            // processing failed, roll back the unacknowledged batch
            connector.rollback();
        } finally {
            connector.disconnect();
            MDC.remove("destination");
        }
    }
}
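AbstractCanalClientTest leaves connector construction to its subclasses; as a point of reference, here is a minimal standalone sketch of the same fetch/ack cycle over the plain TCP connector. The address 127.0.0.1:11111 and destination "example" are canal's conventional defaults and are assumptions here, not values taken from this test.

// Minimal standalone sketch of the fetch/ack cycle with the TCP connector.
import java.net.InetSocketAddress;
import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.client.CanalConnectors;
import com.alibaba.otter.canal.protocol.Message;

public class SimpleTcpClientSketch {
    public static void main(String[] args) {
        CanalConnector connector = CanalConnectors.newSingleConnector(
            new InetSocketAddress("127.0.0.1", 11111), "example", "", "");
        try {
            connector.connect();
            connector.subscribe(".*\\..*");    // subscribe to every schema.table
            connector.rollback();              // start from the last acknowledged position
            for (int i = 0; i < 10; i++) {     // bounded loop instead of while (running)
                Message message = connector.getWithoutAck(1024);
                long batchId = message.getId();
                if (batchId != -1 && !message.getEntries().isEmpty()) {
                    System.out.println("batchId=" + batchId + ", entries=" + message.getEntries().size());
                }
                if (batchId != -1) {
                    connector.ack(batchId);
                }
            }
        } finally {
            connector.disconnect();
        }
    }
}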
Use of com.alibaba.otter.canal.protocol.Message in project canal by alibaba.
Class CanalKafkaConsumer, method getMessage:
@SuppressWarnings("unchecked")
@Override
public List<CommonMessage> getMessage(Long timeout, TimeUnit unit) {
    if (!flatMessage) {
        ConsumerRecords<String, Message> records = (ConsumerRecords<String, Message>) kafkaConsumer.poll(unit.toMillis(timeout));
        if (!records.isEmpty()) {
            currentOffsets.clear();
            List<CommonMessage> messages = new ArrayList<>();
            for (ConsumerRecord<String, Message> record : records) {
                // remember the first offset seen for each partition in this poll
                if (currentOffsets.get(record.partition()) == null) {
                    currentOffsets.put(record.partition(), record.offset());
                }
                messages.addAll(MessageUtil.convert(record.value()));
            }
            return messages;
        }
    } else {
        ConsumerRecords<String, String> records = (ConsumerRecords<String, String>) kafkaConsumer.poll(unit.toMillis(timeout));
        if (!records.isEmpty()) {
            List<CommonMessage> messages = new ArrayList<>();
            currentOffsets.clear();
            for (ConsumerRecord<String, String> record : records) {
                // remember the first offset seen for each partition in this poll
                if (currentOffsets.get(record.partition()) == null) {
                    currentOffsets.put(record.partition(), record.offset());
                }
                String flatMessageJson = record.value();
                CommonMessage flatMessages = JSON.parseObject(flatMessageJson, CommonMessage.class);
                messages.add(flatMessages);
            }
            return messages;
        }
    }
    return null;
}
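getMessage records in currentOffsets the first offset seen per partition but does not use it here; below is a sketch of how those offsets could back a rollback (and how an ack could commit). The single `topic` field and the method names are assumptions for illustration, not CanalKafkaConsumer's actual rollback/ack code.

// Sketch: re-deliver or commit the last polled batch using the recorded offsets.
// Assumes the same kafkaConsumer and currentOffsets fields as getMessage above,
// plus a single subscribed `topic` field; uses org.apache.kafka.common.TopicPartition.
public void rollbackSketch() {
    // seek every partition back to the first offset of the last poll so the batch is re-read
    for (Map.Entry<Integer, Long> entry : currentOffsets.entrySet()) {
        kafkaConsumer.seek(new TopicPartition(topic, entry.getKey()), entry.getValue());
    }
}

public void ackSketch() {
    // once the batch has been fully processed, a synchronous commit of the consumed offsets suffices
    kafkaConsumer.commitSync();
}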
Use of com.alibaba.otter.canal.protocol.Message in project canal by alibaba.
Class CanalKafkaProducer, method send:
@Override
public void send(MQDestination mqDestination, Message message, Callback callback) {
    ExecutorTemplate template = new ExecutorTemplate(sendExecutor);
    try {
        List result;
        if (!StringUtils.isEmpty(mqDestination.getDynamicTopic())) {
            // dynamic topic routing is computed from schema/table only and does not
            // require deserializing the proto payload
            Map<String, Message> messageMap = MQMessageUtils.messageTopics(message, mqDestination.getTopic(), mqDestination.getDynamicTopic());
            // use multiple threads, one task per target topic, to improve throughput
            for (Map.Entry<String, Message> entry : messageMap.entrySet()) {
                final String topicName = entry.getKey().replace('.', '_');
                final Message messageSub = entry.getValue();
                template.submit((Callable) () -> {
                    try {
                        return send(mqDestination, topicName, messageSub, mqProperties.isFlatMessage());
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                });
            }
            result = template.waitForResult();
        } else {
            result = new ArrayList();
            List<Future> futures = send(mqDestination, mqDestination.getTopic(), message, mqProperties.isFlatMessage());
            result.add(futures);
        }
        // every topic and partition of the batch is sent asynchronously from multiple threads;
        // flush here as the rendezvous point to make sure all records have been written out.
        // note: to preserve ordering with Kafka's async mode, set
        // max.in.flight.requests.per.connection=1 so retries after a network error stay exclusive
        producer.flush();
        // flush can still mask send failures, so inspect every future and trigger a rollback
        // for any that failed
        for (Object obj : result) {
            List<Future> futures = (List<Future>) obj;
            for (Future future : futures) {
                try {
                    future.get();
                } catch (InterruptedException | ExecutionException e) {
                    throw new RuntimeException(e);
                }
            }
        }
        callback.commit();
    } catch (Throwable e) {
        logger.error(e.getMessage(), e);
        callback.rollback();
    } finally {
        template.clear();
    }
}
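The ordering note in the comments above is a producer-side setting rather than anything in this method; a minimal sketch of Kafka producer properties that match it follows. The concrete values (servers, retries, acks) are illustrative assumptions, not canal's shipped defaults.

// Sketch: producer settings that preserve per-partition ordering with async sends and retries.
// Uses org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig} and the standard serializers.
private static KafkaProducer<String, byte[]> newOrderedProducer(String servers) {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    props.put(ProducerConfig.ACKS_CONFIG, "all");                        // wait for all in-sync replicas
    props.put(ProducerConfig.RETRIES_CONFIG, 3);                         // retry transient failures
    props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);  // keep ordering across retries
    return new KafkaProducer<>(props);
}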
Use of com.alibaba.otter.canal.protocol.Message in project canal by alibaba.
Class CanalKafkaProducer, method send (private per-topic overload):
private List<Future> send(MQDestination mqDestination, String topicName, Message message, boolean flat) {
    List<ProducerRecord<String, byte[]>> records = new ArrayList<>();
    // resolve the partition count for the current topic
    Integer partitionNum = MQMessageUtils.parseDynamicTopicPartition(topicName, mqDestination.getDynamicTopicPartitionNum());
    if (partitionNum == null) {
        partitionNum = mqDestination.getPartitionsNum();
    }
    if (!flat) {
        if (mqDestination.getPartitionHash() != null && !mqDestination.getPartitionHash().isEmpty()) {
            // build the row data concurrently
            EntryRowData[] datas = MQMessageUtils.buildMessageData(message, buildExecutor);
            // partition the rows serially
            Message[] messages = MQMessageUtils.messagePartition(datas, message.getId(), partitionNum, mqDestination.getPartitionHash(), this.mqProperties.isDatabaseHash());
            int length = messages.length;
            for (int i = 0; i < length; i++) {
                Message messagePartition = messages[i];
                if (messagePartition != null) {
                    records.add(new ProducerRecord<>(topicName, i, null,
                        CanalMessageSerializerUtil.serializer(messagePartition, mqProperties.isFilterTransactionEntry())));
                }
            }
        } else {
            final int partition = mqDestination.getPartition() != null ? mqDestination.getPartition() : 0;
            records.add(new ProducerRecord<>(topicName, partition, null,
                CanalMessageSerializerUtil.serializer(message, mqProperties.isFilterTransactionEntry())));
        }
    } else {
        // send flat JSON messages
        // build the row data concurrently
        EntryRowData[] datas = MQMessageUtils.buildMessageData(message, buildExecutor);
        // partition the rows serially
        List<FlatMessage> flatMessages = MQMessageUtils.messageConverter(datas, message.getId());
        for (FlatMessage flatMessage : flatMessages) {
            if (mqDestination.getPartitionHash() != null && !mqDestination.getPartitionHash().isEmpty()) {
                FlatMessage[] partitionFlatMessage = MQMessageUtils.messagePartition(flatMessage, partitionNum, mqDestination.getPartitionHash(), this.mqProperties.isDatabaseHash());
                int length = partitionFlatMessage.length;
                for (int i = 0; i < length; i++) {
                    FlatMessage flatMessagePart = partitionFlatMessage[i];
                    if (flatMessagePart != null) {
                        records.add(new ProducerRecord<>(topicName, i, null,
                            JSON.toJSONBytes(flatMessagePart, SerializerFeature.WriteMapNullValue)));
                    }
                }
            } else {
                final int partition = mqDestination.getPartition() != null ? mqDestination.getPartition() : 0;
                records.add(new ProducerRecord<>(topicName, partition, null,
                    JSON.toJSONBytes(flatMessage, SerializerFeature.WriteMapNullValue)));
            }
        }
    }
    return produce(records);
}
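The produce(records) helper called at the end is not part of this page; below is a sketch of what such a helper commonly does with an async Kafka producer. The `producer` field is assumed, and this is an illustration rather than the method's actual body.

// Sketch: hand each record to the async producer and return the futures so the caller
// (send above) can flush and then check every result.
private List<Future> produceSketch(List<ProducerRecord<String, byte[]>> records) {
    List<Future> futures = new ArrayList<>();
    for (ProducerRecord<String, byte[]> record : records) {
        futures.add(producer.send(record)); // Future<RecordMetadata>
    }
    return futures;
}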