Use of io.eventuate.local.common.BinlogFileOffset in project eventuate-local by eventuate-local.
From the class OffsetKafkaStore, the method getLastBinlogFileOffset:
public Optional<BinlogFileOffset> getLastBinlogFileOffset() {
  try (KafkaConsumer<String, String> consumer = createConsumer()) {
    consumer.partitionsFor(dbHistoryTopicName);
    consumer.subscribe(Arrays.asList(dbHistoryTopicName));
    // Keep polling until N consecutive empty polls, remembering the offset
    // from the last record seen; N is a retry-count constant defined
    // elsewhere in the class.
    int count = N;
    BinlogFileOffset result = null;
    boolean lastRecordFound = false;
    while (!lastRecordFound) {
      ConsumerRecords<String, String> records = consumer.poll(100);
      if (records.isEmpty()) {
        count--;
        if (count == 0)
          lastRecordFound = true;
      } else {
        count = N;
        for (ConsumerRecord<String, String> record : records) {
          BinlogFileOffset current = handleRecord(record);
          if (current != null) {
            result = current;
          }
        }
      }
    }
    return Optional.ofNullable(result);
  }
}
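For context, callers typically consume the returned Optional when deciding where to resume reading the MySQL binlog. The sketch below is illustrative only: the start method and its startReadingFrom/startFromBeginning callbacks are hypothetical names, not part of eventuate-local; only OffsetKafkaStore.getLastBinlogFileOffset() comes from the code above.

// Illustrative only: resume the binlog reader from the last stored offset,
// or start from the beginning when nothing has been recorded yet.
// startReadingFrom and startFromBeginning are hypothetical names.
public void start(OffsetKafkaStore offsetStore) {
  Optional<BinlogFileOffset> lastOffset = offsetStore.getLastBinlogFileOffset();
  if (lastOffset.isPresent()) {
    startReadingFrom(lastOffset.get());
  } else {
    startFromBeginning();
  }
}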
Use of io.eventuate.local.common.BinlogFileOffset in project eventuate-local by eventuate-local.
From the class AbstractDatabaseOffsetKafkaStoreTest, the method generateAndSaveBinlogFileOffset:
private BinlogFileOffset generateAndSaveBinlogFileOffset() throws InterruptedException {
  BinlogFileOffset bfo = generateBinlogFileOffset();
  DatabaseOffsetKafkaStore binlogOffsetKafkaStore =
          getDatabaseOffsetKafkaStore(eventuateConfigurationProperties.getDbHistoryTopicName(), "mySqlBinaryLogClientName");
  binlogOffsetKafkaStore.save(bfo);
  Thread.sleep(5000); // give the asynchronous write to Kafka time to complete
  BinlogFileOffset savedBfo = binlogOffsetKafkaStore.getLastBinlogFileOffset().get();
  assertEquals(bfo, savedBfo);
  binlogOffsetKafkaStore.stop();
  return savedBfo;
}
Use of io.eventuate.local.common.BinlogFileOffset in project eventuate-local by eventuate-local.
From the class AbstractDatabaseOffsetKafkaStoreTest, the method assertLastRecordEquals:
private void assertLastRecordEquals(BinlogFileOffset binlogFileOffset) {
  DatabaseOffsetKafkaStore binlogOffsetKafkaStore =
          getDatabaseOffsetKafkaStore(eventuateConfigurationProperties.getDbHistoryTopicName(), "mySqlBinaryLogClientName");
  BinlogFileOffset lastRecord = binlogOffsetKafkaStore.getLastBinlogFileOffset().get();
  assertEquals(binlogFileOffset, lastRecord);
  binlogOffsetKafkaStore.stop();
}
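The generateBinlogFileOffset() helper called by the first test method is not shown in this listing. A plausible shape, assuming it only needs to produce a unique filename/offset pair so the Kafka round trip can be asserted (a guess, not the project's actual implementation):

// Guess at the generateBinlogFileOffset() helper used above: any unique
// filename/offset pair works for asserting the round trip through Kafka.
private BinlogFileOffset generateBinlogFileOffset() {
  String binlogFilename = "binlog.file." + System.currentTimeMillis();
  long offset = (long) (Math.random() * 1000);
  return new BinlogFileOffset(binlogFilename, offset);
}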
Use of io.eventuate.local.common.BinlogFileOffset in project eventuate-local by eventuate-local.
From the class AbstractDuplicatePublishingDetectorTest, the method floodTopic:
private void floodTopic(Producer<String, String> producer, String binlogFilename, String topicName) {
  for (int i = 0; i < 10; i++) {
    PublishedEvent publishedEvent = new PublishedEvent();
    publishedEvent.setEntityId(UUID.randomUUID().toString());
    publishedEvent.setBinlogFileOffset(new BinlogFileOffset(binlogFilename, (long) i));
    String json = JSonMapper.toJson(publishedEvent);
    producer.send(new ProducerRecord<>(topicName, publishedEvent.getEntityId(), json));
  }
}
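In a test, this helper would typically be called with a Kafka producer and a uniquely named topic so the detector under test has a known highest offset to discover. A minimal usage sketch; createProducer() and the topic name are assumptions, not code from the project:

// Hypothetical usage: flood the topic with ten events for an "older" binlog
// file and ten for a "newer" one, so the highest offset in the topic is
// (binlog.file.000002, 9).
Producer<String, String> producer = createProducer();
String topicName = "duplicate-detector-test-" + System.currentTimeMillis();
floodTopic(producer, "binlog.file.000001", topicName);
floodTopic(producer, "binlog.file.000002", topicName);
producer.close();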
Use of io.eventuate.local.common.BinlogFileOffset in project eventuate-local by eventuate-local.
From the class DuplicatePublishingDetector, the method fetchMaxOffsetFor:
private Optional<BinlogFileOffset> fetchMaxOffsetFor(String destinationTopic) {
  String subscriberId = "duplicate-checker-" + destinationTopic + "-" + System.currentTimeMillis();
  Properties consumerProperties = ConsumerPropertiesFactory.makeConsumerProperties(kafkaBootstrapServers, subscriberId);
  KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProperties);

  // Assign every partition of the destination topic explicitly.
  List<PartitionInfo> partitions = EventuateKafkaConsumer.verifyTopicExistsBeforeSubscribing(consumer, destinationTopic);
  List<TopicPartition> topicPartitionList =
          partitions.stream().map(p -> new TopicPartition(destinationTopic, p.partition())).collect(toList());
  consumer.assign(topicPartitionList);
  consumer.poll(0);

  logger.info("Seeking to end");
  try {
    consumer.seekToEnd(topicPartitionList);
  } catch (IllegalStateException e) {
    logger.error("Error seeking " + destinationTopic, e);
    return Optional.empty();
  }

  // Step back one position on each non-empty partition so that the last
  // record of every partition is re-read.
  List<PartitionOffset> positions = topicPartitionList.stream()
          .map(tp -> new PartitionOffset(tp.partition(), consumer.position(tp) - 1))
          .filter(po -> po.offset >= 0)
          .collect(toList());

  logger.info("Seeking to positions=" + positions);
  positions.forEach(po -> consumer.seek(new TopicPartition(destinationTopic, po.partition), po.offset));

  logger.info("Polling for records");
  List<ConsumerRecord<String, String>> records = new ArrayList<>();
  while (records.size() < positions.size()) {
    ConsumerRecords<String, String> consumerRecords = consumer.poll(1000);
    consumerRecords.forEach(records::add);
  }
  logger.info("Got records: {}", records.size());

  // Take the maximum BinlogFileOffset across the last record of every partition.
  Optional<BinlogFileOffset> max = StreamSupport.stream(records.spliterator(), false)
          .map(record -> {
            logger.info(String.format("got record: %s %s %s", record.partition(), record.offset(), record.value()));
            return JSonMapper.fromJson(record.value(), PublishedEvent.class).getBinlogFileOffset();
          })
          .filter(binlogFileOffset -> binlogFileOffset != null)
          .max((blfo1, blfo2) -> blfo1.isSameOrAfter(blfo2) ? 1 : -1);
  consumer.close();
  return max;
}
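The fetched maximum can then drive duplicate detection: an event read from the binlog should only be republished if the destination topic has not already seen an offset at or beyond it. The sketch below is illustrative; the method name and the caching map are assumptions, and only fetchMaxOffsetFor() and BinlogFileOffset.isSameOrAfter() come from the code above.

// Illustrative only: cache the per-topic maximum and compare incoming
// offsets against it. The map and method name are assumptions.
private final Map<String, Optional<BinlogFileOffset>> maxOffsetsForTopics = new ConcurrentHashMap<>();

private boolean shouldBePublishedSketch(BinlogFileOffset sourceOffset, String destinationTopic) {
  Optional<BinlogFileOffset> maxPublished =
          maxOffsetsForTopics.computeIfAbsent(destinationTopic, this::fetchMaxOffsetFor);
  // Republish only if the destination topic has not already reached this offset.
  return !maxPublished.map(max -> max.isSameOrAfter(sourceOffset)).orElse(false);
}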