Example 1 with BinlogFileOffset

Use of io.eventuate.local.common.BinlogFileOffset in project eventuate-local by eventuate-local.

From class OffsetKafkaStore, method getLastBinlogFileOffset:

public Optional<BinlogFileOffset> getLastBinlogFileOffset() {
    try (KafkaConsumer<String, String> consumer = createConsumer()) {
        consumer.partitionsFor(dbHistoryTopicName);
        consumer.subscribe(Arrays.asList(dbHistoryTopicName));
        // N is a retry constant defined elsewhere in the class: the number of
        // consecutive empty polls to tolerate before concluding the topic is drained.
        int count = N;
        BinlogFileOffset result = null;
        boolean lastRecordFound = false;
        while (!lastRecordFound) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            if (records.isEmpty()) {
                count--;
                if (count == 0)
                    lastRecordFound = true;
            } else {
                // Records arrived: reset the empty-poll countdown and keep the
                // offset from the most recent record seen.
                count = N;
                for (ConsumerRecord<String, String> record : records) {
                    BinlogFileOffset current = handleRecord(record);
                    if (current != null) {
                        result = current;
                    }
                }
            }
        }
        return Optional.ofNullable(result);
    }
}
Also used: BinlogFileOffset (io.eventuate.local.common.BinlogFileOffset)
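
For orientation, here is a minimal sketch of the shape BinlogFileOffset appears to have, inferred only from its use in these examples. The field names, the no-arg constructor, and the comparison rule in isSameOrAfter are assumptions, not the project's actual source.

// Sketch inferred from usage in these examples; not the eventuate-local source.
public class BinlogFileOffset {
    private String binlogFilename; // e.g. "mysql-bin.000003"
    private long offset;           // position within that binlog file

    public BinlogFileOffset() {
        // no-arg constructor assumed for JSON deserialization
    }

    public BinlogFileOffset(String binlogFilename, long offset) {
        this.binlogFilename = binlogFilename;
        this.offset = offset;
    }

    // Assumed ordering: a later binlog file always wins; within the same file,
    // compare positions. Example 5 uses this to pick the maximum offset.
    public boolean isSameOrAfter(BinlogFileOffset that) {
        int byFile = this.binlogFilename.compareTo(that.binlogFilename);
        return byFile != 0 ? byFile > 0 : this.offset >= that.offset;
    }
}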

Example 2 with BinlogFileOffset

Use of io.eventuate.local.common.BinlogFileOffset in project eventuate-local by eventuate-local.

From class AbstractDatabaseOffsetKafkaStoreTest, method generateAndSaveBinlogFileOffset:

private BinlogFileOffset generateAndSaveBinlogFileOffset() throws InterruptedException {
    BinlogFileOffset bfo = generateBinlogFileOffset();
    DatabaseOffsetKafkaStore binlogOffsetKafkaStore = getDatabaseOffsetKafkaStore(eventuateConfigurationProperties.getDbHistoryTopicName(), "mySqlBinaryLogClientName");
    binlogOffsetKafkaStore.save(bfo);
    // Give the saved offset time to reach Kafka before reading it back.
    Thread.sleep(5000);
    BinlogFileOffset savedBfo = binlogOffsetKafkaStore.getLastBinlogFileOffset().get();
    assertEquals(bfo, savedBfo);
    binlogOffsetKafkaStore.stop();
    return savedBfo;
}
Also used: BinlogFileOffset (io.eventuate.local.common.BinlogFileOffset), DatabaseOffsetKafkaStore (io.eventuate.local.db.log.common.DatabaseOffsetKafkaStore)
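
The fixed Thread.sleep(5000) above simply gives the saved offset time to land in Kafka before it is read back. A bounded polling wait is a sturdier alternative; here is a minimal sketch, where the awaitOffset helper and its parameters are illustrative, not part of the project:

// Illustrative helper for the test class: poll the store until the saved
// offset becomes visible, or fail after a deadline.
private BinlogFileOffset awaitOffset(DatabaseOffsetKafkaStore store, long timeoutMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        Optional<BinlogFileOffset> offset = store.getLastBinlogFileOffset();
        if (offset.isPresent())
            return offset.get();
        Thread.sleep(200); // brief back-off between reads
    }
    throw new IllegalStateException("offset not visible within " + timeoutMs + "ms");
}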

Example 3 with BinlogFileOffset

Use of io.eventuate.local.common.BinlogFileOffset in project eventuate-local by eventuate-local.

From class AbstractDatabaseOffsetKafkaStoreTest, method assertLastRecordEquals:

private void assertLastRecordEquals(BinlogFileOffset binlogFileOffset) {
    DatabaseOffsetKafkaStore binlogOffsetKafkaStore = getDatabaseOffsetKafkaStore(eventuateConfigurationProperties.getDbHistoryTopicName(), "mySqlBinaryLogClientName");
    // The store's most recent record should match the offset that was saved.
    BinlogFileOffset lastRecord = binlogOffsetKafkaStore.getLastBinlogFileOffset().get();
    assertEquals(binlogFileOffset, lastRecord);
    binlogOffsetKafkaStore.stop();
}
Also used: BinlogFileOffset (io.eventuate.local.common.BinlogFileOffset), DatabaseOffsetKafkaStore (io.eventuate.local.db.log.common.DatabaseOffsetKafkaStore)

Example 4 with BinlogFileOffset

Use of io.eventuate.local.common.BinlogFileOffset in project eventuate-local by eventuate-local.

From class AbstractDuplicatePublishingDetectorTest, method floodTopic:

private void floodTopic(Producer<String, String> producer, String binlogFilename, String topicName) {
    // Publish ten events whose offsets (0..9) all fall in the same binlog file.
    for (int i = 0; i < 10; i++) {
        PublishedEvent publishedEvent = new PublishedEvent();
        publishedEvent.setEntityId(UUID.randomUUID().toString());
        publishedEvent.setBinlogFileOffset(new BinlogFileOffset(binlogFilename, (long) i));
        String json = JSonMapper.toJson(publishedEvent);
        // Keying by entityId routes all records for an entity to one partition.
        producer.send(new ProducerRecord<>(topicName, publishedEvent.getEntityId(), json));
    }
}
Also used: BinlogFileOffset (io.eventuate.local.common.BinlogFileOffset), PublishedEvent (io.eventuate.local.common.PublishedEvent)
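
A hypothetical call site for floodTopic, using a plain Kafka producer with string serializers; the broker address, binlog filename, and topic name below are placeholders:

// Illustrative setup only; the surrounding test wires up its own producer.
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
try (Producer<String, String> producer = new KafkaProducer<>(props)) {
    floodTopic(producer, "mysql-bin.000003", "my-destination-topic");
}

Because each record is keyed by entityId, all events for one entity land in the same partition, so Kafka preserves their relative order.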

Example 5 with BinlogFileOffset

Use of io.eventuate.local.common.BinlogFileOffset in project eventuate-local by eventuate-local.

From class DuplicatePublishingDetector, method fetchMaxOffsetFor:

private Optional<BinlogFileOffset> fetchMaxOffsetFor(String destinationTopic) {
    String subscriberId = "duplicate-checker-" + destinationTopic + "-" + System.currentTimeMillis();
    Properties consumerProperties = ConsumerPropertiesFactory.makeConsumerProperties(kafkaBootstrapServers, subscriberId);
    // try-with-resources ensures the consumer is closed on every exit path,
    // including the early return when seeking fails (the original leaked it there).
    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProperties)) {
        List<PartitionInfo> partitions = EventuateKafkaConsumer.verifyTopicExistsBeforeSubscribing(consumer, destinationTopic);
        List<TopicPartition> topicPartitionList = partitions.stream().map(p -> new TopicPartition(destinationTopic, p.partition())).collect(toList());
        consumer.assign(topicPartitionList);
        // poll(0) forces the consumer to finish partition assignment before seeking.
        consumer.poll(0);
        logger.info("Seeking to end");
        try {
            consumer.seekToEnd(topicPartitionList);
        } catch (IllegalStateException e) {
            logger.error("Error seeking " + destinationTopic, e);
            return Optional.empty();
        }
        // Step back one position on each non-empty partition so the next poll
        // returns the last record written to each of them.
        List<PartitionOffset> positions = topicPartitionList.stream().map(tp -> new PartitionOffset(tp.partition(), consumer.position(tp) - 1)).filter(po -> po.offset >= 0).collect(toList());
        logger.info("Seeking to positions=" + positions);
        positions.forEach(po -> consumer.seek(new TopicPartition(destinationTopic, po.partition), po.offset));
        logger.info("Polling for records");
        List<ConsumerRecord<String, String>> records = new ArrayList<>();
        while (records.size() < positions.size()) {
            ConsumerRecords<String, String> consumerRecords = consumer.poll(1000);
            consumerRecords.forEach(records::add);
        }
        logger.info("Got records: {}", records.size());
        // The maximum of the last records across partitions is the latest
        // binlog offset already published to this topic.
        return records.stream().map(record -> {
            logger.info(String.format("got record: %s %s %s", record.partition(), record.offset(), record.value()));
            return JSonMapper.fromJson(record.value(), PublishedEvent.class).getBinlogFileOffset();
        }).filter(binlogFileOffset -> binlogFileOffset != null).max((blfo1, blfo2) -> blfo1.isSameOrAfter(blfo2) ? 1 : -1);
    }
}
Also used: ConsumerPropertiesFactory (io.eventuate.local.java.kafka.consumer.ConsumerPropertiesFactory), TopicPartition (org.apache.kafka.common.TopicPartition), java.util (java.util), Logger (org.slf4j.Logger), BinlogFileOffset (io.eventuate.local.common.BinlogFileOffset), PublishedEvent (io.eventuate.local.common.PublishedEvent), LoggerFactory (org.slf4j.LoggerFactory), EventuateKafkaConsumer (io.eventuate.local.java.kafka.consumer.EventuateKafkaConsumer), JSonMapper (io.eventuate.javaclient.commonimpl.JSonMapper), PartitionInfo (org.apache.kafka.common.PartitionInfo), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), Collectors.toList (java.util.stream.Collectors.toList), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer)
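
A sketch of how the fetched maximum can gate republication after a restart: an event is a duplicate exactly when the destination topic already holds a record at or past its offset. The method name below is illustrative, not the detector's actual API:

// Hypothetical gate built on fetchMaxOffsetFor and isSameOrAfter.
private boolean offsetNotYetPublished(BinlogFileOffset sourceOffset, String destinationTopic) {
    Optional<BinlogFileOffset> max = fetchMaxOffsetFor(destinationTopic);
    // Empty topic means nothing published yet, so the event is safe to publish.
    return !max.map(m -> m.isSameOrAfter(sourceOffset)).orElse(false);
}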

Aggregations

BinlogFileOffset (io.eventuate.local.common.BinlogFileOffset): 12 usages
AbstractCdcTest (io.eventuate.local.test.util.AbstractCdcTest): 4 usages
Test (org.junit.Test): 4 usages
DuplicatePublishingDetector (io.eventuate.local.db.log.common.DuplicatePublishingDetector): 3 usages
JSonMapper (io.eventuate.javaclient.commonimpl.JSonMapper): 2 usages
PublishedEvent (io.eventuate.local.common.PublishedEvent): 2 usages
DatabaseOffsetKafkaStore (io.eventuate.local.db.log.common.DatabaseOffsetKafkaStore): 2 usages
IOException (java.io.IOException): 2 usages
SQLException (java.sql.SQLException): 2 usages
Logger (org.slf4j.Logger): 2 usages
LoggerFactory (org.slf4j.LoggerFactory): 2 usages
BinaryLogClient (com.github.shyiko.mysql.binlog.BinaryLogClient): 1 usage
BinLogEvent (io.eventuate.local.common.BinLogEvent): 1 usage
DbLogClient (io.eventuate.local.db.log.common.DbLogClient): 1 usage
ConsumerPropertiesFactory (io.eventuate.local.java.kafka.consumer.ConsumerPropertiesFactory): 1 usage
EventuateKafkaConsumer (io.eventuate.local.java.kafka.consumer.EventuateKafkaConsumer): 1 usage
MessageImpl (io.eventuate.tram.messaging.common.MessageImpl): 1 usage
ByteBuffer (java.nio.ByteBuffer): 1 usage
Connection (java.sql.Connection): 1 usage
DriverManager (java.sql.DriverManager): 1 usage