Example usage of io.eventuate.local.common.BinlogFileOffset from the eventuate-local project:
class AbstractDatabaseOffsetKafkaStoreTest, method shouldReadTheLastRecordMultipleTimes.
@Test
public void shouldReadTheLastRecordMultipleTimes() throws InterruptedException {
  // Persist one offset, then read it back twice: reading the last record
  // must be idempotent and keep returning the same saved offset.
  BinlogFileOffset savedOffset = generateAndSaveBinlogFileOffset();

  assertLastRecordEquals(savedOffset);
  assertLastRecordEquals(savedOffset);
}
Example usage of io.eventuate.local.common.BinlogFileOffset from the eventuate-local project:
class AbstractDuplicatePublishingDetectorTest, method shouldBePublishedTest.
@Test
public void shouldBePublishedTest() {
  // Unique topic and binlog file name so this run cannot collide with earlier data.
  String topic = generateUniqueTopicName();
  String binlogFile = "binlog.file." + System.currentTimeMillis();

  String bootstrapServers = eventuateKafkaConfigurationProperties.getBootstrapServers();
  DuplicatePublishingDetector detector = new DuplicatePublishingDetector(bootstrapServers);

  // Pre-populate the topic with published offsets for this binlog file.
  Producer<String, String> producer = createProducer(bootstrapServers);
  floodTopic(producer, binlogFile, topic);
  producer.close();

  // Offset 1 should already be covered by the flooded records (duplicate),
  // while offset 10 presumably lies beyond them — TODO confirm against floodTopic.
  assertFalse(detector.shouldBePublished(new BinlogFileOffset(binlogFile, 1L), topic));
  assertTrue(detector.shouldBePublished(new BinlogFileOffset(binlogFile, 10L), topic));
}
Example usage of io.eventuate.local.common.BinlogFileOffset from the eventuate-local project:
class AbstractDuplicatePublishingDetectorTest, method emptyTopicTest.
@Test
public void emptyTopicTest() {
  // With no prior records in the topic, any offset must be considered publishable.
  String bootstrapServers = eventuateKafkaConfigurationProperties.getBootstrapServers();
  DuplicatePublishingDetector detector = new DuplicatePublishingDetector(bootstrapServers);

  BinlogFileOffset offset = generateBinlogFileOffset();
  assertTrue(detector.shouldBePublished(offset, generateUniqueTopicName()));
}
Example usage of io.eventuate.local.common.BinlogFileOffset from the eventuate-local project:
class AbstractDuplicatePublishingDetectorTest, method shouldHandlePublishCheckForOldEntires.
@Test
// NOTE(review): "Entires" is a typo for "Entries"; kept as-is since renaming
// would change the public test name.
public void shouldHandlePublishCheckForOldEntires() {
  String topic = generateUniqueTopicName();
  String binlogFile = "binlog.file." + System.currentTimeMillis();

  String bootstrapServers = eventuateKafkaConfigurationProperties.getBootstrapServers();
  DuplicatePublishingDetector detector = new DuplicatePublishingDetector(bootstrapServers);

  // Flood the topic with current-file offsets, then append a record for an
  // older binlog file; the stale entry must not mask the newer offsets.
  Producer<String, String> producer = createProducer(bootstrapServers);
  floodTopic(producer, binlogFile, topic);
  sendOldPublishedEvent(producer, topic);
  producer.close();

  assertTrue(detector.shouldBePublished(new BinlogFileOffset(binlogFile, 10L), topic));
}
Example usage of io.eventuate.local.common.BinlogFileOffset from the eventuate-local project:
class MySqlBinaryLogClient, method start.
/**
 * Connects to the MySQL server and starts streaming binlog events, forwarding
 * each parsed row-insert on the watched table to {@code eventConsumer}.
 *
 * @param binlogFileOffset position to resume from; when absent, starts from an
 *                         empty filename at position 4 (4 is presumably the size
 *                         of the binlog file magic header — confirm against the
 *                         MySQL binlog format)
 * @param eventConsumer    callback invoked with each parsed event of type M
 */
public void start(Optional<BinlogFileOffset> binlogFileOffset, Consumer<M> eventConsumer) {
client = new BinaryLogClient(host, port, dbUserName, dbPassword);
client.setServerId(binlogClientUniqueId);
// Keep-alive ping every 5 seconds to detect dropped connections.
client.setKeepAliveInterval(5 * 1000);
// Fall back to the default starting offset when no saved position exists.
BinlogFileOffset bfo = binlogFileOffset.orElse(new BinlogFileOffset("", 4L));
logger.debug("Starting with {}", bfo);
client.setBinlogFilename(bfo.getBinlogFilename());
client.setBinlogPosition(bfo.getOffset());
client.setEventDeserializer(getEventDeserializer());
client.registerEventListener(event -> {
switch(event.getHeader().getEventType()) {
// TABLE_MAP precedes row events; remember the mapping for the source table
// so later row events can be matched to it by table id.
case TABLE_MAP:
{
TableMapEventData tableMapEvent = event.getData();
if (tableMapEvent.getTable().equalsIgnoreCase(sourceTableName)) {
tableMapEventByTableId.put(tableMapEvent.getTableId(), tableMapEvent);
}
break;
}
// Row insert (extended format). NOTE(review): plain WRITE_ROWS is not
// handled here — presumably the deserializer is configured to emit only
// EXT_WRITE_ROWS; confirm against getEventDeserializer().
case EXT_WRITE_ROWS:
{
logger.debug("Got binlog event {}", event);
// Track the current position so it can be checkpointed with the event.
offset = ((EventHeaderV4) event.getHeader()).getPosition();
WriteRowsEventData eventData = event.getData();
// Only forward rows whose table id was previously mapped to the source table.
if (tableMapEventByTableId.containsKey(eventData.getTableId())) {
try {
eventConsumer.accept(writeRowsEventDataParser.parseEventData(eventData, getCurrentBinlogFilename(), offset));
} catch (IOException e) {
throw new RuntimeException("Event row parsing exception", e);
}
}
break;
}
// Server switched to a new binlog file; track its name for future offsets.
case ROTATE:
{
RotateEventData eventData = event.getData();
// eventData can be null for a synthetic/fake rotate event — TODO confirm.
if (eventData != null) {
binlogFilename = eventData.getBinlogFilename();
}
break;
}
}
});
connectWithRetriesOnFail();
}
Aggregations