Use of com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDO in the Alibaba canal project:
the applyHistoryToDB method of the DatabaseTableMeta class.
/**
 * Persists a single DDL event into the meta-history store so the schema change can be
 * replayed later (see {@code applyHistoryOnMemory}).
 *
 * @param position binlog position at which the DDL was observed (file/offset/serverId/timestamp)
 * @param schema   schema in effect ("use schema") when the DDL ran
 * @param ddl      raw DDL statement text
 * @param extra    optional extra payload stored alongside the event (may be null)
 * @return always {@code true}; duplicate-position inserts are silently ignored
 * @throws CanalParseException if the insert fails for any reason other than a
 *                             unique-key violation
 */
private boolean applyHistoryToDB(EntryPosition position, String schema, String ddl, String extra) {
    Map<String, String> content = new HashMap<>();
    content.put("destination", destination);
    content.put("binlogFile", position.getJournalName());
    // NOTE: "binlogOffest" (sic) intentionally matches the misspelled property name on
    // MetaHistoryDO — BeanUtils.populate maps by property name, so the typo must be kept.
    content.put("binlogOffest", String.valueOf(position.getPosition()));
    content.put("binlogMasterId", String.valueOf(position.getServerId()));
    content.put("binlogTimestamp", String.valueOf(position.getTimestamp()));
    content.put("useSchema", schema);
    // (removed a dead `content.isEmpty()` check here: six puts above guarantee the map
    // is never empty, so the "apply failed" branch was unreachable)
    List<DdlResult> ddlResults = DruidDdlParser.parse(ddl, schema);
    if (!ddlResults.isEmpty()) {
        // only the first parsed result is recorded; multi-statement DDL keeps statement #1
        DdlResult ddlResult = ddlResults.get(0);
        content.put("sqlSchema", ddlResult.getSchemaName());
        content.put("sqlTable", ddlResult.getTableName());
        content.put("sqlType", ddlResult.getType().name());
        content.put("sqlText", ddl);
        content.put("extra", extra);
    }
    MetaHistoryDO metaDO = new MetaHistoryDO();
    try {
        BeanUtils.populate(metaDO, content);
        // The table has unique constraints covering:
        // 1. duplicate binlog file + offset
        // 2. duplicate masterId + timestamp
        metaHistoryDAO.insert(metaDO);
    } catch (Throwable e) {
        if (isUkDuplicateException(e)) {
            // duplicate position — already recorded, safe to skip
            logger.warn("dup apply for sql : {}", ddl);
        } else {
            throw new CanalParseException("apply history to db failed caused by : " + e.getMessage(), e);
        }
    }
    return true;
}
Use of com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDO in the Alibaba canal project:
the applyHistoryOnMemory method of the DatabaseTableMeta class.
/**
 * Replays persisted DDL history between {@code position} and {@code rollbackPosition}
 * into the in-memory table meta, skipping any record positioned after the rollback point.
 *
 * @param position         starting position (lower timestamp bound for the history query)
 * @param rollbackPosition target position; history recorded beyond it is ignored
 * @return {@code true} when at least one history record was found and every applied
 *         record succeeded; also {@code true} when the DAO returns null
 * @throws CanalParseException if querying or applying the history fails
 */
private boolean applyHistoryOnMemory(EntryPosition position, EntryPosition rollbackPosition) {
    try {
        List<MetaHistoryDO> historyRecords = metaHistoryDAO.findByTimestamp(destination,
            position.getTimestamp(),
            rollbackPosition.getTimestamp());
        if (historyRecords == null) {
            return true;
        }
        for (MetaHistoryDO record : historyRecords) {
            Long offset = record.getBinlogOffest();
            Long timestamp = record.getBinlogTimestamp();
            String masterId = record.getBinlogMasterId();
            EntryPosition snapshotPosition = new EntryPosition(
                record.getBinlogFile(),
                offset == null ? 0L : offset,
                timestamp == null ? 0L : timestamp,
                Long.valueOf(masterId == null ? "-2" : masterId));
            // Skip anything recorded after the rollback point. Within the same second,
            // compare full positions — only for the same serverId — and drop records
            // that sit beyond the expected rollback position.
            boolean newerTimestamp = snapshotPosition.getTimestamp() > rollbackPosition.getTimestamp();
            boolean sameServerButBeyond = !newerTimestamp
                && rollbackPosition.getServerId().equals(snapshotPosition.getServerId())
                && snapshotPosition.compareTo(rollbackPosition) > 0;
            if (newerTimestamp || sameServerButBeyond) {
                continue;
            }
            // replay into memory; abort on the first failure
            if (!memoryTableMeta.apply(snapshotPosition, record.getUseSchema(), record.getSqlText(), null)) {
                return false;
            }
        }
        return !historyRecords.isEmpty();
    } catch (Throwable e) {
        throw new CanalParseException("apply failed", e);
    }
}
Use of com.alibaba.otter.canal.parse.inbound.mysql.tsdb.dao.MetaHistoryDO in the Alibaba canal project:
the testSimple method of the MetaHistoryDAOTest class.
/**
 * Smoke test for MetaHistoryDAO: inserts one history record, reaps records older than
 * two hours, then lists what remains.
 */
@Test
public void testSimple() {
    MetaHistoryDO historyDO = new MetaHistoryDO();
    historyDO.setDestination("test");
    historyDO.setBinlogFile("000001");
    historyDO.setBinlogOffest(4L);
    historyDO.setBinlogMasterId("1");
    // timestamp just over two hours in the past so deleteByTimestamp(..., 7200) can reap it
    historyDO.setBinlogTimestamp(System.currentTimeMillis() - 7300 * 1000);
    historyDO.setSqlSchema("test");
    historyDO.setUseSchema("test");
    historyDO.setSqlTable("testTable");
    // BUG FIX: was a second setSqlTable(...) call, which clobbered the table name set
    // above and left sqlText null — the DDL text belongs in sqlText.
    historyDO.setSqlText("drop table testTable");
    metaHistoryDAO.insert(historyDO);
    int count = metaHistoryDAO.deleteByTimestamp("test", 7200);
    System.out.println(count);
    List<MetaHistoryDO> metaHistoryDOList = metaHistoryDAO.findByTimestamp("test", 0L, System.currentTimeMillis());
    for (MetaHistoryDO metaHistoryDO : metaHistoryDOList) {
        System.out.println(metaHistoryDO.getId());
    }
}
Aggregations