use of com.taobao.tddl.dbsync.binlog.LogEvent in project canal by alibaba.
the class MysqlConnection method dump.
public void dump(String binlogfilename, Long binlogPosition, SinkFunction func) throws IOException {
    // negotiate session settings and the binlog checksum, then register as a slave
    // and request the binlog stream from the given file/position
    updateSettings();
    loadBinlogChecksum();
    sendRegisterSlave();
    sendBinlogDump(binlogfilename, binlogPosition);
    DirectLogFetcher fetcher = new DirectLogFetcher(connector.getReceiveBufferSize());
    fetcher.start(connector.getChannel());
    LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.ENUM_END_EVENT);
    LogContext context = new LogContext();
    context.setFormatDescription(new FormatDescriptionLogEvent(4, binlogChecksum));
    while (fetcher.fetch()) {
        accumulateReceivedBytes(fetcher.limit());
        LogEvent event = decoder.decode(fetcher, context);
        if (event == null) {
            throw new CanalParseException("parse failed");
        }
        // the sink returning false terminates the dump loop
        if (!func.sink(event)) {
            break;
        }
        // semival == 1 means the master expects a semi-sync ACK for this event
        if (event.getSemival() == 1) {
            sendSemiAck(context.getLogPosition().getFileName(), context.getLogPosition().getPosition());
        }
    }
}
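For context, a minimal driver for this dump loop might look as follows. It is a sketch only: the MysqlConnection constructor used here, the host, credentials, and the starting binlog coordinates are placeholder assumptions, not canal's own startup code.

// Hypothetical driver for MysqlConnection.dump (sketch; all concrete values are placeholders).
public static void dumpFromStart() throws IOException {
    MysqlConnection connection = new MysqlConnection(
        new InetSocketAddress("127.0.0.1", 3306), "canal", "canal");
    connection.connect();
    try {
        connection.dump("mysql-bin.000001", 4L, new SinkFunction<LogEvent>() {

            public boolean sink(LogEvent event) {
                // inspect the event; returning false stops the dump loop shown above
                System.out.println("event type : " + event.getHeader().getType());
                return true;
            }
        });
    } finally {
        connection.disconnect();
    }
}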
use of com.taobao.tddl.dbsync.binlog.LogEvent in project canal by alibaba.
the class MysqlBinlogParsePerformanceTest method consumer.
public static void consumer(BlockingQueue<LogBuffer> buffer) throws IOException, InterruptedException {
    LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.ENUM_END_EVENT);
    LogContext context = new LogContext();
    AtomicLong sum = new AtomicLong(0);
    long start = System.currentTimeMillis();
    long last = 0;
    long end = 0;
    while (true) {
        // take() blocks until the producer hands over the next binlog buffer
        LogEvent event = decoder.decode(buffer.take(), context);
        int eventType = event.getHeader().getType();
        switch (eventType) {
            case LogEvent.ROTATE_EVENT:
                break;
            case LogEvent.WRITE_ROWS_EVENT_V1:
            case LogEvent.WRITE_ROWS_EVENT:
                parseRowsEvent((WriteRowsLogEvent) event, sum);
                break;
            case LogEvent.UPDATE_ROWS_EVENT_V1:
            case LogEvent.PARTIAL_UPDATE_ROWS_EVENT:
            case LogEvent.UPDATE_ROWS_EVENT:
                parseRowsEvent((UpdateRowsLogEvent) event, sum);
                break;
            case LogEvent.DELETE_ROWS_EVENT_V1:
            case LogEvent.DELETE_ROWS_EVENT:
                parseRowsEvent((DeleteRowsLogEvent) event, sum);
                break;
            case LogEvent.XID_EVENT:
            case LogEvent.QUERY_EVENT:
                sum.incrementAndGet();
                break;
            default:
                break;
        }
        long current = sum.get();
        // report throughput roughly every 100,000 processed entries
        if (current - last >= 100000) {
            end = System.currentTimeMillis();
            long tps = ((current - last) * 1000) / (end - start);
            System.out.println(" total : " + sum + " , cost : " + (end - start) + " , tps : " + tps);
            last = current;
            start = end;
        }
    }
}
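The consumer drains a queue that some producer must fill. A minimal wiring sketch is shown below; the main method, queue capacity, and producer thread are assumptions, and the producer body is only indicated rather than taken from the benchmark.

// Minimal wiring sketch for consumer(...) (assumed, not the benchmark's actual main method).
public static void main(String[] args) throws Exception {
    final BlockingQueue<LogBuffer> buffer = new ArrayBlockingQueue<>(1024);

    Thread producer = new Thread(() -> {
        // fill `buffer` with complete-event LogBuffer chunks here, e.g. read from a local
        // binlog file; decoder.decode(...) in consumer() expects whole events per buffer
    });
    producer.start();

    consumer(buffer); // blocks indefinitely, printing throughput every 100,000 entries
}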
use of com.taobao.tddl.dbsync.binlog.LogEvent in project canal by alibaba.
the class DirectLogFetcherTest method testSimple.
@Test
public void testSimple() {
    DirectLogFetcher fetcher = new DirectLogFetcher();
    try {
        MysqlConnector connector = new MysqlConnector(new InetSocketAddress("127.0.0.1", 3306), "canal", "canal");
        connector.connect();
        updateSettings(connector);
        loadBinlogChecksum(connector);
        sendRegisterSlave(connector, 3);
        sendBinlogDump(connector, "mysql-bin.000001", 4L, 3);
        fetcher.start(connector.getChannel());
        LogDecoder decoder = new LogDecoder(LogEvent.UNKNOWN_EVENT, LogEvent.ENUM_END_EVENT);
        LogContext context = new LogContext();
        context.setFormatDescription(new FormatDescriptionLogEvent(4, binlogChecksum));
        while (fetcher.fetch()) {
            LogEvent event = decoder.decode(fetcher, context);
            if (event == null) {
                throw new RuntimeException("parse failed");
            }
            int eventType = event.getHeader().getType();
            switch (eventType) {
                case LogEvent.ROTATE_EVENT:
                    // binlogFileName = ((RotateLogEvent) event).getFilename();
                    System.out.println(((RotateLogEvent) event).getFilename());
                    break;
                case LogEvent.TABLE_MAP_EVENT:
                    parseTableMapEvent((TableMapLogEvent) event);
                    break;
                case LogEvent.WRITE_ROWS_EVENT_V1:
                case LogEvent.WRITE_ROWS_EVENT:
                    parseRowsEvent((WriteRowsLogEvent) event);
                    break;
                case LogEvent.UPDATE_ROWS_EVENT_V1:
                case LogEvent.PARTIAL_UPDATE_ROWS_EVENT:
                case LogEvent.UPDATE_ROWS_EVENT:
                    parseRowsEvent((UpdateRowsLogEvent) event);
                    break;
                case LogEvent.DELETE_ROWS_EVENT_V1:
                case LogEvent.DELETE_ROWS_EVENT:
                    parseRowsEvent((DeleteRowsLogEvent) event);
                    break;
                case LogEvent.QUERY_EVENT:
                    parseQueryEvent((QueryLogEvent) event);
                    break;
                case LogEvent.ROWS_QUERY_LOG_EVENT:
                    parseRowsQueryEvent((RowsQueryLogEvent) event);
                    break;
                case LogEvent.ANNOTATE_ROWS_EVENT:
                    break;
                case LogEvent.XID_EVENT:
                    break;
                default:
                    break;
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    } finally {
        try {
            fetcher.close();
        } catch (IOException e) {
            Assert.fail(e.getMessage());
        }
    }
}
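The parseRowsEvent(...) helpers invoked above are not part of this snippet. A minimal stand-in is sketched below; it only resolves the table metadata and prints it, whereas the real helper also walks each row through RowsLogBuffer.

// Minimal stand-in for parseRowsEvent(...) (a sketch, not the project's actual helper).
protected void parseRowsEvent(RowsLogEvent event) {
    // the table map for this row event was delivered earlier as a TABLE_MAP_EVENT
    TableMapLogEvent table = event.getTable();
    if (table == null) {
        throw new RuntimeException("not found TableMap with tid=" + event.getTableId());
    }
    System.out.println(table.getDbName() + "." + table.getTableName()
                       + " , eventType : " + event.getHeader().getType());
}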
use of com.taobao.tddl.dbsync.binlog.LogEvent in project canal by alibaba.
the class MysqlEventParser method findAsPerTimestampInSpecificLogFile.
/**
 * Given a timestamp, find the transaction start position in the specified binlog file that is
 * closest to (and strictly earlier than) that timestamp. For the last binlog file an endPosition
 * is supplied to avoid an endless scan.
 */
private EntryPosition findAsPerTimestampInSpecificLogFile(MysqlConnection mysqlConnection,
                                                          final Long startTimestamp,
                                                          final EntryPosition endPosition,
                                                          final String searchBinlogFile,
                                                          final Boolean justForPositionTimestamp) {
    final LogPosition logPosition = new LogPosition();
    try {
        mysqlConnection.reconnect();
        // start scanning the file
        mysqlConnection.seek(searchBinlogFile, 4L, endPosition.getGtid(), new SinkFunction<LogEvent>() {

            private LogPosition lastPosition;

            public boolean sink(LogEvent event) {
                EntryPosition entryPosition = null;
                try {
                    CanalEntry.Entry entry = parseAndProfilingIfNecessary(event, true);
                    if (justForPositionTimestamp && logPosition.getPostion() == null && event.getWhen() > 0) {
                        // initial position
                        entryPosition = new EntryPosition(searchBinlogFile,
                            event.getLogPos() - event.getEventLen(),
                            event.getWhen() * 1000,
                            event.getServerId());
                        entryPosition.setGtid(event.getHeader().getGtidSetStr());
                        logPosition.setPostion(entryPosition);
                    }
                    // use the event's own position directly; this avoids an endless loop when
                    // a binlog file contains no parseable entries at all
                    String logfilename = event.getHeader().getLogFileName();
                    // this is the binlog end offset, because it is compared against the
                    // end offset reported by SHOW MASTER STATUS
                    Long logfileoffset = event.getHeader().getLogPos();
                    Long logposTimestamp = event.getHeader().getWhen() * 1000;
                    Long serverId = event.getHeader().getServerId();
                    // if even the earliest record does not satisfy the condition, exit immediately
                    if (logposTimestamp >= startTimestamp) {
                        return false;
                    }
                    if (StringUtils.equals(endPosition.getJournalName(), logfilename)
                        && endPosition.getPosition() <= logfileoffset) {
                        return false;
                    }
                    if (entry == null) {
                        return true;
                    }
                    // data.length: this offset points to the entry following the transaction,
                    // which avoids redundant replay of the transaction
                    if (CanalEntry.EntryType.TRANSACTIONEND.equals(entry.getEntryType())) {
                        entryPosition = new EntryPosition(logfilename, logfileoffset, logposTimestamp, serverId);
                        if (logger.isDebugEnabled()) {
                            logger.debug("set {} to be pending start position before finding another proper one...", entryPosition);
                        }
                        logPosition.setPostion(entryPosition);
                        entryPosition.setGtid(entry.getHeader().getGtid());
                    } else if (CanalEntry.EntryType.TRANSACTIONBEGIN.equals(entry.getEntryType())) {
                        // start position of the current transaction
                        entryPosition = new EntryPosition(logfilename, logfileoffset, logposTimestamp, serverId);
                        if (logger.isDebugEnabled()) {
                            logger.debug("set {} to be pending start position before finding another proper one...", entryPosition);
                        }
                        entryPosition.setGtid(entry.getHeader().getGtid());
                        logPosition.setPostion(entryPosition);
                    }
                    lastPosition = buildLastPosition(entry);
                } catch (Throwable e) {
                    processSinkError(e, lastPosition, searchBinlogFile, 4L);
                }
                return running;
            }
        });
    } catch (IOException e) {
        logger.error("ERROR ## findAsPerTimestampInSpecificLogFile has an error", e);
    }
    if (logPosition.getPostion() != null) {
        return logPosition.getPostion();
    } else {
        return null;
    }
}
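The method above only searches a single file. A simplified, hypothetical sketch of the outer driver is shown below: files are scanned from the newest binlog backwards until one yields a position whose timestamp precedes startTimestamp. The previousBinlogFile helper is invented for illustration, and the stop condition at the oldest available file is omitted.

// Hypothetical outer loop (illustration only). previousBinlogFile(...) is an invented helper
// that maps e.g. "mysql-bin.000042" to "mysql-bin.000041".
EntryPosition found = null;
String searchFile = endPosition.getJournalName(); // begin with the newest binlog file
while (running && found == null && searchFile != null) {
    found = findAsPerTimestampInSpecificLogFile(mysqlConnection, startTimestamp, endPosition, searchFile, false);
    if (found == null) {
        searchFile = previousBinlogFile(searchFile);
    }
}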
use of com.taobao.tddl.dbsync.binlog.LogEvent in project canal by alibaba.
the class MysqlEventParser method findTransactionBeginPosition.
// The requested position may point at a row-data record inside a transaction, so the transaction
// head has to be located first to avoid losing data.
// A transaction can take several seconds to execute; matching only on an identical timestamp
// could drop the first half of that transaction's data.
private Long findTransactionBeginPosition(ErosaConnection mysqlConnection, final EntryPosition entryPosition) throws IOException {
    // if the first record at the target position is not a BEGIN, the binlog has to be scanned from its start
    final java.util.concurrent.atomic.AtomicLong preTransactionStartPosition = new java.util.concurrent.atomic.AtomicLong(0L);
    mysqlConnection.reconnect();
    mysqlConnection.seek(entryPosition.getJournalName(), 4L, entryPosition.getGtid(), new SinkFunction<LogEvent>() {

        private LogPosition lastPosition;

        public boolean sink(LogEvent event) {
            try {
                CanalEntry.Entry entry = parseAndProfilingIfNecessary(event, true);
                if (entry == null) {
                    return true;
                }
                // remember the latest transaction begin position before the target offset
                if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN
                    && entry.getHeader().getLogfileOffset() < entryPosition.getPosition()) {
                    preTransactionStartPosition.set(entry.getHeader().getLogfileOffset());
                }
                if (entry.getHeader().getLogfileOffset() >= entryPosition.getPosition()) {
                    // reached the target position, stop scanning
                    return false;
                }
                lastPosition = buildLastPosition(entry);
            } catch (Exception e) {
                processSinkError(e, lastPosition, entryPosition.getJournalName(), entryPosition.getPosition());
                return false;
            }
            return running;
        }
    });
    // sanity-check the transaction head found closest to the requested position
    if (preTransactionStartPosition.get() > entryPosition.getPosition()) {
        logger.error("preTransactionStartPosition greater than startPosition from zk or localconf, maybe lost data");
        throw new CanalParseException("preTransactionStartPosition greater than startPosition from zk or localconf, maybe lost data");
    }
    return preTransactionStartPosition.get();
}
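A hedged sketch of how the result can be applied: when a transaction head earlier than the requested offset was found, the start position is rewound to it so the in-flight transaction is replayed from its BEGIN. The setPosition call and the log message below are illustrative, not quoted from the project.

// Hypothetical caller (illustration only): rewind the start position to the transaction head.
Long preTransactionStartPosition = findTransactionBeginPosition(mysqlConnection, entryPosition);
if (preTransactionStartPosition > 0L && !preTransactionStartPosition.equals(entryPosition.getPosition())) {
    logger.warn("found new start transaction position , old : {} , new : {}",
        entryPosition.getPosition(), preTransactionStartPosition);
    entryPosition.setPosition(preTransactionStartPosition);
}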