use of com.alibaba.otter.canal.protocol.CanalEntry.Entry in project otter by alibaba.
the class OtterDownStreamHandlerIntergration method buildEvent.
private Event buildEvent() {
    Event event = new Event();
    event.setLogIdentity(new LogIdentity());
    Header.Builder headBuilder = Header.newBuilder();
    headBuilder.setEventLength(1000L);
    headBuilder.setExecuteTime(new Date().getTime());
    headBuilder.setLogfileName("mysql-bin.000001");
    headBuilder.setLogfileOffset(1000L);
    headBuilder.setSchemaName("test");
    headBuilder.setTableName("ljh");
    Entry.Builder entryBuilder = Entry.newBuilder();
    entryBuilder.setHeader(headBuilder.build());
    entryBuilder.setEntryType(EntryType.ROWDATA);
    RowChange.Builder rowChangeBuilder = RowChange.newBuilder();
    RowData.Builder rowDataBuilder = RowData.newBuilder();
    rowChangeBuilder.addRowDatas(rowDataBuilder.build());
    entryBuilder.setStoreValue(rowChangeBuilder.build().toByteString());
    Entry entry = entryBuilder.build();
    event.setEntry(entry);
    return event;
}
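The Event above carries its row payload only as a serialized RowChange inside the entry's store value, so downstream code has to parse it back out before inspecting the rows. A minimal hedged sketch of that round trip (the helper name printRowCounts is hypothetical; it assumes the usual CanalEntry imports are in place):

// Hedged sketch: decode the store value written by buildEvent() above.
private void printRowCounts(Entry entry) throws InvalidProtocolBufferException {
    RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
    for (RowData rowData : rowChange.getRowDatasList()) {
        // the RowData built above is empty, so both counts are 0 here
        System.out.println("before=" + rowData.getBeforeColumnsCount()
                           + ", after=" + rowData.getAfterColumnsCount());
    }
}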
use of com.alibaba.otter.canal.protocol.CanalEntry.Entry in project otter by alibaba.
the class MessageParser method parse.
/**
 * Parses the Entry objects delivered by canal into the internal objects used by otter.
 *
 * <pre>
 * The following filtering has to be handled:
 * 1. Transaction begin/end entries are dropped.
 * 2. retl.retl_client / retl.retl_mark loopback marks are processed, and the loopback data that follows them is filtered out.
 * 3. retl.xdual canal heartbeat-table data is filtered out.
 * </pre>
 */
public List<EventData> parse(Long pipelineId, List<Entry> datas) throws SelectException {
    List<EventData> eventDatas = new ArrayList<EventData>();
    Pipeline pipeline = configClientService.findPipeline(pipelineId);
    List<Entry> transactionDataBuffer = new ArrayList<Entry>();
    // hz is the home site: data flowing us -> hz must be looped back and synced to us,
    // and the loopback remedy algorithm must be enabled for that.
    PipelineParameter pipelineParameter = pipeline.getParameters();
    boolean enableLoopbackRemedy = pipelineParameter.isEnableRemedy() && pipelineParameter.isHome()
                                   && pipelineParameter.getRemedyAlgorithm().isLoopback();
    boolean isLoopback = false;
    // Whether this data needs loopback handling. Only loopback data produced by a normal otter
    // sync is processed, because applications may suppress the sync manually to avoid loopback.
    boolean needLoopback = false;
    long now = new Date().getTime();
    try {
        for (Entry entry : datas) {
            switch (entry.getEntryType()) {
                case TRANSACTIONBEGIN:
                    isLoopback = false;
                    break;
                case ROWDATA:
                    String tableName = entry.getHeader().getTableName();
                    // check whether this is the loopback mark table (retl_mark)
                    boolean isMarkTable = tableName.equalsIgnoreCase(pipeline.getParameters().getSystemMarkTable());
                    if (isMarkTable) {
                        RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
                        if (!rowChange.getIsDdl()) {
                            int loopback = checkLoopback(pipeline, rowChange.getRowDatas(0));
                            if (loopback == 2) {
                                // only handle loopback data produced by a normal sync
                                needLoopback |= true;
                            }
                            isLoopback |= loopback > 0;
                        }
                    }
                    // also check the otter 3.0 loopback table; its schema can be arbitrary, so the schema is not compared
                    boolean isCompatibleLoopback = tableName.equalsIgnoreCase(compatibleMarkTable);
                    if (isCompatibleLoopback) {
                        RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
                        if (!rowChange.getIsDdl()) {
                            int loopback = checkCompatibleLoopback(pipeline, rowChange.getRowDatas(0));
                            if (loopback == 2) {
                                // only handle loopback data produced by a normal sync
                                needLoopback |= true;
                            }
                            isLoopback |= loopback > 0;
                        }
                    }
                    if ((!isLoopback || (enableLoopbackRemedy && needLoopback)) && !isMarkTable && !isCompatibleLoopback) {
                        transactionDataBuffer.add(entry);
                    }
                    break;
                case TRANSACTIONEND:
                    if (!isLoopback || (enableLoopbackRemedy && needLoopback)) {
                        // parse the buffered data
                        for (Entry bufferEntry : transactionDataBuffer) {
                            List<EventData> parseDatas = internParse(pipeline, bufferEntry);
                            if (CollectionUtils.isEmpty(parseDatas)) {
                                // may be empty; DDL entries return null
                                continue;
                            }
                            // roughly estimate the per-event size
                            long totalSize = bufferEntry.getHeader().getEventLength();
                            long eachSize = totalSize / parseDatas.size();
                            for (EventData eventData : parseDatas) {
                                if (eventData == null) {
                                    continue;
                                }
                                // record the size
                                eventData.setSize(eachSize);
                                if (needLoopback) {
                                    // if the delay exceeds the configured threshold, mark the event for a database look-back
                                    if (now - eventData.getExecuteTime() > 1000 * pipeline.getParameters().getRemedyDelayThresoldForMedia()) {
                                        eventData.setSyncConsistency(SyncConsistency.MEDIA);
                                    } else {
                                        eventData.setSyncConsistency(SyncConsistency.BASE);
                                    }
                                    eventData.setRemedy(true);
                                }
                                eventDatas.add(eventData);
                            }
                        }
                    }
                    isLoopback = false;
                    needLoopback = false;
                    transactionDataBuffer.clear();
                    break;
                default:
                    break;
            }
        }
        // flush the remaining buffered data; there may be no TRANSACTIONEND
        if (!isLoopback || (enableLoopbackRemedy && needLoopback)) {
            // parse the buffered data
            for (Entry bufferEntry : transactionDataBuffer) {
                List<EventData> parseDatas = internParse(pipeline, bufferEntry);
                if (CollectionUtils.isEmpty(parseDatas)) {
                    // may be empty; DDL entries return null
                    continue;
                }
                // roughly estimate the per-event size
                long totalSize = bufferEntry.getHeader().getEventLength();
                long eachSize = totalSize / parseDatas.size();
                for (EventData eventData : parseDatas) {
                    if (eventData == null) {
                        continue;
                    }
                    // record the size
                    eventData.setSize(eachSize);
                    if (needLoopback) {
                        // if the delay exceeds the configured threshold, mark the event for a database look-back
                        if (now - eventData.getExecuteTime() > 1000 * pipeline.getParameters().getRemedyDelayThresoldForMedia()) {
                            eventData.setSyncConsistency(SyncConsistency.MEDIA);
                        } else {
                            eventData.setSyncConsistency(SyncConsistency.BASE);
                        }
                    }
                    eventDatas.add(eventData);
                }
            }
        }
    } catch (Exception e) {
        throw new SelectException(e);
    }
    return eventDatas;
}
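Both flush branches above decide the sync consistency with the same arithmetic: the configured remedy delay threshold (apparently in seconds, given the factor of 1000) is compared against the event's age in milliseconds. A hedged extraction of that rule, using a hypothetical helper name chooseConsistency:

// Hypothetical helper mirroring the threshold check in parse():
// thresholdSeconds stands for the value returned by getRemedyDelayThresoldForMedia().
private SyncConsistency chooseConsistency(long nowMillis, long executeTimeMillis, long thresholdSeconds) {
    if (nowMillis - executeTimeMillis > 1000L * thresholdSeconds) {
        return SyncConsistency.MEDIA; // too far behind: re-query the source database
    }
    return SyncConsistency.BASE;      // fresh enough: trust the binlog image
}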
use of com.alibaba.otter.canal.protocol.CanalEntry.Entry in project canal by alibaba.
the class LogEventConvert method parseRowsEvent.
private Entry parseRowsEvent(RowsLogEvent event) {
    if (filterRows) {
        return null;
    }
    try {
        TableMapLogEvent table = event.getTable();
        if (table == null) {
            // no record exists for this tableId
            throw new TableIdNotFoundException("not found tableId:" + event.getTableId());
        }
        String fullname = table.getDbName() + "." + table.getTableName();
        // check name filter
        if (nameFilter != null && !nameFilter.filter(fullname)) {
            return null;
        }
        if (nameBlackFilter != null && nameBlackFilter.filter(fullname)) {
            return null;
        }
        if (tableMetaCache.isOnRDS() && "mysql.ha_health_check".equals(fullname)) {
            // ignore the mysql.ha_health_check heartbeat data generated in RDS mode
            return null;
        }
        EventType eventType = null;
        int type = event.getHeader().getType();
        if (LogEvent.WRITE_ROWS_EVENT_V1 == type || LogEvent.WRITE_ROWS_EVENT == type) {
            eventType = EventType.INSERT;
        } else if (LogEvent.UPDATE_ROWS_EVENT_V1 == type || LogEvent.UPDATE_ROWS_EVENT == type) {
            eventType = EventType.UPDATE;
        } else if (LogEvent.DELETE_ROWS_EVENT_V1 == type || LogEvent.DELETE_ROWS_EVENT == type) {
            eventType = EventType.DELETE;
        } else {
            throw new CanalParseException("unsupport event type :" + event.getHeader().getType());
        }
        Header header = createHeader(binlogFileName, event.getHeader(), table.getDbName(), table.getTableName(), eventType);
        RowChange.Builder rowChangeBuider = RowChange.newBuilder();
        rowChangeBuider.setTableId(event.getTableId());
        rowChangeBuider.setIsDdl(false);
        rowChangeBuider.setEventType(eventType);
        RowsLogBuffer buffer = event.getRowsBuf(charset.name());
        BitSet columns = event.getColumns();
        BitSet changeColumns = event.getChangeColumns();
        boolean tableError = false;
        TableMeta tableMeta = null;
        if (tableMetaCache != null) {
            // if a table meta cache exists, look the table meta up through it
            tableMeta = getTableMeta(table.getDbName(), table.getTableName(), true);
            if (tableMeta == null) {
                tableError = true;
                if (!filterTableError) {
                    throw new CanalParseException("not found [" + fullname + "] in db , pls check!");
                }
            }
        }
        while (buffer.nextOneRow(columns)) {
            // process one row record
            RowData.Builder rowDataBuilder = RowData.newBuilder();
            if (EventType.INSERT == eventType) {
                // insert records go into the after-image columns
                tableError |= parseOneRow(rowDataBuilder, event, buffer, columns, true, tableMeta);
            } else if (EventType.DELETE == eventType) {
                // delete records go into the before-image columns
                tableError |= parseOneRow(rowDataBuilder, event, buffer, columns, false, tableMeta);
            } else {
                // update has to handle both the before and the after image
                tableError |= parseOneRow(rowDataBuilder, event, buffer, columns, false, tableMeta);
                if (!buffer.nextOneRow(changeColumns)) {
                    rowChangeBuider.addRowDatas(rowDataBuilder.build());
                    break;
                }
                tableError |= parseOneRow(rowDataBuilder, event, buffer, changeColumns, true, tableMeta);
            }
            rowChangeBuider.addRowDatas(rowDataBuilder.build());
        }
        RowChange rowChange = rowChangeBuider.build();
        if (tableError) {
            Entry entry = createEntry(header, EntryType.ROWDATA, ByteString.EMPTY);
            logger.warn("table parser error : {}storeValue: {}", entry.toString(), rowChange.toString());
            return null;
        } else {
            Entry entry = createEntry(header, EntryType.ROWDATA, rowChangeBuider.build().toByteString());
            return entry;
        }
    } catch (Exception e) {
        throw new CanalParseException("parse row data failed.", e);
    }
}
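A consumer of the Entry produced here typically reverses the packing done above: it parses the RowChange out of the store value and walks its rows, reading the before image for deletes and the after image otherwise. A hedged sketch (the method name dumpRowChange is an assumption, not canal API; it assumes java.util.List and the CanalEntry classes are imported):

// Hypothetical consumer-side helper for the Entry built by parseRowsEvent().
private void dumpRowChange(Entry entry) throws InvalidProtocolBufferException {
    RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
    for (RowData rowData : rowChange.getRowDatasList()) {
        // DELETE carries only a before image, INSERT only an after image,
        // UPDATE carries both (see the parseOneRow calls above).
        List<Column> columns = rowChange.getEventType() == EventType.DELETE
                ? rowData.getBeforeColumnsList()
                : rowData.getAfterColumnsList();
        for (Column column : columns) {
            System.out.println(column.getName() + "=" + column.getValue());
        }
    }
}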
use of com.alibaba.otter.canal.protocol.CanalEntry.Entry in project canal by alibaba.
the class EventTransactionBufferTest method testTransactionFlush.
@Test
public void testTransactionFlush() {
    final int bufferSize = 64;
    final int transactionSize = 5;
    EventTransactionBuffer buffer = new EventTransactionBuffer();
    buffer.setBufferSize(bufferSize);
    buffer.setFlushCallback(new TransactionFlushCallback() {

        public void flush(List<Entry> transaction) throws InterruptedException {
            Assert.assertEquals(transactionSize, transaction.size());
            System.out.println("\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>");
            for (Entry data : transaction) {
                CanalEntry.Header header = data.getHeader();
                Date date = new Date(header.getExecuteTime());
                SimpleDateFormat format = new SimpleDateFormat(DATE_FORMAT);
                if (data.getEntryType() == EntryType.TRANSACTIONBEGIN || data.getEntryType() == EntryType.TRANSACTIONEND) {
                    System.out.println(data.getEntryType());
                } else {
                    System.out.println(MessageFormat.format(messgae, new Object[] { Thread.currentThread().getName(),
                            header.getLogfileName(), header.getLogfileOffset(), format.format(date),
                            header.getSchemaName(), header.getTableName() }));
                }
            }
            System.out.println("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n");
        }
    });
    buffer.start();
    try {
        for (int i = 0; i < transactionSize * 10; i++) {
            if (i % transactionSize == 0) {
                buffer.add(buildEntry("1", 1L + i, 40L + i, EntryType.TRANSACTIONBEGIN));
            } else if ((i + 1) % transactionSize == 0) {
                buffer.add(buildEntry("1", 1L + i, 40L + i, EntryType.TRANSACTIONEND));
            } else {
                buffer.add(buildEntry("1", 1L + i, 40L + i));
            }
        }
    } catch (InterruptedException e) {
        Assert.fail(e.getMessage());
    }
    buffer.stop();
}
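The buildEntry helper used above is defined elsewhere in the test class. A plausible sketch of what it does, assuming it only fills the binlog file name, offset and execute time into a Header (the real helper may set additional fields; the three-argument overload presumably just defaults the entry type):

// Hedged reconstruction of the buildEntry test helper referenced above.
private Entry buildEntry(String binlogFile, long offset, long executeTime, EntryType entryType) {
    Header.Builder headerBuilder = Header.newBuilder();
    headerBuilder.setLogfileName(binlogFile);
    headerBuilder.setLogfileOffset(offset);
    headerBuilder.setExecuteTime(executeTime);
    Entry.Builder entryBuilder = Entry.newBuilder();
    entryBuilder.setHeader(headerBuilder.build());
    entryBuilder.setEntryType(entryType);
    return entryBuilder.build();
}

private Entry buildEntry(String binlogFile, long offset, long executeTime) {
    return buildEntry(binlogFile, offset, executeTime, EntryType.ROWDATA);
}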
use of com.alibaba.otter.canal.protocol.CanalEntry.Entry in project canal by alibaba.
the class LocalBinlogEventParserTest method test_timestamp.
@Test
public void test_timestamp() throws InterruptedException {
    final TimeoutChecker timeoutChecker = new TimeoutChecker(300 * 1000);
    final AtomicLong entryCount = new AtomicLong(0);
    final EntryPosition entryPosition = new EntryPosition();
    final EntryPosition defaultPosition = buildPosition("mysql-bin.000001", null, 1322803601000L);
    final LocalBinlogEventParser controller = new LocalBinlogEventParser();
    controller.setMasterPosition(defaultPosition);
    controller.setMasterInfo(buildAuthentication());
    controller.setDirectory(directory);
    controller.setEventSink(new AbstractCanalEventSinkTest<List<Entry>>() {

        @Override
        public boolean sink(List<Entry> entrys, InetSocketAddress remoteAddress, String destination) throws CanalSinkException {
            for (Entry entry : entrys) {
                entryCount.incrementAndGet();
                String logfilename = entry.getHeader().getLogfileName();
                long logfileoffset = entry.getHeader().getLogfileOffset();
                long executeTime = entry.getHeader().getExecuteTime();
                entryPosition.setJournalName(logfilename);
                entryPosition.setPosition(logfileoffset);
                entryPosition.setTimestamp(executeTime);
                break;
            }
            controller.stop();
            timeoutChecker.stop();
            timeoutChecker.touch();
            return true;
        }
    });
    controller.setLogPositionManager(new AbstractCanalLogPositionManager() {

        public void persistLogPosition(String destination, LogPosition logPosition) {
            System.out.println(logPosition);
        }

        @Override
        public LogPosition getLatestIndexBy(String destination) {
            return null;
        }
    });
    controller.start();
    timeoutChecker.waitForIdle();
    if (controller.isStart()) {
        controller.stop();
    }
    // check
    Assert.assertTrue(entryCount.get() > 0);
    // the first entry should match the starting position
    Assert.assertEquals(entryPosition.getJournalName(), "mysql-bin.000001");
    Assert.assertTrue(entryPosition.getPosition() <= 6163L);
    Assert.assertTrue(entryPosition.getTimestamp() <= defaultPosition.getTimestamp());
}
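The buildPosition and buildAuthentication fixtures are defined elsewhere in the test. A hedged sketch of what they might look like, assuming EntryPosition's (journalName, position, timestamp) constructor and an AuthenticationInfo built from an address plus credentials (host, port, user and password here are placeholders, not values from the source):

// Hedged reconstructions of the test fixtures referenced above.
private EntryPosition buildPosition(String binlogFile, Long offset, Long timestamp) {
    return new EntryPosition(binlogFile, offset, timestamp);
}

private AuthenticationInfo buildAuthentication() {
    return new AuthenticationInfo(new InetSocketAddress("127.0.0.1", 3306), "user", "password");
}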