use of com.alibaba.otter.shared.common.model.config.pipeline.PipelineParameter in project otter by alibaba.
In the class FileLoadActionTest, the method buildPipeline:
protected Pipeline buildPipeline(final int fileDataStartIndex, int fileDataCount) {
    final Pipeline pipeline = new Pipeline();
    pipeline.setParameters(new PipelineParameter());
    // Each data media pair covers NUMBER_OF_FILE_DATA_COPIES file data entries
    int dataMediaPairCount = fileDataCount / NUMBER_OF_FILE_DATA_COPIES;
    pipeline.setPairs(new ArrayList<DataMediaPair>(dataMediaPairCount));
    for (int i = fileDataStartIndex; i < dataMediaPairCount; i++) {
        DataMediaPair dataMediaPair = buildDataMediaPair(i, i);
        pipeline.getPairs().add(dataMediaPair);
    }
    return pipeline;
}
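For orientation, a minimal usage sketch of this helper; the value of NUMBER_OF_FILE_DATA_COPIES is an assumption for illustration, not taken from the rest of the test class.

// Illustrative only: assuming NUMBER_OF_FILE_DATA_COPIES == 10, building a pipeline for
// 50 file data entries starting at index 0 yields 50 / 10 = 5 data media pairs.
Pipeline pipeline = buildPipeline(0, 50);
// pipeline.getPairs() now holds 5 pairs, built by buildDataMediaPair(0, 0) .. buildDataMediaPair(4, 4)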
use of com.alibaba.otter.shared.common.model.config.pipeline.PipelineParameter in project otter by alibaba.
In the class OtterTransformerTest, the method test_rowData_mysql_oracle:
@Test
public void test_rowData_mysql_oracle() {
    final Pipeline pipeline = new Pipeline();
    pipeline.setId(100L);
    List<DataMediaPair> pairs = new ArrayList<DataMediaPair>();
    DataMediaPair pair1 = new DataMediaPair();
    pair1.setId(1L);
    pair1.setPipelineId(pipeline.getId());
    pair1.setPullWeight(1L);
    pair1.setPushWeight(1L);
    DbDataMedia mysqlMedia = getMysqlMedia();
    mysqlMedia.setId(1L);
    pair1.setSource(mysqlMedia);
    DbDataMedia oracleMedia = getOracleMedia();
    pair1.setTarget(oracleMedia);
    pairs.add(pair1);
    pipeline.setPairs(pairs);
    PipelineParameter param = new PipelineParameter();
    param.setSyncMode(SyncMode.ROW);
    pipeline.setParameters(param);
    // Stub the config lookup (JMockit) so any pipeline id resolves to the pipeline built above
    new NonStrictExpectations() {
        {
            configClientService.findPipeline(anyLong);
            returns(pipeline);
        }
    };
    Identity identity = new Identity();
    identity.setChannelId(100L);
    identity.setPipelineId(100L);
    identity.setProcessId(100L);
    RowBatch rowBatch = new RowBatch();
    rowBatch.setIdentity(identity);
    // Build a single UPDATE event with two key columns and a mix of column types
    EventData eventData = new EventData();
    eventData.setTableId(1L);
    eventData.setSchemaName("srf");
    eventData.setTableName("columns");
    eventData.setEventType(EventType.UPDATE);
    eventData.setExecuteTime(100L);
    eventData.getKeys().add(buildColumn("id", Types.INTEGER, "1", true, false));
    eventData.getKeys().add(buildColumn("name", Types.VARCHAR, "ljh", true, false));
    eventData.getColumns().add(buildColumn("alias_name", Types.CHAR, "hello", false, false));
    eventData.getColumns().add(buildColumn("amount", Types.DECIMAL, "100.01", false, false));
    eventData.getColumns().add(buildColumn("text_b", Types.BLOB, "[116,101,120,116,95,98]", false, false));
    eventData.getColumns().add(buildColumn("text_c", Types.CLOB, "text_c", false, false));
    eventData.getColumns().add(buildColumn("curr_date", Types.DATE, "2011-01-01", false, false));
    eventData.getColumns().add(buildColumn("gmt_create", Types.TIMESTAMP, "2011-01-01 11:11:11", false, false));
    eventData.getColumns().add(buildColumn("gmt_modify", Types.TIMESTAMP, "2011-01-01 11:11:11", false, false));
    rowBatch.merge(eventData);
    Map<Class, BatchObject> batchs = otterTransformFactory.transform(rowBatch);
    RowBatch result = (RowBatch) batchs.get(EventData.class);
    want.number(result.getDatas().size()).isEqualTo(1);
}
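The buildColumn helper used above is not part of this listing. A plausible sketch, inferred from its call sites (name, java.sql.Types code, string value, key flag, null flag); the EventColumn setter names are assumptions, so treat this as illustrative rather than the test's actual helper.

// Hypothetical reconstruction of the test's buildColumn helper
private EventColumn buildColumn(String name, int type, String value, boolean isKey, boolean isNull) {
    EventColumn column = new EventColumn();
    column.setColumnName(name);   // column name, e.g. "id"
    column.setColumnType(type);   // java.sql.Types constant, e.g. Types.INTEGER
    column.setColumnValue(value); // value serialized as a string
    column.setKey(isKey);         // true for primary-key columns added via getKeys()
    column.setNull(isNull);       // true when the column value is SQL NULL
    return column;
}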
use of com.alibaba.otter.shared.common.model.config.pipeline.PipelineParameter in project otter by alibaba.
In the class ChannelServiceImpl, the method doToModel:
/**
 * <pre>
 * Converts a DO object into a Model object.
 * Current optimization:
 *   Five SQL round trips are needed: pipeline\node\dataMediaPair\dataMedia\dataMediaSource (five layers).
 *   Each layer issues a single SQL query, avoiding the IO and database overhead of repeated lookups in a loop.
 * Long-term optimization:
 *   Restructure the SQL to reduce the number of SQL calls as much as possible.
 * </pre>
 *
 * @param channelDo
 * @return Channel
 */
private Channel doToModel(ChannelDO channelDo) {
    Channel channel = new Channel();
    try {
        channel.setId(channelDo.getId());
        channel.setName(channelDo.getName());
        channel.setDescription(channelDo.getDescription());
        channel.setStatus(arbitrateManageService.channelEvent().status(channelDo.getId()));
        channel.setParameters(channelDo.getParameters());
        channel.setGmtCreate(channelDo.getGmtCreate());
        channel.setGmtModified(channelDo.getGmtModified());
        List<Pipeline> pipelines = pipelineService.listByChannelIds(channelDo.getId());
        // Merge PipelineParameter and ChannelParameter
        SystemParameter systemParameter = systemParameterService.find();
        for (Pipeline pipeline : pipelines) {
            PipelineParameter parameter = new PipelineParameter();
            parameter.merge(systemParameter);
            parameter.merge(channel.getParameters());
            // Copy the pipeline's own parameters last
            parameter.merge(pipeline.getParameters());
            pipeline.setParameters(parameter);
            // pipeline.getParameters().merge(channel.getParameters());
        }
        channel.setPipelines(pipelines);
    } catch (Exception e) {
        logger.error("ERROR ## change the channel DO to Model has an exception");
        throw new ManagerException(e);
    }
    return channel;
}
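For orientation, a hedged sketch of where this conversion is typically called from inside the service; the findById wiring and the channelDao accessor below are assumptions for illustration, not copied from ChannelServiceImpl.

// Hypothetical caller: load the DO by id and convert it, so consumers of the service see
// pipelines whose PipelineParameter has already been merged with system and channel settings.
public Channel findById(Long channelId) {
    ChannelDO channelDo = channelDao.findById(channelId); // assumed DAO accessor
    if (channelDo == null) {
        throw new ManagerException("channel " + channelId + " not found"); // assumed error handling
    }
    return doToModel(channelDo);
}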
use of com.alibaba.otter.shared.common.model.config.pipeline.PipelineParameter in project otter by alibaba.
In the class ChannelServiceImpl, the method doToModel (list overload):
/**
 * <pre>
 * Converts a list of DO objects into a list of Model objects.
 * Current optimization:
 *   Five SQL round trips are needed: pipeline\node\dataMediaPair\dataMedia\dataMediaSource (five layers).
 *   Each layer issues a single SQL query, avoiding the IO and database overhead of repeated lookups in a loop.
 * Long-term optimization:
 *   Restructure the SQL to reduce the number of SQL calls as much as possible.
 * </pre>
 *
 * @param channelDos
 * @return List<Channel>
 */
private List<Channel> doToModel(List<ChannelDO> channelDos) {
    List<Channel> channels = new ArrayList<Channel>();
    try {
        // 1. Collect the channel ids on their own
        List<Long> channelIds = new ArrayList<Long>();
        for (ChannelDO channelDo : channelDos) {
            channelIds.add(channelDo.getId());
        }
        Long[] idArray = new Long[channelIds.size()];
        // Fetch all pipelines for these channel ids at once and filter by channel id, avoiding repeated queries
        List<Pipeline> pipelines = pipelineService.listByChannelIds(channelIds.toArray(idArray));
        SystemParameter systemParameter = systemParameterService.find();
        for (ChannelDO channelDo : channelDos) {
            Channel channel = new Channel();
            channel.setId(channelDo.getId());
            channel.setName(channelDo.getName());
            channel.setDescription(channelDo.getDescription());
            ChannelStatus channelStatus = arbitrateManageService.channelEvent().status(channelDo.getId());
            channel.setStatus(null == channelStatus ? ChannelStatus.STOP : channelStatus);
            channel.setParameters(channelDo.getParameters());
            channel.setGmtCreate(channelDo.getGmtCreate());
            channel.setGmtModified(channelDo.getGmtModified());
            // Pick out the pipelines that belong to this channel
            List<Pipeline> subPipelines = new ArrayList<Pipeline>();
            for (Pipeline pipeline : pipelines) {
                if (pipeline.getChannelId().equals(channelDo.getId())) {
                    // Merge PipelineParameter and ChannelParameter
                    PipelineParameter parameter = new PipelineParameter();
                    parameter.merge(systemParameter);
                    parameter.merge(channel.getParameters());
                    // Copy the pipeline's own parameters last
                    parameter.merge(pipeline.getParameters());
                    pipeline.setParameters(parameter);
                    subPipelines.add(pipeline);
                }
            }
            channel.setPipelines(subPipelines);
            channels.add(channel);
        }
    } catch (Exception e) {
        logger.error("ERROR ## change the channels DO to Model has an exception");
        throw new ManagerException(e);
    }
    return channels;
}
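Both doToModel overloads repeat the same three-step parameter merge. A refactoring sketch that factors it out; the helper name is hypothetical and the merge order simply mirrors the code above.

// Hypothetical shared helper: apply the system -> channel -> pipeline merge in one place
private void mergePipelineParameter(SystemParameter systemParameter, Channel channel, Pipeline pipeline) {
    PipelineParameter parameter = new PipelineParameter();
    parameter.merge(systemParameter);            // global defaults first
    parameter.merge(channel.getParameters());    // then channel-level settings
    parameter.merge(pipeline.getParameters());   // pipeline's own parameters copied last
    pipeline.setParameters(parameter);
}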
use of com.alibaba.otter.shared.common.model.config.pipeline.PipelineParameter in project otter by alibaba.
In the class MessageParser, the method parse:
/**
 * Parses the Entry objects delivered by canal into the internal objects used by otter.
 *
 * <pre>
 * Data filtering that has to be handled:
 * 1. Transaction Begin/End filtering
 * 2. retl.retl_client/retl.retl_mark loopback markers, plus filtering of the loopback data that follows them
 * 3. retl.xdual canal heartbeat table filtering
 * </pre>
 */
public List<EventData> parse(Long pipelineId, List<Entry> datas) throws SelectException {
    List<EventData> eventDatas = new ArrayList<EventData>();
    Pipeline pipeline = configClientService.findPipeline(pipelineId);
    List<Entry> transactionDataBuffer = new ArrayList<Entry>();
    // hz is the home site: data flowing us -> hz must be looped back and synced to us,
    // which requires the loopback remedy algorithm to be enabled
    PipelineParameter pipelineParameter = pipeline.getParameters();
    boolean enableLoopbackRemedy = pipelineParameter.isEnableRemedy() && pipelineParameter.isHome()
                                   && pipelineParameter.getRemedyAlgorithm().isLoopback();
    boolean isLoopback = false;
    // Whether this data is the kind that needs loopback handling. Only loopback data produced by a normal
    // otter sync is processed, because business callers may suppress synchronization manually to avoid loops
    boolean needLoopback = false;
    long now = new Date().getTime();
    try {
        for (Entry entry : datas) {
            switch (entry.getEntryType()) {
                case TRANSACTIONBEGIN:
                    isLoopback = false;
                    break;
                case ROWDATA:
                    String tableName = entry.getHeader().getTableName();
                    // Check whether this is the loopback mark table (retl_mark)
                    boolean isMarkTable = tableName.equalsIgnoreCase(pipeline.getParameters().getSystemMarkTable());
                    if (isMarkTable) {
                        RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
                        if (!rowChange.getIsDdl()) {
                            int loopback = checkLoopback(pipeline, rowChange.getRowDatas(0));
                            if (loopback == 2) {
                                // Only handle loopback data produced by normal synchronization
                                needLoopback |= true;
                            }
                            isLoopback |= loopback > 0;
                        }
                    }
                    // Also check otter 3.0's loopback table; its schema can be arbitrary, so the schema is not compared
                    boolean isCompatibleLoopback = tableName.equalsIgnoreCase(compatibleMarkTable);
                    if (isCompatibleLoopback) {
                        RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
                        if (!rowChange.getIsDdl()) {
                            int loopback = checkCompatibleLoopback(pipeline, rowChange.getRowDatas(0));
                            if (loopback == 2) {
                                // Only handle loopback data produced by normal synchronization
                                needLoopback |= true;
                            }
                            isLoopback |= loopback > 0;
                        }
                    }
                    if ((!isLoopback || (enableLoopbackRemedy && needLoopback)) && !isMarkTable && !isCompatibleLoopback) {
                        transactionDataBuffer.add(entry);
                    }
                    break;
                case TRANSACTIONEND:
                    if (!isLoopback || (enableLoopbackRemedy && needLoopback)) {
                        // Parse the buffered data
                        for (Entry bufferEntry : transactionDataBuffer) {
                            List<EventData> parseDatas = internParse(pipeline, bufferEntry);
                            if (CollectionUtils.isEmpty(parseDatas)) {
                                // May be empty; DDL entries parse to null
                                continue;
                            }
                            // Roughly estimate the size of each event
                            long totalSize = bufferEntry.getHeader().getEventLength();
                            long eachSize = totalSize / parseDatas.size();
                            for (EventData eventData : parseDatas) {
                                if (eventData == null) {
                                    continue;
                                }
                                // Record the size
                                eventData.setSize(eachSize);
                                if (needLoopback) {
                                    // If the delay exceeds the configured threshold, mark the event for a database re-query
                                    if (now - eventData.getExecuteTime() > 1000 * pipeline.getParameters().getRemedyDelayThresoldForMedia()) {
                                        eventData.setSyncConsistency(SyncConsistency.MEDIA);
                                    } else {
                                        eventData.setSyncConsistency(SyncConsistency.BASE);
                                    }
                                    eventData.setRemedy(true);
                                }
                                eventDatas.add(eventData);
                            }
                        }
                    }
                    isLoopback = false;
                    needLoopback = false;
                    transactionDataBuffer.clear();
                    break;
                default:
                    break;
            }
        }
        // Flush the final batch; there may be no TRANSACTIONEND
        if (!isLoopback || (enableLoopbackRemedy && needLoopback)) {
            // Parse the buffered data
            for (Entry bufferEntry : transactionDataBuffer) {
                List<EventData> parseDatas = internParse(pipeline, bufferEntry);
                if (CollectionUtils.isEmpty(parseDatas)) {
                    // May be empty; DDL entries parse to null
                    continue;
                }
                // Roughly estimate the size of each event
                long totalSize = bufferEntry.getHeader().getEventLength();
                long eachSize = totalSize / parseDatas.size();
                for (EventData eventData : parseDatas) {
                    if (eventData == null) {
                        continue;
                    }
                    // Record the size
                    eventData.setSize(eachSize);
                    if (needLoopback) {
                        // If the delay exceeds the configured threshold, mark the event for a database re-query
                        if (now - eventData.getExecuteTime() > 1000 * pipeline.getParameters().getRemedyDelayThresoldForMedia()) {
                            eventData.setSyncConsistency(SyncConsistency.MEDIA);
                        } else {
                            eventData.setSyncConsistency(SyncConsistency.BASE);
                        }
                    }
                    eventDatas.add(eventData);
                }
            }
        }
    } catch (Exception e) {
        throw new SelectException(e);
    }
    return eventDatas;
}
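The TRANSACTIONEND branch and the trailing flush run the same buffer-draining loop; they differ only in that the first also marks events for remedy. A hedged extraction sketch of that shared piece; the method name and the markRemedy flag are hypothetical, and the threshold check mirrors the code above.

// Hypothetical extraction of the duplicated buffer-draining logic in parse()
private void drainTransactionBuffer(Pipeline pipeline, List<Entry> buffer, List<EventData> out,
                                    boolean needLoopback, boolean markRemedy, long now) {
    for (Entry bufferEntry : buffer) {
        List<EventData> parseDatas = internParse(pipeline, bufferEntry);
        if (CollectionUtils.isEmpty(parseDatas)) {
            continue; // DDL entries may parse to null/empty
        }
        // Spread the binlog entry size evenly across the events it produced
        long eachSize = bufferEntry.getHeader().getEventLength() / parseDatas.size();
        for (EventData eventData : parseDatas) {
            if (eventData == null) {
                continue;
            }
            eventData.setSize(eachSize);
            if (needLoopback) {
                long threshold = 1000 * pipeline.getParameters().getRemedyDelayThresoldForMedia();
                // Re-query the database for events older than the configured delay threshold
                eventData.setSyncConsistency(now - eventData.getExecuteTime() > threshold
                                             ? SyncConsistency.MEDIA : SyncConsistency.BASE);
                if (markRemedy) {
                    eventData.setRemedy(true);
                }
            }
            out.add(eventData);
        }
    }
}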