Example usage of com.alibaba.otter.shared.common.model.config.data.DataMedia in the alibaba/otter project.
Class DataSourceInfo, method execute:
/**
 * Renders the detail screen for one data source: looks the source up by id and
 * exposes it, together with every data media defined on it, to the view.
 *
 * @param dataMediaSourceId id of the data source to display
 * @param context template context receiving "source" and "dataMedias"
 */
public void execute(@Param("dataMediaSourceId") Long dataMediaSourceId, Context context) throws Exception {
    DataMediaSource source = dataMediaSourceService.findById(dataMediaSourceId);
    // Query the data medias (sync targets) associated with this data source.
    List<DataMedia> relatedMedias = dataMediaService.listByDataMediaSourceId(source.getId());

    context.put("source", source);
    context.put("dataMedias", relatedMedias);
}
Example usage of com.alibaba.otter.shared.common.model.config.data.DataMedia in the alibaba/otter project.
Class SelectDataMedia, method execute:
/**
 * Runs a paginated keyword search over data medias and exposes the results,
 * the paginator and the search state to the view.
 *
 * @param pageIndex page number, handed to {@code Paginator.setPage}
 * @param searchKey user-entered keyword; the search box's placeholder text is
 *            submitted verbatim when the box is untouched and is treated as an
 *            empty search
 * @param local pass-through view parameter
 * @param context template context receiving "dataMedias", "paginator",
 *            "searchKey" and "local"
 */
public void execute(@Param("pageIndex") int pageIndex, @Param("searchKey") String searchKey, @Param("local") String local, Context context) throws Exception {
    // No unchecked operation happens here, so the former
    // @SuppressWarnings("unchecked") was removed.
    Map<String, Object> condition = new HashMap<String, Object>();
    // Treat the untouched placeholder text as "no keyword".
    if ("请输入关键字(目前支持DataMedia的ID、名字搜索)".equals(searchKey)) {
        searchKey = "";
    }

    condition.put("searchKey", searchKey);
    // Count first so the paginator can derive offset/length for this page.
    int count = dataMediaService.getCount(condition);
    Paginator paginator = new Paginator();
    paginator.setItems(count);
    paginator.setPage(pageIndex);

    condition.put("offset", paginator.getOffset());
    condition.put("length", paginator.getLength());
    List<DataMedia> dataMedias = dataMediaService.listByCondition(condition);

    context.put("dataMedias", dataMedias);
    context.put("paginator", paginator);
    context.put("searchKey", searchKey);
    context.put("local", local);
}
Example usage of com.alibaba.otter.shared.common.model.config.data.DataMedia in the alibaba/otter project.
Class DataBatchLoader, method split:
/**
 * Partitions the records of a RowBatch by the target data source each record
 * loads into, producing one RowBatch per distinct DataMediaSource.
 *
 * @param rowBatch the incoming batch to partition
 * @return one RowBatch per target data source, all sharing the input's identity
 */
private List<RowBatch> split(RowBatch rowBatch) {
    final Identity identity = rowBatch.getIdentity();
    // Computing map: lazily creates an empty RowBatch (carrying the shared
    // identity) the first time a given target source is requested.
    Map<DataMediaSource, RowBatch> batchesBySource = new MapMaker().makeComputingMap(new Function<DataMediaSource, RowBatch>() {

        public RowBatch apply(DataMediaSource input) {
            RowBatch batch = new RowBatch();
            batch.setIdentity(identity);
            return batch;
        }
    });

    for (EventData eventData : rowBatch.getDatas()) {
        // Resolve the media (table) configuration for this event.
        DataMedia media = ConfigHelper.findDataMedia(configClientService.findPipeline(identity.getPipelineId()), eventData.getTableId());
        // Bucket the event under its target data source.
        batchesBySource.get(media.getSource()).merge(eventData);
    }

    return new ArrayList<RowBatch>(batchesBySource.values());
}
Example usage of com.alibaba.otter.shared.common.model.config.data.DataMedia in the alibaba/otter project.
Class DbLoadAction, method doDdl:
/**
 * Executes DDL statements serially, one per event.
 * <p>
 * Bug fix: the previous implementation initialised the success flag to
 * {@code false} and combined it with {@code result &= stmt.execute(...)}, so
 * the flag could never become {@code true} and every successfully executed DDL
 * was recorded in {@code failedDatas}. Moreover
 * {@link java.sql.Statement#execute(String)} returns whether the first result
 * is a ResultSet — NOT whether the statement succeeded — and returns
 * {@code false} for DDL even on success. Success is therefore signalled by the
 * absence of an SQLException.
 *
 * @param context load context holding the pipeline config and the
 *            processed/failed result buckets
 * @param eventDatas DDL events to execute
 */
private void doDdl(DbLoadContext context, List<EventData> eventDatas) {
    for (final EventData data : eventDatas) {
        DataMedia dataMedia = ConfigHelper.findDataMedia(context.getPipeline(), data.getTableId());
        final DbDialect dbDialect = dbDialectFactory.getDbDialect(context.getIdentity().getPipelineId(), (DbMediaSource) dataMedia.getSource());
        Boolean skipDdlException = context.getPipeline().getParameters().getSkipDdlException();
        try {
            Boolean result = dbDialect.getJdbcTemplate().execute(new StatementCallback<Boolean>() {

                public Boolean doInStatement(Statement stmt) throws SQLException, DataAccessException {
                    if (dbDialect instanceof MysqlDialect && StringUtils.isNotEmpty(data.getDdlSchemaName())) {
                        // For mysql, switch to the schema the DDL was captured on.
                        // Backquotes keep reserved-word database names (e.g. "Order")
                        // from breaking the statement.
                        stmt.execute("use `" + data.getDdlSchemaName() + "`");
                    }

                    stmt.execute(data.getSql());
                    // Reaching here without an SQLException means the DDL ran;
                    // execute()'s boolean only describes the result type, so it
                    // is deliberately ignored.
                    return true;
                }
            });

            if (result) {
                // Record as successfully processed.
                context.getProcessedDatas().add(data);
            } else {
                context.getFailedDatas().add(data);
            }
        } catch (Throwable e) {
            if (skipDdlException) {
                // Deliberate best-effort mode: log and continue with the next DDL.
                logger.warn("skip exception for ddl : {} , caused by {}", data, ExceptionUtils.getFullStackTrace(e));
            } else {
                throw new LoadException(e);
            }
        }
    }
}
Example usage of com.alibaba.otter.shared.common.model.config.data.DataMedia in the alibaba/otter project.
Class DatabaseExtractor, method extract:
@Override
public void extract(DbBatch dbBatch) throws ExtractException {
    // Back-queries the origin database for events that do not carry a complete
    // row image, running the queries in parallel and preserving event order.
    Assert.notNull(dbBatch);
    Assert.notNull(dbBatch.getRowBatch());
    // Read the pipeline configuration once for the whole batch.
    Pipeline pipeline = getPipeline(dbBatch.getRowBatch().getIdentity().getPipelineId());
    // "media" sync consistency forces a database back-query for every record.
    boolean mustDb = pipeline.getParameters().getSyncConsistency().isMedia();
    // Row sync mode may also require a back-query (full-row data needed).
    boolean isRow = pipeline.getParameters().getSyncMode().isRow();
    // Resize the worker pool; this Extractor instance is pooled and reused.
    adjustPoolSize(pipeline.getParameters().getExtractPoolSize());
    ExecutorCompletionService completionService = new ExecutorCompletionService(executor);

    // Submit back-queries concurrently; remember the first failure, if any.
    ExtractException exception = null;
    List<DataItem> items = new ArrayList<DataItem>();
    List<Future> futures = new ArrayList<Future>();
    List<EventData> eventDatas = dbBatch.getRowBatch().getDatas();
    for (EventData eventData : eventDatas) {
        if (eventData.getEventType().isDdl()) {
            // DDL events never need a back-query.
            continue;
        }

        DataItem item = new DataItem(eventData);
        // For row mode: check whether the event already carries all row fields;
        // if fields are missing, a database query must be performed.
        boolean flag = mustDb || (eventData.getSyncConsistency() != null && eventData.getSyncConsistency().isMedia());
        // Extra case: oracle erosa sometimes yields only the primary key with no
        // changed columns, which also requires a back-query.
        if (!flag && CollectionUtils.isEmpty(eventData.getUpdatedColumns())) {
            DataMedia dataMedia = ConfigHelper.findDataMedia(pipeline, eventData.getTableId());
            if (dataMedia.getSource().getType().isOracle()) {
                flag |= true;
                // Treat these uniformly as remedy operations; the record may no
                // longer exist when erosa back-queried the database during parsing.
                eventData.setRemedy(true);
            }
        }

        if (isRow && !flag) {
            // Pre-check once here to avoid contention inside the worker threads;
            // view-based tables are re-checked again later.
            flag = checkNeedDbForRowMode(pipeline, eventData);
        }

        if (flag && (eventData.getEventType().isInsert() || eventData.getEventType().isUpdate())) {
            // Back-query needed: submit for parallel execution.
            Future future = completionService.submit(new DatabaseExtractWorker(pipeline, item), null);
            if (future.isDone()) {
                // With a CallerRuns policy the task may have run inline; check
                // immediately so a failure surfaces right away instead of after
                // all tasks have been submitted.
                try {
                    future.get();
                } catch (InterruptedException e) {
                    // Cancel everything, then bail out.
                    cancel(futures);
                    throw new ExtractException(e);
                } catch (ExecutionException e) {
                    // Cancel everything, then bail out.
                    cancel(futures);
                    throw new ExtractException(e);
                }
            }
            // Track the submitted task.
            futures.add(future);
        }

        // Append in arrival order so the result keeps the original ordering.
        items.add(item);
    }

    // Drain the completion service: one take()/get() pair per submitted task.
    int index = 0;
    while (index < futures.size()) {
        try {
            // take() itself may be interrupted.
            Future future = completionService.take();
            future.get();
        } catch (InterruptedException e) {
            exception = new ExtractException(e);
            // A single failed future aborts the whole batch.
            break;
        } catch (ExecutionException e) {
            exception = new ExtractException(e);
            // A single failed future aborts the whole batch.
            break;
        }

        index++;
    }

    if (index < futures.size()) {
        // index < size means a failure occurred: cancel the unfinished tasks;
        // completed results are kept so duplicate processing can be filtered.
        cancel(futures);
        throw exception;
    } else {
        // All tasks succeeded: build the result preserving the original order.
        for (int i = 0; i < items.size(); i++) {
            DataItem item = items.get(i);
            if (item.filter) {
                // Drop filtered data, e.g. records that no longer existed at
                // back-query time.
                eventDatas.remove(item.getEventData());
            }
        }
    }
}
Aggregations