Use of com.qlangtech.tis.exec.ExecuteResult in project tis by qlangtech.
The class DataFlowAppSource, method getProcessDataResults:
// @Override
// public List<PrimaryTableMeta> getPrimaryTabs() {
//     return getErRules().getPrimaryTabs();
// }
@Override
public ExecuteResult getProcessDataResults(IExecChainContext execChainContext, ISingleTableDumpFactory singleTableDumpFactory, IDataProcessFeedback dataProcessFeedback, ITaskPhaseInfo taskPhaseInfo) throws Exception {
    // the data structure of the workflow to be executed
    SqlTaskNodeMeta.SqlDataFlowTopology topology = SqlTaskNodeMeta.getSqlDataFlowTopology(dataflowName);
    // keyed by task id
    Map<String, TISReactor.TaskAndMilestone> taskMap = Maps.newHashMap();
    // get the tables that the workflow dump depends on
    Collection<DependencyNode> tables = topology.getDumpNodes();
    StringBuffer dumps = new StringBuffer("dependency table:\n");
    dumps.append("\t\t=======================\n");
    for (DependencyNode t : tables) {
        dumps.append("\t\t").append(t.getDbName()).append(".").append(t.getName()).append("[").append(t.getTabid()).append(",").append("] \n");
    }
    dumps.append("\t\t=======================\n");
    logger.info(dumps.toString());
    // initialize the status of all the tables first
    DumpPhaseStatus dumpPhaseStatus = taskPhaseInfo.getPhaseStatus(execChainContext, FullbuildPhase.FullDump);
    DataflowTask tabDump = null;
    for (DependencyNode dump : topology.getDumpNodes()) {
        tabDump = singleTableDumpFactory.createSingleTableDump(dump, false /* isHasValidTableDump */,
                "tableDump.getPt()", execChainContext.getZkClient(), execChainContext, dumpPhaseStatus);
        taskMap.put(dump.getId(), new TISReactor.TaskAndMilestone(tabDump));
    }
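    // A single-table workflow only needs the dump tasks; for a multi-table workflow,
    // join/flat-table tasks are registered below before the DAG is executed.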
    if (topology.isSingleTableModel()) {
        return executeDAG(execChainContext, topology, dataProcessFeedback, taskMap);
    } else {
        final ExecuteResult[] faildResult = new ExecuteResult[1];
        TemplateContext tplContext = new TemplateContext(execChainContext);
        JoinPhaseStatus joinPhaseStatus = taskPhaseInfo.getPhaseStatus(execChainContext, FullbuildPhase.JOIN);
        IPluginStore<FlatTableBuilder> pluginStore = TIS.getPluginStore(FlatTableBuilder.class);
        Objects.requireNonNull(pluginStore.getPlugin(), "flatTableBuilder can not be null");
        // chainContext.setFlatTableBuilderPlugin(pluginStore.getPlugin());
        // execChainContext.getFlatTableBuilder();
        final IFlatTableBuilder flatTableBuilder = pluginStore.getPlugin();
        final SqlTaskNodeMeta fNode = topology.getFinalNode();
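        // register a join task for every SQL node inside the builder's task context;
        // the boolean argument below marks whether the node is the topology's final node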
        flatTableBuilder.startTask((context) -> {
            DataflowTask process = null;
            for (SqlTaskNodeMeta pnode : topology.getNodeMetas()) {
                // build the task node that constructs the wide (flat) table
                process = flatTableBuilder.createTask(pnode, StringUtils.equals(fNode.getId(), pnode.getId()), tplContext, context, joinPhaseStatus.getTaskStatus(pnode.getExportName()));
                taskMap.put(pnode.getId(), new TISReactor.TaskAndMilestone(process));
            }
            faildResult[0] = executeDAG(execChainContext, topology, dataProcessFeedback, taskMap);
        });
        return faildResult[0];
    }
}
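
For orientation, the sketch below restates the pattern above in a self-contained form: one task per dump node, an extra join task for the multi-table case, all keyed by node id in a map that is then executed in dependency order. Every name in it (Task, executeDag, the table names) is a hypothetical stand-in, not the real TISReactor.TaskAndMilestone or executeDAG API.

import java.util.*;

// Self-contained illustration of the task-map / DAG pattern above (NOT the TIS API).
public class DagSketch {

    // stand-in for TISReactor.TaskAndMilestone: a unit of work plus its dependencies
    record Task(String id, List<String> dependsOn, Runnable body) {}

    // naive DAG execution: repeatedly run every task whose dependencies are already done
    // (assumes the graph is acyclic, as a workflow topology is)
    static void executeDag(Map<String, Task> taskMap) {
        Set<String> done = new HashSet<>();
        while (done.size() < taskMap.size()) {
            for (Task t : taskMap.values()) {
                if (!done.contains(t.id()) && done.containsAll(t.dependsOn())) {
                    t.body().run();
                    done.add(t.id());
                }
            }
        }
    }

    public static void main(String[] args) {
        Map<String, Task> taskMap = new HashMap<>();
        // "dump" tasks: one per source table, no dependencies
        for (String table : List.of("order", "order_detail")) {
            taskMap.put(table, new Task(table, List.of(), () -> System.out.println("dump " + table)));
        }
        // a "join" task that builds the wide table once both dumps have finished
        taskMap.put("order_join", new Task("order_join", List.of("order", "order_detail"),
                () -> System.out.println("build flat table order_join")));
        executeDag(taskMap);
    }
}

In the real method the ordering is delegated to TISReactor via executeDAG; the sketch only mirrors the shape of taskMap and the dump-then-join dependency.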