Use of com.qlangtech.tis.sql.parser.SqlTaskNodeMeta in project tis by qlangtech.
The class OfflineDatasourceAction, method doUpdateTopology.
private void doUpdateTopology(Context context, TopologyUpdateCallback dbSaver) throws Exception {
    final String content = IOUtils.toString(this.getRequest().getInputStream(), getEncode());
    JSONTokener tokener = new JSONTokener(content);
    JSONObject topology = new JSONObject(tokener);
    final String topologyName = topology.getString("topologyName");
    if (StringUtils.isEmpty(topologyName)) {
        this.addErrorMessage(context, "Please fill in the dataflow name");
        return;
    }
    File parent = new File(SqlTaskNode.parent, topologyName);
    FileUtils.forceMkdir(parent);
    JSONArray nodes = topology.getJSONArray("nodes");
    // This information is not needed for now; it is already contained in 'nodes'
    JSONArray edges = topology.getJSONArray("edges");
    JSONObject o = null;
    JSONObject nodeMeta = null;
    JSONObject nestNodeMeta = null;
    JSONArray joinDependencies = null;
    JSONObject dep = null;
    NodeType nodetype = null;
    DependencyNode dnode = null;
    SqlTaskNodeMeta pnode = null;
    Position pos = null;
    final SqlDataFlowTopology topologyPojo = new SqlDataFlowTopology();
    SqlTaskNodeMeta.TopologyProfile profile = new SqlTaskNodeMeta.TopologyProfile();
    profile.setName(topologyName);
    profile.setTimestamp(System.currentTimeMillis());
    // profile.setDataflowId(this.getWorkflowId(topologyName));
    topologyPojo.setProfile(profile);
    int x, y;
    int tableid;
    Tab tab;
    for (int i = 0; i < nodes.length(); i++) {
        o = nodes.getJSONObject(i);
        x = o.getInt("x");
        if (x < 0) {
            // Shapes placed outside the canvas boundary are skipped
            continue;
        }
        y = o.getInt("y");
        pos = new Position();
        pos.setX(x);
        pos.setY(y);
        nodeMeta = o.getJSONObject("nodeMeta");
        nestNodeMeta = nodeMeta.getJSONObject("nodeMeta");
        nodetype = NodeType.parse(nestNodeMeta.getString("type"));
        if (nodetype == NodeType.DUMP) {
            dnode = new DependencyNode();
            dnode.setExtraSql(SqlTaskNodeMeta.processBigContent(nodeMeta.getString("sqlcontent")));
            dnode.setId(o.getString("id"));
            tableid = nodeMeta.getInt("tabid");
            Map<Integer, com.qlangtech.tis.workflow.pojo.DatasourceDb> dbMap = Maps.newHashMap();
            tab = getDatabase(this.offlineDAOFacade, dbMap, tableid);
            dnode.setDbName(tab.db.getName());
            dnode.setName(tab.tab.getName());
            dnode.setTabid(String.valueOf(tableid));
            dnode.setDbid(String.valueOf(nodeMeta.get("dbid")));
            dnode.setPosition(pos);
            dnode.setType(NodeType.DUMP.getType());
            topologyPojo.addDumpTab(dnode);
        } else if (nodetype == NodeType.JOINER_SQL) {
            pnode = new SqlTaskNodeMeta();
            pnode.setId(o.getString("id"));
            pnode.setPosition(pos);
            pnode.setSql(SqlTaskNodeMeta.processBigContent(nodeMeta.getString("sql")));
            pnode.setExportName(nodeMeta.getString("exportName"));
            // pnode.setId(String.valueOf(nodeMeta.get("id")));
            // pnode.setId(o.getString("id"));
            pnode.setType(NodeType.JOINER_SQL.getType());
            joinDependencies = nodeMeta.getJSONArray("dependencies");
            for (int k = 0; k < joinDependencies.length(); k++) {
                dep = joinDependencies.getJSONObject(k);
                dnode = new DependencyNode();
                dnode.setId(dep.getString("value"));
                dnode.setName(dep.getString("label"));
                dnode.setType(NodeType.DUMP.getType());
                pnode.addDependency(dnode);
            }
            topologyPojo.addNodeMeta(pnode);
        } else {
            throw new IllegalStateException("nodetype:" + nodetype + " is illegal");
        }
    }
    // Verify that there is no more than one final output node
    List<SqlTaskNodeMeta> finalNodes = topologyPojo.getFinalNodes();
    if (finalNodes.size() > 1) {
        this.addErrorMessage(context, "There must not be more than one final output node (" + finalNodes.stream().map((r) -> r.getExportName()).collect(Collectors.joining(",")) + ")");
        return;
    }
    if (finalNodes.size() < 1) {
        // This case is a single-table import, which needs no Spark or Hive support
        // this.addErrorMessage(context, "Please define a data processing node");
        // return;
    }
    Optional<ERRules> erRule = ERRules.getErRule(topologyPojo.getName());
    this.setBizResult(context, new ERRulesStatus(erRule));
    dbSaver.execute(topologyName, topologyPojo);
    // Save a timestamp
    SqlTaskNodeMeta.persistence(topologyPojo, parent);
    // For backup purposes
    FileUtils.write(new File(parent, topologyName + "_content.json"), content, getEncode(), false);
    this.addActionMessage(context, "'" + topologyName + "' saved successfully");
}
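The shape of the request payload can be reconstructed from the fields the method reads. Below is a minimal, hypothetical example rendered as a Java text block; the node ids, table names, and the literal "type" strings accepted by NodeType.parse are assumptions for illustration only, not values confirmed by the source.

// Hypothetical payload sketch matching the fields doUpdateTopology reads.
// "dump" and "joinerSql" are placeholder type strings; the actual literals
// accepted by NodeType.parse(...) are defined elsewhere in TIS.
String examplePayload = """
    {
      "topologyName": "order_flow",
      "edges": [],
      "nodes": [
        { "id": "n1", "x": 120, "y": 80,
          "nodeMeta": { "sqlcontent": "", "tabid": 1, "dbid": 2,
                        "nodeMeta": { "type": "dump" } } },
        { "id": "n2", "x": 360, "y": 80,
          "nodeMeta": { "sql": "SELECT * FROM orders", "exportName": "order_flow",
                        "dependencies": [ { "value": "n1", "label": "orders" } ],
                        "nodeMeta": { "type": "joinerSql" } } }
      ]
    }
    """;

Nodes with a negative x coordinate are dropped by the method, so a payload like this produces exactly one dump node and one joiner node in the persisted topology.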
Use of com.qlangtech.tis.sql.parser.SqlTaskNodeMeta in project tis by qlangtech.
The class CollectionAction, method createTopology.
private SqlTaskNodeMeta.SqlDataFlowTopology createTopology(String topologyName, OfflineManager.ProcessedTable dsTable, TargetColumnMeta targetColMetas) throws Exception {
    SqlTaskNodeMeta.SqlDataFlowTopology topology = new SqlTaskNodeMeta.SqlDataFlowTopology();
    SqlTaskNodeMeta.TopologyProfile profile = new SqlTaskNodeMeta.TopologyProfile();
    profile.setName(topologyName);
    profile.setTimestamp(System.currentTimeMillis());
    topology.setProfile(profile);
    DependencyNode dNode = createDumpNode(dsTable);
    topology.addDumpTab(dNode);
    SqlTaskNodeMeta joinNodeMeta = new SqlTaskNodeMeta();
    joinNodeMeta.setId(String.valueOf(UUID.randomUUID()));
    joinNodeMeta.addDependency(dNode);
    joinNodeMeta.setExportName(topologyName);
    joinNodeMeta.setType(NodeType.JOINER_SQL.getType());
    joinNodeMeta.setPosition(DEFAULT_SINGLE_JOINER_POSITION);
    joinNodeMeta.setSql(ColumnMetaData.buildExtractSQL(dsTable.getName(), true, targetColMetas.targetColMetas).toString());
    topology.addNodeMeta(joinNodeMeta);
    ERRules.createErRule(topologyName, createDumpNode(dsTable), targetColMetas.getPKMeta());
    return topology;
}
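The returned topology always has the same shape: one dump node feeding a single JOINER_SQL node whose export name equals the topology name, which makes that SQL node the final node. A minimal usage sketch, assuming dsTable and targetColMetas are in scope as in CollectionAction and using a hypothetical topology name:

// Sketch only: "user_profile" is an invented name for illustration.
SqlTaskNodeMeta.SqlDataFlowTopology topology =
        createTopology("user_profile", dsTable, targetColMetas);
// exactly one dump node feeds the single joiner node
assert topology.getDumpNodes().size() == 1;
// the joiner exports under the topology name, so it is the final node
assert "user_profile".equals(topology.getFinalNode().getExportName());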
Use of com.qlangtech.tis.sql.parser.SqlTaskNodeMeta in project tis by qlangtech.
The class DataFlowAppSource, method getProcessDataResults.
// @Override
// public List<PrimaryTableMeta> getPrimaryTabs() {
// return getErRules().getPrimaryTabs();
// }
@Override
public ExecuteResult getProcessDataResults(IExecChainContext execChainContext, ISingleTableDumpFactory singleTableDumpFactory, IDataProcessFeedback dataProcessFeedback, ITaskPhaseInfo taskPhaseInfo) throws Exception {
    // The data-flow structure that drives workflow execution
    SqlTaskNodeMeta.SqlDataFlowTopology topology = SqlTaskNodeMeta.getSqlDataFlowTopology(dataflowName);
    // taskId -> task
    Map<String, TISReactor.TaskAndMilestone> taskMap = Maps.newHashMap();
    // Obtain the tables that the workflow dump depends on
    Collection<DependencyNode> tables = topology.getDumpNodes();
    StringBuffer dumps = new StringBuffer("dependency table:\n");
    dumps.append("\t\t=======================\n");
    for (DependencyNode t : tables) {
        dumps.append("\t\t").append(t.getDbName()).append(".").append(t.getName()).append("[").append(t.getTabid()).append("]\n");
    }
    dumps.append("\t\t=======================\n");
    logger.info(dumps.toString());
    // Initialize the status of all the tables first
    DumpPhaseStatus dumpPhaseStatus = taskPhaseInfo.getPhaseStatus(execChainContext, FullbuildPhase.FullDump);
    DataflowTask tabDump = null;
    for (DependencyNode dump : topology.getDumpNodes()) {
        tabDump = singleTableDumpFactory.createSingleTableDump(dump, false, /* isHasValidTableDump */
                "tableDump.getPt()", execChainContext.getZkClient(), execChainContext, dumpPhaseStatus);
        taskMap.put(dump.getId(), new TISReactor.TaskAndMilestone(tabDump));
    }
    if (topology.isSingleTableModel()) {
        return executeDAG(execChainContext, topology, dataProcessFeedback, taskMap);
    } else {
        final ExecuteResult[] failedResult = new ExecuteResult[1];
        TemplateContext tplContext = new TemplateContext(execChainContext);
        JoinPhaseStatus joinPhaseStatus = taskPhaseInfo.getPhaseStatus(execChainContext, FullbuildPhase.JOIN);
        IPluginStore<FlatTableBuilder> pluginStore = TIS.getPluginStore(FlatTableBuilder.class);
        Objects.requireNonNull(pluginStore.getPlugin(), "flatTableBuilder can not be null");
        // chainContext.setFlatTableBuilderPlugin(pluginStore.getPlugin());
        // execChainContext.getFlatTableBuilder();
        final IFlatTableBuilder flatTableBuilder = pluginStore.getPlugin();
        final SqlTaskNodeMeta fNode = topology.getFinalNode();
        flatTableBuilder.startTask((context) -> {
            DataflowTask process = null;
            for (SqlTaskNodeMeta pnode : topology.getNodeMetas()) {
                // Build the wide-table (flat table) build task node
                process = flatTableBuilder.createTask(pnode, StringUtils.equals(fNode.getId(), pnode.getId()), tplContext, context, joinPhaseStatus.getTaskStatus(pnode.getExportName()));
                taskMap.put(pnode.getId(), new TISReactor.TaskAndMilestone(process));
            }
            failedResult[0] = executeDAG(execChainContext, topology, dataProcessFeedback, taskMap);
        });
        return failedResult[0];
    }
}
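The method registers every node of the topology in taskMap keyed by its node id, so the DAG executor can wire dump tasks to the SQL join tasks that depend on them. A minimal sketch of the same topology inspection, using only the accessors shown above and assuming a persisted dataflow with the hypothetical name "order_flow":

// Sketch only: the dataflow name is invented for illustration.
SqlTaskNodeMeta.SqlDataFlowTopology topology =
        SqlTaskNodeMeta.getSqlDataFlowTopology("order_flow");
for (DependencyNode dump : topology.getDumpNodes()) {
    // each dump node becomes one single-table dump task
    System.out.println("dump task: " + dump.getDbName() + "." + dump.getName());
}
if (!topology.isSingleTableModel()) {
    // the final SQL node produces the wide (flat) table
    System.out.println("final table: " + topology.getFinalNode().getExportName());
}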
Use of com.qlangtech.tis.sql.parser.SqlTaskNodeMeta in project tis by qlangtech.
The class DataFlowAppSource, method getTargetEntity.
@Override
public EntityName getTargetEntity() {
    try {
        SqlTaskNodeMeta.SqlDataFlowTopology workflowDetail = SqlTaskNodeMeta.getSqlDataFlowTopology(dataflowName);
        Objects.requireNonNull(workflowDetail, "workflowDetail can not be null");
        EntityName targetEntity = null;
        if (workflowDetail.isSingleTableModel()) {
            DependencyNode dumpNode = workflowDetail.getDumpNodes().get(0);
            targetEntity = dumpNode.parseEntityName();
        } else {
            SqlTaskNodeMeta finalN = workflowDetail.getFinalNode();
            targetEntity = EntityName.parse(finalN.getExportName());
        }
        return targetEntity;
    } catch (Exception e) {
        throw new RuntimeException(dataflowName, e);
    }
}
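The resolution rule, condensed: a single-table dataflow writes to the dump source's own entity, while a join dataflow writes to the final SQL node's export name. The same logic as a one-expression sketch, assuming a topology is already in hand and using a hypothetical dataflow name:

// Sketch only: "order_flow" is an invented name for illustration.
SqlTaskNodeMeta.SqlDataFlowTopology detail =
        SqlTaskNodeMeta.getSqlDataFlowTopology("order_flow");
EntityName target = detail.isSingleTableModel()
        ? detail.getDumpNodes().get(0).parseEntityName()          // single-table import
        : EntityName.parse(detail.getFinalNode().getExportName()); // join result table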