Use of com.qlangtech.tis.sql.parser.SqlTaskNodeMeta.SqlDataFlowTopology in project tis by qlangtech.
The class OfflineDatasourceAction, method doUpdateTopology.
private void doUpdateTopology(Context context, TopologyUpdateCallback dbSaver) throws Exception {
final String content = IOUtils.toString(this.getRequest().getInputStream(), getEncode());
JSONTokener tokener = new JSONTokener(content);
JSONObject topology = new JSONObject(tokener);
final String topologyName = topology.getString("topologyName");
if (StringUtils.isEmpty(topologyName)) {
this.addErrorMessage(context, "Please enter a name for the data flow");
return;
}
File parent = new File(SqlTaskNode.parent, topologyName);
FileUtils.forceMkdir(parent);
JSONArray nodes = topology.getJSONArray("nodes");
// This information is not needed for now; it is already contained in 'nodes'
JSONArray edges = topology.getJSONArray("edges");
// working variables reused across the node loop below
JSONObject o = null;
JSONObject nodeMeta = null;
JSONObject nestNodeMeta = null;
JSONArray joinDependencies = null;
JSONObject dep = null;
NodeType nodetype = null;
DependencyNode dnode = null;
SqlTaskNodeMeta pnode = null;
Position pos = null;
final SqlDataFlowTopology topologyPojo = new SqlDataFlowTopology();
SqlTaskNodeMeta.TopologyProfile profile = new SqlTaskNodeMeta.TopologyProfile();
profile.setName(topologyName);
profile.setTimestamp(System.currentTimeMillis());
// profile.setDataflowId(this.getWorkflowId(topologyName));
topologyPojo.setProfile(profile);
int x, y;
int tableid;
Tab tab;
for (int i = 0; i < nodes.length(); i++) {
o = nodes.getJSONObject(i);
x = o.getInt("x");
if (x < 0) {
// Skip nodes positioned outside the canvas boundary
continue;
}
y = o.getInt("y");
pos = new Position();
pos.setX(x);
pos.setY(y);
nodeMeta = o.getJSONObject("nodeMeta");
nestNodeMeta = nodeMeta.getJSONObject("nodeMeta");
nodetype = NodeType.parse(nestNodeMeta.getString("type"));
if (nodetype == NodeType.DUMP) {
dnode = new DependencyNode();
dnode.setExtraSql(SqlTaskNodeMeta.processBigContent(nodeMeta.getString("sqlcontent")));
dnode.setId(o.getString("id"));
tableid = nodeMeta.getInt("tabid");
Map<Integer, com.qlangtech.tis.workflow.pojo.DatasourceDb> dbMap = Maps.newHashMap();
tab = getDatabase(this.offlineDAOFacade, dbMap, tableid);
dnode.setDbName(tab.db.getName());
dnode.setName(tab.tab.getName());
dnode.setTabid(String.valueOf(tableid));
dnode.setDbid(String.valueOf(nodeMeta.get("dbid")));
dnode.setPosition(pos);
dnode.setType(NodeType.DUMP.getType());
topologyPojo.addDumpTab(dnode);
} else if (nodetype == NodeType.JOINER_SQL) {
pnode = new SqlTaskNodeMeta();
pnode.setId(o.getString("id"));
pnode.setPosition(pos);
pnode.setSql(SqlTaskNodeMeta.processBigContent(nodeMeta.getString("sql")));
pnode.setExportName(nodeMeta.getString("exportName"));
// pnode.setId(String.valueOf(nodeMeta.get("id")));
// pnode.setId(o.getString("id"));
pnode.setType(NodeType.JOINER_SQL.getType());
joinDependencies = nodeMeta.getJSONArray("dependencies");
for (int k = 0; k < joinDependencies.length(); k++) {
dep = joinDependencies.getJSONObject(k);
dnode = new DependencyNode();
dnode.setId(dep.getString("value"));
dnode.setName(dep.getString("label"));
dnode.setType(NodeType.DUMP.getType());
pnode.addDependency(dnode);
}
topologyPojo.addNodeMeta(pnode);
} else {
throw new IllegalStateException("nodetype:" + nodetype + " is illegal");
}
}
// Verify that there is only one final output node
List<SqlTaskNodeMeta> finalNodes = topologyPojo.getFinalNodes();
if (finalNodes.size() > 1) {
this.addErrorMessage(context, "There must be no more than one final output node (" + finalNodes.stream().map((r) -> r.getExportName()).collect(Collectors.joining(",")) + ")");
return;
}
if (finalNodes.size() < 1) {
// This case is a plain single-table import, which needs no Spark or Hive support
// this.addErrorMessage(context, "Please define a data processing node");
// return;
}
Optional<ERRules> erRule = ERRules.getErRule(topologyPojo.getName());
this.setBizResult(context, new ERRulesStatus(erRule));
dbSaver.execute(topologyName, topologyPojo);
// Persist the topology along with its timestamp
SqlTaskNodeMeta.persistence(topologyPojo, parent);
// Keep the raw JSON as a backup
FileUtils.write(new File(parent, topologyName + "_content.json"), content, getEncode(), false);
this.addActionMessage(context, "'" + topologyName + "' saved successfully");
}
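For reference, a minimal request payload that this parser accepts could look as follows; the field names are taken from the parsing code above, while the ids, coordinates, and type token are purely illustrative:
// {
//   "topologyName": "demo_flow",
//   "edges": [],                                  // read but currently ignored
//   "nodes": [{
//     "id": "n1", "x": 100, "y": 80,              // nodes with x < 0 are skipped
//     "nodeMeta": {
//       "sqlcontent": "...", "tabid": 1, "dbid": 1,
//       "nodeMeta": { "type": "..." }             // resolved via NodeType.parse(...)
//     }
//   }]
// }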
Use of com.qlangtech.tis.sql.parser.SqlTaskNodeMeta.SqlDataFlowTopology in project tis by qlangtech.
The class OfflineDatasourceAction, method doGetWorkflowTopology.
public void doGetWorkflowTopology(Context context) throws Exception {
final String topology = this.getString("topology");
if (StringUtils.isEmpty(topology)) {
throw new IllegalStateException("please set param topology");
}
SqlDataFlowTopology wfTopology = SqlTaskNodeMeta.getSqlDataFlowTopology(topology);
this.setBizResult(context, wfTopology);
}
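A caller-side sketch of the same loader; the topology name is hypothetical, and the getters are the ones already used elsewhere on this page:
SqlDataFlowTopology topology = SqlTaskNodeMeta.getSqlDataFlowTopology("demo_flow");
for (DependencyNode dump : topology.getDumpNodes()) {
    // each dump node names a source table to be imported
    System.out.println(dump.getDbName() + "." + dump.getName());
}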
Use of com.qlangtech.tis.sql.parser.SqlTaskNodeMeta.SqlDataFlowTopology in project tis by qlangtech.
The class OfflineDatasourceAction, method doReflectTableCols.
/**
 * Reflect the columns referenced in a SQL statement.
 */
@Func(value = PermissionConstant.PERMISSION_DATASOURCE_EDIT, sideEffect = false)
public void doReflectTableCols(Context context) throws Exception {
String topology = this.getString("topology");
if (StringUtils.isEmpty(topology)) {
throw new IllegalArgumentException("param 'topology' can not be empty");
}
SqlDataFlowTopology wfTopology = SqlTaskNodeMeta.getSqlDataFlowTopology(topology);
Map<String /* node id */, DependencyNode> dumpNodes = wfTopology.getDumpNodes().stream().collect(Collectors.toMap((d) -> d.getId(), (d) -> d));
com.alibaba.fastjson.JSONArray sqlAry = this.parseJsonArrayPost();
com.alibaba.fastjson.JSONObject j = null;
// String sql = null;
// List<RowMetaData> rowMetaData = null;
List<SqlCols> colsMeta = Lists.newArrayList();
SqlCols sqlCols = null;
DependencyNode dumpNode = null;
TISTable tabCfg = null;
// List<ColumnMetaData> reflectCols = null;
for (int i = 0; i < sqlAry.size(); i++) {
j = sqlAry.getJSONObject(i);
// sql = j.getString("sql");
sqlCols = new SqlCols();
sqlCols.setKey(j.getString("key"));
String dumpNodeId = sqlCols.getKey();
dumpNode = dumpNodes.get(dumpNodeId);
if (dumpNode == null) {
throw new IllegalStateException("key:" + dumpNodeId + " can not find relevant dump node in topplogy '" + topology + "'");
}
// tabCfg = GitUtils.$().getTableConfig(dumpNode.getDbName(), dumpNode.getName());
DataSourceFactoryPluginStore dbPlugin = TIS.getDataBasePluginStore(new PostedDSProp(dumpNode.getDbName()));
TISTable tisTable = dbPlugin.loadTableMeta(dumpNode.getName());
if (CollectionUtils.isEmpty(tisTable.getReflectCols())) {
throw new IllegalStateException("db:" + dumpNode.getDbName() + ",table:" + dumpNode.getName() + " relevant table col reflect cols can not be empty");
}
sqlCols.setCols(tisTable.getReflectCols());
colsMeta.add(sqlCols);
}
this.setBizResult(context, colsMeta);
}
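The body consumed by parseJsonArrayPost() here is an array of objects, each carrying the id of a dump node under "key". A minimal sketch of building such a request body with fastjson (the id value is hypothetical):
com.alibaba.fastjson.JSONArray post = new com.alibaba.fastjson.JSONArray();
com.alibaba.fastjson.JSONObject entry = new com.alibaba.fastjson.JSONObject();
// must match the id of a dump node in the topology, otherwise the action throws
entry.put("key", "n1");
post.add(entry);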
Use of com.qlangtech.tis.sql.parser.SqlTaskNodeMeta.SqlDataFlowTopology in project tis by qlangtech.
The class TestSqlTaskNodeMeta, method testGetRewriteSql.
public void testGetRewriteSql() throws Exception {
SqlTaskNodeMeta taskNodeMeta = new SqlTaskNodeMeta();
SqlDataFlowTopology topology = SqlTaskNodeMeta.getSqlDataFlowTopology(TestSupplyGoodsParse.topologyName);
assertNotNull(topology);
SqlTaskNodeMeta finalNode = topology.getFinalNode();
assertNotNull(finalNode);
taskNodeMeta.setSql(finalNode.getSql());
Map<IDumpTable, ITabPartition> dumpPartition = Maps.newHashMap();
String pt = "20200703113848";
dumpPartition.put(EntityName.parse("scmdb.warehouse_goods"), () -> pt);
dumpPartition.put(EntityName.parse("tis.stock_info_collapse"), () -> pt);
dumpPartition.put(EntityName.parse("scmdb.supplier_goods"), () -> pt);
dumpPartition.put(EntityName.parse("tis.warehouse_collapse"), () -> pt);
dumpPartition.put(EntityName.parse("tis.supplier_collapse"), () -> pt);
dumpPartition.put(EntityName.parse("scmdb.goods"), () -> pt);
dumpPartition.put(EntityName.parse("scmdb.stock_info"), () -> pt);
dumpPartition.put(EntityName.parse("scmdb.category"), () -> pt);
dumpPartition.put(EntityName.parse("scmdb.goods_sync_shop"), () -> pt);
ITemplateContext tplContext = EasyMock.createMock("templateContext", ITemplateContext.class);
IJoinTaskContext joinTaskContext = EasyMock.createMock("joinTaskContext", IJoinTaskContext.class);
EasyMock.expect(tplContext.getExecContext()).andReturn(joinTaskContext);
EasyMock.expect(joinTaskContext.getExecutePhaseRange()).andReturn(ExecutePhaseRange.fullRange()).times(2);
EasyMock.expect(joinTaskContext.getIndexShardCount()).andReturn(1).times(1);
Optional<ERRules> erRule = ERRules.getErRule(TestSupplyGoodsParse.topologyName);
assertTrue(erRule.isPresent());
EasyMock.replay(tplContext, joinTaskContext);
ISqlTask.RewriteSql rewriteSql = taskNodeMeta.getRewriteSql("supply_goods", new TabPartitions(dumpPartition), erRule.get(), tplContext, true);
assertNotNull(rewriteSql);
assertEquals(TestSqlRewriter.getScriptContent("supply_goods_rewrite_result.txt"), rewriteSql.sqlContent);
System.out.println(rewriteSql.sqlContent);
EasyMock.verify(tplContext, joinTaskContext);
}
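The () -> pt lambdas above suggest that ITabPartition is a functional interface whose single method returns the partition value; under that assumption, an equivalent standalone form is:
// assumption: ITabPartition's single abstract method yields the partition timestamp
ITabPartition fixedPt = () -> "20200703113848";
dumpPartition.put(EntityName.parse("scmdb.goods"), fixedPt);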
Use of com.qlangtech.tis.sql.parser.SqlTaskNodeMeta.SqlDataFlowTopology in project tis by qlangtech.
The class SqlTaskBaseTestCase, method parseTopologySqlTaskNodes.
protected List<SqlTaskNode> parseTopologySqlTaskNodes(String topologyName) throws Exception {
SqlDataFlowTopology topology = SqlTaskNodeMeta.getSqlDataFlowTopology(topologyName);
// SqlTaskNode.parseTaskNodes(topology);
List<SqlTaskNode> taskNodes = topology.parseTaskNodes();
return taskNodes;
}
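Typical use from a concrete test case; the topology name is illustrative:
List<SqlTaskNode> taskNodes = parseTopologySqlTaskNodes("demo_flow");
assertFalse(taskNodes.isEmpty());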