Usage of com.qlangtech.tis.offline.FlatTableBuilder in the project "plugins" by qlangtech:
class TestHiveFlatTableBuilder, method testCreate.
/**
 * Verifies that a {@link FlatTableBuilder} plugin is registered and that a task
 * started through it receives a usable JDBC connection: runs
 * {@code desc totalpay_summary} and asserts the query executed successfully.
 */
public void testCreate() {
    FlatTableBuilder flatTableBuilder = flatTableBuilderStore.getPlugin();
    assertNotNull(flatTableBuilder);
    // Flag set inside the task callback so we can assert after startTask returns.
    AtomicBoolean success = new AtomicBoolean(false);
    flatTableBuilder.startTask((r) -> {
        Connection con = r.getObj();
        assertNotNull(con);
        // try-with-resources closes ResultSet then Statement even on failure.
        // (The previous manual finally block would NPE on stmt/result being null
        // if createStatement() threw, masking the original exception.)
        try (Statement stmt = con.createStatement();
             ResultSet result = stmt.executeQuery("desc totalpay_summary")) {
            while (result.next()) {
                System.out.println("cols:" + result.getString(1));
            }
            success.set(true);
        } catch (SQLException e) {
            // Preserve the cause; don't swallow it.
            throw new RuntimeException(e);
        }
    });
    assertTrue("must success", success.get());
}
Usage of com.qlangtech.tis.offline.FlatTableBuilder in the project "tis" by qlangtech:
class DataFlowAppSource, method getProcessDataResults.
// @Override
// public List<PrimaryTableMeta> getPrimaryTabs() {
// return getErRules().getPrimaryTabs();
// }
@Override
public ExecuteResult getProcessDataResults(IExecChainContext execChainContext, ISingleTableDumpFactory singleTableDumpFactory, IDataProcessFeedback dataProcessFeedback, ITaskPhaseInfo taskPhaseInfo) throws Exception {
    // Load the workflow's data-flow topology (dump nodes + join/SQL nodes).
    SqlTaskNodeMeta.SqlDataFlowTopology topology = SqlTaskNodeMeta.getSqlDataFlowTopology(dataflowName);
    // Keyed by task id.
    Map<String, TISReactor.TaskAndMilestone> taskMap = Maps.newHashMap();
    // Tables the workflow dump depends on.
    Collection<DependencyNode> tables = topology.getDumpNodes();
    // StringBuilder: local-only buffer, no need for StringBuffer's synchronization.
    StringBuilder dumps = new StringBuilder("dependency table:\n");
    dumps.append("\t\t=======================\n");
    for (DependencyNode t : tables) {
        dumps.append("\t\t").append(t.getDbName()).append(".").append(t.getName()).append("[").append(t.getTabid()).append("] \n");
    }
    dumps.append("\t\t=======================\n");
    logger.info(dumps.toString());
    // Pre-initialize dump status for every table before any task runs.
    DumpPhaseStatus dumpPhaseStatus = taskPhaseInfo.getPhaseStatus(execChainContext, FullbuildPhase.FullDump);
    DataflowTask tabDump = null;
    for (DependencyNode dump : topology.getDumpNodes()) {
        tabDump = singleTableDumpFactory.createSingleTableDump(dump, false, /* isHasValidTableDump */
            "tableDump.getPt()", execChainContext.getZkClient(), execChainContext, dumpPhaseStatus);
        taskMap.put(dump.getId(), new TISReactor.TaskAndMilestone(tabDump));
    }
    if (topology.isSingleTableModel()) {
        // Single-table workflow: only dump tasks, no join/wide-table phase.
        return executeDAG(execChainContext, topology, dataProcessFeedback, taskMap);
    } else {
        // Single-element array so the lambda below can write the DAG result.
        final ExecuteResult[] dagResult = new ExecuteResult[1];
        TemplateContext tplContext = new TemplateContext(execChainContext);
        JoinPhaseStatus joinPhaseStatus = taskPhaseInfo.getPhaseStatus(execChainContext, FullbuildPhase.JOIN);
        IPluginStore<FlatTableBuilder> pluginStore = TIS.getPluginStore(FlatTableBuilder.class);
        // Fetch once; requireNonNull both validates and returns the plugin.
        final IFlatTableBuilder flatTableBuilder = Objects.requireNonNull(pluginStore.getPlugin(), "flatTableBuilder can not be null");
        final SqlTaskNodeMeta fNode = topology.getFinalNode();
        flatTableBuilder.startTask((context) -> {
            DataflowTask process = null;
            for (SqlTaskNodeMeta pnode : topology.getNodeMetas()) {
                // Build a wide-table (join) task node; the node matching fNode
                // is flagged as the final output node of the topology.
                process = flatTableBuilder.createTask(pnode, StringUtils.equals(fNode.getId(), pnode.getId()), tplContext, context, joinPhaseStatus.getTaskStatus(pnode.getExportName()));
                taskMap.put(pnode.getId(), new TISReactor.TaskAndMilestone(process));
            }
            dagResult[0] = executeDAG(execChainContext, topology, dataProcessFeedback, taskMap);
        });
        return dagResult[0];
    }
}
Aggregations