Usage of com.qlangtech.tis.fullbuild.phasestatus.impl.JoinPhaseStatus in the TIS project by QLangTech.
From the class FullBuildStatCollectorServer, method convertPP.
/**
 * Converts an in-memory {@link PhaseStatusCollection} describing a full-build task
 * into its gRPC wire representation {@link PPhaseStatusCollection}.
 * <p>
 * Each of the four phases (table dump, join, index build, index back-flow) is copied
 * field by field when present; a phase that is {@code null} on the source object is
 * simply omitted from the resulting message.
 *
 * @param phaseStatusSet the local phase status collection to convert; must not be null
 * @return the protobuf equivalent of {@code phaseStatusSet}
 */
public PPhaseStatusCollection convertPP(PhaseStatusCollection phaseStatusSet) {
    PPhaseStatusCollection.Builder scBuilder = PPhaseStatusCollection.newBuilder();
    scBuilder.setTaskId(phaseStatusSet.getTaskid());
    DumpPhaseStatus dumpPhase = phaseStatusSet.getDumpPhase();
    JoinPhaseStatus joinPhase = phaseStatusSet.getJoinPhase();
    BuildPhaseStatus buildPhase = phaseStatusSet.getBuildPhase();
    IndexBackFlowPhaseStatus indexBackFlowPhase = phaseStatusSet.getIndexBackFlowPhaseStatus();
    if (dumpPhase != null) {
        PDumpPhaseStatus.Builder builder = PDumpPhaseStatus.newBuilder();
        // One TableDumpStatus message per dumped table. Builders are created inside
        // the lambda so no mutable state is shared between iterations.
        dumpPhase.tablesDump.forEach((tableName, s) -> {
            com.qlangtech.tis.rpc.grpc.log.common.TableDumpStatus.Builder tabDump = com.qlangtech.tis.rpc.grpc.log.common.TableDumpStatus.newBuilder();
            tabDump.setAllRows(s.getAllRows());
            tabDump.setTableName(s.getName());
            tabDump.setTaskid(s.getTaskid());
            tabDump.setReadRows(s.getReadRows());
            tabDump.setFaild(s.isFaild());
            tabDump.setComplete(s.isComplete());
            tabDump.setWaiting(s.isWaiting());
            builder.putTablesDump(tableName, tabDump.build());
        });
        scBuilder.setDumpPhase(builder);
    }
    if (joinPhase != null) {
        PJoinPhaseStatus.Builder builder = PJoinPhaseStatus.newBuilder();
        joinPhase.taskStatus.forEach((taskName, j) -> {
            com.qlangtech.tis.rpc.grpc.log.common.JoinTaskStatus.Builder pbuilder = com.qlangtech.tis.rpc.grpc.log.common.JoinTaskStatus.newBuilder();
            pbuilder.setJoinTaskName(j.getName());
            pbuilder.setFaild(j.isFaild());
            pbuilder.setComplete(j.isComplete());
            pbuilder.setWaiting(j.isWaiting());
            // Nested per-job logs (mapper/reducer progress) of this join task.
            j.jobsStatus.forEach((jobId, jl) -> {
                com.qlangtech.tis.rpc.grpc.log.common.JobLog.Builder jlog = com.qlangtech.tis.rpc.grpc.log.common.JobLog.newBuilder();
                jlog.setMapper(jl.getMapper());
                jlog.setReducer(jl.getReducer());
                jlog.setWaiting(jl.isWaiting());
                pbuilder.putJobStatus(jobId, jlog.build());
            });
            builder.putTaskStatus(taskName, pbuilder.build());
        });
        scBuilder.setJoinPhase(builder);
    }
    if (buildPhase != null) {
        PBuildPhaseStatus.Builder builder = PBuildPhaseStatus.newBuilder();
        buildPhase.nodeBuildStatus.forEach((nodeName, bf) -> {
            com.qlangtech.tis.rpc.grpc.log.common.BuildSharedPhaseStatus.Builder bfBuilder = com.qlangtech.tis.rpc.grpc.log.common.BuildSharedPhaseStatus.newBuilder();
            bfBuilder.setAllBuildSize(bf.getAllBuildSize());
            bfBuilder.setBuildReaded(bf.getBuildReaded());
            bfBuilder.setTaskid(bf.getTaskid());
            bfBuilder.setSharedName(bf.getSharedName());
            bfBuilder.setFaild(bf.isFaild());
            bfBuilder.setComplete(bf.isComplete());
            bfBuilder.setWaiting(bf.isWaiting());
            builder.putNodeBuildStatus(nodeName, bfBuilder.build());
        });
        scBuilder.setBuildPhase(builder);
    }
    if (indexBackFlowPhase != null) {
        PIndexBackFlowPhaseStatus.Builder builder = PIndexBackFlowPhaseStatus.newBuilder();
        indexBackFlowPhase.nodesStatus.forEach((nodeName, ib) -> {
            com.qlangtech.tis.rpc.grpc.log.common.NodeBackflowStatus.Builder ibBuilder = com.qlangtech.tis.rpc.grpc.log.common.NodeBackflowStatus.newBuilder();
            ibBuilder.setNodeName(ib.getName());
            ibBuilder.setAllSize(ib.getAllSize());
            ibBuilder.setReaded(ib.getReaded());
            ibBuilder.setFaild(ib.isFaild());
            ibBuilder.setComplete(ib.isComplete());
            ibBuilder.setWaiting(ib.isWaiting());
            builder.putNodesStatus(nodeName, ibBuilder.build());
        });
        scBuilder.setIndexBackFlowPhaseStatus(builder);
    }
    return scBuilder.build();
}
Usage of com.qlangtech.tis.fullbuild.phasestatus.impl.JoinPhaseStatus in the TIS project by QLangTech.
From the class DataFlowAppSource, method executeDAG.
/**
 * Drives one execution of the full-build DAG: wires a {@link TISReactor} over the
 * supplied task map and runs it on the shared executor, reporting per-task results
 * through {@code dataProcessFeedback}.
 * <p>
 * NOTE(review): when every task succeeds, {@code faildResult[0]} is never assigned
 * and this method returns {@code null} — callers appear to treat {@code null} as
 * success; confirm before changing.
 *
 * @param execChainContext    execution context of the current full-build task
 * @param topology            DAG topology whose session spec drives the reactor
 * @param dataProcessFeedback callback used to report dump/join task errors
 * @param taskMap             tasks keyed by task id, already populated by the caller
 * @return the failure result produced by a failed task, or {@code null} if none failed
 */
private ExecuteResult executeDAG(IExecChainContext execChainContext, SqlTaskNodeMeta.SqlDataFlowTopology topology, IDataProcessFeedback dataProcessFeedback, Map<String, TISReactor.TaskAndMilestone> taskMap) {
// Single-element array so the anonymous listener below can assign the result.
final ExecuteResult[] faildResult = new ExecuteResult[1];
try {
TISReactor reactor = new TISReactor(execChainContext, taskMap);
String dagSessionSpec = topology.getDAGSessionSpec();
logger.info("dagSessionSpec:" + dagSessionSpec);
// final PrintWriter w = new PrintWriter(sw, true);
// Primary listener: routes each task outcome to the feedback callback.
ReactorListener listener = new ReactorListener() {
// TODO: do the handlers really need to be synchronized?
@Override
public synchronized void onTaskCompleted(Task t) {
processTaskResult(execChainContext, (TISReactor.TaskImpl) t, dataProcessFeedback, new ITaskResultProcessor() {
@Override
public void process(DumpPhaseStatus dumpPhase, TISReactor.TaskImpl task) {
// success: nothing extra to report for a dump task
}
@Override
public void process(JoinPhaseStatus joinPhase, TISReactor.TaskImpl task) {
// success: nothing extra to report for a join task
}
});
}
@Override
public synchronized void onTaskFailed(Task t, Throwable err, boolean fatal) {
// w.println("Failed " + t.getDisplayName() + " with " + err);
processTaskResult(execChainContext, (TISReactor.TaskImpl) t, dataProcessFeedback, new ITaskResultProcessor() {
@Override
public void process(DumpPhaseStatus dumpPhase, TISReactor.TaskImpl task) {
dataProcessFeedback.reportDumpTableStatusError(execChainContext, task);
}
@Override
public void process(JoinPhaseStatus joinPhase, TISReactor.TaskImpl task) {
// Mark the join task as finished-with-failure so it is no longer shown as running.
JoinPhaseStatus.JoinTaskStatus stat = joinPhase.getTaskStatus(task.getIdentityName());
// statReceiver.reportBuildIndexStatErr(execContext.getTaskId(),task.getIdentityName());
stat.setWaiting(false);
stat.setFaild(true);
stat.setComplete(true);
}
});
}
};
// Schedule and execute the DAG.
reactor.execute(executorService, reactor.buildSession(dagSessionSpec), listener, new ReactorListener() {
@Override
public void onTaskCompleted(Task t) {
// dumpPhaseStatus.isComplete();
// joinPhaseStatus.isComplete();
}
@Override
public void onTaskFailed(Task t, Throwable err, boolean fatal) {
logger.error(t.getDisplayName(), err);
faildResult[0] = ExecuteResult.createFaild().setMessage("status.runningStatus.isComplete():" + err.getMessage());
}
});
} catch (Exception e) {
throw new RuntimeException(e);
}
return faildResult[0];
}
Usage of com.qlangtech.tis.fullbuild.phasestatus.impl.JoinPhaseStatus in the TIS project by QLangTech.
From the class DataXExecuteInterceptor, method execute.
/**
 * Triggers all DataX table-dump jobs described by the generated config files, plus
 * any post-processing (join) tasks contributed by an {@link IDataXBatchPost} writer,
 * and runs them together as a DAG.
 *
 * @param execChainContext execution context of the current full-build task
 * @return overall execution result; a failure reported by any task wins
 * @throws Exception if job creation or DAG construction fails
 */
@Override
protected ExecuteResult execute(IExecChainContext execChainContext) throws Exception {
    int nThreads = 2;
    // Bounded queue: at most MAX_TABS_NUM_IN_PER_JOB dump jobs may be queued at once.
    final ExecutorService executorService = new ThreadPoolExecutor(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(DataXJobSubmit.MAX_TABS_NUM_IN_PER_JOB), Executors.defaultThreadFactory());
    final Map<String, TISReactor.TaskAndMilestone> taskMap = Maps.newHashMap();
    RpcServiceReference statusRpc = getDataXExecReporter();
    DataxProcessor appSource = execChainContext.getAppSource();
    List<IRemoteTaskTrigger> triggers = Lists.newArrayList();
    List<File> cfgFileNames = appSource.getDataxCfgFileNames(null);
    if (CollectionUtils.isEmpty(cfgFileNames)) {
        throw new IllegalStateException("dataX cfgFileNames can not be empty");
    }
    DataXJobSubmit.InstanceType expectDataXJobSumit = getDataXTriggerType();
    Optional<DataXJobSubmit> jobSubmit = DataXJobSubmit.getDataXJobSubmit(expectDataXJobSumit);
    if (!jobSubmit.isPresent()) {
        // e.g. a distributed worker is expected but not ready yet
        throw new IllegalStateException("can not find expect jobSubmit by type:" + expectDataXJobSumit);
    }
    DataXJobSubmit submit = jobSubmit.get();
    final DataXJobSubmit.IDataXJobContext dataXJobContext = submit.createJobContext(execChainContext);
    Objects.requireNonNull(dataXJobContext, "dataXJobContext can not be null");
    try {
        DumpPhaseStatus dumpStatus = this.getPhaseStatus(execChainContext, FullbuildPhase.FullDump);
        for (File fileName : cfgFileNames) {
            // Declared inside the loop: each config file gets its own trigger.
            IRemoteTaskTrigger jobTrigger = createDataXJob(dataXJobContext, submit, expectDataXJobSumit, statusRpc, appSource, fileName.getName());
            triggers.add(jobTrigger);
            taskMap.put(fileName.getName(), new TISReactor.TaskAndMilestone(DataflowTask.createDumpTask(jobTrigger)));
            // Register the table so the status page can show it in the "waiting" state.
            dumpStatus.getTable(fileName.getName()).setWaiting(true);
        }
        logger.info("trigger dataX jobs by mode:{},with:{}", this.getDataXTriggerType(), cfgFileNames.stream().map((f) -> f.getName()).collect(Collectors.joining(",")));
        IDataxReader reader = appSource.getReader(null);
        List<ISelectedTab> selectedTabs = reader.getSelectedTabs();
        IDataxWriter writer = appSource.getWriter(null);
        if (writer instanceof IDataXBatchPost) {
            // Writers that need a post-dump step contribute one join task per selected table.
            IDataXBatchPost batchPostTask = (IDataXBatchPost) writer;
            JoinPhaseStatus phaseStatus = this.getPhaseStatus(execChainContext, FullbuildPhase.JOIN);
            for (ISelectedTab entry : selectedTabs) {
                IRemoteTaskTrigger postTaskTrigger = batchPostTask.createPostTask(execChainContext, entry);
                triggers.add(postTaskTrigger);
                JoinPhaseStatus.JoinTaskStatus taskStatus = phaseStatus.getTaskStatus(postTaskTrigger.getTaskName());
                taskStatus.setWaiting(true);
                taskMap.put(postTaskTrigger.getTaskName(), new TISReactor.TaskAndMilestone(createJoinTask(postTaskTrigger, taskStatus)));
            }
        }
        // DAG session spec, example: "->a ->b a,b->c"
        String dagSessionSpec = triggers.stream().map((trigger) -> {
            List<String> dpts = trigger.getTaskDependencies();
            return dpts.stream().collect(Collectors.joining(",")) + "->" + trigger.getTaskName();
        }).collect(Collectors.joining(" "));
        logger.info("dataX:{} of dagSessionSpec:{}", execChainContext.getIndexName(), dagSessionSpec);
        // Single-element array so the listener below can assign the failure result.
        ExecuteResult[] faildResult = new ExecuteResult[] { ExecuteResult.createSuccess() };
        this.executeDAG(executorService, execChainContext, dagSessionSpec, taskMap, new ReactorListener() {
            @Override
            public void onTaskCompleted(Task t) {
                // success needs no extra reporting here
            }
            @Override
            public void onTaskFailed(Task t, Throwable err, boolean fatal) {
                logger.error(t.getDisplayName(), err);
                faildResult[0] = ExecuteResult.createFaild().setMessage("status.runningStatus.isComplete():" + err.getMessage());
                if (err instanceof InterruptedException) {
                    // This job has been cancelled (triggered from TisServlet.doDelete()):
                    // cancel all sibling triggers as well.
                    logger.warn("DataX Name:{},taskid:{} has been canceled", execChainContext.getIndexName(), execChainContext.getTaskId());
                    for (IRemoteTaskTrigger tt : triggers) {
                        try {
                            tt.cancel();
                        } catch (Throwable ex) {
                            // best-effort cancellation: record the failure but keep cancelling the rest
                            logger.warn("cancel trigger:" + tt.getTaskName() + " faild", ex);
                        }
                    }
                }
            }
        });
        for (IRemoteTaskTrigger trigger : triggers) {
            if (trigger.isAsyn()) {
                execChainContext.addAsynSubJob(new IExecChainContext.AsynSubJob(trigger.getAsynJobName()));
            }
        }
        return faildResult[0];
    } finally {
        // faildResult is read right after executeDAG returns, so the DAG run appears to be
        // synchronous (confirm); shutdown() drains the pool gracefully, letting any
        // in-flight task finish, and fixes the previous per-invocation thread leak.
        executorService.shutdown();
        try {
            dataXJobContext.destroy();
        } catch (Throwable e) {
            logger.error(e.getMessage(), e);
        }
    }
}
Usage of com.qlangtech.tis.fullbuild.phasestatus.impl.JoinPhaseStatus in the TIS project by QLangTech.
From the class LogCollectorClient, method convert.
/**
 * Reconstructs a local {@link PhaseStatusCollection} from its protobuf transfer form.
 * <p>
 * Only the phases contained in {@code executePhaseRange} are copied; each phase is
 * mapped message-by-message into the mutable status objects held by the result.
 *
 * @param stat              the protobuf status collection received over gRPC
 * @param executePhaseRange the phases relevant to the current execution
 * @return a local status collection mirroring {@code stat}
 */
public static PhaseStatusCollection convert(PPhaseStatusCollection stat, ExecutePhaseRange executePhaseRange) {
    PDumpPhaseStatus pDump = stat.getDumpPhase();
    PJoinPhaseStatus pJoin = stat.getJoinPhase();
    PBuildPhaseStatus pBuild = stat.getBuildPhase();
    PIndexBackFlowPhaseStatus pBackflow = stat.getIndexBackFlowPhaseStatus();
    PhaseStatusCollection result = new PhaseStatusCollection(stat.getTaskId(), executePhaseRange);
    if (executePhaseRange.contains(FullbuildPhase.FullDump) && pDump != null) {
        DumpPhaseStatus dumpTarget = result.getDumpPhase();
        pDump.getTablesDumpMap().forEach((tableName, src) -> {
            DumpPhaseStatus.TableDumpStatus tableStatus = new DumpPhaseStatus.TableDumpStatus(src.getTableName(), src.getTaskid());
            tableStatus.setAllRows(src.getAllRows());
            tableStatus.setReadRows(src.getReadRows());
            tableStatus.setWaiting(src.getWaiting());
            tableStatus.setFaild(src.getFaild());
            tableStatus.setComplete(src.getComplete());
            dumpTarget.tablesDump.put(tableName, tableStatus);
        });
    }
    if (executePhaseRange.contains(FullbuildPhase.JOIN) && pJoin != null) {
        JoinPhaseStatus joinTarget = result.getJoinPhase();
        pJoin.getTaskStatusMap().forEach((taskName, src) -> {
            JoinPhaseStatus.JoinTaskStatus taskStatus = new JoinPhaseStatus.JoinTaskStatus(src.getJoinTaskName());
            taskStatus.setWaiting(src.getWaiting());
            taskStatus.setFaild(src.getFaild());
            taskStatus.setComplete(src.getComplete());
            // Copy the nested per-job logs of this join task.
            src.getJobStatusMap().forEach((jobId, jobSrc) -> {
                JobLog jobLog = new JobLog();
                jobLog.setMapper(jobSrc.getMapper());
                jobLog.setReducer(jobSrc.getReducer());
                jobLog.setWaiting(jobSrc.getWaiting());
                taskStatus.jobsStatus.put(jobId, jobLog);
            });
            joinTarget.taskStatus.put(taskName, taskStatus);
        });
    }
    if (executePhaseRange.contains(FullbuildPhase.BUILD) && pBuild != null) {
        BuildPhaseStatus buildTarget = result.getBuildPhase();
        pBuild.getNodeBuildStatusMap().forEach((nodeName, src) -> {
            com.qlangtech.tis.fullbuild.phasestatus.impl.BuildSharedPhaseStatus nodeStatus = new com.qlangtech.tis.fullbuild.phasestatus.impl.BuildSharedPhaseStatus();
            nodeStatus.setAllBuildSize(src.getAllBuildSize());
            nodeStatus.setBuildReaded(src.getBuildReaded());
            nodeStatus.setTaskid(src.getTaskid());
            nodeStatus.setSharedName(src.getSharedName());
            nodeStatus.setWaiting(src.getWaiting());
            nodeStatus.setFaild(src.getFaild());
            nodeStatus.setComplete(src.getComplete());
            buildTarget.nodeBuildStatus.put(nodeName, nodeStatus);
        });
    }
    if (executePhaseRange.contains(FullbuildPhase.IndexBackFlow) && pBackflow != null) {
        IndexBackFlowPhaseStatus backflowTarget = result.getIndexBackFlowPhaseStatus();
        pBackflow.getNodesStatusMap().forEach((nodeName, src) -> {
            IndexBackFlowPhaseStatus.NodeBackflowStatus nodeStatus = new IndexBackFlowPhaseStatus.NodeBackflowStatus(src.getNodeName());
            // Protobuf carries these as wider integers; the local model uses int.
            nodeStatus.setAllSize((int) src.getAllSize());
            nodeStatus.setReaded((int) src.getReaded());
            nodeStatus.setWaiting(src.getWaiting());
            nodeStatus.setFaild(src.getFaild());
            nodeStatus.setComplete(src.getComplete());
            backflowTarget.nodesStatus.put(nodeName, nodeStatus);
        });
    }
    return result;
}
Usage of com.qlangtech.tis.fullbuild.phasestatus.impl.JoinPhaseStatus in the TIS project by QLangTech.
From the class DataFlowAppSource, method getProcessDataResults.
// @Override
// public List<PrimaryTableMeta> getPrimaryTabs() {
// return getErRules().getPrimaryTabs();
// }
/**
 * Runs the full data-processing pipeline of this data flow: first registers a dump
 * task for every dependency table, then — for multi-table flows — builds the wide
 * (flat) tables by executing the SQL-node DAG.
 *
 * @param execChainContext       execution context of the current full-build task
 * @param singleTableDumpFactory factory creating the dump task for each table
 * @param dataProcessFeedback    callback used to report per-task results
 * @param taskPhaseInfo          registry of per-phase status objects
 * @return the overall execution result of the DAG run
 * @throws Exception if the topology cannot be loaded or a task cannot be created
 */
@Override
public ExecuteResult getProcessDataResults(IExecChainContext execChainContext, ISingleTableDumpFactory singleTableDumpFactory, IDataProcessFeedback dataProcessFeedback, ITaskPhaseInfo taskPhaseInfo) throws Exception {
    // Load the workflow (data flow) topology definition.
    SqlTaskNodeMeta.SqlDataFlowTopology topology = SqlTaskNodeMeta.getSqlDataFlowTopology(dataflowName);
    // Keyed by task id.
    Map<String, TISReactor.TaskAndMilestone> taskMap = Maps.newHashMap();
    // Tables the workflow dump depends on; logged below for diagnostics.
    Collection<DependencyNode> tables = topology.getDumpNodes();
    // StringBuilder: purely local, no synchronization needed (was StringBuffer).
    StringBuilder dumps = new StringBuilder("dependency table:\n");
    dumps.append("\t\t=======================\n");
    for (DependencyNode t : tables) {
        // previously logged a stray trailing comma: "[tabid,]"
        dumps.append("\t\t").append(t.getDbName()).append(".").append(t.getName()).append("[").append(t.getTabid()).append("] \n");
    }
    dumps.append("\t\t=======================\n");
    logger.info(dumps.toString());
    // Initialise the dump status of every dependency table up front.
    DumpPhaseStatus dumpPhaseStatus = taskPhaseInfo.getPhaseStatus(execChainContext, FullbuildPhase.FullDump);
    for (DependencyNode dump : topology.getDumpNodes()) {
        DataflowTask tabDump = singleTableDumpFactory.createSingleTableDump(dump, false, /* isHasValidTableDump */
        "tableDump.getPt()", execChainContext.getZkClient(), execChainContext, dumpPhaseStatus);
        taskMap.put(dump.getId(), new TISReactor.TaskAndMilestone(tabDump));
    }
    if (topology.isSingleTableModel()) {
        // A single-table flow has no join stage: run the dump DAG directly.
        return executeDAG(execChainContext, topology, dataProcessFeedback, taskMap);
    } else {
        // Single-element array so the startTask lambda can assign the result.
        final ExecuteResult[] faildResult = new ExecuteResult[1];
        TemplateContext tplContext = new TemplateContext(execChainContext);
        JoinPhaseStatus joinPhaseStatus = taskPhaseInfo.getPhaseStatus(execChainContext, FullbuildPhase.JOIN);
        IPluginStore<FlatTableBuilder> pluginStore = TIS.getPluginStore(FlatTableBuilder.class);
        Objects.requireNonNull(pluginStore.getPlugin(), "flatTableBuilder can not be null");
        final IFlatTableBuilder flatTableBuilder = pluginStore.getPlugin();
        final SqlTaskNodeMeta fNode = topology.getFinalNode();
        flatTableBuilder.startTask((context) -> {
            // Create one wide-table build task per SQL node; the final node is flagged.
            for (SqlTaskNodeMeta pnode : topology.getNodeMetas()) {
                DataflowTask process = flatTableBuilder.createTask(pnode, StringUtils.equals(fNode.getId(), pnode.getId()), tplContext, context, joinPhaseStatus.getTaskStatus(pnode.getExportName()));
                taskMap.put(pnode.getId(), new TISReactor.TaskAndMilestone(process));
            }
            faildResult[0] = executeDAG(execChainContext, topology, dataProcessFeedback, taskMap);
        });
        return faildResult[0];
    }
}
Aggregations