use of com.dtstack.taier.dao.domain.ScheduleEngineJobCache in project Taier by DTStack.
the class JobDealer method getAndUpdateEngineLog.
public String getAndUpdateEngineLog(String jobId, String engineJobId, String appId, Long tenantId) {
    if (StringUtils.isBlank(engineJobId)) {
        return "";
    }
    String engineLog = null;
    try {
        ScheduleEngineJobCache engineJobCache = scheduleJobCacheService.getJobCacheByJobId(jobId);
        if (null == engineJobCache) {
            return "";
        }
        ParamAction paramAction = PublicUtil.jsonStrToObject(engineJobCache.getJobInfo(), ParamAction.class);
        Map<String, Object> pluginInfo = paramAction.getPluginInfo();
        JobIdentifier jobIdentifier = new JobIdentifier(engineJobId, appId, jobId, tenantId, paramAction.getTaskType(),
                TaskParamsUtils.parseDeployTypeByTaskParams(paramAction.getTaskParams(), engineJobCache.getComputeType()).getType(),
                null, MapUtils.isEmpty(pluginInfo) ? null : JSONObject.toJSONString(pluginInfo), paramAction.getComponentVersion());
        // Fetch the log from the engine
        engineLog = workerOperator.getEngineLog(jobIdentifier);
        if (engineLog != null) {
            scheduleJobService.updateExpandByJobId(jobId, engineLog, null);
        }
    } catch (Throwable e) {
        LOGGER.error("getAndUpdateEngineLog error jobId:{}", jobId, e);
    }
    return engineLog;
}
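A minimal caller sketch, assuming a Spring context where JobDealer is injectable, the ids come from an existing ScheduleJob row, and ScheduleJob exposes the usual getters; the fetchLatestEngineLog wrapper and the injected field are hypothetical:

@Autowired
private JobDealer jobDealer;

public String fetchLatestEngineLog(ScheduleJob scheduleJob) {
    // Returns "" when the engine job id is blank or no cache row exists;
    // otherwise returns the freshly fetched engine log, which the method
    // has already persisted via scheduleJobService.updateExpandByJobId.
    return jobDealer.getAndUpdateEngineLog(
            scheduleJob.getJobId(),
            scheduleJob.getEngineJobId(),
            scheduleJob.getApplicationId(),
            scheduleJob.getTenantId());
}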
use of com.dtstack.taier.dao.domain.ScheduleEngineJobCache in project Taier by DTStack.
the class JobStopDealer method checkExpired.
private boolean checkExpired(JobElement jobElement) {
    ScheduleEngineJobCache jobCache = engineJobCacheService.getByJobId(jobElement.jobId);
    ScheduleJobOperatorRecord scheduleJobOperatorRecord = scheduleJobOperatorRecordService.getById(jobElement.stopJobId);
    if (jobCache != null && scheduleJobOperatorRecord != null && scheduleJobOperatorRecord.getGmtCreate() != null) {
        // The stop request is stale if the cache row was (re)created after the stop record was written
        return jobCache.getGmtCreate().after(scheduleJobOperatorRecord.getGmtCreate());
    } else {
        return true;
    }
}
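A quick illustration of that rule; a sketch assuming gmtCreate is a java.sql.Timestamp (or another java.util.Date subtype, which the .after call implies) and that the usual setters exist on these domain objects:

ScheduleEngineJobCache cache = new ScheduleEngineJobCache();
cache.setGmtCreate(Timestamp.valueOf("2024-01-01 10:05:00"));

ScheduleJobOperatorRecord stopRecord = new ScheduleJobOperatorRecord();
stopRecord.setGmtCreate(Timestamp.valueOf("2024-01-01 10:00:00"));

// The cache row postdates the stop request, i.e. the job was resubmitted
// after the stop was filed, so the pending stop is treated as expired.
boolean expired = cache.getGmtCreate().after(stopRecord.getGmtCreate()); // true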
use of com.dtstack.taier.dao.domain.ScheduleEngineJobCache in project Taier by DTStack.
the class JobSubmitDealer method checkIsFinished.
private boolean checkIsFinished(JobClient jobClient) {
    ScheduleEngineJobCache engineJobCache = engineJobCacheService.getByJobId(jobClient.getJobId());
    try {
        if (null == jobClient.getQueueSourceType() || EQueueSourceType.NORMAL.getCode() == jobClient.getQueueSourceType()) {
            if (null == engineJobCache) {
                shardCache.updateLocalMemTaskStatus(jobClient.getJobId(), TaskStatus.CANCELED.getStatus());
                jobClient.doStatusCallBack(TaskStatus.CANCELED.getStatus());
                LOGGER.info("jobId:{} checkIsFinished is true, job is Finished.", jobClient.getJobId());
                return true;
            }
        } else {
            if (null == engineJobCache) {
                // When a job short on resources keeps being re-deployed with growing delays, killing and
                // rerunning it from the UI finishes immediately but leaves its entry in deployQueue.
                // Re-enqueueing it would cancel it right away and push the status back to WAITENGINE,
                // leaving it inconsistent, so check whether the cache row still exists.
                LOGGER.info("jobId:{} stage:{} take job from delayJobQueue but engine job cache has been deleted", jobClient.getJobId(), delayJobQueue.size());
                return true;
            } else {
                // The cache row exists; still check whether it was inserted by a rerun
                boolean checkCanSubmit = true;
                if (null != jobClient.getSubmitCacheTime()) {
                    long insertDbCacheTime = engineJobCache.getGmtCreate().getTime();
                    checkCanSubmit = insertDbCacheTime <= jobClient.getSubmitCacheTime();
                }
                if (checkCanSubmit) {
                    engineJobCacheService.updateStage(jobClient.getJobId(), EJobCacheStage.PRIORITY.getStage(), localAddress, jobClient.getPriority(), null);
                    jobClient.doStatusCallBack(TaskStatus.WAITENGINE.getStatus());
                    return false;
                } else {
                    // The cache row was inserted after this jobClient's first submission, so the job has been
                    // resubmitted; discard this jobClient from the delay queue without further processing.
                    LOGGER.info("jobId:{} checkIsFinished is true checkCanSubmit is false jobClient cacheSubmitTime {} cacheDB SubmitTime {}, job is Finished.", jobClient.getJobId(), jobClient.getSubmitCacheTime(), engineJobCache.getGmtCreate().getTime());
                    return true;
                }
            }
        }
    } finally {
        // Reset the queue source type
        jobClient.setQueueSourceType(EQueueSourceType.NORMAL.getCode());
        if (null != engineJobCache && null == jobClient.getSubmitCacheTime()) {
            LOGGER.info("jobId:{} set submitCacheTime is {}", jobClient.getJobId(), engineJobCache.getGmtCreate().getTime());
            jobClient.setSubmitCacheTime(engineJobCache.getGmtCreate().getTime());
        }
    }
    return false;
}
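The rerun check above reduces to a single timestamp comparison; a sketch with hypothetical values:

long firstSubmitCacheTime = 1700000000000L; // submitCacheTime recorded on the jobClient at first submission
long insertDbCacheTime    = 1700000060000L; // gmtCreate of the current cache row, one minute later

// The cache row is newer than the first submission, so a rerun inserted it;
// the queued jobClient is stale and checkIsFinished returns true.
boolean checkCanSubmit = insertDbCacheTime <= firstSubmitCacheTime; // false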
use of com.dtstack.taier.dao.domain.ScheduleEngineJobCache in project Taier by DTStack.
the class OperatorRecordJobScheduler method removeOperatorRecord.
/**
 * Delete operator records that are no longer needed
 *
 * @param deleteJobIdList job instance ids
 */
private void removeOperatorRecord(List<String> deleteJobIdList) {
    // Points to consider when deleting OperatorRecord rows:
    // 1. The incoming jobIds all belong to instances already in a submitted state; unsubmitted ids never reach here.
    // 2. When the cache row is missing there are two possibilities:
    //    first, the cache has not been created yet, in which case the operator record must not be deleted;
    //    second, the job has finished running, in which case the operator record must be deleted.
    // Query the cache table
    Map<String, ScheduleEngineJobCache> scheduleEngineJobCacheMaps = scheduleJobCacheService.lambdaQuery()
            .in(ScheduleEngineJobCache::getJobId, deleteJobIdList)
            .eq(ScheduleEngineJobCache::getIsDeleted, Deleted.NORMAL.getStatus())
            .list().stream()
            .collect(Collectors.toMap(ScheduleEngineJobCache::getJobId, g -> (g)));
    // Query the job instances behind the OperatorRecords that may be deleted
    Map<String, ScheduleJob> scheduleJobMap = scheduleJobService.lambdaQuery()
            .in(ScheduleJob::getJobId, deleteJobIdList)
            .eq(ScheduleJob::getIsDeleted, Deleted.NORMAL.getStatus())
            .list().stream()
            .collect(Collectors.toMap(ScheduleJob::getJobId, g -> (g)));
    List<String> needDeleteJobIdList = Lists.newArrayList();
    for (String jobId : deleteJobIdList) {
        ScheduleEngineJobCache scheduleEngineJobCache = scheduleEngineJobCacheMaps.get(jobId);
        if (scheduleEngineJobCache == null) {
            // The cache row is missing: one of the two cases from point 2 above, so check the job status
            ScheduleJob scheduleJob = scheduleJobMap.get(jobId);
            // A stopped instance means the job finished running, so the operator record must be deleted
            if (scheduleJob != null && TaskStatus.STOPPED_STATUS.contains(scheduleJob.getStatus())) {
                needDeleteJobIdList.add(jobId);
            }
            if (scheduleJob == null) {
                // A missing instance should rarely happen, but its OperatorRecord serves no purpose, so delete it as well
                needDeleteJobIdList.add(jobId);
            }
        } else {
            // The cache row exists, so the record can be deleted directly; after a restart the job can be re-pulled from the cache
            needDeleteJobIdList.add(jobId);
        }
    }
    // Delete the OperatorRecord rows
    if (CollectionUtils.isNotEmpty(needDeleteJobIdList)) {
        scheduleJobOperatorRecordService.lambdaUpdate()
                .in(ScheduleJobOperatorRecord::getJobId, needDeleteJobIdList)
                .remove();
    }
}
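The per-job branching above distills to a small predicate; a sketch with a hypothetical helper name:

// Returns true when the OperatorRecord of the given job should be removed.
private boolean shouldRemoveRecord(ScheduleEngineJobCache cache, ScheduleJob job) {
    if (cache != null) {
        return true;  // cache row exists: a restart can re-pull the job from it
    }
    if (job == null) {
        return true;  // no instance behind the record: the record is useless
    }
    // no cache row: delete only if the job already ran to a stopped state
    return TaskStatus.STOPPED_STATUS.contains(job.getStatus());
}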
use of com.dtstack.taier.dao.domain.ScheduleEngineJobCache in project Taier by DTStack.
the class EngineJobCacheService method updateNodeAddressFailover.
public int updateNodeAddressFailover(String nodeAddress, List<String> jobIds, Integer stage) {
    ScheduleEngineJobCache jobCache = new ScheduleEngineJobCache();
    jobCache.setNodeAddress(nodeAddress);
    jobCache.setStage(stage);
    return getBaseMapper().update(jobCache, Wrappers.lambdaQuery(ScheduleEngineJobCache.class)
            .in(ScheduleEngineJobCache::getJobId, jobIds));
}
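A minimal caller sketch for a failover path, handing the orphaned cache rows of a dead node over to the surviving one; the address, job ids, and chosen stage are hypothetical:

List<String> orphanedJobIds = Arrays.asList("job_001", "job_002");
int updated = engineJobCacheService.updateNodeAddressFailover(
        "192.168.1.10:8090",                  // node taking over the jobs
        orphanedJobIds,
        EJobCacheStage.PRIORITY.getStage());  // stage to requeue them at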