Use of com.dtstack.taier.common.exception.RdosDefineException in project Taier by DTStack.
Class InceptorWriter, method checkFormat.
@Override
public void checkFormat(JSONObject data) {
    JSONObject parameter = data.getJSONObject("parameter");
    if (StringUtils.isEmpty(parameter.getString("path"))) {
        throw new RdosDefineException("path cannot be empty");
    }
    JSONArray column = parameter.getJSONArray("column");
    if (column == null || column.isEmpty()) {
        throw new RdosDefineException("column cannot be empty");
    }
    for (Object o : column) {
        if (o instanceof String) {
            throw new RdosDefineException("column must be an array of objects: [{\"name\":\"id\",\"type\":\"int\"}]");
        }
    }
}
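A minimal sketch of a payload that satisfies all three checks (the demo class is ours; it assumes fastjson and Taier's InceptorWriter are on the classpath, and that InceptorWriter has a no-arg constructor):

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;

public class CheckFormatDemo {
    public static void main(String[] args) {
        JSONObject parameter = new JSONObject();
        parameter.put("path", "/warehouse/demo");   // "path" must be non-empty

        JSONArray column = new JSONArray();
        JSONObject id = new JSONObject();
        id.put("name", "id");
        id.put("type", "int");
        column.add(id);                              // object entries only; a bare string entry throws
        parameter.put("column", column);

        JSONObject data = new JSONObject();
        data.put("parameter", parameter);

        new InceptorWriter().checkFormat(data);      // passes; no RdosDefineException
    }
}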
Use of com.dtstack.taier.common.exception.RdosDefineException in project Taier by DTStack.
Class InceptorWriter, method toWriterJson.
@Override
public JSONObject toWriterJson() {
    try {
        inferHdfsParams();

        JSONObject connection = new JSONObject(true);
        connection.put("jdbcUrl", this.getJdbcUrl());
        connection.put("table", StringUtils.isNotBlank(this.getTable()) ? Lists.newArrayList(this.getTable()) : Lists.newArrayList());

        HDFSWriter hdfsWriter = new HDFSWriter();
        if (writeMode != null && writeMode.trim().length() != 0) {
            writeMode = SyncWriteMode.tranferHiveMode(writeMode);
        } else {
            writeMode = SyncWriteMode.HIVE_OVERWRITE.getMode();
        }
        hdfsWriter.setWriteMode(writeMode);
        hdfsWriter.setColumn(column);
        hdfsWriter.setFileName(fileName);
        hdfsWriter.setEncoding(encoding);
        hdfsWriter.setFieldDelimiter(fieldDelimiter);
        hdfsWriter.setFileType(fileType);
        hdfsWriter.setDefaultFS(defaultFS);
        dealHiveMetaStores();
        hdfsWriter.setHadoopConfig(hadoopConfig);
        hdfsWriter.setPath(path == null ? "" : path.trim());
        hdfsWriter.setExtralConfig(super.getExtralConfig());
        hdfsWriter.setSourceIds(sourceIds);
        if (StringUtils.isNotEmpty(partition)) {
            hdfsWriter.setFileName(partition);
            hdfsWriter.setPartition(partition);
        } else {
            hdfsWriter.setFileName("");
        }
        hdfsWriter.setFullColumnName(fullColumnNames);
        hdfsWriter.setFullColumnType(fullColumnTypes);
        if (StringUtils.isNotEmpty(table)) {
            hdfsWriter.setTable(table);
        }
        if (StringUtils.isNotEmpty(jdbcUrl)) {
            hdfsWriter.setJdbcUrl(jdbcUrl);
        }
        if (StringUtils.isNotEmpty(username)) {
            hdfsWriter.setUsername(username);
        }
        if (StringUtils.isNotEmpty(password)) {
            hdfsWriter.setPassword(password);
        }
        if (MapUtils.isNotEmpty(sftpConf)) {
            hdfsWriter.setSftpConf(sftpConf);
        }
        if (StringUtils.isNotEmpty(remoteDir)) {
            hdfsWriter.setRemoteDir(remoteDir);
        }

        JSONObject hdfsWriterJSONObject = hdfsWriter.toWriterJson();
        JSONObject hdfsWriterParameterJSONObject = hdfsWriterJSONObject.getJSONObject("parameter");
        hdfsWriterParameterJSONObject.put("table", getTable());
        hdfsWriterParameterJSONObject.put("isTransaction", isTransaction);
        hdfsWriterParameterJSONObject.put("partitionName", generatePartition());
        hdfsWriterParameterJSONObject.put("schema", dbName);
        hdfsWriterJSONObject.put("name", PluginName.INCEPTOR_W);
        return hdfsWriterJSONObject;
    } catch (Throwable ex) {
        // ex.getCause() can be null; fall back to ex itself to avoid an NPE while reporting the error
        Throwable cause = ex.getCause() == null ? ex : ex.getCause();
        throw new RdosDefineException(cause.getMessage(), ex);
    }
}
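A hedged sketch of a call site (the demo class and variable names are ours; the writer is assumed to be configured via its setters before the call). The method either returns the assembled writer JSON, whose top level carries the plugin name plus a "parameter" object, or wraps any failure in an RdosDefineException:

import com.alibaba.fastjson.JSONObject;
import com.dtstack.taier.common.exception.RdosDefineException;

public class ToWriterJsonDemo {
    public static void main(String[] args) {
        InceptorWriter inceptorWriter = new InceptorWriter(); // assumed no-arg constructor; configure via setters in real use
        try {
            JSONObject writerJson = inceptorWriter.toWriterJson();
            // top-level shape: {"name": <Inceptor plugin name>, "parameter": {..., "table", "schema", "partitionName"}}
            System.out.println(writerJson.toJSONString());
        } catch (RdosDefineException e) {
            // any Throwable thrown during conversion surfaces here with the cause's message
            System.err.println("failed to build Inceptor writer json: " + e.getMessage());
        }
    }
}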
Use of com.dtstack.taier.common.exception.RdosDefineException in project Taier by DTStack.
Class ActionService, method queryJobLog.
/**
 * View the log of a periodic job instance.
 *
 * @param jobId    instance id
 * @param pageInfo which retry attempt's log to fetch (0 means the latest retry)
 * @return log information
 */
public ReturnJobLogVO queryJobLog(String jobId, Integer pageInfo) {
    if (pageInfo == null) {
        pageInfo = 1;
    }
    // look up the periodic job instance
    ScheduleJob scheduleJob = jobService.lambdaQuery()
            .eq(ScheduleJob::getJobId, jobId)
            .eq(ScheduleJob::getIsDeleted, Deleted.NORMAL.getStatus())
            .one();
    if (scheduleJob == null) {
        throw new RdosDefineException("job not found, please contact the administrator");
    }
    // 0 means: take the latest retry
    if (0 == pageInfo) {
        pageInfo = scheduleJob.getRetryNum();
    }
    ReturnJobLogVO jobLogVO = new ReturnJobLogVO();
    jobLogVO.setPageIndex(pageInfo);
    jobLogVO.setPageSize(scheduleJob.getRetryNum());
    // retryNum > 1 means the instance has already been retried at least once, so read the retry log instead
    if (scheduleJob.getRetryNum() > 1) {
        // look up the retry log
        ScheduleEngineJobRetry scheduleEngineJobRetry = jobRetryService.lambdaQuery()
                .eq(ScheduleEngineJobRetry::getJobId, jobId)
                .eq(ScheduleEngineJobRetry::getRetryNum, pageInfo)
                .eq(ScheduleEngineJobRetry::getIsDeleted, Deleted.NORMAL.getStatus())
                .orderBy(true, false, ScheduleEngineJobRetry::getId)
                .one();
        if (scheduleEngineJobRetry != null) {
            jobLogVO.setLogInfo(scheduleEngineJobRetry.getLogInfo());
            jobLogVO.setEngineLog(scheduleEngineJobRetry.getEngineLog());
        }
        jobLogVO.setPageIndex(pageInfo);
        jobLogVO.setPageSize(scheduleJob.getMaxRetryNum());
    } else {
        // look up the current log
        ScheduleJobExpand scheduleJobExpand = jobExpandService.lambdaQuery()
                .eq(ScheduleJobExpand::getIsDeleted, Deleted.NORMAL.getStatus())
                .eq(ScheduleJobExpand::getJobId, jobId)
                .one();
        if (scheduleJobExpand != null) {
            jobLogVO.setLogInfo(scheduleJobExpand.getLogInfo());
            jobLogVO.setEngineLog(scheduleJobExpand.getEngineLog());
        }
    }
    // assemble the SQL info
    ScheduleTaskShade scheduleTaskShade = taskService.lambdaQuery()
            .eq(ScheduleTaskShade::getTaskId, scheduleJob.getTaskId())
            .eq(ScheduleTaskShade::getIsDeleted, Deleted.NORMAL.getStatus())
            .one();
    if (null != scheduleTaskShade) {
        JSONObject shadeInfo = scheduleTaskShadeInfoService.getInfoJSON(scheduleTaskShade.getTaskId());
        String taskParams = shadeInfo.getString("taskParamsToReplace");
        List<ScheduleTaskParamShade> taskParamsToReplace = JSONObject.parseArray(taskParams, ScheduleTaskParamShade.class);
        String sqlText = scheduleTaskShade.getSqlText();
        if (EScheduleJobType.SYNC.getType().equals(scheduleTaskShade.getTaskType())) {
            sqlText = Base64Util.baseDecode(sqlText);
        }
        sqlText = JobParamReplace.paramReplace(sqlText, taskParamsToReplace, scheduleJob.getCycTime());
        jobLogVO.setSqlText(sqlText);
        Timestamp execStartTime = scheduleJob.getExecStartTime();
        Timestamp execEndTime = scheduleJob.getExecEndTime();
        if (EScheduleJobType.SYNC.getType().equals(scheduleTaskShade.getTaskType())) {
            String syncLog = null;
            try {
                syncLog = batchServerLogService.formatPerfLogInfo(
                        scheduleJob.getEngineJobId(),
                        scheduleJob.getJobId(),
                        Optional.ofNullable(execStartTime).orElse(Timestamp.valueOf(LocalDateTime.now())).getTime(),
                        Optional.ofNullable(execEndTime).orElse(Timestamp.valueOf(LocalDateTime.now())).getTime(),
                        scheduleJob.getTenantId());
            } catch (Exception e) {
                LOGGER.error("queryJobLog {} sync log error", jobId, e);
            }
            jobLogVO.setSyncLog(syncLog);
        }
        if (EScheduleJobType.SPARK_SQL.getType().equals(scheduleTaskShade.getTaskType())) {
            jobLogVO.setDownLoadUrl(String.format(CommonConstant.DOWNLOAD_LOG,
                    scheduleJob.getJobId(), scheduleJob.getTaskType(), scheduleJob.getTenantId()));
        }
    }
    return jobLogVO;
}
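A hypothetical call site (the demo class is ours; getters matching the setters above are assumed, and in Taier the service would be injected as a Spring bean rather than constructed directly). Per the code above, passing 0 selects the latest retry and null defaults to 1:

// Taier imports omitted; assumes ActionService and ReturnJobLogVO are visible from here.
public class QueryJobLogDemo {
    public static void main(String[] args) {
        ActionService actionService = new ActionService(); // illustration only; a Spring bean in Taier
        ReturnJobLogVO log = actionService.queryJobLog("someJobId", 0); // 0 => latest retry
        System.out.println(log.getPageIndex() + "/" + log.getPageSize());
        System.out.println(log.getLogInfo());   // scheduling-side log
        System.out.println(log.getEngineLog()); // engine-side log
        System.out.println(log.getSqlText());   // SQL with schedule params substituted
    }
}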
Use of com.dtstack.taier.common.exception.RdosDefineException in project Taier by DTStack.
Class JobJobService, method displayOffSpring.
public ReturnJobDisplayVO displayOffSpring(QueryJobDisplayDTO dto) {
    // clamp the level to 0 < level < max.level
    dto.setLevel(JobUtils.checkLevel(dto.getLevel(), context.getMaxLevel()));
    // check that the instance exists; if not, throw right away so none of the logic below runs
    ScheduleJob scheduleJob = jobService.lambdaQuery()
            .eq(ScheduleJob::getJobId, dto.getJobId())
            .eq(ScheduleJob::getIsDeleted, Deleted.NORMAL.getStatus())
            .one();
    if (scheduleJob == null) {
        throw new RdosDefineException("job does not exist");
    }
    // first load the data from the db, then recursively assemble it into nodes
    List<String> jobKeys = Lists.newArrayList(scheduleJob.getJobKey());
    Map<String, List<String>> jobJobMaps = findJobJobByJobKeys(dto, jobKeys);
    // query all instances
    List<ScheduleJob> scheduleJobList = findJobByJobJob(jobJobMaps);
    scheduleJobList.add(scheduleJob);
    Map<String, ScheduleJob> jobMap = scheduleJobList.stream()
            .collect(Collectors.groupingBy(ScheduleJob::getJobKey,
                    Collectors.collectingAndThen(Collectors.toCollection(ArrayList<ScheduleJob>::new), a -> a.get(0))));
    // query all tasks
    Map<Long, ScheduleTaskShade> taskShadeMap = findTaskJob(scheduleJobList);
    ReturnJobDisplayVO vo = new ReturnJobDisplayVO();
    vo.setDirectType(dto.getDirectType());
    vo.setRootNode(buildRootNode(dto.getDirectType(), scheduleJob, taskShadeMap, jobMap, jobJobMaps));
    return vo;
}
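A hedged usage sketch (the demo class is ours; the DTO setter names are inferred from the getters used above, so treat them as assumptions, and the service would normally be injected rather than constructed):

// Taier imports omitted; assumes QueryJobDisplayDTO, ReturnJobDisplayVO, and JobJobService are visible.
public class DisplayOffSpringDemo {
    public static void main(String[] args) {
        JobJobService jobJobService = new JobJobService(); // illustration only; a Spring bean in Taier
        QueryJobDisplayDTO dto = new QueryJobDisplayDTO();
        dto.setJobId("someJobId"); // must reference an existing instance, else displayOffSpring throws
        dto.setLevel(2);           // clamped by JobUtils.checkLevel to 0 < level < max.level
        dto.setDirectType(1);      // traversal direction, echoed back on the returned VO
        ReturnJobDisplayVO vo = jobJobService.displayOffSpring(dto);
        System.out.println(vo.getRootNode());
    }
}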
Use of com.dtstack.taier.common.exception.RdosDefineException in project Taier by DTStack.
Class TenantController, method pageQuery.
@PostMapping(value = "/pageQuery")
public R<PageResult<List<ClusterTenantVO>>> pageQuery(@RequestParam("clusterId") Long clusterId,
                                                      @RequestParam("name") String tenantName,
                                                      @RequestParam("pageSize") int pageSize,
                                                      @RequestParam("currentPage") int currentPage) {
    Cluster cluster = clusterService.getCluster(clusterId);
    if (cluster == null) {
        throw new RdosDefineException(ErrorCode.CANT_NOT_FIND_CLUSTER);
    }
    tenantName = tenantName == null ? "" : tenantName;
    return R.ok(tenantService.pageQuery(clusterId, tenantName, pageSize, currentPage));
}
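A client-side sketch of the same request (the host, port, and path are assumptions; the controller may sit under a class-level request mapping not shown here). It sends the form-encoded POST the @RequestParam bindings above expect:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PageQueryDemo {
    public static void main(String[] args) throws Exception {
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8090/pageQuery")) // host/port/prefix assumed
                .header("Content-Type", "application/x-www-form-urlencoded")
                .POST(HttpRequest.BodyPublishers.ofString("clusterId=1&name=&pageSize=20&currentPage=1"))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // JSON-serialized R<PageResult<List<ClusterTenantVO>>>
    }
}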