Use of com.dtstack.taier.develop.dto.devlop.ExecuteSqlParseVO in project Taier by DTStack.
The snippet below is the batchExecuteSql method of the BatchSparkSqlExeService class.
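In outline, the method walks the pre-parsed statements held in the ExecuteContent: simple SELECTs are executed immediately, CREATE / CREATE LIKE and other DDL statements run directly against the tenant engine, while CREATE_AS, INSERT, INSERT OVERWRITE, and remaining query statements are collected and submitted to the engine as a single batch job. The returned ExecuteSqlParseVO carries that job id together with one SqlResultVO per statement.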
@Override
public ExecuteSqlParseVO batchExecuteSql(ExecuteContent executeContent) {
    String preJobId = executeContent.getPreJobId();
    Integer taskType = executeContent.getTaskType();
    String currDb = executeContent.getParseResult().getCurrentDb();
    Long tenantId = executeContent.getTenantId();
    Long userId = executeContent.getUserId();
    Long taskId = executeContent.getTaskId();
    List<ParseResult> parseResultList = executeContent.getParseResultList();
    ExecuteResultVO<List<Object>> result = new ExecuteResultVO<>();
    boolean useSelfFunction = batchFunctionService.validContainSelfFunction(executeContent.getSql(), tenantId, null, executeContent.getTaskType());
    ExecuteSqlParseVO executeSqlParseVO = new ExecuteSqlParseVO();
    List<SqlResultVO> sqlIdList = Lists.newArrayList();
    List<String> sqlList = Lists.newArrayList();
    BuildSqlVO buildSqlVO = new BuildSqlVO();
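    // Route each parsed statement by its SQL type: simple queries run at once,
    // DML/CTAS statements are deferred into one engine batch, DDL runs directly.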
    for (ParseResult parseResult : parseResultList) {
        // simple query
        if (Objects.nonNull(parseResult.getStandardSql()) && isSimpleQuery(parseResult.getStandardSql()) && !useSelfFunction) {
            result = simpleQuery(tenantId, parseResult, currDb, userId, EScheduleJobType.SPARK_SQL);
            if (!result.getContinue()) {
                SqlResultVO<List<Object>> sqlResultVO = new SqlResultVO<>();
                sqlResultVO.setSqlId(result.getJobId());
                sqlResultVO.setType(SqlTypeEnums.SELECT_DATA.getType());
                sqlIdList.add(sqlResultVO);
                continue;
            }
        }
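        // CREATE TABLE ... AS SELECT: build the statement, record its sqlId, and defer it to the engine batch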
        if (SqlType.CREATE_AS.equals(parseResult.getSqlType())) {
            buildSqlVO = batchHadoopSelectSqlService.getSqlIdAndSql(tenantId, parseResult, userId, currDb.toLowerCase(), true, taskId, taskType);
            SqlResultVO<List<Object>> sqlResultVO = new SqlResultVO<>();
            sqlResultVO.setSqlId(buildSqlVO.getJobId());
            sqlResultVO.setType(SqlTypeEnums.SELECT_DATA.getType());
            sqlIdList.add(sqlResultVO);
            sqlList.add(buildSqlVO.getSql());
        } else if (SqlType.INSERT.equals(parseResult.getSqlType()) || SqlType.INSERT_OVERWRITE.equals(parseResult.getSqlType()) || SqlType.QUERY.equals(parseResult.getSqlType()) || useSelfFunction) {
            buildSqlVO = batchHadoopSelectSqlService.getSqlIdAndSql(tenantId, parseResult, userId, currDb.toLowerCase(), false, taskId, taskType);
            // neither insert nor insert overwrite returns a result set
            SqlResultVO sqlResultVO = new SqlResultVO();
            sqlResultVO.setSqlId(buildSqlVO.getJobId());
            sqlResultVO.setType(SqlTypeEnums.SELECT_DATA.getType());
            sqlIdList.add(sqlResultVO);
            sqlList.add(buildSqlVO.getSql());
        } else {
            if (!executeContent.isExecuteSqlLater()) {
                TenantComponent tenantEngine = developTenantComponentService.getByTenantAndEngineType(executeContent.getTenantId(), executeContent.getTaskType());
                Preconditions.checkNotNull(tenantEngine, "engine must not be null");
                SqlResultVO<List<Object>> sqlResultVO = new SqlResultVO<>();
                sqlResultVO.setSqlText(parseResult.getStandardSql());
                sqlResultVO.setType(SqlTypeEnums.NO_SELECT_DATA.getType());
                if (SqlType.CREATE.equals(parseResult.getSqlType()) || SqlType.CREATE_LIKE.equals(parseResult.getSqlType())) {
                    executeCreateTableSql(parseResult, tenantId, tenantEngine.getComponentIdentity().toLowerCase(), EScheduleJobType.SPARK_SQL);
                    sqlIdList.add(sqlResultVO);
                } else {
                    exeSqlDirect(executeContent, tenantId, parseResult, result, tenantEngine, DataSourceType.Spark);
                    sqlResultVO.setResult(result.getResult());
                    sqlIdList.add(sqlResultVO);
                }
            }
        }
    }
    String sqlToEngine = StringUtils.join(sqlList, ";");
    // everything except simple queries is sent to the engine as one batch
    String jobId = batchHadoopSelectSqlService.sendSqlTask(tenantId, sqlToEngine, buildSqlVO.getTaskParam(), preJobId, taskId, executeContent.getTaskType());
    // record the job id that was sent to the engine
    selectSqlService.addSelectSql(jobId, StringUtils.EMPTY, 0, tenantId, sqlToEngine, userId, StringUtils.EMPTY, taskType);
    sqlIdList.sort(Comparator.comparingInt(SqlResultVO::getType));
    executeSqlParseVO.setJobId(jobId);
    executeSqlParseVO.setSqlIdList(sqlIdList);
    return executeSqlParseVO;
}
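For orientation, here is a minimal caller sketch. It assumes the ExecuteContent setters mirror the getters read above and that ExecuteSqlParseVO exposes getJobId/getSqlIdList matching the setters in the method body; these accessors are not verified against the Taier source. In the real flow an upstream SQL-parse step populates parseResult and parseResultList before this method is invoked, which is omitted here.

// Hypothetical caller; accessors are assumed from the ones used in
// batchExecuteSql above and may differ in the actual project.
ExecuteContent content = new ExecuteContent();
content.setTenantId(1L);
content.setUserId(1L);
content.setTaskId(100L);
content.setTaskType(EScheduleJobType.SPARK_SQL.getType());
content.setSql("select id, name from demo_table");
// an upstream parse step must fill parseResult/parseResultList here

ExecuteSqlParseVO parseVO = batchSparkSqlExeService.batchExecuteSql(content);
String engineJobId = parseVO.getJobId(); // id of the batched engine job
for (SqlResultVO sqlResultVO : parseVO.getSqlIdList()) {
    // one entry per statement, sorted by result type
    System.out.println(sqlResultVO.getSqlId() + " -> " + sqlResultVO.getType());
}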