Use of com.webank.wedatasphere.qualitis.exception.DataSourceMoveException in project Qualitis by WeBankFinTech.
The class OuterExecutionServiceImpl, method checkDatasource.
private void checkDatasource(Rule currentRule, String userName, StringBuffer partition, List<Map<String, String>> mappingCols, String nodeName, String clusterName, Map<Long, Map> dataSourceMysqlConnect) throws UnExpectedRequestException, MetaDataAcquireFailedException, DataSourceOverSizeException, DataSourceMoveException, BothNullDatasourceException, LeftNullDatasourceException, RightNullDatasourceException {
    // For multi-source rules, check the tables' size before submitting.
    List<Double> datasourceSizeList = new ArrayList<>(currentRule.getRuleDataSources().size());
    for (RuleDataSource ruleDataSource : currentRule.getRuleDataSources()) {
        Map<String, String> mappingCol = null;
        if (ORIGINAL_INDEX.equals(ruleDataSource.getDatasourceIndex())) {
            continue;
        }
        if (ruleDataSource.getDatasourceIndex() != null && mappingCols.get(ruleDataSource.getDatasourceIndex()).size() > 0) {
            mappingCol = mappingCols.get(ruleDataSource.getDatasourceIndex());
        }
        if (ruleDataSource.getLinkisDataSourceId() != null) {
            LOGGER.info("Start to solve relationship datasource info.");
            checkRdmsSqlMetaInfo(StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName(), userName, ruleDataSource, mappingCol);
            GeneralResponse<Map> dataSourceInfoDetail = metaDataClient.getDataSourceInfoDetail(StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName(), userName, ruleDataSource.getLinkisDataSourceId(), ruleDataSource.getLinkisDataSourceVersionId());
            GeneralResponse<Map> dataSourceConnectParams = metaDataClient.getDataSourceConnectParams(StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName(), userName, ruleDataSource.getLinkisDataSourceId(), ruleDataSource.getLinkisDataSourceVersionId());
            Map connectParamsReal = (Map) dataSourceConnectParams.getData().get("connectParams");
            if (connectParamsReal.size() == 0) {
                throw new UnExpectedRequestException("{&THE_DATASOURCE_IS_NOT_DEPLOYED}");
            }
            Map connectParams = (Map) ((Map) dataSourceInfoDetail.getData().get("info")).get("connectParams");
            String dataType = (String) ((Map) ((Map) dataSourceInfoDetail.getData().get("info")).get("dataSourceType")).get("name");
            connectParams.put("dataType", dataType);
            dataSourceMysqlConnect.put(ruleDataSource.getId(), connectParams);
            continue;
        }
        // Parse filter fields.
        List<String> filterFields = getFilterFields(partition.toString());
        if (StringUtils.isNotBlank(ruleDataSource.getDbName()) && !ruleDataSource.getDbName().equals(RuleConstraintEnum.CUSTOM_DATABASE_PREFIS.getValue())) {
            // Get actual fields info.
            List<ColumnInfoDetail> cols = metaDataClient.getColumnInfo(StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName(), ruleDataSource.getDbName(), ruleDataSource.getTableName(), userName);
            if (CollectionUtils.isEmpty(cols)) {
                throw new DataSourceMoveException("Table[" + ruleDataSource.getTableName() + "]. {&RULE_DATASOURCE_BE_MOVED}");
            }
            // Get actual partition fields.
            List<String> partitionFields = cols.stream().filter(ColumnInfoDetail::getPartitionField).map(ColumnInfoDetail::getFieldName).collect(Collectors.toList());
            // Check filter fields.
            boolean partitionTable = CollectionUtils.isNotEmpty(partitionFields);
            if (partitionTable && partition.length() > 0) {
                for (String filter : filterFields) {
                    if (!partitionFields.contains(filter)) {
                        throw new UnExpectedRequestException("Table[" + ruleDataSource.getTableName() + "]. {&THE_CHECK_FIELD_DOES_NOT_EXIST_IN_PARTITIONS}[" + filter + "]");
                    }
                }
                // Check partition size.
                PartitionStatisticsInfo partitionStatisticsInfo = metaDataClient.getPartitionStatisticsInfo(StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName(), ruleDataSource.getDbName(), ruleDataSource.getTableName(), filterToPartitionPath(partition.toString()), userName);
                String fullSize = partitionStatisticsInfo.getPartitionSize();
                ClusterInfo clusterInfo = clusterInfoDao.findByClusterName(StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName());
                if (clusterInfo != null && StringUtils.isNotBlank(clusterInfo.getSkipDataSize()) && StringUtils.isNotBlank(fullSize)) {
                    double number = 0;
                    String unit = "B";
                    if (!"0B".equals(fullSize)) {
                        number = Double.parseDouble(fullSize.split(" ")[0]);
                        unit = fullSize.split(" ")[1];
                    }
                    datasourceSizeList.add(number);
                    String[] skipDataSize = clusterInfo.getSkipDataSize().split(" ");
                    double res = UnitTransfer.alarmconfigToTaskResult(number, skipDataSize[1], unit);
                    LOGGER.info("Check datasource[" + fullSize + "] if or not oversize with system config[" + clusterInfo.getSkipDataSize() + "]");
                    if (res > Double.parseDouble(skipDataSize[0])) {
                        throw new DataSourceOverSizeException("Table[" + ruleDataSource.getTableName() + "]. {&TABLE_IS_OVERSIZE_WITH_SYSTEM_CONFIG}:[" + clusterInfo.getSkipDataSize() + "]");
                    }
                }
            } else {
                // Check table size.
                TableStatisticsInfo tableStatisticsInfo = metaDataClient.getTableStatisticsInfo(StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName(), ruleDataSource.getDbName(), ruleDataSource.getTableName(), userName);
                String fullSize = tableStatisticsInfo.getTableSize();
                if (NULL_TABLE_SIZE.equals(fullSize)) {
                    throw new DataSourceMoveException("Table[" + ruleDataSource.getTableName() + "] {&RULE_DATASOURCE_BE_MOVED}");
                }
                ClusterInfo clusterInfo = clusterInfoDao.findByClusterName(StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName());
                if (clusterInfo != null && StringUtils.isNotBlank(clusterInfo.getSkipDataSize()) && StringUtils.isNotBlank(fullSize)) {
                    LOGGER.info("Check datasource[" + fullSize + "] if or not oversize with system config[" + clusterInfo.getSkipDataSize() + "]");
                    double number = 0;
                    String unit = "B";
                    if (!"0B".equals(fullSize)) {
                        number = Double.parseDouble(fullSize.split(" ")[0]);
                        unit = fullSize.split(" ")[1];
                    }
                    datasourceSizeList.add(number);
                    String[] skipDataSize = clusterInfo.getSkipDataSize().split(" ");
                    double res = UnitTransfer.alarmconfigToTaskResult(number, skipDataSize[1], unit);
                    if (res > Double.parseDouble(skipDataSize[0])) {
                        throw new DataSourceOverSizeException("Table[" + ruleDataSource.getTableName() + "] is oversize with system config:[" + clusterInfo.getSkipDataSize() + "]");
                    }
                }
                partition.delete(0, partition.length());
            }
            if (currentRule.getRuleType().equals(RuleTypeEnum.CUSTOM_RULE.getCode())) {
                continue;
            }
            if (!metaDataClient.fieldExist(ruleDataSource.getColName(), cols, mappingCol)) {
                throw new DataSourceMoveException("Table[" + ruleDataSource.getTableName() + "] {&RULE_DATASOURCE_BE_MOVED}");
            }
        } else if (StringUtils.isNotBlank(currentRule.getCsId())) {
            checkDatasourceInContextService(ruleDataSource, mappingCol, clusterName, userName, nodeName, currentRule.getCsId());
        }
    }
    if (CollectionUtils.isNotEmpty(datasourceSizeList) && currentRule.getRuleType().equals(RuleTypeEnum.MULTI_TEMPLATE_RULE.getCode())) {
        double left = datasourceSizeList.get(0);
        double right = datasourceSizeList.get(1);
        LOGGER.info("Current multi source rule left table size number[{}], right table size number[{}]", left, right);
        if (left == 0 && right == 0) {
            throw new BothNullDatasourceException("{&BOTH_SIDE_ARE_NULL}");
        } else if (left == 0) {
            throw new LeftNullDatasourceException("{&ONE_SIDE_ARE_NULL}");
        } else if (right == 0) {
            throw new RightNullDatasourceException("{&ONE_SIDE_ARE_NULL}");
        }
    }
}
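The oversize check above hands the reported size string (for example "1.5 GB") and the cluster's configured skipDataSize threshold to UnitTransfer.alarmconfigToTaskResult, whose implementation is not shown on this page. The standalone sketch below only illustrates the general idea of such a unit-normalizing comparison; the class name, method names, and unit table are assumptions made for illustration and are not Qualitis code.

import java.util.Arrays;
import java.util.List;

// Hypothetical illustration of a size-threshold check similar to the one in checkDatasource.
// All names here are invented for this sketch; they do not exist in Qualitis.
public class SizeThresholdSketch {

    private static final List<String> UNITS = Arrays.asList("B", "KB", "MB", "GB", "TB");

    // Convert a "<number> <unit>" string (e.g. "1.5 GB") to bytes; "0B" is the special empty case.
    static double toBytes(String size) {
        if ("0B".equals(size)) {
            return 0;
        }
        String[] parts = size.split(" ");
        double number = Double.parseDouble(parts[0]);
        int exponent = UNITS.indexOf(parts[1]);
        return number * Math.pow(1024, exponent);
    }

    // True when the actual size exceeds the configured skip threshold.
    static boolean oversize(String actualSize, String skipDataSize) {
        return toBytes(actualSize) > toBytes(skipDataSize);
    }

    public static void main(String[] args) {
        System.out.println(oversize("1.5 GB", "10 GB")); // false: below the threshold
        System.out.println(oversize("2 TB", "10 GB"));   // true: would trigger DataSourceOverSizeException
    }
}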
Use of com.webank.wedatasphere.qualitis.exception.DataSourceMoveException in project Qualitis by WeBankFinTech.
The class OuterExecutionServiceImpl, method submitRules.
/**
 * Generate jobs by rules and submit jobs to linkis.
 *
 * @param ruleIds IDs of the rules to execute
 * @param partition partition filter applied to the rule data sources
 * @param createUser user who created the execution request
 * @param executionUser user under which the jobs are executed
 * @param nodeName workflow node name
 * @param projectId ID of the project the rules belong to
 * @param ruleGroupId ID of the rule group
 * @param startupParam startup parameters recorded on the application
 * @param clusterName cluster to submit the jobs to
 * @param setFlag set flag recorded on the application
 * @param execParams execution parameters as key-value pairs
 * @param execParamStr execution parameters in string form
 * @param runDate run date of the execution
 * @param invokeCode code identifying the invocation source
 * @return a GeneralResponse wrapping the dispatched application and task info, or an error code and message
 */
@Override
@Transactional(propagation = Propagation.REQUIRES_NEW, rollbackFor = { RuntimeException.class, UnExpectedRequestException.class })
public GeneralResponse<?> submitRules(List<Long> ruleIds, StringBuffer partition, String createUser, String executionUser, String nodeName, Long projectId, Long ruleGroupId, String startupParam, String clusterName, String setFlag, Map<String, String> execParams, String execParamStr, StringBuffer runDate, Integer invokeCode) {
    // Get rule entity.
    List<Rule> rules = ruleDao.findByIds(ruleIds);
    // Init application basic info.
    Date date = new Date();
    Application newApplication = outerExecutionService.generateApplicationInfo(createUser, executionUser, date, invokeCode);
    newApplication.setProjectId(projectId);
    newApplication.setRuleGroupId(ruleGroupId);
    newApplication.setPartition(partition.toString());
    newApplication.setClusterName(clusterName);
    newApplication.setStartupParam(startupParam);
    newApplication.setExecutionParam(execParamStr);
    newApplication.setSetFlag(setFlag);
    ApplicationTaskSimpleResponse response;
    try {
        response = outerExecutionService.commonExecution(rules, partition, executionUser, nodeName, startupParam, clusterName, setFlag, execParams, newApplication, date, runDate);
    } catch (BothNullDatasourceException e) {
        catchAndSolve(e, ApplicationCommentEnum.BOTH_NULL_ISSUES.getCode(), ApplicationStatusEnum.FINISHED.getCode(), rules, newApplication);
        return new GeneralResponse<>("500", e.getMessage(), null);
    } catch (LeftNullDatasourceException e) {
        catchAndSolve(e, ApplicationCommentEnum.LEFT_NULL_DATA_ISSUES.getCode(), ApplicationStatusEnum.NOT_PASS.getCode(), rules, newApplication);
        return new GeneralResponse<>("500", e.getMessage(), null);
    } catch (RightNullDatasourceException e) {
        catchAndSolve(e, ApplicationCommentEnum.RIGHT_NULL_DATA_ISSUES.getCode(), ApplicationStatusEnum.NOT_PASS.getCode(), rules, newApplication);
        return new GeneralResponse<>("500", e.getMessage(), null);
    } catch (MetaDataAcquireFailedException e) {
        catchAndSolve(e, ApplicationCommentEnum.METADATA_ISSUES.getCode(), ApplicationStatusEnum.TASK_SUBMIT_FAILED.getCode(), rules, newApplication);
        return new GeneralResponse<>("500", "{&THE_CHECK_FIELD_HAS_BEEN_MODIFIED}", null);
    } catch (DataSourceMoveException e) {
        catchAndSolve(e, ApplicationCommentEnum.PERMISSION_ISSUES.getCode(), ApplicationStatusEnum.TASK_SUBMIT_FAILED.getCode(), rules, newApplication);
        return new GeneralResponse<>("500", e.getMessage(), null);
    } catch (DataSourceOverSizeException e) {
        catchAndSolve(e, ApplicationCommentEnum.PERMISSION_ISSUES.getCode(), ApplicationStatusEnum.TASK_SUBMIT_FAILED.getCode(), rules, newApplication);
        return new GeneralResponse<>("500", e.getMessage(), null);
    } catch (ParseException e) {
        catchAndSolve(e, ApplicationCommentEnum.GRAMMAR_ISSUES.getCode(), ApplicationStatusEnum.TASK_SUBMIT_FAILED.getCode(), rules, newApplication);
        LOGGER.error(e.getMessage(), e);
        return new GeneralResponse<>("500", "{&PARSE_SQL_FAILED_PLEASE_CHECKOUT_YOUR_CUSTOM_SQL}", null);
    } catch (Exception e) {
        catchAndSolve(e, ApplicationCommentEnum.UNKNOWN_ERROR_ISSUES.getCode(), ApplicationStatusEnum.TASK_SUBMIT_FAILED.getCode(), rules, newApplication);
        return new GeneralResponse<>("500", e.getMessage(), null);
    }
    LOGGER.info("Succeed to dispatch task. response: {}", response);
    return new GeneralResponse<>("200", "{&SUCCEED_TO_DISPATCH_TASK}", response);
}
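Note how submitRules never lets the datasource exceptions thrown by commonExecution and checkDatasource escape: each one is recorded on the application through catchAndSolve and then flattened into a "500" GeneralResponse for the caller. The self-contained sketch below mirrors that catch-classify-wrap pattern with stand-in types; every class and method in it is invented for illustration and is not part of Qualitis.

// Self-contained sketch of the catch-classify-wrap pattern used in submitRules.
// All types below are stand-ins made up for this example; they are not Qualitis classes.
public class SubmitSketch {

    static class SimpleResponse {
        final String code;
        final String message;
        SimpleResponse(String code, String message) {
            this.code = code;
            this.message = message;
        }
    }

    static class DataSourceMovedException extends Exception {
        DataSourceMovedException(String msg) { super(msg); }
    }

    static class DataSourceOversizeException extends Exception {
        DataSourceOversizeException(String msg) { super(msg); }
    }

    // Pretend job submission that may fail for datasource reasons.
    static void submit(String table) throws DataSourceMovedException, DataSourceOversizeException {
        if (table.isEmpty()) {
            throw new DataSourceMovedException("table has been moved or dropped");
        }
    }

    static SimpleResponse submitAndWrap(String table) {
        try {
            submit(table);
        } catch (DataSourceMovedException | DataSourceOversizeException e) {
            // Record the failure before answering the caller, analogous to catchAndSolve(...) above,
            // then surface it as an error response instead of rethrowing.
            return new SimpleResponse("500", e.getMessage());
        }
        return new SimpleResponse("200", "succeed to dispatch task");
    }

    public static void main(String[] args) {
        System.out.println(submitAndWrap("my_db.my_table").code); // 200
        System.out.println(submitAndWrap("").code);               // 500
    }
}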