Use of com.webank.wedatasphere.qualitis.entity.ClusterInfo in project Qualitis by WeBankFinTech.
The class ApplicationServiceImpl, method uploadDataSourceAnalysisResult.
@Override
public GeneralResponse<?> uploadDataSourceAnalysisResult(UploadResultRequest request) throws UnExpectedRequestException, IOException {
    // Login user permission.
    Long userId = HttpUtils.getUserId(httpServletRequest);
    User user = userDao.findById(userId);
    if (user == null) {
        throw new UnExpectedRequestException("User {&DOES_NOT_EXIST}");
    }
    UploadResultRequest.checkRequest(request);
    List<ApplicationClusterResponse> responses = (List<ApplicationClusterResponse>) getDataSource(new PageRequest(0, Integer.MAX_VALUE)).getData();
    List<String> tables = responses.stream()
        .filter(cluster -> cluster.getClusterName().equals(request.getClusterName()))
        .map(ApplicationClusterResponse::getDatabase)
        .flatMap(database -> database.stream())
        .filter(databaseResponse -> databaseResponse.getDatabaseName().equals(request.getDatabaseName()))
        .map(ApplicationDatabaseResponse::getTable)
        .flatMap(table -> table.stream())
        .distinct()
        .collect(Collectors.toList());
    if (StringUtils.isNotBlank(request.getTableName())) {
        tables.clear();
        tables.add(request.getTableName());
    }
    LOGGER.info("Start to write excel");
    StringBuffer fileName = new StringBuffer();
    fileName.append(linkisConfig.getUploadTmpPath()).append(File.separator).append(user.getUserName()).append("_")
        .append(request.getClusterName()).append("_").append(request.getDatabaseName()).append("_")
        .append(UUID.randomUUID().toString()).append(ExcelTypeEnum.XLSX.getValue());
    File tmpFile = new File(fileName.toString());
    writeExcelFile(tmpFile, tables, request);
    // Upload to HDFS
    ClusterInfo clusterInfo = clusterInfoDao.findByClusterName(request.getClusterName());
    if (clusterInfo == null) {
        throw new UnExpectedRequestException("Cluster info " + "[" + request.getClusterName() + "]" + "{&DOES_NOT_EXIST}");
    }
    // Build the Linkis upload URL and send the multipart request.
    String url = UriBuilder.fromUri(clusterInfo.getLinkisAddress()).path(linkisConfig.getPrefix()).path(linkisConfig.getUpload()).toString();
    CloseableHttpClient httpclient = HttpClients.createDefault();
    HttpPost httppost = new HttpPost(url);
    MultipartEntityBuilder multipartEntityBuilder = MultipartEntityBuilder.create();
    multipartEntityBuilder.setContentType(ContentType.MULTIPART_FORM_DATA);
    multipartEntityBuilder.setCharset(Charset.forName("UTF-8"));
    if (clusterInfo.getClusterType().endsWith(LINKIS_ONE_VERSION)) {
        multipartEntityBuilder.addTextBody("path", linkisConfig.getUploadPrefix() + LINKIS_ONE_UPLOAD_PREFIX + request.getHdfsPath());
    } else {
        multipartEntityBuilder.addTextBody("path", linkisConfig.getUploadPrefix() + request.getHdfsPath());
    }
    multipartEntityBuilder.addBinaryBody("file", tmpFile);
    httppost.addHeader("Token-User", user.getUserName());
    httppost.addHeader("Token-Code", clusterInfo.getLinkisToken());
    httppost.setEntity(multipartEntityBuilder.build());
    CloseableHttpResponse response = null;
    try {
        response = httpclient.execute(httppost);
    } catch (IOException e) {
        throw new UnExpectedRequestException("{&FAILED_TO_CALL_UPLOAD_API}");
    }
    int code = response.getStatusLine().getStatusCode();
    response.close();
    if (code != HttpStatus.SC_OK) {
        throw new UnExpectedRequestException("{&FAILED_TO_CALL_UPLOAD_API}");
    }
    boolean tmpDeleteFlag = tmpFile.delete();
    LOGGER.info("Delete tmp excel: " + tmpDeleteFlag);
    return new GeneralResponse<>(code + "", "{&SUCCESS_TO_UPLOAD_ANALYSIS_EXCEL}", code);
}
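The multipart upload step above can be exercised on its own. Below is a minimal sketch, assuming Apache HttpClient 4.x with httpmime on the classpath; the gateway URL, token, target path, and local file are hypothetical stand-ins for the values the service derives from ClusterInfo and LinkisConfig.

import java.io.File;
import java.nio.charset.StandardCharsets;
import org.apache.http.HttpStatus;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.mime.MultipartEntityBuilder;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

public class LinkisUploadSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical values; in the service they come from ClusterInfo and LinkisConfig.
        String url = "http://linkis-gateway:9001/api/rest_j/v1/filesystem/upload";
        File excel = new File("/tmp/qualitis_analysis.xlsx");

        HttpPost post = new HttpPost(url);
        post.addHeader("Token-User", "qualitis_user");
        post.addHeader("Token-Code", "linkis_token");
        post.setEntity(MultipartEntityBuilder.create()
                .setContentType(ContentType.MULTIPART_FORM_DATA)
                .setCharset(StandardCharsets.UTF_8)
                .addTextBody("path", "hdfs:///tmp/qualitis/analysis_result.xlsx")
                .addBinaryBody("file", excel)
                .build());

        // try-with-resources closes both the client and the response, even on failure.
        try (CloseableHttpClient client = HttpClients.createDefault();
             CloseableHttpResponse response = client.execute(post)) {
            int code = response.getStatusLine().getStatusCode();
            if (code != HttpStatus.SC_OK) {
                throw new IllegalStateException("Upload failed with HTTP status " + code);
            }
        }
    }
}

Using try-with-resources here is a sketch-level convenience; the service above manages the response lifecycle manually.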
Use of com.webank.wedatasphere.qualitis.entity.ClusterInfo in project Qualitis by WeBankFinTech.
The class JobServiceImpl, method getTaskLog.
@Override
public GeneralResponse<?> getTaskLog(Long taskId, String clusterName) throws UnExpectedRequestException {
    Task task = taskDao.findById(taskId);
    if (task == null) {
        throw new UnExpectedRequestException("{&JOB_ID_DOES_NOT_EXIST}");
    }
    ClusterInfo clusterInfo = clusterInfoDao.findByClusterName(clusterName);
    if (clusterInfo == null) {
        throw new UnExpectedRequestException("Cluster info {&DOES_NOT_EXIST}");
    }
    LogResult logResult;
    String proxyUser = task.getTaskProxyUser();
    try {
        logResult = monitorManager.getTaskPartialLog(task.getTaskRemoteId(), 0,
            StringUtils.isNotBlank(proxyUser) ? proxyUser : task.getApplication().getExecuteUser(),
            clusterInfo.getLinkisAddress(), clusterName);
    } catch (LogPartialException | ClusterInfoNotConfigException e) {
        throw new UnExpectedRequestException(e.getMessage());
    }
    LOGGER.info("Succeed to get task log, task_id: {}, cluster_id: {}", taskId, clusterName);
    return new GeneralResponse<>("200", "{&SUCCEED_TO_GET_TASK_LOG}", logResult.getLog());
}
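A minimal caller-side sketch, assuming a hypothetical task id and cluster name; jobService refers to the JobServiceImpl shown above, and the response payload is whatever LogResult.getLog() returned.

// Hypothetical identifiers; jobService is the JobServiceImpl shown above.
try {
    GeneralResponse<?> response = jobService.getTaskLog(10001L, "BDAP_DEV");
    // The data field carries the log content fetched from Linkis.
    Object logContent = response.getData();
    LOGGER.info("Fetched task log, {} characters", String.valueOf(logContent).length());
} catch (UnExpectedRequestException e) {
    // Raised when the task, the cluster, or the remote log cannot be found.
    LOGGER.error("Failed to fetch task log: {}", e.getMessage());
}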
Use of com.webank.wedatasphere.qualitis.entity.ClusterInfo in project Qualitis by WeBankFinTech.
The class ClusterInfoServiceImpl, method deleteClusterInfo.
@Override
@Transactional(rollbackFor = { RuntimeException.class, UnExpectedRequestException.class })
public GeneralResponse<?> deleteClusterInfo(DeleteClusterInfoRequest request) throws UnExpectedRequestException {
    // Check request parameters.
    checkRequest(request);
    // Find the ClusterInfo by id; throw an exception if it does not exist.
    Long clusterInfoId = request.getClusterInfoId();
    ClusterInfo clusterInfoInDb = clusterInfoDao.findById(clusterInfoId);
    if (clusterInfoInDb == null) {
        throw new UnExpectedRequestException("id {&DOES_NOT_EXIST}");
    }
    // Delete the clusterInfo.
    clusterInfoDao.deleteClusterInfo(clusterInfoInDb);
    LOGGER.info("Succeed to delete cluster_info. id: {}", request.getClusterInfoId());
    return new GeneralResponse<>("200", "{&DELETE_CLUSTER_INFO_SUCCESSFULLY}", null);
}
Use of com.webank.wedatasphere.qualitis.entity.ClusterInfo in project Qualitis by WeBankFinTech.
The class ClusterInfoServiceImpl, method modifyClusterInfo.
@Override
@Transactional(rollbackFor = { RuntimeException.class, UnExpectedRequestException.class })
public GeneralResponse<?> modifyClusterInfo(ModifyClusterInfoRequest request) throws UnExpectedRequestException {
    // Check request parameters.
    checkRequest(request);
    // Find the clusterInfo by id; throw an exception if it does not exist.
    Long id = request.getClusterInfoId();
    ClusterInfo clusterInfoInDb = clusterInfoDao.findById(id);
    if (clusterInfoInDb == null) {
        throw new UnExpectedRequestException("id {&DOES_NOT_EXIST}");
    }
    LOGGER.info("Succeed to find cluster_info. cluster_info: {}", clusterInfoInDb);
    // Update the clusterInfo fields.
    String clusterName = request.getClusterName();
    String clusterType = request.getClusterType();
    clusterInfoInDb.setClusterName(clusterName);
    clusterInfoInDb.setClusterType(clusterType);
    clusterInfoInDb.setLinkisAddress(request.getLinkisAddress());
    clusterInfoInDb.setLinkisToken(request.getLinkisToken());
    // Save the clusterInfo.
    ClusterInfo savedClusterInfo = clusterInfoDao.saveClusterInfo(clusterInfoInDb);
    LOGGER.info("Succeed to modify cluster_info. cluster_info: {}", savedClusterInfo);
    return new GeneralResponse<>("200", "{&MODIFY_CLUSTER_INFO_SUCCESSFULLY}", null);
}
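A minimal caller-side sketch of the modify flow. The no-arg constructor and setter names on ModifyClusterInfoRequest are assumed to mirror the getters used above, and all field values are hypothetical.

// Assumed setters mirroring the getters used in modifyClusterInfo above; values are hypothetical.
ModifyClusterInfoRequest request = new ModifyClusterInfoRequest();
request.setClusterInfoId(1L);
request.setClusterName("BDAP_DEV");
request.setClusterType("LINKIS_1_0");
request.setLinkisAddress("http://linkis-gateway:9001");
request.setLinkisToken("QUALITIS-AUTH");
// clusterInfoService is the ClusterInfoServiceImpl shown above.
GeneralResponse<?> response = clusterInfoService.modifyClusterInfo(request);
LOGGER.info("Modify result: {}", response);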
Use of com.webank.wedatasphere.qualitis.entity.ClusterInfo in project Qualitis by WeBankFinTech.
The class OuterExecutionServiceImpl, method checkDatasource.
private void checkDatasource(Rule currentRule, String userName, StringBuffer partition, List<Map<String, String>> mappingCols, String nodeName,
    String clusterName, Map<Long, Map> dataSourceMysqlConnect) throws UnExpectedRequestException, MetaDataAcquireFailedException,
    DataSourceOverSizeException, DataSourceMoveException, BothNullDatasourceException, LeftNullDatasourceException, RightNullDatasourceException {
    // For multi source rule to check tables' size before submit.
    List<Double> datasourceSizeList = new ArrayList<>(currentRule.getRuleDataSources().size());
    for (RuleDataSource ruleDataSource : currentRule.getRuleDataSources()) {
        Map<String, String> mappingCol = null;
        if (ORIGINAL_INDEX.equals(ruleDataSource.getDatasourceIndex())) {
            continue;
        }
        if (ruleDataSource.getDatasourceIndex() != null && mappingCols.get(ruleDataSource.getDatasourceIndex()).size() > 0) {
            mappingCol = mappingCols.get(ruleDataSource.getDatasourceIndex());
        }
        if (ruleDataSource.getLinkisDataSourceId() != null) {
            LOGGER.info("Start to solve relationship datasource info.");
            checkRdmsSqlMetaInfo(StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName(), userName, ruleDataSource, mappingCol);
            GeneralResponse<Map> dataSourceInfoDetail = metaDataClient.getDataSourceInfoDetail(
                StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName(), userName,
                ruleDataSource.getLinkisDataSourceId(), ruleDataSource.getLinkisDataSourceVersionId());
            GeneralResponse<Map> dataSourceConnectParams = metaDataClient.getDataSourceConnectParams(
                StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName(), userName,
                ruleDataSource.getLinkisDataSourceId(), ruleDataSource.getLinkisDataSourceVersionId());
            Map connectParamsReal = (Map) dataSourceConnectParams.getData().get("connectParams");
            if (connectParamsReal.size() == 0) {
                throw new UnExpectedRequestException("{&THE_DATASOURCE_IS_NOT_DEPLOYED}");
            }
            Map connectParams = (Map) ((Map) dataSourceInfoDetail.getData().get("info")).get("connectParams");
            String dataType = (String) ((Map) ((Map) dataSourceInfoDetail.getData().get("info")).get("dataSourceType")).get("name");
            connectParams.put("dataType", dataType);
            dataSourceMysqlConnect.put(ruleDataSource.getId(), connectParams);
            continue;
        }
        // Parse filter fields.
        List<String> filterFields = getFilterFields(partition.toString());
        if (StringUtils.isNotBlank(ruleDataSource.getDbName()) && !ruleDataSource.getDbName().equals(RuleConstraintEnum.CUSTOM_DATABASE_PREFIS.getValue())) {
            // Get actual fields info.
            List<ColumnInfoDetail> cols = metaDataClient.getColumnInfo(
                StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName(),
                ruleDataSource.getDbName(), ruleDataSource.getTableName(), userName);
            if (CollectionUtils.isEmpty(cols)) {
                throw new DataSourceMoveException("Table[" + ruleDataSource.getTableName() + "]. {&RULE_DATASOURCE_BE_MOVED}");
            }
            // Get actual partition fields.
            List<String> partitionFields = cols.stream().filter(ColumnInfoDetail::getPartitionField).map(ColumnInfoDetail::getFieldName).collect(Collectors.toList());
            // Check filter fields.
            boolean partitionTable = CollectionUtils.isNotEmpty(partitionFields);
            if (partitionTable && partition.length() > 0) {
                for (String filter : filterFields) {
                    if (!partitionFields.contains(filter)) {
                        throw new UnExpectedRequestException("Table[" + ruleDataSource.getTableName() + "]. {&THE_CHECK_FIELD_DOES_NOT_EXIST_IN_PARTITIONS}[" + filter + "]");
                    }
                }
                // Check partition size.
                PartitionStatisticsInfo partitionStatisticsInfo = metaDataClient.getPartitionStatisticsInfo(
                    StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName(),
                    ruleDataSource.getDbName(), ruleDataSource.getTableName(), filterToPartitionPath(partition.toString()), userName);
                String fullSize = partitionStatisticsInfo.getPartitionSize();
                ClusterInfo clusterInfo = clusterInfoDao.findByClusterName(StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName());
                if (clusterInfo != null && StringUtils.isNotBlank(clusterInfo.getSkipDataSize()) && StringUtils.isNotBlank(fullSize)) {
                    double number = 0;
                    String unit = "B";
                    if (!"0B".equals(fullSize)) {
                        number = Double.parseDouble(fullSize.split(" ")[0]);
                        unit = fullSize.split(" ")[1];
                    }
                    datasourceSizeList.add(number);
                    String[] skipDataSize = clusterInfo.getSkipDataSize().split(" ");
                    double res = UnitTransfer.alarmconfigToTaskResult(number, skipDataSize[1], unit);
                    LOGGER.info("Check datasource[" + fullSize + "] if or not oversize with system config[" + clusterInfo.getSkipDataSize() + "]");
                    if (res > Double.parseDouble(skipDataSize[0])) {
                        throw new DataSourceOverSizeException("Table[" + ruleDataSource.getTableName() + "]. {&TABLE_IS_OVERSIZE_WITH_SYSTEM_CONFIG}:[" + clusterInfo.getSkipDataSize() + "]");
                    }
                }
            } else {
                // Check table size.
                TableStatisticsInfo tableStatisticsInfo = metaDataClient.getTableStatisticsInfo(
                    StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName(),
                    ruleDataSource.getDbName(), ruleDataSource.getTableName(), userName);
                String fullSize = tableStatisticsInfo.getTableSize();
                if (NULL_TABLE_SIZE.equals(fullSize)) {
                    throw new DataSourceMoveException("Table[" + ruleDataSource.getTableName() + "] {&RULE_DATASOURCE_BE_MOVED}");
                }
                ClusterInfo clusterInfo = clusterInfoDao.findByClusterName(StringUtils.isNotBlank(clusterName) ? clusterName : ruleDataSource.getClusterName());
                if (clusterInfo != null && StringUtils.isNotBlank(clusterInfo.getSkipDataSize()) && StringUtils.isNotBlank(fullSize)) {
                    LOGGER.info("Check datasource[" + fullSize + "] if or not oversize with system config[" + clusterInfo.getSkipDataSize() + "]");
                    double number = 0;
                    String unit = "B";
                    if (!"0B".equals(fullSize)) {
                        number = Double.parseDouble(fullSize.split(" ")[0]);
                        unit = fullSize.split(" ")[1];
                    }
                    datasourceSizeList.add(number);
                    String[] skipDataSize = clusterInfo.getSkipDataSize().split(" ");
                    double res = UnitTransfer.alarmconfigToTaskResult(number, skipDataSize[1], unit);
                    if (res > Double.parseDouble(skipDataSize[0])) {
                        throw new DataSourceOverSizeException("Table[" + ruleDataSource.getTableName() + "] is oversize with system config:[" + clusterInfo.getSkipDataSize() + "]");
                    }
                }
                partition.delete(0, partition.length());
            }
            if (currentRule.getRuleType().equals(RuleTypeEnum.CUSTOM_RULE.getCode())) {
                continue;
            }
            if (!metaDataClient.fieldExist(ruleDataSource.getColName(), cols, mappingCol)) {
                throw new DataSourceMoveException("Table[" + ruleDataSource.getTableName() + "] {&RULE_DATASOURCE_BE_MOVED}");
            }
        } else if (StringUtils.isNotBlank(currentRule.getCsId())) {
            checkDatasourceInContextService(ruleDataSource, mappingCol, clusterName, userName, nodeName, currentRule.getCsId());
        }
    }
    if (CollectionUtils.isNotEmpty(datasourceSizeList) && currentRule.getRuleType().equals(RuleTypeEnum.MULTI_TEMPLATE_RULE.getCode())) {
        double left = datasourceSizeList.get(0);
        double right = datasourceSizeList.get(1);
        LOGGER.info("Current multi source rule left table size number[{}], right table size number[{}]", left, right);
        if (left == 0 && right == 0) {
            throw new BothNullDatasourceException("{&BOTH_SIDE_ARE_NULL}");
        } else if (left == 0) {
            throw new LeftNullDatasourceException("{&ONE_SIDE_ARE_NULL}");
        } else if (right == 0) {
            throw new RightNullDatasourceException("{&ONE_SIDE_ARE_NULL}");
        }
    }
}
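The oversize check above delegates unit conversion to the project's UnitTransfer.alarmconfigToTaskResult. The following is a minimal, hypothetical sketch of the same idea, normalizing "<number> <unit>" strings to bytes before comparing the table or partition size against a configured skipDataSize such as "100 GB"; it is not the project's implementation.

import java.util.Arrays;
import java.util.List;

public class SizeThresholdSketch {
    // Units as they appear in "<number> <unit>" strings, smallest to largest.
    private static final List<String> UNITS = Arrays.asList("B", "KB", "MB", "GB", "TB");

    // Convert a value in the given unit to bytes (1024-based).
    static double toBytes(double number, String unit) {
        int idx = UNITS.indexOf(unit.toUpperCase());
        if (idx < 0) {
            throw new IllegalArgumentException("Unknown unit: " + unit);
        }
        return number * Math.pow(1024, idx);
    }

    // True if the actual size (e.g. "120.5 GB") exceeds the configured threshold (e.g. "100 GB").
    static boolean oversize(String actualSize, String skipDataSize) {
        String[] actual = actualSize.split(" ");
        String[] threshold = skipDataSize.split(" ");
        return toBytes(Double.parseDouble(actual[0]), actual[1])
                > toBytes(Double.parseDouble(threshold[0]), threshold[1]);
    }

    public static void main(String[] args) {
        System.out.println(oversize("120.5 GB", "100 GB")); // true
        System.out.println(oversize("512 MB", "100 GB"));   // false
    }
}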