use of com.dtstack.dtcenter.loader.dto.source.ISourceDTO in project Taier by DTStack.
the class HiveBase method inferHdfsParams.
protected void inferHdfsParams() {
    if (inferred.compareAndSet(false, true) && StringUtils.isNotBlank(table)) {
        DataSourceType sourceType = DataSourceType.getSourceType(dataSourceType);
        JSONObject dataJson = new JSONObject();
        dataJson.put(SourceDTOType.JDBC_URL, jdbcUrl);
        dataJson.put(SourceDTOType.JDBC_USERNAME, username);
        dataJson.put(SourceDTOType.JDBC_PASSWORD, password);
        ISourceDTO sourceDTO = SourceDTOType.getSourceDTO(dataJson, sourceType.getVal(), kerberosConfig, Maps.newHashMap());
        IClient client = ClientCache.getClient(sourceType.getVal());
        Table tableInfo = client.getTable(sourceDTO, SqlQueryDTO.builder().tableName(this.table).build());
        // Split the table's columns into data columns and partition columns.
        List<ColumnMetaDTO> columnMetaData = tableInfo.getColumns();
        for (ColumnMetaDTO dto : columnMetaData) {
            if (!dto.getPart()) {
                fullColumnNames.add(dto.getKey());
                fullColumnTypes.add(dto.getType());
            } else {
                isPartitioned = true;
                partitionedBy.add(dto.getKey());
            }
        }
        if (isPartitioned) {
            ITable tableClient = ClientCache.getTable(sourceType.getVal());
            partitionList.addAll(tableClient.showPartitions(sourceDTO, table));
        }
        this.dbName = tableInfo.getDb();
        this.path = tableInfo.getPath();
        this.fileType = tableInfo.getStoreType();
        this.fieldDelimiter = tableInfo.getDelim();
        this.isTransaction = tableInfo.getIsTransTable();
    }
    // Map each configured column to its index in the table's full column list.
    for (int i = 0; i < fullColumnNames.size(); i++) {
        for (Object col : column) {
            if (fullColumnNames.get(i).equals(((Map<String, Object>) col).get("key"))) {
                ((Map<String, Object>) col).put("index", i);
                break;
            }
        }
    }
}
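For context, here is a minimal standalone sketch of the same metadata lookup, reusing only the loader calls shown above (SourceDTOType.getSourceDTO, ClientCache.getClient, IClient.getTable); the connection values and table name are hypothetical, and it assumes a plain Hive source with no Kerberos config.
// A minimal sketch, assuming a hypothetical Hive source; imports follow the snippet above.
JSONObject dataJson = new JSONObject();
dataJson.put(SourceDTOType.JDBC_URL, "jdbc:hive2://localhost:10000/demo"); // hypothetical URL
dataJson.put(SourceDTOType.JDBC_USERNAME, "hive");                         // hypothetical user
dataJson.put(SourceDTOType.JDBC_PASSWORD, "");                             // hypothetical password
ISourceDTO sourceDTO = SourceDTOType.getSourceDTO(dataJson, DataSourceType.HIVE.getVal(), Maps.newHashMap(), Maps.newHashMap());
IClient client = ClientCache.getClient(DataSourceType.HIVE.getVal());
Table tableInfo = client.getTable(sourceDTO, SqlQueryDTO.builder().tableName("demo_table").build());
System.out.println(tableInfo.getPath() + " stored as " + tableInfo.getStoreType());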
use of com.dtstack.dtcenter.loader.dto.source.ISourceDTO in project Taier by DTStack.
the class JdbcServiceImpl method executeQueryWithoutResult.
/**
 * Execute a statement on the cluster without fetching a result set.
 *
 * @param tenantId         tenant id
 * @param userId           user id
 * @param eScheduleJobType job type, used to resolve the target engine
 * @param schema           database/schema to run against
 * @param sql              statement to execute
 * @param connection       an already-open connection to reuse
 * @return Boolean.TRUE on success
 */
@Override
public Boolean executeQueryWithoutResult(Long tenantId, Long userId, EScheduleJobType eScheduleJobType, String schema, String sql, Connection connection) {
    ISourceDTO iSourceDTO = Engine2DTOService.get(tenantId, userId, eScheduleJobType, schema);
    iSourceDTO.setConnection(connection);
    IClient client = ClientCache.getClient(iSourceDTO.getSourceType());
    LOGGER.info("cluster executes SQL, tenantId:{}, userId:{}, jobType:{}, schema:{}, sql:{}", tenantId, userId, eScheduleJobType.getType(), schema, sql);
    client.executeSqlWithoutResultSet(iSourceDTO, SqlQueryDTO.builder().sql(sql).build());
    return Boolean.TRUE;
}
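A hypothetical call site, sketching how a DDL statement might be pushed through this method; the ids are placeholders and the job-type constant is assumed from Taier's EScheduleJobType enum.
// Hypothetical caller: run DDL on the cluster, reusing an already-open connection.
Boolean ok = jdbcService.executeQueryWithoutResult(
        1L,                          // tenantId (placeholder)
        100L,                        // userId (placeholder)
        EScheduleJobType.SPARK_SQL,  // assumed job-type constant
        "demo_db",
        "CREATE TABLE IF NOT EXISTS demo (id INT)",
        connection);                 // an open java.sql.Connection managed by the caller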
use of com.dtstack.dtcenter.loader.dto.source.ISourceDTO in project Taier by DTStack.
the class JdbcServiceImpl method executeQueryWithVariables.
public List<List<Object>> executeQueryWithVariables(Long tenantId, Long userId, EScheduleJobType eScheduleJobType, String schema, String sql, List<String> variables, Integer limit, String taskParam) {
    List<List<Object>> returnList = new ArrayList<>();
    JdbcInfo jdbcInfo = Engine2DTOService.getJdbcInfo(tenantId, userId, eScheduleJobType);
    Integer maxRows = limit == null || limit == 0 ? jdbcInfo.getMaxRows() : limit;
    ISourceDTO iSourceDTO = Engine2DTOService.get(tenantId, userId, Engine2DTOService.jobTypeTransitionDataSourceType(eScheduleJobType, jdbcInfo.getVersion()).getVal(), schema, jdbcInfo);
    IClient client = ClientCache.getClient(iSourceDTO.getSourceType());
    // Obtain the connection up front so it can be reused across statements.
    // try-with-resources is not used because the exceptions surfaced on close are too broad.
    Connection con = client.getCon(iSourceDTO, taskParam);
    try {
        iSourceDTO.setConnection(con);
        // Apply session variables (if any) on the shared connection before the main query.
        if (CollectionUtils.isNotEmpty(variables)) {
            variables.forEach(variable -> client.executeSqlWithoutResultSet(iSourceDTO, SqlQueryDTO.builder().sql(variable).limit(jdbcInfo.getMaxRows()).queryTimeout(jdbcInfo.getQueryTimeout()).build()));
        }
        List<Map<String, Object>> list = client.executeQuery(iSourceDTO, SqlQueryDTO.builder().sql(sql).limit(maxRows).queryTimeout(jdbcInfo.getQueryTimeout()).build());
        LOGGER.info("cluster executes SQL query, tenantId:{}, userId:{}, jobType:{}, schema:{}, sql:{}", tenantId, userId, eScheduleJobType.getType(), schema, sql);
        // The first row added to the result holds the column names.
        List<ColumnMetaDTO> columnMetaDataWithSql = client.getColumnMetaDataWithSql(iSourceDTO, SqlQueryDTO.builder().sql(sql).limit(0).queryTimeout(jdbcInfo.getQueryTimeout()).build());
        if (CollectionUtils.isNotEmpty(columnMetaDataWithSql)) {
            List<Object> column = new ArrayList<>();
            columnMetaDataWithSql.forEach(bean -> column.add(bean.getKey()));
            returnList.add(column);
        }
        // Values returned by the pluggable data source are not in the required shape;
        // flatten each row map into a plain list of values.
        if (CollectionUtils.isNotEmpty(list)) {
            for (Map<String, Object> result : list) {
                returnList.add(new ArrayList<>(result.values()));
            }
        }
    } finally {
        iSourceDTO.setConnection(null);
        DBUtil.closeDBResources(null, null, con);
    }
    return returnList;
}
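A hypothetical call sketch; assuming column metadata is returned, row 0 of the result carries the column names and the remaining rows carry values, as built above. The session variable, ids, and job-type constant are placeholders.
List<String> variables = Collections.singletonList("set hive.exec.parallel=true"); // placeholder session variable
List<List<Object>> rows = jdbcService.executeQueryWithVariables(
        1L, 100L, EScheduleJobType.SPARK_SQL, "demo_db",
        "SELECT id, name FROM demo_table", variables, 200, null);
List<Object> header = rows.get(0);                       // column names
List<List<Object>> data = rows.subList(1, rows.size());  // row values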
use of com.dtstack.dtcenter.loader.dto.source.ISourceDTO in project Taier by DTStack.
the class DatasourceService method getHivePartitions.
public Set<String> getHivePartitions(Long sourceId, String tableName) {
    BatchDataSource source = getOne(sourceId);
    JSONObject json = JSON.parseObject(source.getDataJson());
    Map<String, Object> kerberosConfig = this.fillKerberosConfig(sourceId);
    ISourceDTO sourceDTO = SourceDTOType.getSourceDTO(json, source.getType(), kerberosConfig, Maps.newHashMap());
    IClient iClient = ClientCache.getClient(source.getType());
    List<ColumnMetaDTO> partitionColumn = iClient.getPartitionColumn(sourceDTO, SqlQueryDTO.builder().tableName(tableName).build());
    Set<String> partitionNameSet = Sets.newHashSet();
    // Format the partition info to match Hive's "col1=/col2=" convention.
    if (CollectionUtils.isNotEmpty(partitionColumn)) {
        StringJoiner tempJoiner = new StringJoiner("=/", "", "=");
        for (ColumnMetaDTO column : partitionColumn) {
            tempJoiner.add(column.getKey());
        }
        partitionNameSet.add(tempJoiner.toString());
    }
    return partitionNameSet;
}
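As a quick check of the StringJoiner pattern above: for hypothetical partition columns pt and region, the delimiter "=/" and suffix "=" produce the Hive-style template shown in the comment.
StringJoiner tempJoiner = new StringJoiner("=/", "", "=");
tempJoiner.add("pt");
tempJoiner.add("region");
System.out.println(tempJoiner); // prints "pt=/region="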
use of com.dtstack.dtcenter.loader.dto.source.ISourceDTO in project Taier by DTStack.
the class DatasourceService method executeOnSpecifySourceWithOutResult.
/**
 * @param sourceId     data source id
 * @param sql          SQL to execute
 * @param targetSchema only used for Doris; other source types need not pass it
 */
private void executeOnSpecifySourceWithOutResult(Long sourceId, String sql, String targetSchema) {
    BatchDataSource source = getOne(sourceId);
    DataSourceType dataSourceType = DataSourceType.getSourceType(source.getType());
    if (!SUPPORT_CREATE_TABLE_DATASOURCES.contains(dataSourceType)) {
        throw new RdosDefineException(String.format("only tables for the %s data sources can be created", SUPPORT_CREATE_TABLE_DATASOURCES_NAMES));
    }
    JSONObject json = JSON.parseObject(source.getDataJson());
    try {
        Map<String, Object> kerberosConfig = fillKerberosConfig(sourceId);
        Map<String, Object> expandConfigPrepare = expandConfigPrepare(sourceId);
        ISourceDTO sourceDTO = SourceDTOType.getSourceDTO(json, source.getType(), kerberosConfig, expandConfigPrepare);
        IClient iClient = ClientCache.getClient(dataSourceType.getVal());
        Connection con = iClient.getCon(sourceDTO);
        DBUtil.executeSqlWithoutResultSet(con, sql, false);
    } catch (Exception e) {
        throw new RdosDefineException(e.getMessage() + ". Executed SQL = " + sql, e);
    }
}
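A hypothetical call site from within DatasourceService (the method is private); the sourceId and SQL are placeholders, and targetSchema is passed as null for non-Doris sources as the javadoc notes.
// Hypothetical caller inside DatasourceService.
executeOnSpecifySourceWithOutResult(
        42L,                                         // sourceId (placeholder)
        "CREATE TABLE IF NOT EXISTS demo (id INT)",  // DDL to execute
        null);                                       // targetSchema: Doris only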