Use of com.dtstack.dtcenter.loader.dto.source.ISourceDTO in project Taier by DTStack.
The class DatasourceService, method colMap.
/**
 * Parse fields: for Kafka sources, resolve the broker address list from the
 * cluster and write it back into the datasource JSON.
 * @param json           datasource config JSON
 * @param type           datasource type code
 * @param kerberosConfig kerberos settings, may be null
 */
private void colMap(JSONObject json, Integer type, Map<String, Object> kerberosConfig) {
    if (DataSourceType.getKafkaS().contains(type)) {
        ISourceDTO sourceDTO = SourceDTOType.getSourceDTO(json, type, Maps.newHashMap(), Maps.newHashMap());
        String brokersAddress = null;
        try {
            brokersAddress = ClientCache.getKafka(type).getAllBrokersAddress(sourceDTO);
        } catch (Exception e) {
            LOGGER.error("failed to get kafka brokersAddress!", e);
            throw new PubSvcDefineException("failed to get kafka brokersAddress!", e);
        }
        json.put("bootstrapServers", brokersAddress);
    }
    if (kerberosConfig != null) {
        json.put(FormNames.KERBEROS_CONFIG, kerberosConfig);
    }
}
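Since colMap is private, it is only reachable from inside DatasourceService. A minimal sketch of an in-class caller, assuming a Kafka datasource whose JSON carries the cluster address under an "address" field and a DataSourceType.KAFKA constant; both are assumptions, not confirmed by the snippet above.

// Hypothetical in-class caller; the "address" field name and the KAFKA constant are assumptions.
JSONObject dataJson = JSON.parseObject("{\"address\":\"kafka01:9092,kafka02:9092\"}");
colMap(dataJson, DataSourceType.KAFKA.getVal(), null);
// For Kafka types, colMap resolves the live broker list and stores it back:
String brokers = dataJson.getString("bootstrapServers");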
Use of com.dtstack.dtcenter.loader.dto.source.ISourceDTO in project Taier by DTStack.
The class DatasourceService, method getAllSchemas.
/**
 * Get all schemas of a datasource.
 * @param sourceId datasource id
 * @param schema   optional schema filter
 * @return schema names
 */
public List<String> getAllSchemas(Long sourceId, String schema) {
    BatchDataSource source = getOne(sourceId);
    String dataJson = source.getDataJson();
    JSONObject json = JSON.parseObject(dataJson);
    ISourceDTO sourceDTO = SourceDTOType.getSourceDTO(json, source.getType(), fillKerberosConfig(sourceId), Maps.newHashMap());
    IClient client = ClientCache.getClient(source.getType());
    return client.getAllDatabases(sourceDTO, SqlQueryDTO.builder().schema(schema).build());
}
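A hedged usage sketch: the caller only supplies the datasource id, and passing null for schema is assumed to list every database, based on how SqlQueryDTO.builder().schema(...) is fed through to the loader above (not verified here). The id is a placeholder.

// Hypothetical caller; 42L is a placeholder id and null is assumed to mean "no schema filter".
List<String> schemas = datasourceService.getAllSchemas(42L, null);
for (String s : schemas) {
    LOGGER.info("schema: {}", s);
}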
Use of com.dtstack.dtcenter.loader.dto.source.ISourceDTO in project Taier by DTStack.
The class DatasourceService, method getCreateTargetTableSql.
/**
 * One-click target table generation: read column metadata from the origin
 * datasource and build a CREATE TABLE statement for the target datasource.
 *
 * @param originSourceId origin datasource id
 * @param targetSourceId target datasource id
 * @param tableName      origin table name
 * @param partition      partition spec, "/"-separated key=value pairs
 * @param sourceSchema   schema of the origin table
 * @param targetSchema   schema for the target table
 */
public String getCreateTargetTableSql(Long originSourceId, Long targetSourceId, String tableName, String partition, String sourceSchema, String targetSchema) {
    try {
        BatchDataSource originSource = getOne(originSourceId);
        JSONObject reader = JSON.parseObject(originSource.getDataJson());
        if (!ORIGIN_TABLE_ALLOW_TYPES.contains(originSource.getType())) {
            throw new RdosDefineException("one-click target table generation only supports relational databases, hive and maxCompute");
        }
        Map<String, Object> kerberosConfig = datasourceService.fillKerberosConfig(originSourceId);
        Map<String, Object> expandConfigPrepare = datasourceService.expandConfigPrepare(originSourceId);
        List<JSONObject> columnMetaData = new ArrayList<>();
        IClient iClient = ClientCache.getClient(originSource.getType());
        ISourceDTO sourceDTO = SourceDTOType.getSourceDTO(reader, originSource.getType(), kerberosConfig, expandConfigPrepare);
        SqlQueryDTO sqlQueryDTO = SqlQueryDTO.builder().schema(sourceSchema).tableName(tableName).build();
        List<ColumnMetaDTO> columnMeta = iClient.getColumnMetaData(sourceDTO, sqlQueryDTO);
        if (CollectionUtils.isNotEmpty(columnMeta)) {
            for (ColumnMetaDTO columnMetaDTO : columnMeta) {
                JSONObject jsonObject = JSON.parseObject(JSON.toJSONString(columnMetaDTO));
                jsonObject.put("isPart", columnMetaDTO.getPart());
                columnMetaData.add(jsonObject);
            }
        }
        String comment = iClient.getTableMetaComment(sourceDTO, sqlQueryDTO);
        List<String> partList = null;
        if (StringUtils.isNotBlank(partition)) {
            // collect partition columns from the "/"-separated key=value spec,
            // skipping the reserved "pt" column
            String[] parts = partition.split("/");
            partList = new ArrayList<>();
            for (String part : parts) {
                String[] partDetail = part.split("=");
                String partCol = partDetail[0];
                if (!partCol.equals("pt")) {
                    partList.add(partCol);
                }
            }
        }
        List<JSONObject> columns = null;
        BatchDataSource targetDataSource = getOne(targetSourceId);
        String sql;
        // names such as 'CHARNT'.'CUSTMERS_10_MIN' need to be normalized
        tableName = this.formatTableName(tableName);
        // fall back to hive when the target datasource cannot be resolved
        int sourceType = Objects.isNull(targetDataSource) ? DataSourceType.HIVE.getVal() : targetDataSource.getType();
        if (CREATE_TABLE_TO_PG_TABLE.contains(sourceType)) {
            // note: ADB For PG is not handled here, it gets its own branch below
            columns = convertWriterColumns(columnMetaData, new PostgreSqlWriterFormat());
            sql = generalLibraCreateSql(columns, tableName, targetSchema);
        } else if (sourceType == DataSourceType.TiDB.getVal() || sourceType == DataSourceType.MySQL.getVal()) {
            columns = convertTidbWriterColumns(columnMetaData, TYPE_FORMAT);
            sql = generalTidbCreateSql(columns, tableName, comment);
        } else if (sourceType == DataSourceType.Oracle.getVal()) {
            // Oracle reuses the TiDB-style DDL builder with the generic column conversion
            columns = convertWriterColumns(columnMetaData, TYPE_FORMAT);
            sql = this.generalTidbCreateSql(columns, tableName, comment);
        } else if (sourceType == DataSourceType.ADB_FOR_PG.getVal()) {
            columns = ADBForPGUtil.convertADBForPGWriterColumns(columnMetaData);
            sql = ADBForPGUtil.generalCreateSql(targetSchema, tableName, columns, comment);
        } else {
            // default: create the table on hive
            columns = convertWriterColumns(columnMetaData, TYPE_FORMAT);
            sql = generalCreateSql(columns, partList, tableName, comment);
        }
        return sqlFormat(sql);
    } catch (Exception e) {
        throw new RdosDefineException("failed to generate target table", e);
    }
}
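A usage sketch for the method above. All ids, names, and schemas are placeholders; the partition string follows the "/"-separated key=value format that the parsing loop expects, with the reserved "pt" column skipped.

// Hypothetical invocation: build DDL for target datasource 2 from table "orders" in source 1.
String ddl = datasourceService.getCreateTargetTableSql(
        1L,                    // originSourceId (placeholder)
        2L,                    // targetSourceId (placeholder)
        "orders",              // table to mirror
        "city=bj/pt=20220101", // partition spec; "pt" is dropped by the parser
        "src_db",              // sourceSchema
        "dwd");                // targetSchema
// ddl is a formatted CREATE TABLE statement for the target type (hive by default)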