Use of com.dtstack.taier.pluginapi.pojo.Column in project Taier by DTStack: the class ImpalaSyncBuilder, method syncWriterBuild.
@Override
public Writer syncWriterBuild(List<Long> targetIds, Map<String, Object> targetMap, Reader reader) {
    TableLocationType tableLocationType = TableLocationType.getTableLocationType((String) targetMap.get(TableLocationType.key()));
    if (tableLocationType == null) {
        throw new RdosDefineException("Unsupported table storage type");
    }
    if (tableLocationType == TableLocationType.HIVE) {
        Map<String, Object> clone = new HashMap<>(targetMap);
        String writeMode = (String) clone.get("writeMode");
        writeMode = StringUtils.isNotBlank(writeMode) ? SyncWriteMode.tranferHiveMode(writeMode) : SyncWriteMode.HIVE_OVERWRITE.getMode();
        clone.put("writeMode", writeMode);
        // Backfill the "index" field that the HDFS writer needs on each column
        List column = (List) clone.get("column");
        List<Column> allColumns = (List<Column>) clone.get("allColumns");
        List<Column> partitionColumns = (List<Column>) clone.get("partitionColumns");
        Map<String, Column> allColumnsMap = allColumns.stream().collect(Collectors.toMap(Column::getName, item -> item));
        for (Object col : column) {
            String name = (String) ((Map<String, Object>) col).get("key");
            ((Map<String, Object>) col).put("index", allColumnsMap.get(name).getIndex());
        }
        // Set fullColumnNames and fullColumnTypes, which are needed when recording dirty data;
        // partition columns must be excluded
        Set<String> partitionColumnNameSet = CollectionUtils.isEmpty(partitionColumns) ? new HashSet<>() : partitionColumns.stream().map(pColumn -> pColumn.getName()).collect(Collectors.toSet());
        List<String> fullColumnNames = new ArrayList<>();
        List<String> fullColumnTypes = new ArrayList<>();
        for (Column allColumn : allColumns) {
            if (!partitionColumnNameSet.contains(allColumn.getName())) {
                fullColumnNames.add(allColumn.getName());
                fullColumnTypes.add(allColumn.getType());
            }
        }
        clone.put("fullColumnNames", fullColumnNames);
        clone.put("fullColumnTypes", fullColumnTypes);
        String partition = (String) clone.get("partition");
        // The fileName logic follows HiveWriter
        String fileName = StringUtils.isNotEmpty(partition) ? partition : "";
        clone.put("fileName", fileName);
        return objToObject(clone, ImpalaHdfsWriter.class);
    } else if (tableLocationType == TableLocationType.KUDU) {
        KuduWriter kuduWriter = objToObject(targetMap, KuduWriter.class);
        String kuduTableName = (String) targetMap.get("kuduTableName");
        LOGGER.info("syncWriterBuild format impala kuduTableName :{} ", kuduTableName);
        kuduWriter.setTable(kuduTableName);
        return kuduWriter;
    }
    return null;
}
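Both branches rely on an objToObject helper that is not shown in this snippet; it converts the loosely typed parameter map into a concrete writer instance. A minimal sketch, assuming a Fastjson round trip (the helper's actual implementation in Taier may differ):

import com.alibaba.fastjson.JSON;

// Hypothetical sketch of objToObject: serialize the source object to JSON,
// then bind the JSON back to the target class. Assumes Fastjson is available.
public static <T> T objToObject(Object source, Class<T> clazz) {
    return JSON.parseObject(JSON.toJSONString(source), clazz);
}

A round trip like this copies only the keys whose names match fields on the target class, which is why the method mutates the map (writeMode, fileName, fullColumnNames, ...) before converting it.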
Use of com.dtstack.taier.pluginapi.pojo.Column in project Taier by DTStack: the class ImpalaHdfsReader, method toReaderJson.
@Override
public JSONObject toReaderJson() {
    HDFSReader hdfsReader = new HDFSReader();
    hdfsReader.setHadoopConfig(hadoopConfig);
    hdfsReader.setFieldDelimiter(fieldDelimiter);
    // The column parameter passed from the front end has no index; the HDFS reader requires it
    Map<String, Column> allColumnsMap = allColumns.stream().collect(Collectors.toMap(Column::getName, item -> item));
    for (Object col : column) {
        String name = (String) ((Map<String, Object>) col).get("key");
        ((Map<String, Object>) col).put("index", allColumnsMap.get(name).getIndex());
    }
    hdfsReader.setColumn(column);
    hdfsReader.setDefaultFS(defaultFS);
    hdfsReader.setEncoding(encoding);
    hdfsReader.setExtralConfig(super.getExtralConfig());
    hdfsReader.setFileType(fileType);
    if (StringUtils.isNotEmpty(partition)) {
        hdfsReader.setPath(path + "/" + partition);
    } else {
        hdfsReader.setPath(path);
    }
    if (MapUtils.isNotEmpty(sftpConf)) {
        hdfsReader.setSftpConf(sftpConf);
    }
    if (StringUtils.isNotEmpty(remoteDir)) {
        hdfsReader.setRemoteDir(remoteDir);
    }
    hdfsReader.setPath(hdfsReader.getPath().trim());
    return hdfsReader.toReaderJson();
}
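The index-backfill loop here is the same one that appears in syncWriterBuild above. Extracted into a shared helper, it could read as follows; this is illustrative only (Taier inlines the logic), and the method name is hypothetical:

import com.dtstack.taier.pluginapi.pojo.Column;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

@SuppressWarnings("unchecked")
static void backfillColumnIndexes(List<?> column, List<Column> allColumns) {
    // Look up each front-end column map by its "key" and copy the index
    // recorded in allColumns onto it.
    Map<String, Column> byName = allColumns.stream()
            .collect(Collectors.toMap(Column::getName, c -> c));
    for (Object col : column) {
        Map<String, Object> colMap = (Map<String, Object>) col;
        Column matched = byName.get((String) colMap.get("key"));
        if (matched != null) { // guards the NPE the inlined version risks on unknown names
            colMap.put("index", matched.getIndex());
        }
    }
}

Note that the inlined versions call allColumnsMap.get(name).getIndex() directly, so a front-end column name with no match in allColumns throws a NullPointerException; the sketch above skips such columns instead.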
Use of com.dtstack.taier.pluginapi.pojo.Column in project Taier by DTStack: the class ImpalaUtils, method getImpalaHiveTableDetailInfo.
public static Map<String, Object> getImpalaHiveTableDetailInfo(ISourceDTO iSourceDTO, String tableName) {
    IClient client = ClientCache.getClient(DataSourceType.IMPALA.getVal());
    SqlQueryDTO sqlQueryDTO = SqlQueryDTO.builder().tableName(tableName).build();
    com.dtstack.dtcenter.loader.dto.Table tableInfo = client.getTable(iSourceDTO, sqlQueryDTO);
    List<ColumnMetaDTO> columnMetaDTOList = tableInfo.getColumns();
    List<Column> columns = new ArrayList<>();
    List<Column> partitionColumns = new ArrayList<>();
    for (int i = 0; i < columnMetaDTOList.size(); i++) {
        ColumnMetaDTO columnMetaDTO = columnMetaDTOList.get(i);
        Column column = new Column();
        column.setName(columnMetaDTO.getKey());
        column.setType(columnMetaDTO.getType());
        column.setComment(columnMetaDTO.getComment());
        column.setIndex(i);
        columns.add(column);
        if (columnMetaDTO.getPart()) {
            partitionColumns.add(column);
        }
    }
    Map<String, Object> map = new HashMap<>();
    map.put("allColumns", columns);
    map.put("partitionColumns", partitionColumns);
    map.put("path", tableInfo.getPath());
    map.put("fieldDelimiter", tableInfo.getDelim());
    return map;
}
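The map returned here carries the same keys (allColumns, partitionColumns, path, fieldDelimiter) that the builder and reader snippets above pull out of targetMap, so the three pieces compose naturally. A hypothetical wiring, with the ISourceDTO, targetIds, and reader construction elided and the storage-type value assumed:

// Illustrative only: merge the table metadata into the writer parameters,
// then build the writer. "hive" as the location value and the "writeMode"
// key are assumptions; consult TableLocationType and the front-end contract.
Map<String, Object> targetMap = new HashMap<>(ImpalaUtils.getImpalaHiveTableDetailInfo(iSourceDTO, "demo_table"));
targetMap.put(TableLocationType.key(), "hive");
targetMap.put("writeMode", "overwrite");
Writer writer = new ImpalaSyncBuilder().syncWriterBuild(targetIds, targetMap, reader);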