Usage example of com.dtstack.taier.develop.utils.develop.sync.template.OdpsBase in the Taier project by DTStack: the syncReaderBuild method of the DatasourceService class.
/**
 * Builds the data-sync {@link Reader} for the given source type.
 * <p>
 * Maps each {@code DataSourceType} to its concrete reader implementation by
 * converting {@code sourceMap} (the job's source configuration) into the matching
 * reader class. Some branches additionally wire in the source ids or tweak the
 * configuration map before building.
 *
 * @param sourceType data source type code (see {@code DataSourceType#getVal()})
 * @param sourceMap  source-side configuration for the sync job; may be mutated
 *                   (Impala branch hoists {@code sftpConf} to the top level)
 * @param sourceIds  ids of the underlying data sources; first element is used
 *                   for MaxCompute (assumes non-empty in that case — inherited
 *                   from caller contract)
 * @return the reader matching {@code sourceType}
 * @throws IOException        if the map-to-object conversion fails
 * @throws RdosDefineException if {@code sourceType} is not supported as a sync source
 */
private Reader syncReaderBuild(final Integer sourceType, final Map<String, Object> sourceMap, final List<Long> sourceIds) throws IOException {
    Reader reader = null;
    // Generic RDBMS sources — excluding the Hive family, CarbonData, Impala and
    // SparkThrift, which have dedicated readers handled below.
    if (Objects.nonNull(RDBMSSourceType.getByDataSourceType(sourceType))
            && !DataSourceType.HIVE.getVal().equals(sourceType)
            && !DataSourceType.HIVE1X.getVal().equals(sourceType)
            && !DataSourceType.HIVE3X.getVal().equals(sourceType)
            && !DataSourceType.CarbonData.getVal().equals(sourceType)
            && !DataSourceType.IMPALA.getVal().equals(sourceType)
            && !DataSourceType.SparkThrift2_1.getVal().equals(sourceType)) {
        reader = PublicUtil.objectToObject(sourceMap, RDBReader.class);
        ((RDBBase) reader).setSourceIds(sourceIds);
        return reader;
    }
    if (DataSourceType.HDFS.getVal().equals(sourceType)) {
        return PublicUtil.objectToObject(sourceMap, HDFSReader.class);
    }
    // All Hive-compatible engines share the HiveReader.
    if (DataSourceType.HIVE.getVal().equals(sourceType)
            || DataSourceType.HIVE3X.getVal().equals(sourceType)
            || DataSourceType.HIVE1X.getVal().equals(sourceType)
            || DataSourceType.SparkThrift2_1.getVal().equals(sourceType)) {
        return PublicUtil.objectToObject(sourceMap, HiveReader.class);
    }
    if (DataSourceType.HBASE.getVal().equals(sourceType)) {
        return PublicUtil.objectToObject(sourceMap, HBaseReader.class);
    }
    if (DataSourceType.FTP.getVal().equals(sourceType)) {
        reader = PublicUtil.objectToObject(sourceMap, FtpReader.class);
        // Boolean.TRUE.equals(...) treats a missing, null or non-true value as false.
        // (The previous containsKey + (Boolean) cast threw an unboxing NPE when the
        // entry was present but null.)
        ((FtpReader) reader).setFirstLineHeader(Boolean.TRUE.equals(sourceMap.get("isFirstLineHeader")));
        return reader;
    }
    if (DataSourceType.MAXCOMPUTE.getVal().equals(sourceType)) {
        reader = PublicUtil.objectToObject(sourceMap, OdpsReader.class);
        // MaxCompute readers carry a single source id.
        ((OdpsBase) reader).setSourceId(sourceIds.get(0));
        return reader;
    }
    if (DataSourceType.ES.getVal().equals(sourceType)) {
        return PublicUtil.objectToObject(sourceMap, EsReader.class);
    }
    if (DataSourceType.MONGODB.getVal().equals(sourceType)) {
        return PublicUtil.objectToObject(sourceMap, MongoDbReader.class);
    }
    if (DataSourceType.CarbonData.getVal().equals(sourceType)) {
        return PublicUtil.objectToObject(sourceMap, CarbonDataReader.class);
    }
    // Kudu and Impala delegate to their dedicated sync builders.
    if (DataSourceType.Kudu.getVal().equals(sourceType)) {
        return syncBuilderFactory.getSyncBuilder(DataSourceType.Kudu.getVal()).syncReaderBuild(sourceMap, sourceIds);
    }
    if (DataSourceType.INFLUXDB.getVal().equals(sourceType)) {
        return PublicUtil.objectToObject(sourceMap, InfluxDBReader.class);
    }
    if (DataSourceType.IMPALA.getVal().equals(sourceType)) {
        // setSftpConf stores the sftp settings inside the hadoop config; hoist
        // "sftpConf" to the top level so the Impala sync builder can read it directly.
        // (Removed the former no-op re-put of HADOOP_CONFIG with its own value.)
        Object impalaConfig = sourceMap.get(HADOOP_CONFIG);
        if (impalaConfig instanceof Map) {
            sourceMap.put("sftpConf", ((Map<?, ?>) impalaConfig).get("sftpConf"));
        }
        return syncBuilderFactory.getSyncBuilder(DataSourceType.IMPALA.getVal()).syncReaderBuild(sourceMap, sourceIds);
    }
    if (DataSourceType.AWS_S3.getVal().equals(sourceType)) {
        return PublicUtil.objectToObject(sourceMap, AwsS3Reader.class);
    }
    throw new RdosDefineException("暂不支持" + DataSourceType.getSourceType(sourceType).name() + "作为数据同步的源");
}
Usage example of com.dtstack.taier.develop.utils.develop.sync.template.OdpsBase in the Taier project by DTStack: the syncWriterBuild method of the DatasourceService class.
/**
 * Builds the data-sync {@link Writer} for the given target type.
 * <p>
 * Mirror of {@code syncReaderBuild}: maps each {@code DataSourceType} to its
 * concrete writer implementation by converting {@code targetMap} (the job's
 * target configuration) into the matching writer class, wiring in target ids
 * or extra column metadata where the writer needs it.
 *
 * @param targetType data source type code (see {@code DataSourceType#getVal()})
 * @param targetIds  ids of the underlying data sources; first element is used
 *                   for MaxCompute (assumes non-empty in that case — inherited
 *                   from caller contract)
 * @param targetMap  target-side configuration for the sync job
 * @param reader     the already-built source reader, passed through to
 *                   builder-based writers (Kudu, Impala)
 * @return the writer matching {@code targetType}
 * @throws IOException        if the map-to-object conversion fails
 * @throws RdosDefineException if {@code targetType} is not supported as a sync target
 */
private Writer syncWriterBuild(final Integer targetType, final List<Long> targetIds, final Map<String, Object> targetMap, final Reader reader) throws IOException {
    Writer writer = null;
    // Generic RDBMS targets — excluding the Hive family, Impala, CarbonData,
    // SparkThrift and Inceptor, which have dedicated writers handled below.
    if (Objects.nonNull(RDBMSSourceType.getByDataSourceType(targetType))
            && !DataSourceType.HIVE.getVal().equals(targetType)
            && !DataSourceType.HIVE1X.getVal().equals(targetType)
            && !DataSourceType.HIVE3X.getVal().equals(targetType)
            && !DataSourceType.IMPALA.getVal().equals(targetType)
            && !DataSourceType.CarbonData.getVal().equals(targetType)
            && !DataSourceType.SparkThrift2_1.getVal().equals(targetType)
            && !DataSourceType.INCEPTOR.getVal().equals(targetType)) {
        writer = PublicUtil.objectToObject(targetMap, RDBWriter.class);
        ((RDBBase) writer).setSourceIds(targetIds);
        return writer;
    }
    if (DataSourceType.HDFS.getVal().equals(targetType)) {
        return PublicUtil.objectToObject(targetMap, HDFSWriter.class);
    }
    // All Hive-compatible engines share the HiveWriter.
    if (DataSourceType.HIVE.getVal().equals(targetType)
            || DataSourceType.HIVE3X.getVal().equals(targetType)
            || DataSourceType.HIVE1X.getVal().equals(targetType)
            || DataSourceType.SparkThrift2_1.getVal().equals(targetType)) {
        return PublicUtil.objectToObject(targetMap, HiveWriter.class);
    }
    if (DataSourceType.FTP.getVal().equals(targetType)) {
        return PublicUtil.objectToObject(targetMap, FtpWriter.class);
    }
    if (DataSourceType.ES.getVal().equals(targetType)) {
        return PublicUtil.objectToObject(targetMap, EsWriter.class);
    }
    if (DataSourceType.HBASE.getVal().equals(targetType)) {
        // (Removed the former no-op re-put of "hbaseConfig" with its own value.)
        writer = PublicUtil.objectToObject(targetMap, HBaseWriter.class);
        HBaseWriter hbaseWriter = (HBaseWriter) writer;
        // Collect the source-column names ("key") mapped onto HBase columns so the
        // writer knows which reader columns feed it.
        List<String> sourceColNames = new ArrayList<>();
        @SuppressWarnings("unchecked")
        List<Map<String, String>> columnList = (List<Map<String, String>>) targetMap.get("column");
        // Guard against a missing "column" entry (previously an NPE in the for-each).
        if (columnList != null) {
            for (Map<String, String> column : columnList) {
                if (column.containsKey("key")) {
                    sourceColNames.add(column.get("key"));
                }
            }
        }
        hbaseWriter.setSrcColumns(sourceColNames);
        return writer;
    }
    if (DataSourceType.MAXCOMPUTE.getVal().equals(targetType)) {
        writer = PublicUtil.objectToObject(targetMap, OdpsWriter.class);
        // MaxCompute writers carry a single source id.
        ((OdpsBase) writer).setSourceId(targetIds.get(0));
        return writer;
    }
    if (DataSourceType.REDIS.getVal().equals(targetType)) {
        return PublicUtil.objectToObject(targetMap, RedisWriter.class);
    }
    if (DataSourceType.MONGODB.getVal().equals(targetType)) {
        return PublicUtil.objectToObject(targetMap, MongoDbWriter.class);
    }
    if (DataSourceType.CarbonData.getVal().equals(targetType)) {
        return PublicUtil.objectToObject(targetMap, CarbonDataWriter.class);
    }
    // Kudu and Impala delegate to their dedicated sync builders, which also need
    // the source reader to shape the writer.
    if (DataSourceType.Kudu.getVal().equals(targetType)) {
        return syncBuilderFactory.getSyncBuilder(DataSourceType.Kudu.getVal()).syncWriterBuild(targetIds, targetMap, reader);
    }
    if (DataSourceType.IMPALA.getVal().equals(targetType)) {
        return syncBuilderFactory.getSyncBuilder(DataSourceType.IMPALA.getVal()).syncWriterBuild(targetIds, targetMap, reader);
    }
    if (DataSourceType.AWS_S3.getVal().equals(targetType)) {
        return PublicUtil.objectToObject(targetMap, AwsS3Writer.class);
    }
    if (DataSourceType.INCEPTOR.getVal().equals(targetType)) {
        return PublicUtil.objectToObject(targetMap, InceptorWriter.class);
    }
    throw new RdosDefineException("暂不支持" + DataSourceType.getSourceType(targetType).name() + "作为数据同步的目标");
}
Aggregations