Use of org.apache.inlong.sort.protocol.sink.HiveSinkInfo in project incubator-inlong by Apache.
The class HiveSinkITCase, method prepareSinkSchema:
private HiveSinkInfo prepareSinkSchema() {
    final FieldInfo f1 = new FieldInfo(fieldName1, new TimestampFormatInfo("MILLIS"));
    final FieldInfo f2 = new FieldInfo(fieldName2, IntFormatInfo.INSTANCE);
    final FieldInfo f3 = new FieldInfo(fieldName3, StringFormatInfo.INSTANCE);
    final FieldInfo f4 = new FieldInfo(fieldName4, StringFormatInfo.INSTANCE);
    final HiveTimePartitionInfo timePartition =
            new HiveTimePartitionInfo(f1.getName(), timePartitionFormat);
    final HiveFieldPartitionInfo fieldPartition = new HiveFieldPartitionInfo(f2.getName());
    return new HiveSinkInfo(
            new FieldInfo[] {f1, f2, f3, f4},
            hiveMetastoreUrl,
            hiveDb,
            hiveTable,
            hiveUsername,
            hivePassword,
            dfsSchema + hdfsDataDir,
            new HivePartitionInfo[] {timePartition, fieldPartition},
            new TextFileFormat('\t'));
}
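For reference, the HiveSinkInfo constructor arguments above are, in order: the sink fields, the Hive metastore URL, database, table, username, password, the HDFS data path, the partition definitions, and the file format. A minimal sketch with hypothetical literal values in place of the test fixture's fields (the URLs, names, and the time-format string below are invented for illustration):

HiveSinkInfo sinkInfo = new HiveSinkInfo(
        new FieldInfo[] {
                new FieldInfo("ts", new TimestampFormatInfo("MILLIS")),
                new FieldInfo("id", IntFormatInfo.INSTANCE)
        },
        "thrift://localhost:9083",  // hypothetical metastore address
        "test_db",
        "test_table",
        "hive_user",
        "hive_password",
        "hdfs://localhost:9000/user/hive/test_table",
        new HivePartitionInfo[] {new HiveTimePartitionInfo("ts", "yyyyMMddHH")},
        new TextFileFormat('\t'));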
Use of org.apache.inlong.sort.protocol.sink.HiveSinkInfo in project incubator-inlong by Apache.
The class Entrance, method buildSinkStream:
private static void buildSinkStream(
        DataStream<Row> sourceStream,
        Configuration config,
        SinkInfo sinkInfo,
        Map<String, Object> properties,
        long dataflowId) throws IOException, ClassNotFoundException {
    final String sinkType = checkNotNull(config.getString(Constants.SINK_TYPE));
    final int sinkParallelism = config.getInteger(Constants.SINK_PARALLELISM);
    switch (sinkType) {
        case Constants.SINK_TYPE_CLICKHOUSE:
            checkState(sinkInfo instanceof ClickHouseSinkInfo);
            ClickHouseSinkInfo clickHouseSinkInfo = (ClickHouseSinkInfo) sinkInfo;
            sourceStream.addSink(new ClickhouseRowSinkFunction(clickHouseSinkInfo))
                    .uid(Constants.SINK_UID)
                    .name("Clickhouse Sink")
                    .setParallelism(sinkParallelism);
            break;
        case Constants.SINK_TYPE_HIVE:
            checkState(sinkInfo instanceof HiveSinkInfo);
            HiveSinkInfo hiveSinkInfo = (HiveSinkInfo) sinkInfo;
            if (hiveSinkInfo.getPartitions().length == 0) {
                // The committer operator is not needed when the table has no partitions.
                sourceStream.process(new HiveWriter(config, dataflowId, hiveSinkInfo))
                        .uid(Constants.SINK_UID)
                        .name("Hive Sink")
                        .setParallelism(sinkParallelism);
            } else {
                sourceStream.process(new HiveWriter(config, dataflowId, hiveSinkInfo))
                        .uid(Constants.SINK_UID)
                        .name("Hive Sink")
                        .setParallelism(sinkParallelism)
                        .addSink(new HiveCommitter(config, hiveSinkInfo))
                        .name("Hive Committer")
                        .setParallelism(1);
            }
            break;
        case Constants.SINK_TYPE_ICEBERG:
            checkState(sinkInfo instanceof IcebergSinkInfo);
            IcebergSinkInfo icebergSinkInfo = (IcebergSinkInfo) sinkInfo;
            TableLoader tableLoader = TableLoader.fromHadoopTable(
                    icebergSinkInfo.getTableLocation(),
                    new org.apache.hadoop.conf.Configuration());
            FlinkSink.forRow(sourceStream, CommonUtils.getTableSchema(sinkInfo.getFields()))
                    .tableLoader(tableLoader)
                    .writeParallelism(sinkParallelism)
                    .build();
            break;
        case Constants.SINK_TYPE_KAFKA:
            checkState(sinkInfo instanceof KafkaSinkInfo);
            SerializationSchema<Row> schema = SerializationSchemaFactory.build(
                    sinkInfo.getFields(), ((KafkaSinkInfo) sinkInfo).getSerializationInfo());
            sourceStream.addSink(buildKafkaSink((KafkaSinkInfo) sinkInfo, properties, schema, config))
                    .uid(Constants.SINK_UID)
                    .name("Kafka Sink")
                    .setParallelism(sinkParallelism);
            break;
        default:
            throw new IllegalArgumentException("Unsupported sink type " + sinkType);
    }
}
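A caller of buildSinkStream must populate the sink type and parallelism in the Flink Configuration first. A minimal sketch, assuming Constants.SINK_TYPE and Constants.SINK_PARALLELISM are Flink ConfigOption instances (which the single-argument getString/getInteger calls above suggest):

Configuration config = new Configuration();
config.setString(Constants.SINK_TYPE, Constants.SINK_TYPE_HIVE);
config.setInteger(Constants.SINK_PARALLELISM, 2);
// sourceStream, sinkInfo, properties, and dataflowId come from the surrounding job setup.
buildSinkStream(sourceStream, config, sinkInfo, properties, dataflowId);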
Use of org.apache.inlong.sort.protocol.sink.HiveSinkInfo in project incubator-inlong by Apache.
The class JdbcHivePartitionTool, method main:
public static void main(String[] args) throws Exception {
    final Configuration config = ParameterTool.fromArgs(args).getConfiguration();
    final List<HivePartitionInfo> partitions = new ArrayList<>();
    final List<String> partitionValues = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        String partitionName = config.getString("partition_" + i + "_name", null);
        if (partitionName != null) {
            partitions.add(new HiveFieldPartitionInfo(partitionName));
            partitionValues.add(config.getString("partition_" + i + "_value", ""));
        }
    }
    final String database = config.getString("database", null);
    final String table = config.getString("table", null);
    HiveSinkInfo hiveSinkInfo = new HiveSinkInfo(
            new FieldInfo[0],
            config.getString("metastore_address", null),
            database,
            table,
            config.getString("username", null),
            config.getString("password", null),
            config.getString("root_path", null),
            partitions.toArray(new HivePartitionInfo[0]),
            new TextFileFormat('\t'));
    JdbcHivePartitionCommitPolicy committer = new JdbcHivePartitionCommitPolicy(config, hiveSinkInfo);
    try {
        committer.commit(new Context() {
            @Override
            public String databaseName() {
                return database;
            }

            @Override
            public String tableName() {
                return table;
            }

            @Override
            public HivePartition partition() {
                HivePartition hivePartition = new HivePartition();
                List<Tuple2<String, String>> partitionPairs = new ArrayList<>();
                for (int i = 0; i < partitions.size(); i++) {
                    partitionPairs.add(
                            Tuple2.of(partitions.get(i).getFieldName(), partitionValues.get(i)));
                }
                // noinspection unchecked
                hivePartition.setPartitions(partitionPairs.toArray(new Tuple2[0]));
                return hivePartition;
            }
        });
    } finally {
        committer.close();
    }
}
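Since ParameterTool.fromArgs parses "--key value" pairs, the tool above can also be driven programmatically. A hypothetical invocation using the configuration keys read in main (all values below are made up for illustration):

String[] toolArgs = {
        "--metastore_address", "thrift://localhost:9083",
        "--database", "test_db",
        "--table", "test_table",
        "--username", "hive_user",
        "--password", "hive_password",
        "--root_path", "hdfs://localhost:9000/user/hive/warehouse/test_db.db/test_table",
        "--partition_0_name", "dt",
        "--partition_0_value", "20220101"
};
JdbcHivePartitionTool.main(toolArgs);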
Use of org.apache.inlong.sort.protocol.sink.HiveSinkInfo in project incubator-inlong by Apache.
The class PulsarTestMetaManagerUtil, method prepareDataFlowInfo:
@Override
public DataFlowInfo prepareDataFlowInfo(long dataFlowId, String... args) {
    FieldInfo[] pulsarFields = new FieldInfo[] {
            new FieldInfo("f1", StringFormatInfo.INSTANCE),
            new FieldInfo("f2", StringFormatInfo.INSTANCE)
    };
    Map<String, Object> config = new HashMap<>();
    config.put("consumer.bootstrap-mode", "earliest");
    return new DataFlowInfo(
            dataFlowId,
            new PulsarSourceInfo(args[0], args[1], args[2], args[3],
                    new CsvDeserializationInfo(','), pulsarFields, null),
            new HiveSinkInfo(
                    new FieldInfo[0],
                    "testServerJdbcUrl",
                    "testDatabaseName",
                    "testTableName",
                    "testUsername",
                    "testPassword",
                    "testDataPath",
                    new HivePartitionInfo[0],
                    new TextFileFormat(',')),
            config);
}
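A hypothetical call from a test, with the meaning of the four positional arguments inferred from the PulsarSourceInfo constructor above (the exact semantics of each position are an assumption):

DataFlowInfo flowInfo = prepareDataFlowInfo(
        1L,
        "http://localhost:8080",                    // assumed Pulsar admin URL
        "pulsar://localhost:6650",                  // assumed Pulsar service URL
        "persistent://public/default/test-topic",   // assumed topic
        "test-subscription");                       // assumed subscription name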
Use of org.apache.inlong.sort.protocol.sink.HiveSinkInfo in project incubator-inlong by Apache.
The class SinkInfoUtils, method createHiveSinkInfo:
/**
 * Create Hive sink info.
 */
private static HiveSinkInfo createHiveSinkInfo(HiveSinkResponse hiveInfo, List<FieldInfo> sinkFields) {
    if (hiveInfo.getJdbcUrl() == null) {
        throw new RuntimeException(String.format("HiveSink={%s} server url cannot be empty", hiveInfo));
    }
    if (CollectionUtils.isEmpty(hiveInfo.getFieldList())) {
        throw new RuntimeException(String.format("HiveSink={%s} fields cannot be empty", hiveInfo));
    }
    // The data separator is stored as the numeric code of the character
    // (e.g. "9" for '\t'); the default file format is TextFile.
    Character separator = (char) Integer.parseInt(hiveInfo.getDataSeparator());
    HiveFileFormat fileFormat;
    String format = hiveInfo.getFileFormat();
    if (Constant.FILE_FORMAT_ORC.equalsIgnoreCase(format)) {
        fileFormat = new HiveSinkInfo.OrcFileFormat(1000);
    } else if (Constant.FILE_FORMAT_SEQUENCE.equalsIgnoreCase(format)) {
        fileFormat = new HiveSinkInfo.SequenceFileFormat(separator, 100);
    } else if (Constant.FILE_FORMAT_PARQUET.equalsIgnoreCase(format)) {
        fileFormat = new HiveSinkInfo.ParquetFileFormat();
    } else {
        fileFormat = new HiveSinkInfo.TextFileFormat(separator);
    }
    // The primary partition field; in the sink it must be a HiveTimePartitionInfo.
    List<HivePartitionInfo> partitionList = new ArrayList<>();
    String primary = hiveInfo.getPrimaryPartition();
    if (StringUtils.isNotEmpty(primary)) {
        // Hive time partitions may be by day, hour, or minute.
        String unit = hiveInfo.getPartitionUnit();
        HiveTimePartitionInfo timePartitionInfo =
                new HiveTimePartitionInfo(primary, PARTITION_TIME_FORMAT_MAP.get(unit));
        partitionList.add(timePartitionInfo);
    }
    // TODO: the type should be set according to the type of the field itself.
    if (StringUtils.isNotEmpty(hiveInfo.getSecondaryPartition())) {
        partitionList.add(new HiveSinkInfo.HiveFieldPartitionInfo(hiveInfo.getSecondaryPartition()));
    }
    // dataPath = hdfsUrl + warehouseDir + "/" + dbName + ".db/" + tableName,
    // with any trailing "/" stripped from hdfsUrl and warehouseDir first.
    StringBuilder dataPathBuilder = new StringBuilder();
    String hdfsUrl = hiveInfo.getHdfsDefaultFs();
    String warehouseDir = hiveInfo.getWarehouseDir();
    if (hdfsUrl.endsWith("/")) {
        dataPathBuilder.append(hdfsUrl, 0, hdfsUrl.length() - 1);
    } else {
        dataPathBuilder.append(hdfsUrl);
    }
    if (warehouseDir.endsWith("/")) {
        dataPathBuilder.append(warehouseDir, 0, warehouseDir.length() - 1);
    } else {
        dataPathBuilder.append(warehouseDir);
    }
    String dataPath = dataPathBuilder.append("/").append(hiveInfo.getDbName())
            .append(".db/").append(hiveInfo.getTableName()).toString();
    return new HiveSinkInfo(
            sinkFields.toArray(new FieldInfo[0]),
            hiveInfo.getJdbcUrl(),
            hiveInfo.getDbName(),
            hiveInfo.getTableName(),
            hiveInfo.getUsername(),
            hiveInfo.getPassword(),
            dataPath,
            partitionList.toArray(new HiveSinkInfo.HivePartitionInfo[0]),
            fileFormat);
}
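For illustration, the data-path assembly above can be checked in isolation with standard Java only; the HDFS URL, warehouse directory, database, and table names below are hypothetical:

public static void main(String[] args) {
    String hdfsUrl = "hdfs://namenode:9000/";     // trailing "/" will be stripped
    String warehouseDir = "/user/hive/warehouse"; // must start with "/" to yield a valid path
    String base = (hdfsUrl.endsWith("/") ? hdfsUrl.substring(0, hdfsUrl.length() - 1) : hdfsUrl)
            + (warehouseDir.endsWith("/")
                    ? warehouseDir.substring(0, warehouseDir.length() - 1)
                    : warehouseDir);
    String dataPath = base + "/" + "test_db" + ".db/" + "test_table";
    // Prints: hdfs://namenode:9000/user/hive/warehouse/test_db.db/test_table
    System.out.println(dataPath);
}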