Search in sources:

Example 1 with ClickhouseRowSinkFunction

Use of org.apache.inlong.sort.singletenant.flink.clickhouse.ClickhouseRowSinkFunction in project incubator-inlong by apache.

From the class Entrance, method buildSinkStream. The method dispatches on the configured sink type and attaches the matching sink (ClickHouse, Hive, Iceberg, or Kafka) to the source stream.

private static void buildSinkStream(
        DataStream<Row> sourceStream,
        Configuration config,
        SinkInfo sinkInfo,
        Map<String, Object> properties,
        long dataflowId) throws IOException, ClassNotFoundException {
    final String sinkType = checkNotNull(config.getString(Constants.SINK_TYPE));
    final int sinkParallelism = config.getInteger(Constants.SINK_PARALLELISM);
    switch(sinkType) {
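        // ClickHouse: rows are written by the dedicated ClickhouseRowSinkFunction.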
        case Constants.SINK_TYPE_CLICKHOUSE:
            checkState(sinkInfo instanceof ClickHouseSinkInfo);
            ClickHouseSinkInfo clickHouseSinkInfo = (ClickHouseSinkInfo) sinkInfo;
            sourceStream.addSink(new ClickhouseRowSinkFunction(clickHouseSinkInfo))
                    .uid(Constants.SINK_UID)
                    .name("Clickhouse Sink")
                    .setParallelism(sinkParallelism);
            break;
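        // Hive: a HiveWriter processes the rows; when the table is partitioned, a
        // single-parallelism HiveCommitter is chained downstream of the writer.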
        case Constants.SINK_TYPE_HIVE:
            checkState(sinkInfo instanceof HiveSinkInfo);
            HiveSinkInfo hiveSinkInfo = (HiveSinkInfo) sinkInfo;
            if (hiveSinkInfo.getPartitions().length == 0) {
                // No committer operator is needed when the table has no partitions.
                sourceStream.process(new HiveWriter(config, dataflowId, hiveSinkInfo))
                        .uid(Constants.SINK_UID)
                        .name("Hive Sink")
                        .setParallelism(sinkParallelism);
            } else {
                sourceStream.process(new HiveWriter(config, dataflowId, hiveSinkInfo))
                        .uid(Constants.SINK_UID)
                        .name("Hive Sink")
                        .setParallelism(sinkParallelism)
                        .addSink(new HiveCommitter(config, hiveSinkInfo))
                        .name("Hive Committer")
                        .setParallelism(1);
            }
            break;
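        // Iceberg: load the table from its Hadoop location and write rows via an Iceberg FlinkSink.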
        case Constants.SINK_TYPE_ICEBERG:
            checkState(sinkInfo instanceof IcebergSinkInfo);
            IcebergSinkInfo icebergSinkInfo = (IcebergSinkInfo) sinkInfo;
            TableLoader tableLoader = TableLoader.fromHadoopTable(
                    icebergSinkInfo.getTableLocation(), new org.apache.hadoop.conf.Configuration());
            FlinkSink.forRow(sourceStream, CommonUtils.getTableSchema(sinkInfo.getFields()))
                    .tableLoader(tableLoader)
                    .writeParallelism(sinkParallelism)
                    .build();
            break;
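        // Kafka: serialize each row with a schema built from the sink's fields, then
        // hand the bytes to the Kafka sink.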
        case Constants.SINK_TYPE_KAFKA:
            checkState(sinkInfo instanceof KafkaSinkInfo);
            SerializationSchema<Row> schema = SerializationSchemaFactory.build(
                    sinkInfo.getFields(), ((KafkaSinkInfo) sinkInfo).getSerializationInfo());
            sourceStream.addSink(buildKafkaSink((KafkaSinkInfo) sinkInfo, properties, schema, config))
                    .uid(Constants.SINK_UID)
                    .name("Kafka Sink")
                    .setParallelism(sinkParallelism);
            break;
        default:
            throw new IllegalArgumentException("Unsupported sink type " + sinkType);
    }
}
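
For context, here is a self-contained driver showing how a dispatch method like buildSinkStream slots into a Flink job. This is a sketch of ours, not InLong code: buildSinkStream itself is private to Entrance, so the stand-alone attachSink below mirrors its switch-on-type shape using only Flink's built-in PrintSinkFunction, and the "print" sink type string is hypothetical.

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.PrintSinkFunction;
import org.apache.flink.types.Row;

public class SinkDispatchSketch {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Two hard-coded rows stand in for the upstream deserialization/transform stages.
        DataStream<Row> rows = env
                .fromElements(Row.of("k1", 1), Row.of("k2", 2))
                .returns(Types.ROW(Types.STRING, Types.INT));
        attachSink(rows, "print", 1);
        env.execute("sink-dispatch-sketch");
    }

    // Mirrors buildSinkStream's shape: dispatch on a sink-type string and attach
    // the matching sink; "print" is our stand-in for the InLong sink types.
    private static void attachSink(DataStream<Row> stream, String sinkType, int parallelism) {
        switch (sinkType) {
            case "print":
                stream.addSink(new PrintSinkFunction<>())
                        .name("Print Sink")
                        .setParallelism(parallelism);
                break;
            default:
                throw new IllegalArgumentException("Unsupported sink type " + sinkType);
        }
    }
}

The real Entrance wires this same pattern to the InLong sinks listed in the imports below.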
Also used:
HiveWriter (org.apache.inlong.sort.flink.hive.HiveWriter)
IcebergSinkInfo (org.apache.inlong.sort.protocol.sink.IcebergSinkInfo)
ClickhouseRowSinkFunction (org.apache.inlong.sort.singletenant.flink.clickhouse.ClickhouseRowSinkFunction)
ClickHouseSinkInfo (org.apache.inlong.sort.protocol.sink.ClickHouseSinkInfo)
HiveCommitter (org.apache.inlong.sort.flink.hive.HiveCommitter)
HiveSinkInfo (org.apache.inlong.sort.protocol.sink.HiveSinkInfo)
KafkaSinkInfo (org.apache.inlong.sort.protocol.sink.KafkaSinkInfo)
Row (org.apache.flink.types.Row)
TableLoader (org.apache.iceberg.flink.TableLoader)
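
The Kafka branch above depends on a SerializationSchema<Row> produced by SerializationSchemaFactory.build. To illustrate the contract that schema fulfills, here is a minimal hand-rolled implementation using a naive CSV encoding; it is a stand-in of ours, not what InLong's factory actually emits.

import java.nio.charset.StandardCharsets;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.types.Row;

public class CsvRowSerializationSketch implements SerializationSchema<Row> {

    @Override
    public byte[] serialize(Row row) {
        // Join the row's fields with commas; nulls become empty strings.
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < row.getArity(); i++) {
            if (i > 0) {
                sb.append(',');
            }
            Object field = row.getField(i);
            sb.append(field == null ? "" : field.toString());
        }
        return sb.toString().getBytes(StandardCharsets.UTF_8);
    }
}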
