
Example 1 with HiveSinkInfo

Use of org.apache.inlong.sort.protocol.sink.HiveSinkInfo in project incubator-inlong by apache.

The class HiveSinkITCase, method prepareSinkSchema:

private HiveSinkInfo prepareSinkSchema() {
    final FieldInfo f1 = new FieldInfo(fieldName1, new TimestampFormatInfo("MILLIS"));
    final FieldInfo f2 = new FieldInfo(fieldName2, IntFormatInfo.INSTANCE);
    final FieldInfo f3 = new FieldInfo(fieldName3, StringFormatInfo.INSTANCE);
    final FieldInfo f4 = new FieldInfo(fieldName4, StringFormatInfo.INSTANCE);
    final HiveTimePartitionInfo timePartition = new HiveTimePartitionInfo(f1.getName(), timePartitionFormat);
    final HiveFieldPartitionInfo fieldPartition = new HiveFieldPartitionInfo(f2.getName());
    return new HiveSinkInfo(
            new FieldInfo[] { f1, f2, f3, f4 },
            hiveMetastoreUrl, hiveDb, hiveTable, hiveUsername, hivePassword,
            dfsSchema + hdfsDataDir,
            new HivePartitionInfo[] { timePartition, fieldPartition },
            new TextFileFormat('\t'));
}
Also used: HiveFieldPartitionInfo(org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HiveFieldPartitionInfo), TimestampFormatInfo(org.apache.inlong.sort.formats.common.TimestampFormatInfo), HiveTimePartitionInfo(org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HiveTimePartitionInfo), HiveSinkInfo(org.apache.inlong.sort.protocol.sink.HiveSinkInfo), TextFileFormat(org.apache.inlong.sort.protocol.sink.HiveSinkInfo.TextFileFormat), FieldInfo(org.apache.inlong.sort.protocol.FieldInfo)
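
Taken on its own, the schema above yields four fields and two partition columns; a minimal sketch of checking that inside the same test class (JUnit's Assert is assumed here; getPartitions() and getFields() are the accessors used elsewhere on this page):

HiveSinkInfo sinkInfo = prepareSinkSchema();
// two partitions: the time partition on f1 and the field partition on f2
Assert.assertEquals(2, sinkInfo.getPartitions().length);
Assert.assertEquals(4, sinkInfo.getFields().length);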

Example 2 with HiveSinkInfo

Use of org.apache.inlong.sort.protocol.sink.HiveSinkInfo in project incubator-inlong by apache.

The class Entrance, method buildSinkStream:

private static void buildSinkStream(DataStream<Row> sourceStream, Configuration config, SinkInfo sinkInfo, Map<String, Object> properties, long dataflowId) throws IOException, ClassNotFoundException {
    final String sinkType = checkNotNull(config.getString(Constants.SINK_TYPE));
    final int sinkParallelism = config.getInteger(Constants.SINK_PARALLELISM);
    switch(sinkType) {
        case Constants.SINK_TYPE_CLICKHOUSE:
            checkState(sinkInfo instanceof ClickHouseSinkInfo);
            ClickHouseSinkInfo clickHouseSinkInfo = (ClickHouseSinkInfo) sinkInfo;
            sourceStream.addSink(new ClickhouseRowSinkFunction(clickHouseSinkInfo)).uid(Constants.SINK_UID).name("Clickhouse Sink").setParallelism(sinkParallelism);
            break;
        case Constants.SINK_TYPE_HIVE:
            checkState(sinkInfo instanceof HiveSinkInfo);
            HiveSinkInfo hiveSinkInfo = (HiveSinkInfo) sinkInfo;
            if (hiveSinkInfo.getPartitions().length == 0) {
                // The committer operator is not needed when the table has no partitions.
                sourceStream.process(new HiveWriter(config, dataflowId, hiveSinkInfo)).uid(Constants.SINK_UID).name("Hive Sink").setParallelism(sinkParallelism);
            } else {
                sourceStream
                        .process(new HiveWriter(config, dataflowId, hiveSinkInfo))
                        .uid(Constants.SINK_UID)
                        .name("Hive Sink")
                        .setParallelism(sinkParallelism)
                        .addSink(new HiveCommitter(config, hiveSinkInfo))
                        .name("Hive Committer")
                        .setParallelism(1);
            }
            break;
        case Constants.SINK_TYPE_ICEBERG:
            checkState(sinkInfo instanceof IcebergSinkInfo);
            IcebergSinkInfo icebergSinkInfo = (IcebergSinkInfo) sinkInfo;
            TableLoader tableLoader = TableLoader.fromHadoopTable(icebergSinkInfo.getTableLocation(), new org.apache.hadoop.conf.Configuration());
            FlinkSink.forRow(sourceStream, CommonUtils.getTableSchema(sinkInfo.getFields())).tableLoader(tableLoader).writeParallelism(sinkParallelism).build();
            break;
        case Constants.SINK_TYPE_KAFKA:
            checkState(sinkInfo instanceof KafkaSinkInfo);
            SerializationSchema<Row> schema = SerializationSchemaFactory.build(sinkInfo.getFields(), ((KafkaSinkInfo) sinkInfo).getSerializationInfo());
            sourceStream.addSink(buildKafkaSink((KafkaSinkInfo) sinkInfo, properties, schema, config)).uid(Constants.SINK_UID).name("Kafka Sink").setParallelism(sinkParallelism);
            break;
        default:
            throw new IllegalArgumentException("Unsupported sink type " + sinkType);
    }
}
Also used: HiveWriter(org.apache.inlong.sort.flink.hive.HiveWriter), IcebergSinkInfo(org.apache.inlong.sort.protocol.sink.IcebergSinkInfo), ClickhouseRowSinkFunction(org.apache.inlong.sort.singletenant.flink.clickhouse.ClickhouseRowSinkFunction), ClickHouseSinkInfo(org.apache.inlong.sort.protocol.sink.ClickHouseSinkInfo), HiveCommitter(org.apache.inlong.sort.flink.hive.HiveCommitter), HiveSinkInfo(org.apache.inlong.sort.protocol.sink.HiveSinkInfo), KafkaSinkInfo(org.apache.inlong.sort.protocol.sink.KafkaSinkInfo), Row(org.apache.flink.types.Row), TableLoader(org.apache.iceberg.flink.TableLoader)
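
For reference, the dispatch above is driven entirely by two configuration entries; a minimal sketch of selecting the Hive branch (the setString/setInteger setters are assumed from the Flink-style Configuration that org.apache.inlong.sort.configuration.Configuration mirrors):

Configuration config = new Configuration();
// SINK_TYPE selects the switch branch; SINK_PARALLELISM sizes the writer operator
config.setString(Constants.SINK_TYPE, Constants.SINK_TYPE_HIVE);
config.setInteger(Constants.SINK_PARALLELISM, 2);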

Example 3 with HiveSinkInfo

Use of org.apache.inlong.sort.protocol.sink.HiveSinkInfo in project incubator-inlong by apache.

The class JdbcHivePartitionTool, method main:

public static void main(String[] args) throws Exception {
    final Configuration config = ParameterTool.fromArgs(args).getConfiguration();
    final List<HivePartitionInfo> partitions = new ArrayList<>();
    final List<String> partitionValues = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        String partitionName = config.getString("partition_" + i + "_name", null);
        if (partitionName != null) {
            partitions.add(new HiveFieldPartitionInfo(partitionName));
            partitionValues.add(config.getString("partition_" + i + "_value", ""));
        }
    }
    final String database = config.getString("database", null);
    final String table = config.getString("table", null);
    HiveSinkInfo hiveSinkInfo = new HiveSinkInfo(
            new FieldInfo[0],
            config.getString("metastore_address", null),
            database,
            table,
            config.getString("username", null),
            config.getString("password", null),
            config.getString("root_path", null),
            partitions.toArray(new HivePartitionInfo[0]),
            new TextFileFormat('\t'));
    JdbcHivePartitionCommitPolicy committer = new JdbcHivePartitionCommitPolicy(config, hiveSinkInfo);
    try {
        committer.commit(new Context() {

            @Override
            public String databaseName() {
                return database;
            }

            @Override
            public String tableName() {
                return table;
            }

            @Override
            public HivePartition partition() {
                HivePartition hivePartition = new HivePartition();
                List<Tuple2<String, String>> partitionPairs = new ArrayList<>();
                for (int i = 0; i < partitions.size(); i++) {
                    partitionPairs.add(Tuple2.of(partitions.get(i).getFieldName(), partitionValues.get(i)));
                }
                // noinspection unchecked
                hivePartition.setPartitions(partitionPairs.toArray(new Tuple2[0]));
                return hivePartition;
            }
        });
    } finally {
        committer.close();
    }
}
Also used: Context(org.apache.inlong.sort.flink.hive.partition.PartitionCommitPolicy.Context), Configuration(org.apache.inlong.sort.configuration.Configuration), ArrayList(java.util.ArrayList), HivePartitionInfo(org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HivePartitionInfo), HiveFieldPartitionInfo(org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HiveFieldPartitionInfo), HiveSinkInfo(org.apache.inlong.sort.protocol.sink.HiveSinkInfo), List(java.util.List), TextFileFormat(org.apache.inlong.sort.protocol.sink.HiveSinkInfo.TextFileFormat)
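
Since main reads its settings through ParameterTool.fromArgs, options are passed as --key value pairs; the program arguments might look like this (every value below is a placeholder, not taken from the source):

--metastore_address thrift://localhost:9083 \
--database test_db --table test_table \
--username hive --password hive \
--root_path hdfs://localhost:9000/user/hive/warehouse \
--partition_0_name dt --partition_0_value 20220101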

Example 4 with HiveSinkInfo

Use of org.apache.inlong.sort.protocol.sink.HiveSinkInfo in project incubator-inlong by apache.

The class PulsarTestMetaManagerUtil, method prepareDataFlowInfo:

@Override
public DataFlowInfo prepareDataFlowInfo(long dataFlowId, String... args) {
    FieldInfo[] pulsarFields = new FieldInfo[] {
            new FieldInfo("f1", StringFormatInfo.INSTANCE),
            new FieldInfo("f2", StringFormatInfo.INSTANCE)
    };
    Map<String, Object> config = new HashMap<>();
    config.put("consumer.bootstrap-mode", "earliest");
    return new DataFlowInfo(
            dataFlowId,
            new PulsarSourceInfo(args[0], args[1], args[2], args[3],
                    new CsvDeserializationInfo(','), pulsarFields, null),
            new HiveSinkInfo(new FieldInfo[0], "testServerJdbcUrl", "testDatabaseName",
                    "testTableName", "testUsername", "testPassword", "testDataPath",
                    new HivePartitionInfo[0], new TextFileFormat(',')),
            config);
}
Also used: HashMap(java.util.HashMap), PulsarSourceInfo(org.apache.inlong.sort.protocol.source.PulsarSourceInfo), HiveSinkInfo(org.apache.inlong.sort.protocol.sink.HiveSinkInfo), HivePartitionInfo(org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HivePartitionInfo), CsvDeserializationInfo(org.apache.inlong.sort.protocol.deserialization.CsvDeserializationInfo), TextFileFormat(org.apache.inlong.sort.protocol.sink.HiveSinkInfo.TextFileFormat), FieldInfo(org.apache.inlong.sort.protocol.FieldInfo), DataFlowInfo(org.apache.inlong.sort.protocol.DataFlowInfo)
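
Note that the CsvDeserializationInfo(',') above tells the source to split each Pulsar message body on commas, so a payload such as "a,b" would populate the declared fields f1 and f2 in order.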

Example 5 with HiveSinkInfo

Use of org.apache.inlong.sort.protocol.sink.HiveSinkInfo in project incubator-inlong by apache.

The class SinkInfoUtils, method createHiveSinkInfo:

/**
 * Create Hive sink info.
 */
private static HiveSinkInfo createHiveSinkInfo(HiveSinkResponse hiveInfo, List<FieldInfo> sinkFields) {
    if (hiveInfo.getJdbcUrl() == null) {
        throw new RuntimeException(String.format("HiveSink={%s} server url cannot be empty", hiveInfo));
    }
    if (CollectionUtils.isEmpty(hiveInfo.getFieldList())) {
        throw new RuntimeException(String.format("HiveSink={%s} fields cannot be empty", hiveInfo));
    }
    // Parse the field separator (stored as an ASCII code); the default file format is TextFile
    Character separator = (char) Integer.parseInt(hiveInfo.getDataSeparator());
    HiveFileFormat fileFormat;
    String format = hiveInfo.getFileFormat();
    if (Constant.FILE_FORMAT_ORC.equalsIgnoreCase(format)) {
        fileFormat = new HiveSinkInfo.OrcFileFormat(1000);
    } else if (Constant.FILE_FORMAT_SEQUENCE.equalsIgnoreCase(format)) {
        fileFormat = new HiveSinkInfo.SequenceFileFormat(separator, 100);
    } else if (Constant.FILE_FORMAT_PARQUET.equalsIgnoreCase(format)) {
        fileFormat = new HiveSinkInfo.ParquetFileFormat();
    } else {
        fileFormat = new HiveSinkInfo.TextFileFormat(separator);
    }
    // The primary partition field; in the sink it must be a HiveTimePartitionInfo
    List<HivePartitionInfo> partitionList = new ArrayList<>();
    String primary = hiveInfo.getPrimaryPartition();
    if (StringUtils.isNotEmpty(primary)) {
        // Hive time partitions are by day, hour, or minute
        String unit = hiveInfo.getPartitionUnit();
        HiveTimePartitionInfo timePartitionInfo = new HiveTimePartitionInfo(primary, PARTITION_TIME_FORMAT_MAP.get(unit));
        partitionList.add(timePartitionInfo);
    }
    // TODO: the partition type should be set according to the type of the field itself.
    if (StringUtils.isNotEmpty(hiveInfo.getSecondaryPartition())) {
        partitionList.add(new HiveSinkInfo.HiveFieldPartitionInfo(hiveInfo.getSecondaryPartition()));
    }
    // dataPath = hdfsUrl + warehouseDir + "/" + dbName + ".db/" + tableName,
    // with trailing slashes on hdfsUrl and warehouseDir stripped first
    StringBuilder dataPathBuilder = new StringBuilder();
    String hdfsUrl = hiveInfo.getHdfsDefaultFs();
    String warehouseDir = hiveInfo.getWarehouseDir();
    if (hdfsUrl.endsWith("/")) {
        dataPathBuilder.append(hdfsUrl, 0, hdfsUrl.length() - 1);
    } else {
        dataPathBuilder.append(hdfsUrl);
    }
    if (warehouseDir.endsWith("/")) {
        dataPathBuilder.append(warehouseDir, 0, warehouseDir.length() - 1);
    } else {
        dataPathBuilder.append(warehouseDir);
    }
    String dataPath = dataPathBuilder
            .append("/").append(hiveInfo.getDbName())
            .append(".db/").append(hiveInfo.getTableName())
            .toString();
    return new HiveSinkInfo(
            sinkFields.toArray(new FieldInfo[0]),
            hiveInfo.getJdbcUrl(),
            hiveInfo.getDbName(),
            hiveInfo.getTableName(),
            hiveInfo.getUsername(),
            hiveInfo.getPassword(),
            dataPath,
            partitionList.toArray(new HiveSinkInfo.HivePartitionInfo[0]),
            fileFormat);
}
Also used: HiveTimePartitionInfo(org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HiveTimePartitionInfo), ArrayList(java.util.ArrayList), HivePartitionInfo(org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HivePartitionInfo), HiveSinkInfo(org.apache.inlong.sort.protocol.sink.HiveSinkInfo), HiveFileFormat(org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HiveFileFormat), FieldInfo(org.apache.inlong.sort.protocol.FieldInfo)
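
To make the dataPath assembly concrete, here is a worked example with placeholder values (none of them come from the source):

String hdfsUrl = "hdfs://ns1";                  // no trailing slash: appended unchanged
String warehouseDir = "/user/hive/warehouse/";  // trailing slash: stripped before appending
String dataPath = hdfsUrl
        + warehouseDir.substring(0, warehouseDir.length() - 1)
        + "/" + "test_db" + ".db/" + "test_table";
// dataPath == "hdfs://ns1/user/hive/warehouse/test_db.db/test_table"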

Aggregations

HiveSinkInfo (org.apache.inlong.sort.protocol.sink.HiveSinkInfo): 5 usages
FieldInfo (org.apache.inlong.sort.protocol.FieldInfo): 3 usages
HivePartitionInfo (org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HivePartitionInfo): 3 usages
TextFileFormat (org.apache.inlong.sort.protocol.sink.HiveSinkInfo.TextFileFormat): 3 usages
ArrayList (java.util.ArrayList): 2 usages
HiveFieldPartitionInfo (org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HiveFieldPartitionInfo): 2 usages
HiveTimePartitionInfo (org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HiveTimePartitionInfo): 2 usages
HashMap (java.util.HashMap): 1 usage
List (java.util.List): 1 usage
Row (org.apache.flink.types.Row): 1 usage
TableLoader (org.apache.iceberg.flink.TableLoader): 1 usage
Configuration (org.apache.inlong.sort.configuration.Configuration): 1 usage
HiveCommitter (org.apache.inlong.sort.flink.hive.HiveCommitter): 1 usage
HiveWriter (org.apache.inlong.sort.flink.hive.HiveWriter): 1 usage
Context (org.apache.inlong.sort.flink.hive.partition.PartitionCommitPolicy.Context): 1 usage
TimestampFormatInfo (org.apache.inlong.sort.formats.common.TimestampFormatInfo): 1 usage
DataFlowInfo (org.apache.inlong.sort.protocol.DataFlowInfo): 1 usage
CsvDeserializationInfo (org.apache.inlong.sort.protocol.deserialization.CsvDeserializationInfo): 1 usage
ClickHouseSinkInfo (org.apache.inlong.sort.protocol.sink.ClickHouseSinkInfo): 1 usage
HiveFileFormat (org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HiveFileFormat): 1 usage