Usage example of org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HiveFieldPartitionInfo from the Apache incubator-inlong project.
Taken from the prepareSinkSchema method of class HiveSinkITCase.
/**
 * Builds the Hive sink schema used by this integration test: four fields, a time
 * partition on the timestamp field and a value partition on the int field, written
 * as tab-separated text files under {@code dfsSchema + hdfsDataDir}.
 *
 * @return the sink description handed to the Hive sink under test
 */
private HiveSinkInfo prepareSinkSchema() {
    final FieldInfo f1 = new FieldInfo(fieldName1, new TimestampFormatInfo("MILLIS"));
    final FieldInfo f2 = new FieldInfo(fieldName2, IntFormatInfo.INSTANCE);
    final FieldInfo f3 = new FieldInfo(fieldName3, StringFormatInfo.INSTANCE);
    final FieldInfo f4 = new FieldInfo(fieldName4, StringFormatInfo.INSTANCE);
    // Partition by event time (formatted per timePartitionFormat) and by the int field's value.
    final HiveTimePartitionInfo timePartition =
            new HiveTimePartitionInfo(f1.getName(), timePartitionFormat);
    final HiveFieldPartitionInfo fieldPartition = new HiveFieldPartitionInfo(f2.getName());
    return new HiveSinkInfo(
            new FieldInfo[] {f1, f2, f3, f4},
            hiveMetastoreUrl,
            hiveDb,
            hiveTable,
            hiveUsername,
            hivePassword,
            dfsSchema + hdfsDataDir,
            new HivePartitionInfo[] {timePartition, fieldPartition},
            // Char literal directly, instead of the roundabout "\t".charAt(0).
            new TextFileFormat('\t'));
}
Usage example of org.apache.inlong.sort.protocol.sink.HiveSinkInfo.HiveFieldPartitionInfo from the Apache incubator-inlong project.
Taken from the main method of class JdbcHivePartitionTool.
/**
 * Command-line tool that commits a single Hive partition through JDBC.
 *
 * <p>Arguments are parsed with Flink's {@code ParameterTool}:
 * {@code --metastore_address}, {@code --database}, {@code --table},
 * {@code --username}, {@code --password}, {@code --root_path}, plus up to ten
 * partition columns supplied as {@code --partition_<i>_name} /
 * {@code --partition_<i>_value} pairs (indices 0..9).
 *
 * @param args command-line arguments described above
 * @throws Exception if building the sink info or committing the partition fails
 */
public static void main(String[] args) throws Exception {
    final Configuration config = ParameterTool.fromArgs(args).getConfiguration();

    // Collect the (name, value) pair for every partition index that was supplied.
    final int maxPartitions = 10; // named bound instead of a bare magic number in the loop
    final List<HivePartitionInfo> partitions = new ArrayList<>();
    final List<String> partitionValues = new ArrayList<>();
    for (int i = 0; i < maxPartitions; i++) {
        final String partitionName = config.getString("partition_" + i + "_name", null);
        if (partitionName != null) {
            partitions.add(new HiveFieldPartitionInfo(partitionName));
            // A missing value defaults to the empty string rather than aborting.
            partitionValues.add(config.getString("partition_" + i + "_value", ""));
        }
    }

    final String database = config.getString("database", null);
    final String table = config.getString("table", null);
    final HiveSinkInfo hiveSinkInfo = new HiveSinkInfo(
            new FieldInfo[0], // the tool only commits partitions; no field schema is needed
            config.getString("metastore_address", null),
            database,
            table,
            config.getString("username", null),
            config.getString("password", null),
            config.getString("root_path", null),
            partitions.toArray(new HivePartitionInfo[0]),
            // Char literal directly, instead of the roundabout "\t".charAt(0).
            new TextFileFormat('\t'));

    final JdbcHivePartitionCommitPolicy committer =
            new JdbcHivePartitionCommitPolicy(config, hiveSinkInfo);
    try {
        committer.commit(new Context() {
            @Override
            public String databaseName() {
                return database;
            }

            @Override
            public String tableName() {
                return table;
            }

            @Override
            public HivePartition partition() {
                final HivePartition hivePartition = new HivePartition();
                final List<Tuple2<String, String>> partitionPairs = new ArrayList<>();
                for (int i = 0; i < partitions.size(); i++) {
                    partitionPairs.add(
                            Tuple2.of(partitions.get(i).getFieldName(), partitionValues.get(i)));
                }
                // Creating a generic Tuple2 array is inherently unchecked.
                // noinspection unchecked
                hivePartition.setPartitions(partitionPairs.toArray(new Tuple2[0]));
                return hivePartition;
            }
        });
    } finally {
        // Always release the JDBC resources, even when the commit throws.
        committer.close();
    }
}
Aggregations