Use of com.facebook.presto.hive.parquet.write.TestMapredParquetOutputFormat in the project presto by prestodb.
The writeParquetColumn method of the ParquetTester class.
/**
 * Writes one row per step of the supplied per-field value iterators to a Parquet
 * file through the Hive record-writer path, then returns the written file's size.
 *
 * @param jobConf              Hadoop job configuration passed to the output format and SerDe
 * @param outputFile           destination file; its final length is the returned size
 * @param compressionCodecName compression to use; {@code UNCOMPRESSED} disables compression
 * @param tableProperties      Hive table properties used to initialize the SerDe and writer
 * @param objectInspector      inspector used to build and serialize each row object
 * @param valuesByField        one iterator per struct field; writing stops when any is exhausted
 * @param parquetSchema        optional explicit Parquet schema override
 * @param singleLevelArray     whether arrays are written with single-level (legacy) encoding
 * @return the size of the written Parquet file
 * @throws Exception if SerDe initialization, serialization, or writing fails
 */
private static DataSize writeParquetColumn(JobConf jobConf, File outputFile, CompressionCodecName compressionCodecName, Properties tableProperties, SettableStructObjectInspector objectInspector, Iterator<?>[] valuesByField, Optional<MessageType> parquetSchema, boolean singleLevelArray) throws Exception {
    RecordWriter recordWriter = new TestMapredParquetOutputFormat(parquetSchema, singleLevelArray).getHiveRecordWriter(jobConf, new Path(outputFile.toURI()), Text.class, compressionCodecName != UNCOMPRESSED, tableProperties, () -> {
    });
    Object row = objectInspector.create();
    List<StructField> fields = ImmutableList.copyOf(objectInspector.getAllStructFieldRefs());
    // Create and initialize the SerDe ONCE. The original constructed and
    // initialized a new ParquetHiveSerDe on every loop iteration, which is
    // pure overhead — the SerDe is stateless across rows once initialized.
    ParquetHiveSerDe serde = new ParquetHiveSerDe();
    serde.initialize(jobConf, tableProperties, null);
    try {
        // Write rows until any field iterator runs out of values.
        while (stream(valuesByField).allMatch(Iterator::hasNext)) {
            for (int field = 0; field < fields.size(); field++) {
                Object value = valuesByField[field].next();
                objectInspector.setStructFieldData(row, fields.get(field), value);
            }
            Writable record = serde.serialize(row, objectInspector);
            recordWriter.write(record);
        }
    }
    finally {
        // Always close the writer: this flushes the Parquet footer and
        // releases the file handle even if serialization/writing throws.
        recordWriter.close(false);
    }
    return succinctBytes(outputFile.length());
}
Aggregations (related usages)