Usage of org.apache.iceberg.data.parquet.GenericParquetWriter in the Apache Drill project:
the write method of the ParquetFileWriter class.
/**
 * Writes the accumulated {@code records} into a new Parquet file named {@code name}
 * under {@code location}, using the table's file IO and the generic Parquet writer.
 *
 * @return metadata {@link File} holding the output file and its write metrics
 * @throws IcebergMetastoreException if writing the Parquet file fails for any reason
 */
@Override
public File write() {
  Objects.requireNonNull(location, "File create location must be specified");
  Objects.requireNonNull(name, "File name must be specified");
  OutputFile outputFile = table.io().newOutputFile(
      new Path(location, FileFormat.PARQUET.addExtension(name)).toUri().getPath());
  FileAppender<Record> fileAppender = null;
  try {
    fileAppender = Parquet.write(outputFile)
        .forTable(table)
        .createWriterFunc(GenericParquetWriter::buildWriter)
        .build();
    fileAppender.addAll(records);
    // Close before reading metrics: metrics are available only after the file
    // has been fully written (i.e. after close() has run). This is also why
    // try-with-resources is not used — metrics() must be called post-close.
    fileAppender.close();
    return new File(outputFile, fileAppender.metrics());
  } catch (IOException | ClassCastException | RuntimeIOException e) {
    if (fileAppender != null) {
      try {
        fileAppender.close();
      } catch (Exception ex) {
        // The write has already failed; attach the closing failure as a
        // suppressed exception instead of discarding it, then rethrow.
        e.addSuppressed(ex);
      }
    }
    throw new IcebergMetastoreException(
        String.format("Unable to write data into parquet file [%s]", outputFile.location()), e);
  }
}
Aggregations