Use of com.fasterxml.jackson.dataformat.csv.CsvMapper in project SONG by overture-stack.
The class PcawgSampleSheetConfig, method pcawgSampleSheetReader:
public static ObjectReader pcawgSampleSheetReader(boolean hasHeader, char separator) {
  val csvMapper = new CsvMapper();
  // Honor the hasHeader flag instead of unconditionally expecting a header row.
  val baseSchema = CsvSchema.emptySchema().withColumnSeparator(separator);
  val csvSchema = hasHeader ? baseSchema.withHeader() : baseSchema;
  return csvMapper.readerFor(PcawgSampleBean.class).with(csvSchema);
}
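For illustration, a reader built this way streams one bean per row. The following usage sketch is not from the project: the file name is hypothetical, and it assumes a tab-separated sheet with a header row whose columns match PcawgSampleBean's properties.

// Hypothetical usage of the reader above.
val reader = pcawgSampleSheetReader(true, '\t');
try (MappingIterator<PcawgSampleBean> it =
    reader.<PcawgSampleBean>readValues(new File("pcawg_sample_sheet.tsv"))) {
  while (it.hasNext()) {
    val bean = it.next();
    // process bean ...
  }
}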
Use of org.apache.flink.shaded.jackson2.com.fasterxml.jackson.dataformat.csv.CsvMapper in project flink by apache.
The class CsvBulkWriter, method forPojo:
/**
* Builds a writer based on a POJO class definition.
*
* @param pojoClass The class of the POJO.
* @param stream The output stream.
* @param <T> The type of the elements accepted by this writer.
*/
static <T> CsvBulkWriter<T, T, Void> forPojo(Class<T> pojoClass, FSDataOutputStream stream) {
  // Identity converter: the POJO is handed to Jackson as-is, with the schema
  // derived from the class and quoting disabled.
  final Converter<T, T, Void> converter = (value, context) -> value;
  final CsvMapper csvMapper = new CsvMapper();
  final CsvSchema schema = csvMapper.schemaFor(pojoClass).withoutQuoteChar();
  return new CsvBulkWriter<>(csvMapper, schema, converter, null, stream);
}
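CsvBulkWriter is internal to Flink, but the same schema-from-POJO idea can be tried with plain jackson-dataformat-csv. Below is a minimal standalone sketch under that assumption; the Pojo class is hypothetical.

// Standalone sketch: derive a CSV schema from a POJO class and stream instances.
@JsonPropertyOrder({"name", "count"})  // fix column order for predictable output
public class Pojo {
  public String name;
  public int count;
  public Pojo(String name, int count) { this.name = name; this.count = count; }
}

CsvMapper mapper = new CsvMapper();
CsvSchema schema = mapper.schemaFor(Pojo.class).withoutQuoteChar();
try (SequenceWriter writer = mapper.writer(schema).writeValues(System.out)) {
  writer.write(new Pojo("a", 1));  // writes: a,1
  writer.write(new Pojo("b", 2));  // writes: b,2
}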
Use of org.apache.flink.shaded.jackson2.com.fasterxml.jackson.dataformat.csv.CsvMapper in project flink by apache.
The class CsvFileFormatFactory, method createEncodingFormat:
@Override
public EncodingFormat<BulkWriter.Factory<RowData>> createEncodingFormat(
    DynamicTableFactory.Context context, ReadableConfig formatOptions) {
  return new EncodingFormat<BulkWriter.Factory<RowData>>() {
    @Override
    public BulkWriter.Factory<RowData> createRuntimeEncoder(
        DynamicTableSink.Context context, DataType physicalDataType) {
      final RowType rowType = (RowType) physicalDataType.getLogicalType();
      final CsvSchema schema = buildCsvSchema(rowType, formatOptions);
      final RowDataToCsvConverter converter =
          RowDataToCsvConverters.createRowConverter(rowType);
      final CsvMapper mapper = new CsvMapper();
      // Reusable container node: each RowData is materialized into this tree
      // before being written out according to the CSV schema.
      final ObjectNode container = mapper.createObjectNode();
      final RowDataToCsvConverter.RowDataToCsvFormatConverterContext converterContext =
          new RowDataToCsvConverter.RowDataToCsvFormatConverterContext(mapper, container);
      return out -> CsvBulkWriter.forSchema(mapper, schema, converter, converterContext, out);
    }

    @Override
    public ChangelogMode getChangelogMode() {
      return ChangelogMode.insertOnly();
    }
  };
}
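The buildCsvSchema call is not shown in this excerpt. Conceptually it derives one column per field of the RowType and then applies the format options (delimiter, quoting, and so on). A rough, hypothetical sketch of just the column derivation:

// Hypothetical sketch; the real buildCsvSchema also maps formatOptions
// (field delimiter, quote character, null literal, ...) onto the schema.
static CsvSchema buildCsvSchemaSketch(RowType rowType) {
  CsvSchema.Builder builder = CsvSchema.builder();
  for (String fieldName : rowType.getFieldNames()) {
    builder.addColumn(fieldName);
  }
  return builder.build();
}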
Use of com.fasterxml.jackson.dataformat.csv.CsvMapper in project goci by EBISPOT.
The class FileHandler, method getStudyPatchRequests:
public static List<StudyPatchRequest> getStudyPatchRequests(FileUploadRequest fileUploadRequest) {
  CsvMapper mapper = new CsvMapper();
  CsvSchema schema = getSchemaFromMultiPartFile(fileUploadRequest.getMultipartFile());
  List<StudyPatchRequest> studyPatchRequests;
  try {
    InputStream inputStream = fileUploadRequest.getMultipartFile().getInputStream();
    MappingIterator<StudyPatchRequest> iterator =
        mapper.readerFor(StudyPatchRequest.class).with(schema).readValues(inputStream);
    studyPatchRequests = iterator.readAll();
  } catch (IOException e) {
    throw new FileUploadException("Could not read the file");
  }
  return studyPatchRequests;
}
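getSchemaFromMultiPartFile is referenced but not shown here. In the simplest case such a helper only tells Jackson to take the column names from the first row; a plausible minimal version, assuming a tab-separated upload (the project's actual implementation may inspect the file contents):

// Plausible minimal helper (assumption: header row, tab-separated upload).
private static CsvSchema getSchemaFromMultiPartFile(MultipartFile multipartFile) {
  return CsvSchema.emptySchema()
      .withHeader()
      .withColumnSeparator('\t');
}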
Use of com.fasterxml.jackson.dataformat.csv.CsvMapper in project goci by EBISPOT.
The class FileHandler, method serializePojoToTsv:
public static String serializePojoToTsv(List<?> pojoList) {
  CsvMapper csvMapper = new CsvMapper();
  // Convert the POJOs to maps so the column names can be derived from the keys.
  List<Map<String, Object>> dataList =
      csvMapper.convertValue(pojoList, new TypeReference<List<Map<String, Object>>>() {});
  List<List<String>> csvData = new ArrayList<>();
  List<String> csvHead = new ArrayList<>();
  AtomicInteger counter = new AtomicInteger();
  dataList.forEach(row -> {
    List<String> rowData = new ArrayList<>();
    row.forEach((key, value) -> {
      rowData.add(String.valueOf(value));
      // Collect the header from the first row only.
      if (counter.get() == 0) {
        csvHead.add(key);
      }
    });
    csvData.add(rowData);
    counter.getAndIncrement();
  });
  CsvSchema.Builder builder = CsvSchema.builder();
  csvHead.forEach(builder::addColumn);
  CsvSchema schema = builder.build().withHeader().withLineSeparator("\n").withColumnSeparator('\t');
  String result = "";
  try {
    result = csvMapper.writer(schema).writeValueAsString(csvData);
  } catch (IOException e) {
    throw new FileUploadException("Could not serialize the data");
  }
  return result;
}
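A quick usage sketch with a hypothetical bean; any list of uniform POJOs works, since the header is derived from the first row's keys:

// Hypothetical bean and call, for illustration only.
public class Sample {
  public String accession = "GCST000001";
  public int variants = 42;
}

String tsv = FileHandler.serializePojoToTsv(Arrays.asList(new Sample(), new Sample()));
// tsv now contains a header line ("accession\tvariants") followed by two data rows.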