Use of com.fasterxml.jackson.dataformat.csv.CsvMapper in project synthea by synthetichealth: class NHANESSample, method loadSamples. Here CsvMapper deserializes a CSV resource into NHANESSample objects, with the schema inferred from the file's header row.
/**
 * Load the NHANES samples from resources.
 * @return A list of samples.
 */
public static List<NHANESSample> loadSamples() {
  CsvMapper mapper = new CsvMapper();
  List<NHANESSample> samples = new LinkedList<NHANESSample>();
  CsvSchema schema = CsvSchema.emptySchema().withHeader();
  String filename = "nhanes_two_year_olds_bmi.csv";
  try {
    String rawCSV = Utilities.readResource(filename);
    MappingIterator<NHANESSample> it =
        mapper.readerFor(NHANESSample.class).with(schema).readValues(rawCSV);
    while (it.hasNextValue()) {
      samples.add(it.nextValue());
    }
  } catch (Exception e) {
    System.err.println("ERROR: unable to load CSV: " + filename);
    e.printStackTrace();
    throw new RuntimeException(e);
  }
  return samples;
}
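For this binding to work, NHANESSample must expose Jackson-visible properties whose names match the CSV header row. A minimal sketch of such a POJO, with hypothetical column names (the actual fields in synthea's NHANESSample may differ):

import com.fasterxml.jackson.annotation.JsonProperty;

// Hypothetical sketch; field and column names are illustrative, not synthea's.
public class NHANESSample {
  @JsonProperty("wt")
  public double weight; // weight column from the CSV header
  @JsonProperty("bmi")
  public double bmi;    // body mass index column
}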
Use of com.fasterxml.jackson.dataformat.csv.CsvMapper in project flink by splunk: class CsvBulkWriter, method forPojo. CsvMapper.schemaFor derives the CSV schema directly from the POJO class definition.
/**
* Builds a writer based on a POJO class definition.
*
* @param pojoClass The class of the POJO.
* @param stream The output stream.
* @param <T> The type of the elements accepted by this writer.
*/
static <T> CsvBulkWriter<T, T, Void> forPojo(Class<T> pojoClass, FSDataOutputStream stream) {
    final Converter<T, T, Void> converter = (value, context) -> value;
    final CsvMapper csvMapper = new CsvMapper();
    final CsvSchema schema = csvMapper.schemaFor(pojoClass).withoutQuoteChar();
    return new CsvBulkWriter<>(csvMapper, schema, converter, null, stream);
}
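A minimal usage sketch, assuming an already-open FSDataOutputStream and a Jackson-annotated POJO; note that forPojo is package-private, so a call like this would have to live in the same package. The CityPojo constructor here is illustrative:

// Hypothetical usage; `stream` is assumed to be an open FSDataOutputStream.
CsvBulkWriter<CityPojo, CityPojo, Void> writer = CsvBulkWriter.forPojo(CityPojo.class, stream);
writer.addElement(new CityPojo("Berlin", 3_600_000L)); // BulkWriter API: one element per CSV line
writer.flush();
writer.finish();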
Use of com.fasterxml.jackson.dataformat.csv.CsvMapper in project flink by splunk: class RowDataToCsvConverters, method createArrayRowFieldConverter. The mapper acts as a Jackson node factory here: each array-typed row field is converted into an ArrayNode.
private static RowFieldConverter createArrayRowFieldConverter(ArrayType type) {
    LogicalType elementType = type.getElementType();
    final ArrayElementConverter elementConverter =
            createNullableArrayElementConverter(elementType);
    return (csvMapper, container, row, pos) -> {
        ArrayNode arrayNode = csvMapper.createArrayNode();
        ArrayData arrayData = row.getArray(pos);
        int numElements = arrayData.size();
        for (int i = 0; i < numElements; i++) {
            arrayNode.add(elementConverter.convert(csvMapper, arrayNode, arrayData, i));
        }
        return arrayNode;
    };
}
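To illustrate the lambda's contract, here is a conceptual sketch of invoking such a converter on a row whose field 0 is an ARRAY<INT>. RowFieldConverter is a private interface of RowDataToCsvConverters, so this would only compile inside that class; the wiring is illustrative:

// Conceptual sketch; GenericRowData/GenericArrayData are Flink's in-memory RowData implementations.
CsvMapper csvMapper = new CsvMapper();
RowFieldConverter converter = createArrayRowFieldConverter(new ArrayType(new IntType()));
RowData row = GenericRowData.of(new GenericArrayData(new int[] {1, 2, 3}));
JsonNode node = converter.convert(csvMapper, csvMapper.createObjectNode(), row, 0);
// node is now the ArrayNode [1, 2, 3]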
Use of com.fasterxml.jackson.dataformat.csv.CsvMapper in project flink by splunk: class CsvFileFormatFactory, method createEncodingFormat. A single CsvMapper and a reusable ObjectNode container are captured in the converter context shared by the bulk writer.
@Override
public EncodingFormat<BulkWriter.Factory<RowData>> createEncodingFormat(
        DynamicTableFactory.Context context, ReadableConfig formatOptions) {
    return new EncodingFormat<BulkWriter.Factory<RowData>>() {

        @Override
        public BulkWriter.Factory<RowData> createRuntimeEncoder(
                DynamicTableSink.Context context, DataType physicalDataType) {
            final RowType rowType = (RowType) physicalDataType.getLogicalType();
            final CsvSchema schema = buildCsvSchema(rowType, formatOptions);
            final RowDataToCsvConverter converter =
                    RowDataToCsvConverters.createRowConverter(rowType);
            final CsvMapper mapper = new CsvMapper();
            final ObjectNode container = mapper.createObjectNode();
            final RowDataToCsvConverter.RowDataToCsvFormatConverterContext converterContext =
                    new RowDataToCsvConverter.RowDataToCsvFormatConverterContext(mapper, container);
            return out -> CsvBulkWriter.forSchema(mapper, schema, converter, converterContext, out);
        }

        @Override
        public ChangelogMode getChangelogMode() {
            return ChangelogMode.insertOnly();
        }
    };
}
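buildCsvSchema (not shown) maps the RowType's field names onto CsvSchema columns and applies the format options. A rough hand-rolled equivalent for a two-field row, ignoring formatOptions; the field names are illustrative:

// Minimal sketch of deriving a CsvSchema from a RowType by hand.
RowType rowType = RowType.of(
        new LogicalType[] {new IntType(), new VarCharType(VarCharType.MAX_LENGTH)},
        new String[] {"id", "name"});
CsvSchema.Builder builder = CsvSchema.builder();
for (String fieldName : rowType.getFieldNames()) {
    builder.addColumn(fieldName); // column type defaults to STRING
}
CsvSchema schema = builder.build();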
Use of com.fasterxml.jackson.dataformat.csv.CsvMapper in project flink by splunk: class DataStreamCsvITCase, method testCsvReaderFormatFromSchema. The test derives a pipe-separated schema from a POJO and reads the file back through CsvReaderFormat.
@Test
public void testCsvReaderFormatFromSchema() throws Exception {
    writeFile(outDir, "data.csv", CSV_LINES_PIPE_SEPARATED);
    CsvMapper mapper = new CsvMapper();
    CsvSchema schema =
            mapper.schemaFor(CityPojo.class).withoutQuoteChar().withColumnSeparator('|');
    final CsvReaderFormat<CityPojo> csvFormat =
            CsvReaderFormat.forSchema(mapper, schema, TypeInformation.of(CityPojo.class));
    final List<CityPojo> result = initializeSourceAndReadData(outDir, csvFormat);
    assertThat(result).isEqualTo(Arrays.asList(POJOS));
}
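schemaFor reads the POJO's declared properties; a @JsonPropertyOrder annotation is what typically pins the column order. A hypothetical shape for such a POJO (the real CityPojo in the Flink test may have different fields):

// Illustrative sketch; field names are assumptions, not the actual test POJO.
@JsonPropertyOrder({"city", "population"})
public static class CityPojo {
    public String city;
    public long population;

    public CityPojo() {} // Jackson needs a no-arg constructor
}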