Use of org.apache.commons.csv.CSVRecord in project webapp by elimu-ai: class CsvContentExtractionHelper, method getLettersFromCsvBackup.
/**
 * For information on how the CSV files were generated, see {@link LetterCsvExportController#handleRequest}.
 */
public static List<Letter> getLettersFromCsvBackup(File csvFile, SoundDao soundDao) {
    logger.info("getLettersFromCsvBackup");
    List<Letter> letters = new ArrayList<>();
    Path csvFilePath = Paths.get(csvFile.toURI());
    logger.info("csvFilePath: " + csvFilePath);
    try {
        Reader reader = Files.newBufferedReader(csvFilePath);
        CSVFormat csvFormat = CSVFormat.DEFAULT
                .withHeader("id", "text", "diacritic", "usage_count")
                .withSkipHeaderRecord();
        CSVParser csvParser = new CSVParser(reader, csvFormat);
        for (CSVRecord csvRecord : csvParser) {
            logger.info("csvRecord: " + csvRecord);
            Letter letter = new Letter();
            String text = csvRecord.get("text");
            letter.setText(text);
            boolean diacritic = Boolean.valueOf(csvRecord.get("diacritic"));
            letter.setDiacritic(diacritic);
            Integer usageCount = Integer.valueOf(csvRecord.get("usage_count"));
            letter.setUsageCount(usageCount);
            letters.add(letter);
        }
    } catch (IOException ex) {
        logger.error(ex);
    }
    return letters;
}
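The pattern to note here is that withHeader(...) assigns names to the columns so each record can be read by name rather than index, while withSkipHeaderRecord() discards the file's own header row. A minimal self-contained sketch of the same pattern; the class name and inline CSV content are invented for illustration:

import java.io.Reader;
import java.io.StringReader;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class LetterCsvDemo {
    public static void main(String[] args) throws Exception {
        // Two data rows preceded by the header row that withSkipHeaderRecord() discards.
        String csv = "id,text,diacritic,usage_count\n1,a,false,42\n2,b,true,7\n";
        CSVFormat csvFormat = CSVFormat.DEFAULT
                .withHeader("id", "text", "diacritic", "usage_count")
                .withSkipHeaderRecord();
        try (Reader reader = new StringReader(csv);
             CSVParser csvParser = new CSVParser(reader, csvFormat)) {
            for (CSVRecord csvRecord : csvParser) {
                // Columns are addressed by the names supplied to withHeader(), not by index.
                System.out.println(csvRecord.get("text") + " -> " + csvRecord.get("usage_count"));
            }
        }
    }
}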
Use of org.apache.commons.csv.CSVRecord in project webapp by elimu-ai: class CsvContentExtractionHelper, method getStoryBooksFromCsvBackup.
/**
 * For information on how the CSV files were generated, see {@link StoryBookCsvExportController#handleRequest}.
 * <p />
 * Also see {@link #getStoryBookChaptersFromCsvBackup}
 */
public static List<StoryBookGson> getStoryBooksFromCsvBackup(File csvFile) {
    logger.info("getStoryBooksFromCsvBackup");
    List<StoryBookGson> storyBookGsons = new ArrayList<>();
    Path csvFilePath = Paths.get(csvFile.toURI());
    logger.info("csvFilePath: " + csvFilePath);
    try {
        Reader reader = Files.newBufferedReader(csvFilePath);
        CSVFormat csvFormat = CSVFormat.DEFAULT
                .withHeader("id", "title", "description", "content_license", "attribution_url", "reading_level", "cover_image_id", "chapters")
                .withSkipHeaderRecord();
        CSVParser csvParser = new CSVParser(reader, csvFormat);
        for (CSVRecord csvRecord : csvParser) {
            logger.info("csvRecord: " + csvRecord);
            // Convert from CSV to GSON
            StoryBookGson storyBookGson = new StoryBookGson();
            String title = csvRecord.get("title");
            storyBookGson.setTitle(title);
            String description = csvRecord.get("description");
            storyBookGson.setDescription(description);
            if (StringUtils.isNotBlank(csvRecord.get("content_license"))) {
                ContentLicense contentLicense = ContentLicense.valueOf(csvRecord.get("content_license"));
                // storyBookGson.setContentLicense(contentLicense);
            }
            String attributionUrl = csvRecord.get("attribution_url");
            if (StringUtils.isNotBlank(csvRecord.get("reading_level"))) {
                ReadingLevel readingLevel = ReadingLevel.valueOf(csvRecord.get("reading_level"));
                storyBookGson.setReadingLevel(readingLevel);
            }
            if (StringUtils.isNotBlank(csvRecord.get("cover_image_id"))) {
                Long coverImageId = Long.valueOf(csvRecord.get("cover_image_id"));
                // storyBookGson.setCoverImage();
            }
            List<StoryBookChapterGson> storyBookChapterGsons = new ArrayList<>();
            JSONArray chaptersJsonArray = new JSONArray(csvRecord.get("chapters"));
            logger.info("chaptersJsonArray: " + chaptersJsonArray);
            for (int i = 0; i < chaptersJsonArray.length(); i++) {
                JSONObject chapterJsonObject = chaptersJsonArray.getJSONObject(i);
                logger.info("chapterJsonObject: " + chapterJsonObject);
                StoryBookChapterGson storyBookChapterGson = new StoryBookChapterGson();
                storyBookChapterGson.setSortOrder(chapterJsonObject.getInt("sortOrder"));
                List<StoryBookParagraphGson> storyBookParagraphGsons = new ArrayList<>();
                JSONArray paragraphsJsonArray = chapterJsonObject.getJSONArray("storyBookParagraphs");
                logger.info("paragraphsJsonArray: " + paragraphsJsonArray);
                for (int j = 0; j < paragraphsJsonArray.length(); j++) {
                    JSONObject paragraphJsonObject = paragraphsJsonArray.getJSONObject(j);
                    logger.info("paragraphJsonObject: " + paragraphJsonObject);
                    StoryBookParagraphGson storyBookParagraphGson = new StoryBookParagraphGson();
                    storyBookParagraphGson.setSortOrder(paragraphJsonObject.getInt("sortOrder"));
                    storyBookParagraphGson.setOriginalText(paragraphJsonObject.getString("originalText"));
                    // TODO: setWords
                    storyBookParagraphGsons.add(storyBookParagraphGson);
                }
                storyBookChapterGson.setStoryBookParagraphs(storyBookParagraphGsons);
                storyBookChapterGsons.add(storyBookChapterGson);
            }
            storyBookGson.setStoryBookChapters(storyBookChapterGsons);
            storyBookGsons.add(storyBookGson);
        }
    } catch (IOException ex) {
        logger.error(ex);
    }
    return storyBookGsons;
}
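The interesting twist in this snippet is that the "chapters" column holds a JSON array serialized into a single CSV cell, so every record is parsed twice: once by commons-csv and once by org.json. A stripped-down sketch of just the nested-JSON part, with an invented cell value and class name:

import org.json.JSONArray;
import org.json.JSONObject;

public class ChaptersCellDemo {
    public static void main(String[] args) {
        // What a "chapters" cell might look like once commons-csv has unescaped it:
        // a JSON array of chapters, each holding an array of paragraphs.
        String chaptersCell = "[{\"sortOrder\":1,\"storyBookParagraphs\":"
                + "[{\"sortOrder\":1,\"originalText\":\"Once upon a time\"}]}]";
        JSONArray chapters = new JSONArray(chaptersCell);
        for (int i = 0; i < chapters.length(); i++) {
            JSONObject chapter = chapters.getJSONObject(i);
            JSONArray paragraphs = chapter.getJSONArray("storyBookParagraphs");
            for (int j = 0; j < paragraphs.length(); j++) {
                JSONObject paragraph = paragraphs.getJSONObject(j);
                System.out.println("chapter " + chapter.getInt("sortOrder")
                        + ", paragraph " + paragraph.getInt("sortOrder")
                        + ": " + paragraph.getString("originalText"));
            }
        }
    }
}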
Use of org.apache.commons.csv.CSVRecord in project kylo by Teradata: class CSVFileSchemaParser, method populateSchema.
private DefaultFileSchema populateSchema(CSVParser parser) {
    DefaultFileSchema fileSchema = new DefaultFileSchema();
    int i = 0;
    ArrayList<Field> fields = new ArrayList<>();
    for (CSVRecord record : parser) {
        if (i > 9) {
            break;
        }
        int size = record.size();
        for (int j = 0; j < size; j++) {
            DefaultField field = null;
            if (i == 0) {
                field = new DefaultField();
                if (headerRow) {
                    field.setName(record.get(j));
                } else {
                    field.setName("Col_" + (j + 1));
                }
                fields.add(field);
            } else {
                try {
                    field = (DefaultField) fields.get(j);
                    field.getSampleValues().add(StringUtils.defaultString(record.get(j), ""));
                } catch (IndexOutOfBoundsException e) {
                    LOG.warn("Sample file has potential sparse column problem at row [{}] field [{}]", i + 1, j + 1);
                }
            }
        }
        i++;
    }
    fileSchema.setFields(fields);
    return fileSchema;
}
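The method infers a schema by sampling: the first record supplies the field names (or synthetic Col_N names when headerRow is false), and up to nine further records contribute sample values per column. A simplified standalone sketch of the same sampling loop, assuming a header row and using an invented inline CSV string:

import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class SchemaSampleDemo {
    public static void main(String[] args) throws Exception {
        String csv = "name,age\nalice,30\nbob,25\n";
        List<String> fieldNames = new ArrayList<>();
        List<List<String>> sampleValues = new ArrayList<>();
        try (CSVParser parser = CSVFormat.DEFAULT.parse(new StringReader(csv))) {
            int row = 0;
            for (CSVRecord record : parser) {
                if (row > 9) {
                    break; // sample at most ten rows, as in populateSchema()
                }
                for (int col = 0; col < record.size(); col++) {
                    if (row == 0) {
                        // First record supplies the field names (the headerRow == true branch).
                        fieldNames.add(record.get(col));
                        sampleValues.add(new ArrayList<>());
                    } else if (col < sampleValues.size()) {
                        // Bounds check stands in for the IndexOutOfBoundsException catch on ragged rows.
                        sampleValues.get(col).add(record.get(col));
                    }
                }
                row++;
            }
        }
        System.out.println(fieldNames + " -> " + sampleValues);
    }
}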
Use of org.apache.commons.csv.CSVRecord in project drools-wb by kiegroup: class ScenarioCsvImportExport, method importData.
public <T extends AbstractScesimData> AbstractScesimModel<T> importData(String raw, AbstractScesimModel<T> originalScesimModel) throws IOException {
    CSVParser csvParser = CSVFormat.DEFAULT.parse(new StringReader(raw));
    AbstractScesimModel<T> toReturn = originalScesimModel.cloneModel();
    toReturn.clearDatas();
    List<FactMapping> factMappings = toReturn.getScesimModelDescriptor().getUnmodifiableFactMappings();
    List<CSVRecord> csvRecords = csvParser.getRecords();
    if (csvRecords.size() < HEADER_SIZE) {
        throw new IllegalArgumentException("Malformed file, missing header");
    }
    csvRecords = csvRecords.subList(HEADER_SIZE, csvRecords.size());
    for (CSVRecord csvRecord : csvRecords) {
        T scesimDataToFill = toReturn.addData();
        if (csvRecord.size() != factMappings.size()) {
            throw new IllegalArgumentException("Malformed row " + csvRecord);
        }
        for (int i = 0; i < factMappings.size(); i += 1) {
            FactMapping factMapping = factMappings.get(i);
            String valueToImport = "".equals(csvRecord.get(i)) ? null : csvRecord.get(i);
            scesimDataToFill.addMappingValue(factMapping.getFactIdentifier(), factMapping.getExpressionIdentifier(), valueToImport);
        }
    }
    return toReturn;
}
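Here getRecords() materializes the whole file so the first HEADER_SIZE rows can be dropped with subList before the data rows are mapped one-to-one onto fact mappings, with empty cells normalized to null. A reduced sketch of that skip-and-validate pattern; the class name is invented and the HEADER_SIZE value of 3 is an assumption for illustration, not necessarily the constant drools-wb uses:

import java.io.StringReader;
import java.util.List;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class SkipHeaderRowsDemo {
    // Assumed value for illustration; the real constant matches however many
    // header rows the scesim CSV export writes.
    static final int HEADER_SIZE = 3;

    public static void main(String[] args) throws Exception {
        String raw = "h1a,h1b\nh2a,h2b\nh3a,h3b\nvalue1,\nvalue3,value4\n";
        List<CSVRecord> csvRecords;
        try (CSVParser parser = CSVFormat.DEFAULT.parse(new StringReader(raw))) {
            csvRecords = parser.getRecords();
        }
        if (csvRecords.size() < HEADER_SIZE) {
            throw new IllegalArgumentException("Malformed file, missing header");
        }
        // Drop the header block; everything after it is a data row.
        for (CSVRecord csvRecord : csvRecords.subList(HEADER_SIZE, csvRecords.size())) {
            // Empty cells are normalized to null, mirroring importData().
            String first = "".equals(csvRecord.get(0)) ? null : csvRecord.get(0);
            String second = "".equals(csvRecord.get(1)) ? null : csvRecord.get(1);
            System.out.println(first + ", " + second);
        }
    }
}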
Use of org.apache.commons.csv.CSVRecord in project ArachneCentralAPI by OHDSI: class SubmissionHelper, method parseCsvDataframeToJson.
private JsonObject parseCsvDataframeToJson(String filepath) throws IOException {
    final JsonObject resultInfo = new JsonObject();
    final CSVParser parser = CSVParser.parse(contentStorageService.getContentByFilepath(filepath), Charset.defaultCharset(), CSVFormat.DEFAULT.withHeader());
    final Map<String, Integer> headerMap = parser.getHeaderMap();
    final List<CSVRecord> csvRecordList = parser.getRecords();
    JsonArray jsonHeaders = new JsonArray();
    headerMap.forEach((key, value) -> jsonHeaders.add(key));
    resultInfo.add("headers", jsonHeaders);
    JsonArray jsonRecords = new JsonArray();
    csvRecordList.forEach(record -> {
        final JsonObject jsonRecord = new JsonObject();
        for (Map.Entry<String, Integer> entry : headerMap.entrySet()) {
            final String key = entry.getKey();
            final String value = record.get(entry.getValue());
            if (NumberUtils.isCreatable(value)) {
                jsonRecord.addProperty(key, Float.parseFloat(value));
            } else {
                jsonRecord.addProperty(key, value);
            }
        }
        jsonRecords.add(jsonRecord);
    });
    resultInfo.add("records", jsonRecords);
    return resultInfo;
}
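The conversion above hinges on getHeaderMap(), which maps header names to column indices when CSVFormat.DEFAULT.withHeader() is told to read the names from the first record, and on NumberUtils.isCreatable() to decide whether a cell becomes a JSON number or a JSON string. A self-contained variant that reads an invented inline CSV string instead of going through the content storage service (the class name is also made up):

import java.io.StringReader;
import java.util.Map;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;
import org.apache.commons.lang3.math.NumberUtils;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;

public class CsvToJsonDemo {
    public static void main(String[] args) throws Exception {
        String csv = "name,count\nfoo,12\nbar,n/a\n";
        try (CSVParser parser = new CSVParser(new StringReader(csv), CSVFormat.DEFAULT.withHeader())) {
            // withHeader() with no arguments reads the header names from the first record.
            Map<String, Integer> headerMap = parser.getHeaderMap();
            JsonArray records = new JsonArray();
            for (CSVRecord csvRecord : parser) {
                JsonObject json = new JsonObject();
                for (Map.Entry<String, Integer> entry : headerMap.entrySet()) {
                    String value = csvRecord.get(entry.getValue());
                    // Numeric-looking cells become JSON numbers; everything else stays a string.
                    if (NumberUtils.isCreatable(value)) {
                        json.addProperty(entry.getKey(), Float.parseFloat(value));
                    } else {
                        json.addProperty(entry.getKey(), value);
                    }
                }
                records.add(json);
            }
            System.out.println(records); // [{"name":"foo","count":12.0},{"name":"bar","count":"n/a"}]
        }
    }
}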