use of org.apache.commons.csv.CSVRecord in project archi by archimatetool.
the class CSVImporter method getRecords.
// -------------------------------- Helpers --------------------------------
/**
* Get all records for a CSV file.
* This is a brute-force approach: try a comma delimiter first. If that fails,
* try a semicolon, and if that fails, a tab.
*
* @param file The file to open
* @return Records, which may be empty but never null
* @throws IOException
*/
List<CSVRecord> getRecords(File file) throws IOException {
    List<CSVRecord> records = new ArrayList<CSVRecord>();
    CSVParser parser = null;

    String errorMessage = "invalid char between encapsulated token and delimiter"; //$NON-NLS-1$

    try {
        InputStreamReader is = new InputStreamReader(new BOMInputStream(new FileInputStream(file)), "UTF-8"); //$NON-NLS-1$
        parser = new CSVParser(is, CSVFormat.DEFAULT);
        records = parser.getRecords();
    } catch (IOException ex) {
        if (parser != null) {
            parser.close();
        }
        if (ex.getMessage() != null && ex.getMessage().contains(errorMessage)) {
            try {
                InputStreamReader is = new InputStreamReader(new BOMInputStream(new FileInputStream(file)), "UTF-8"); //$NON-NLS-1$
                parser = new CSVParser(is, CSVFormat.DEFAULT.withDelimiter(';'));
                records = parser.getRecords();
            } catch (IOException ex2) {
                if (parser != null) {
                    parser.close();
                }
                if (ex2.getMessage() != null && ex2.getMessage().contains(errorMessage)) {
                    InputStreamReader is = new InputStreamReader(new BOMInputStream(new FileInputStream(file)), "UTF-8"); //$NON-NLS-1$
                    parser = new CSVParser(is, CSVFormat.DEFAULT.withDelimiter('\t'));
                    records = parser.getRecords();
                } else {
                    throw ex2;
                }
            }
        } else {
            throw ex;
        }
    } finally {
        if (parser != null) {
            parser.close();
        }
    }

    return records;
}
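The same fallback can be expressed more compactly as a loop over candidate delimiters. Below is a minimal, self-contained sketch (the helper name tryDelimiters is invented here, not part of Archi); note that, unlike the original, it retries on any IOException rather than only on the specific encapsulated-token message:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.List;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;
import org.apache.commons.io.input.BOMInputStream;

// Try each candidate delimiter in turn and keep the first successful parse.
static List<CSVRecord> tryDelimiters(File file) throws IOException {
    IOException lastError = null;
    for (char delimiter : new char[] { ',', ';', '\t' }) {
        try (Reader reader = new InputStreamReader(new BOMInputStream(new FileInputStream(file)), "UTF-8");
                CSVParser parser = new CSVParser(reader, CSVFormat.DEFAULT.withDelimiter(delimiter))) {
            return parser.getRecords();
        } catch (IOException ex) {
            lastError = ex; // remember the failure and fall through to the next delimiter
        }
    }
    throw lastError;
}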
use of org.apache.commons.csv.CSVRecord in project symja_android_library by axkr.
the class Import method evaluate.
@Override
public IExpr evaluate(final IAST ast, EvalEngine engine) {
    Validate.checkSize(ast, 3);
    if (!(ast.arg1() instanceof IStringX)) {
        throw new WrongNumberOfArguments(ast, 1, ast.size() - 1);
    }
    if (!(ast.arg2() instanceof IStringX)) {
        throw new WrongNumberOfArguments(ast, 2, ast.size() - 1);
    }
    IStringX arg1 = (IStringX) ast.arg1();
    IStringX arg2 = (IStringX) ast.arg2();
    FileReader reader = null;
    try {
        reader = new FileReader(arg1.toString());
        if (arg2.contentEquals("Table")) {
            AST2Expr ast2Expr = AST2Expr.CONST;
            if (engine.isRelaxedSyntax()) {
                ast2Expr = AST2Expr.CONST_LC;
            }
            final Parser parser = new Parser(engine.isRelaxedSyntax(), true);
            // Note: space, not comma, is used as the delimiter here.
            CSVFormat csvFormat = CSVFormat.RFC4180.withDelimiter(' ');
            Iterable<CSVRecord> records = csvFormat.parse(reader);
            // Convert every cell into a Symja expression and collect the rows
            // into a nested list.
            IAST rowList = F.List();
            for (CSVRecord record : records) {
                IAST columnList = F.List();
                for (String string : record) {
                    final ASTNode node = parser.parse(string);
                    IExpr temp = ast2Expr.convert(node, engine);
                    columnList.append(temp);
                }
                rowList.append(columnList);
            }
            return rowList;
        }
    } catch (IOException ioe) {
        engine.printMessage("Import: file " + arg1.toString() + " not found!");
    } finally {
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException e) {
                // ignore: nothing useful can be done if close() fails
            }
        }
    }
    return F.NIL;
}
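In a Symja session this branch is reached by an expression of the form Import("somefile.csv", "Table") (file name hypothetical). Stripped of the Symja expression types, the loop is plain Commons CSV iteration; here is a minimal standalone sketch (method name invented) that collects each space-delimited record into a list of raw strings instead of parsed expressions:

import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

// Read a space-delimited file into nested lists of cell strings.
static List<List<String>> readTable(String path) throws IOException {
    List<List<String>> rows = new ArrayList<>();
    try (Reader reader = new FileReader(path);
            CSVParser parser = CSVFormat.RFC4180.withDelimiter(' ').parse(reader)) {
        for (CSVRecord record : parser) {
            List<String> row = new ArrayList<>();
            record.forEach(row::add); // CSVRecord iterates over its cell values
            rows.add(row);
        }
    }
    return rows;
}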
use of org.apache.commons.csv.CSVRecord in project opennms by OpenNMS.
the class RScriptExecutor method fromCsv.
/**
* Convert the CSV string to an immutable table.
*/
protected static ImmutableTable<Long, String, Double> fromCsv(final String csv) throws IOException {
    ImmutableTable.Builder<Long, String, Double> builder = ImmutableTable.builder();
    try (StringReader reader = new StringReader(csv);
            CSVParser parser = new CSVParser(reader, CSVFormat.RFC4180.withHeader())) {
        long rowIndex = 0;
        Map<String, Integer> headerMap = parser.getHeaderMap();
        for (CSVRecord record : parser) {
            for (String key : headerMap.keySet()) {
                Double value;
                try {
                    value = Double.valueOf(record.get(key));
                } catch (NumberFormatException e) {
                    value = Double.NaN;
                }
                builder.put(rowIndex, key, value);
            }
            rowIndex++;
        }
    }
    return builder.build();
}
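A quick usage sketch from inside the class (the input string is invented; the method is protected static, so external callers would need a subclass): row keys are zero-based record indices, and non-numeric cells come back as NaN.

String csv = "x,y\n1.0,2.0\n3.5,oops\n";
ImmutableTable<Long, String, Double> table = fromCsv(csv);
double a = table.get(0L, "x"); // 1.0
double b = table.get(1L, "y"); // NaN, because "oops" is not a parseable double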
use of org.apache.commons.csv.CSVRecord in project ArachneCentralAPI by OHDSI.
the class SubmissionHelper method parseCsvDataframeToJson.
private JsonObject parseCsvDataframeToJson(String filepath) throws IOException {
    final JsonObject resultInfo = new JsonObject();
    // Use try-with-resources so the parser is always closed.
    try (CSVParser parser = CSVParser.parse(contentStorageService.getContentByFilepath(filepath),
            Charset.defaultCharset(), CSVFormat.DEFAULT.withHeader())) {
        final Map<String, Integer> headerMap = parser.getHeaderMap();
        final List<CSVRecord> csvRecordList = parser.getRecords();
        JsonArray jsonHeaders = new JsonArray();
        headerMap.forEach((key, value) -> jsonHeaders.add(key));
        resultInfo.add("headers", jsonHeaders);
        JsonArray jsonRecords = new JsonArray();
        csvRecordList.forEach(record -> {
            final JsonObject jsonRecord = new JsonObject();
            for (Map.Entry<String, Integer> entry : headerMap.entrySet()) {
                final String key = entry.getKey();
                final String value = record.get(entry.getValue());
                if (NumberUtils.isCreatable(value)) {
                    jsonRecord.addProperty(key, Float.parseFloat(value));
                } else {
                    jsonRecord.addProperty(key, value);
                }
            }
            jsonRecords.add(jsonRecord);
        });
        resultInfo.add("records", jsonRecords);
    }
    return resultInfo;
}
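To make the output shape concrete, an invented two-column input

name,score
a,1.5
b,x

would serialize to

{"headers":["name","score"],"records":[{"name":"a","score":1.5},{"name":"b","score":"x"}]}

since NumberUtils.isCreatable accepts "1.5" (stored as a number) but rejects "a", "b" and "x" (stored as strings).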
use of org.apache.commons.csv.CSVRecord in project kylo by Teradata.
the class CSVFileSchemaParser method populateSchema.
private DefaultFileSchema populateSchema(CSVParser parser) {
    DefaultFileSchema fileSchema = new DefaultFileSchema();
    int i = 0;
    ArrayList<Field> fields = new ArrayList<>();
    for (CSVRecord record : parser) {
        // Only sample the first 10 records.
        if (i > 9) {
            break;
        }
        int size = record.size();
        for (int j = 0; j < size; j++) {
            DefaultField field = null;
            if (i == 0) {
                // First record: create the fields, named from the header row if present.
                field = new DefaultField();
                if (headerRow) {
                    field.setName(record.get(j));
                } else {
                    field.setName("Col_" + (j + 1));
                }
                fields.add(field);
            } else {
                // Subsequent records: collect sample values for each field.
                try {
                    field = (DefaultField) fields.get(j);
                    field.getSampleValues().add(StringUtils.defaultString(record.get(j), ""));
                } catch (IndexOutOfBoundsException e) {
                    LOG.warn("Sample file has potential sparse column problem at row [{}] field [{}]", i + 1, j + 1);
                }
            }
        }
        i++;
    }
    fileSchema.setFields(fields);
    return fileSchema;
}
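For intuition, given an invented sample file

id,name
1,alpha
2,beta

with headerRow set to true, record 0 creates two DefaultField entries named id and name, and the remaining records append "1"/"2" and "alpha"/"beta" to the respective sample-value lists. With headerRow false, record 0 still determines the column count, but the fields are named Col_1 and Col_2 instead.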