Use of org.apache.commons.csv.CSVFormat in project bookish by parrt.
Class DataTable, method parseCSV:
/**
 * Parses an Excel-style CSV string (first record is the header) into this
 * table's {@code rows} and {@code colNames}.
 *
 * <p>Numeric non-integer cells are rounded and formatted to 4 decimal places;
 * all other cells are abbreviated to at most 25 characters. If any record's
 * first cell starts with a letter, the first column is treated as data rather
 * than as an unnamed numeric index column.
 *
 * @param csv the CSV content to parse
 * @throws RuntimeException wrapping any parse/IO failure
 */
public void parseCSV(String csv) {
    // FIX: use try-with-resources — the Reader and CSVParser were previously
    // never closed (resource leak on every call).
    try (Reader in = new StringReader(csv);
         CSVParser parser = CSVFormat.EXCEL.withHeader().parse(in)) {
        this.firstColIsIndex = false;
        for (CSVRecord record : parser) {
            if (!firstColIsIndex && Character.isAlphabetic(record.get(0).charAt(0))) {
                // latch if we see alpha not number
                firstColIsIndex = true;
            }
            List<String> row = new ArrayList<>();
            for (int i = 0; i < record.size(); i++) {
                String v = record.get(i);
                if (!NumberUtils.isDigits(v) && NumberUtils.isCreatable(v)) {
                    // Non-integer numeric value: round to 4 decimals.
                    // parseDouble avoids the needless boxing of Double.valueOf.
                    v = String.format("%.4f", Precision.round(Double.parseDouble(v), 4));
                } else {
                    v = abbrevString(v, 25);
                }
                row.add(v);
            }
            rows.add(row);
        }
        // Header map is still available here; the parser closes on block exit.
        Set<String> colNames = parser.getHeaderMap().keySet();
        if (!firstColIsIndex) {
            // remove index column name
            colNames.remove("");
        }
        this.colNames.addAll(colNames);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Use of org.apache.commons.csv.CSVFormat in project thingsboard by thingsboard.
Class CassandraDbHelper, method loadCf:
/**
 * Loads a CSV dump file into the given Cassandra column family, executing one
 * prepared INSERT per CSV record.
 *
 * @param ks          keyspace metadata used to resolve the table definition
 * @param session     live Cassandra session to execute inserts on
 * @param cfName      column family (table) name to load into
 * @param columns     column names, in the order they are bound
 * @param sourceFile  path of the CSV dump to read
 * @param parseHeader when true, column names come from the file's first record;
 *                    otherwise {@code columns} is used as the header
 * @throws Exception on any read or execution failure
 */
public static void loadCf(KeyspaceMetadata ks, Session session, String cfName, String[] columns, Path sourceFile, boolean parseHeader) throws Exception {
    TableMetadata tableMetadata = ks.getTable(cfName);
    PreparedStatement insertStmt = session.prepare(createInsertStatement(cfName, columns));
    CSVFormat format = parseHeader
            ? CSV_DUMP_FORMAT.withFirstRecordAsHeader()
            : CSV_DUMP_FORMAT.withHeader(columns);
    try (CSVParser parser = new CSVParser(Files.newBufferedReader(sourceFile), format)) {
        for (CSVRecord record : parser) {
            BoundStatement bound = insertStmt.bind();
            for (String column : columns) {
                setColumnValue(tableMetadata, column, record, bound);
            }
            session.execute(bound);
        }
    }
}
Use of org.apache.commons.csv.CSVFormat in project thingsboard by thingsboard.
Class SqlDbHelper, method dumpTableIfExists:
/**
 * Dumps the contents of the named table to a freshly created temp CSV file.
 *
 * @param conn          open JDBC connection
 * @param tableName     table to dump (looked up via {@code tableExists})
 * @param columns       column names to emit, in output order
 * @param defaultValues fallback values for columns missing from the result set
 * @param dumpPrefix    prefix for the temp file name
 * @param printHeader   when true, write {@code columns} as the CSV header row
 * @return the path of the dump file, or {@code null} if the table does not exist
 * @throws Exception on any SQL or IO failure
 */
public static Path dumpTableIfExists(Connection conn, String tableName, String[] columns, String[] defaultValues, String dumpPrefix, boolean printHeader) throws Exception {
    if (!tableExists(conn, tableName)) {
        return null;
    }
    Path dumpFile = Files.createTempFile(dumpPrefix, null);
    Files.deleteIfExists(dumpFile);
    CSVFormat format = printHeader ? CSV_DUMP_FORMAT.withHeader(columns) : CSV_DUMP_FORMAT;
    // NOTE(review): tableName is concatenated into the SQL text — table names
    // cannot be bound as parameters; assumed to come from trusted internal
    // callers only. Confirm no user-controlled value reaches this method.
    try (CSVPrinter printer = new CSVPrinter(Files.newBufferedWriter(dumpFile), format);
         PreparedStatement stmt = conn.prepareStatement("SELECT * FROM " + tableName);
         ResultSet tableRes = stmt.executeQuery()) {
        ResultSetMetaData meta = tableRes.getMetaData();
        // Map upper-cased column names to 1-based JDBC indexes for dumpRow.
        Map<String, Integer> columnIndexMap = new HashMap<>();
        for (int i = 1; i <= meta.getColumnCount(); i++) {
            columnIndexMap.put(meta.getColumnName(i).toUpperCase(), i);
        }
        while (tableRes.next()) {
            dumpRow(tableRes, columnIndexMap, columns, defaultValues, printer);
        }
    }
    return dumpFile;
}
Use of org.apache.commons.csv.CSVFormat in project fql by CategoricalData.
Class ToCsvPragmaInstance, method getFormat:
/**
 * Builds the CSV format used for AQL CSV export from the given options.
 *
 * <p>Starts from the predefined "Default" format, then applies the delimiter,
 * quote, and escape characters configured in the options, quotes every field,
 * and disables null-string substitution.
 *
 * @param op source of the delimiter/quote/escape option values
 * @return the fully configured {@link CSVFormat}
 */
public static CSVFormat getFormat(AqlOptions op) {
    // Each with* call returns a new immutable CSVFormat, so the whole
    // configuration can be expressed as one chained builder expression.
    return CSVFormat.valueOf("Default")
            .withDelimiter((Character) op.getOrDefault(AqlOption.csv_field_delim_char))
            .withQuote((Character) op.getOrDefault(AqlOption.csv_quote_char))
            .withEscape((Character) op.getOrDefault(AqlOption.csv_escape_char))
            .withQuoteMode(QuoteMode.ALL)
            .withNullString(null);
}
Use of org.apache.commons.csv.CSVFormat in project opentest by mcdcorp.
Class ReadCsv, method run:
/**
 * Reads CSV content from either a file ("file" argument) or an inline string
 * ("csvString" argument), parses it with the configured format options, and
 * writes the parsed header map and the list of record maps as outputs.
 *
 * <p>With a header row, each record becomes a header-keyed map. Without one,
 * columns are keyed by caller-supplied {@code fieldNames} where available,
 * falling back to synthetic names "col1", "col2", ...
 *
 * @throws RuntimeException wrapping any parse/IO failure, or when neither
 *         input argument is provided
 */
@Override
public void run() {
    super.run();

    String filePath = this.readStringArgument("file", null);
    String csvString = this.readStringArgument("csvString", null);
    String delimiter = this.readStringArgument("delimiter", null);
    String escapeChar = this.readStringArgument("escapeChar", null);
    String recordSeparator = this.readStringArgument("recordSeparator", null);
    Boolean excludeBom = this.readBooleanArgument("excludeBom", Boolean.TRUE);
    Boolean hasHeader = this.readBooleanArgument("hasHeader", Boolean.FALSE);
    String format = this.readStringArgument("format", "default");
    List<String> fieldNames = this.readArrayArgument("fieldNames", String.class, null);

    try {
        Reader csvReader;
        if (filePath != null) {
            if (excludeBom) {
                // Strip a leading UTF-8 BOM so it cannot pollute the first
                // header or field value.
                csvReader = new InputStreamReader(new BOMInputStream(new FileInputStream(filePath)), CharEncoding.UTF_8);
            } else {
                csvReader = Files.newBufferedReader(Paths.get(filePath), Charsets.UTF_8);
            }
        } else if (csvString != null) {
            csvReader = new StringReader(csvString);
        } else {
            // FIX: the message previously referred to a non-existent "csv"
            // argument; the actual argument name is "csvString".
            throw new RuntimeException("Neither the \"file\" argument, nor the \"csvString\" argument were provided.");
        }

        CSVFormat csvFormat = this.getCsvFormat(format);
        if (hasHeader) {
            csvFormat = csvFormat.withFirstRecordAsHeader();
        }
        if (delimiter != null) {
            csvFormat = csvFormat.withDelimiter(delimiter.charAt(0));
        }
        if (escapeChar != null) {
            csvFormat = csvFormat.withEscape(escapeChar.charAt(0));
        }
        if (recordSeparator != null) {
            csvFormat = csvFormat.withRecordSeparator(recordSeparator);
        }

        List<Map<String, String>> recordsArray = new ArrayList<>();
        // FIX: close the parser (and, through it, the reader) when done —
        // both were previously leaked. CSVParser already implements
        // Iterable<CSVRecord>, so the former cast was redundant.
        try (CSVParser parser = csvFormat.parse(csvReader)) {
            for (CSVRecord record : parser) {
                if (hasHeader) {
                    recordsArray.add(record.toMap());
                } else {
                    Map<String, String> recordsMap = new HashMap<>();
                    int columnNo = 1;
                    for (String field : record) {
                        if (fieldNames != null && fieldNames.size() >= columnNo && fieldNames.get(columnNo - 1) != null) {
                            recordsMap.put(fieldNames.get(columnNo - 1).trim(), field);
                        } else {
                            recordsMap.put(String.format("col%s", columnNo), field);
                        }
                        columnNo++;
                    }
                    recordsArray.add(recordsMap);
                }
            }
            this.writeOutput("header", parser.getHeaderMap());
        }
        this.writeOutput("records", recordsArray);
    } catch (Exception ex) {
        throw new RuntimeException("Failed to parse CSV", ex);
    }
}
Aggregations