Use of org.apache.commons.csv.CSVRecord in project phoenix by apache.
The class CsvToKeyValueMapperTest, method testCsvLineParserWithQuoting.
@Test
public void testCsvLineParserWithQuoting() throws IOException {
    CsvToKeyValueMapper.CsvLineParser lineParser =
            new CsvToKeyValueMapper.CsvLineParser(';', '"', '\\');
    CSVRecord parsed = lineParser.parse("\"\\\"one\";\"\\;two\\\\\"");
    assertEquals("\"one", parsed.get(0));
    assertEquals(";two\\", parsed.get(1));
    assertTrue(parsed.isConsistent());
    assertEquals(1, parsed.getRecordNumber());
}
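The test above exercises Phoenix's CsvLineParser with a custom delimiter, quote, and escape character. Below is a minimal sketch, not taken from Phoenix, showing how the same quoting and escaping behavior can be reproduced with commons-csv's CSVFormat directly; the class name and main method are illustrative only.

// Sketch: reproduce the quoting/escaping asserted in the test with plain commons-csv.
import java.io.IOException;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class QuotedCsvSketch {
    public static void main(String[] args) throws IOException {
        CSVFormat format = CSVFormat.DEFAULT
                .withDelimiter(';')
                .withQuote('"')
                .withEscape('\\');
        // Same input as the test: an escaped quote in field 0, an escaped
        // delimiter and an escaped backslash in field 1.
        try (CSVParser parser = CSVParser.parse("\"\\\"one\";\"\\;two\\\\\"", format)) {
            CSVRecord record = parser.getRecords().get(0);
            System.out.println(record.get(0)); // prints: "one
            System.out.println(record.get(1)); // prints: ;two\
        }
    }
}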
Use of org.apache.commons.csv.CSVRecord in project phoenix by apache.
The class GoogleChartGenerator, method read.
/**
 * Reads the aggregate result file and converts it to a DataNode.
 * @param label the result label that identifies the aggregate file to read
 * @throws Exception
 */
private void read(String label) throws Exception {
    String resultFileName = resultDir + PherfConstants.PATH_SEPARATOR
            + PherfConstants.RESULT_PREFIX + label
            + ResultFileDetails.CSV_AGGREGATE_PERFORMANCE.getExtension();
    FileReader in = new FileReader(resultFileName);
    final CSVParser parser = new CSVParser(in, CSVFormat.DEFAULT.withHeader());
    for (CSVRecord record : parser) {
        String group = record.get("QUERY_GROUP");
        String query = record.get("QUERY");
        String explain = record.get("EXPLAIN_PLAN");
        String tenantId = record.get("TENANT_ID");
        long avgTime = Long.parseLong(record.get("AVG_TIME_MS"));
        long minTime = Long.parseLong(record.get("AVG_MIN_TIME_MS"));
        long numRuns = Long.parseLong(record.get("RUN_COUNT"));
        long rowCount = Long.parseLong(record.get("RESULT_ROW_COUNT"));
        Node node = new Node(minTime, avgTime, numRuns, explain, query, tenantId, label, rowCount);
        if (datanodes.containsKey(group)) {
            datanodes.get(group).getDataSet().put(label, node);
        } else {
            datanodes.put(group, new DataNode(label, node));
        }
    }
    parser.close();
}
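The read method above keys each record by the header row of a Pherf aggregate result file. The following sketch, assuming a hypothetical file name and a reduced set of columns, shows the same header-based access pattern with try-with-resources so the parser and the underlying reader are always closed.

// Sketch: header-keyed CSV read (file name and columns are illustrative).
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class AggregateCsvSketch {
    public static void main(String[] args) throws IOException {
        try (Reader in = new FileReader("RESULT_example.csv");
             CSVParser parser = new CSVParser(in, CSVFormat.DEFAULT.withHeader())) {
            for (CSVRecord record : parser) {
                // Column names assumed for this sketch, not Pherf's full schema.
                String group = record.get("QUERY_GROUP");
                long avgTime = Long.parseLong(record.get("AVG_TIME_MS"));
                System.out.println(group + " -> " + avgTime + " ms");
            }
        }
    }
}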
Use of org.apache.commons.csv.CSVRecord in project opentest by mcdcorp.
The class ReadCsv, method run.
@Override
public void run() {
    super.run();
    String filePath = this.readStringArgument("file", null);
    String csvString = this.readStringArgument("csvString", null);
    String delimiter = this.readStringArgument("delimiter", null);
    String escapeChar = this.readStringArgument("escapeChar", null);
    String recordSeparator = this.readStringArgument("recordSeparator", null);
    Boolean excludeBom = this.readBooleanArgument("excludeBom", Boolean.TRUE);
    Boolean hasHeader = this.readBooleanArgument("hasHeader", Boolean.FALSE);
    String format = this.readStringArgument("format", "default");
    List<String> fieldNames = this.readArrayArgument("fieldNames", String.class, null);
    try {
        Reader csvReader;
        if (filePath != null) {
            if (excludeBom) {
                csvReader = new InputStreamReader(new BOMInputStream(new FileInputStream(filePath)), CharEncoding.UTF_8);
            } else {
                csvReader = Files.newBufferedReader(Paths.get(filePath), Charsets.UTF_8);
            }
        } else if (csvString != null) {
            csvReader = new StringReader(csvString);
        } else {
            throw new RuntimeException("Neither the \"file\" argument nor the \"csvString\" argument was provided.");
        }
        CSVFormat csvFormat = this.getCsvFormat(format);
        if (hasHeader) {
            csvFormat = csvFormat.withFirstRecordAsHeader();
        }
        if (delimiter != null) {
            csvFormat = csvFormat.withDelimiter(delimiter.charAt(0));
        }
        if (escapeChar != null) {
            csvFormat = csvFormat.withEscape(escapeChar.charAt(0));
        }
        if (recordSeparator != null) {
            csvFormat = csvFormat.withRecordSeparator(recordSeparator);
        }
        List<Map<String, String>> recordsArray = new ArrayList<>();
        CSVParser parser = csvFormat.parse(csvReader);
        for (CSVRecord record : parser) {
            if (hasHeader) {
                recordsArray.add(record.toMap());
            } else {
                Map<String, String> recordsMap = new HashMap<>();
                Iterator<String> fields = record.iterator();
                int columnNo = 1;
                while (fields.hasNext()) {
                    String field = fields.next();
                    if (fieldNames != null && fieldNames.size() >= columnNo && fieldNames.get(columnNo - 1) != null) {
                        recordsMap.put(fieldNames.get(columnNo - 1).trim(), field);
                    } else {
                        recordsMap.put(String.format("col%s", columnNo), field);
                    }
                    columnNo++;
                }
                recordsArray.add(recordsMap);
            }
        }
        this.writeOutput("header", parser.getHeaderMap());
        this.writeOutput("records", recordsArray);
    } catch (Exception ex) {
        throw new RuntimeException("Failed to parse CSV", ex);
    }
}
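When hasHeader is false, the keyword above maps each record either to caller-supplied fieldNames or to generated col1, col2, ... keys. A minimal, self-contained sketch of that headerless branch, with illustrative input data, could look like this:

// Sketch: headerless records mapped to generated column names (input is illustrative).
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class HeaderlessCsvSketch {
    public static void main(String[] args) throws IOException {
        String csv = "a,b,c\n1,2,3\n";
        List<Map<String, String>> rows = new ArrayList<>();
        try (CSVParser parser = CSVFormat.DEFAULT.parse(new StringReader(csv))) {
            for (CSVRecord record : parser) {
                Map<String, String> row = new HashMap<>();
                int columnNo = 1;
                for (String field : record) {
                    row.put("col" + columnNo++, field);
                }
                rows.add(row);
            }
        }
        // e.g. [{col1=a, col2=b, col3=c}, {col1=1, col2=2, col3=3}] (map order may vary)
        System.out.println(rows);
    }
}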
Use of org.apache.commons.csv.CSVRecord in project Orthanc_Tools by salimkanoun.
The class AutoQuery, method csvReading.
/**
 * Reads the CSV file and injects its values into the table.
 * @param file the CSV file to read
 * @param table the JTable to fill
 * @throws IOException
 */
protected void csvReading(File file, JTable table) throws IOException {
    CSVFormat csvFileFormat = CSVFormat.EXCEL.withFirstRecordAsHeader().withIgnoreEmptyLines();
    CSVParser csvParser = CSVParser.parse(file, StandardCharsets.UTF_8, csvFileFormat);
    // Collect the records into a list
    List<CSVRecord> csvRecord = csvParser.getRecords();
    // Scan the file line by line
    int discarded = 0;
    Boolean error = false;
    for (int i = 0; i < csvRecord.size(); i++) {
        try {
            // Retrieve the values
            String name = csvRecord.get(i).get(0);
            String prenom = csvRecord.get(i).get(1);
            String id = csvRecord.get(i).get(2);
            String accession = csvRecord.get(i).get(3);
            String dateFrom = csvRecord.get(i).get(4);
            String dateTo = csvRecord.get(i).get(5);
            String modality = csvRecord.get(i).get(6);
            String studyDescription = csvRecord.get(i).get(7);
            // Push them into the table
            DefaultTableModel model = (DefaultTableModel) table.getModel();
            model.addRow(new Object[] { name, prenom, id, accession, dateFrom, dateTo, modality, studyDescription });
        } catch (NullPointerException | ArrayIndexOutOfBoundsException e) {
            System.out.println("Error in line " + i + ", discarding");
            discarded++;
            error = true;
        }
    }
    if (error) {
        JOptionPane.showMessageDialog(null, discarded + " lines discarded, see console for more details", "Wrong Input", JOptionPane.WARNING_MESSAGE);
    }
}
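The method above reads fields positionally and discards any line that does not have enough columns. A minimal sketch of that positional-read-with-discard pattern, using plain lists instead of a Swing table and illustrative CSV content, is shown below; it catches the same ArrayIndexOutOfBoundsException the original handles.

// Sketch: positional access with a discard counter (sample CSV is illustrative).
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class PositionalCsvSketch {
    public static void main(String[] args) throws IOException {
        String csv = "NAME,ID\nDOE,123\nSHORT_LINE\n";
        CSVFormat format = CSVFormat.EXCEL.withFirstRecordAsHeader().withIgnoreEmptyLines();
        List<String[]> rows = new ArrayList<>();
        int discarded = 0;
        try (CSVParser parser = format.parse(new StringReader(csv))) {
            for (CSVRecord record : parser.getRecords()) {
                try {
                    // Positional access fails on records with too few columns.
                    rows.add(new String[] { record.get(0), record.get(1) });
                } catch (ArrayIndexOutOfBoundsException e) {
                    discarded++;
                }
            }
        }
        // With the sample input: 1 kept, 1 discarded
        System.out.println(rows.size() + " kept, " + discarded + " discarded");
    }
}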
Use of org.apache.commons.csv.CSVRecord in project storm by apache.
The class CsvScheme, method deserialize.
@Override
public List<Object> deserialize(ByteBuffer ser) {
    try {
        String data = new String(Utils.toByteArray(ser), StandardCharsets.UTF_8);
        CSVParser parser = CSVParser.parse(data, CSVFormat.RFC4180);
        CSVRecord record = parser.getRecords().get(0);
        Preconditions.checkArgument(record.size() == fieldNames.size(), "Invalid schema");
        ArrayList<Object> list = new ArrayList<>(fieldNames.size());
        for (int i = 0; i < record.size(); i++) {
            list.add(record.get(i));
        }
        return list;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
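The scheme above decodes a single RFC 4180 record from a ByteBuffer into a list of field values. The sketch below shows the same single-record parse without Storm's Utils and Preconditions helpers; the class name and sample input are illustrative only.

// Sketch: decode one RFC 4180 record from a ByteBuffer with plain JDK and commons-csv.
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class SingleRecordSketch {
    public static List<Object> deserialize(ByteBuffer ser) {
        try {
            // Copy the buffer's remaining bytes and decode them as UTF-8.
            byte[] bytes = new byte[ser.remaining()];
            ser.get(bytes);
            String data = new String(bytes, StandardCharsets.UTF_8);
            try (CSVParser parser = CSVParser.parse(data, CSVFormat.RFC4180)) {
                CSVRecord record = parser.getRecords().get(0);
                List<Object> list = new ArrayList<>(record.size());
                for (String field : record) {
                    list.add(field);
                }
                return list;
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.wrap("a,\"b,c\",d".getBytes(StandardCharsets.UTF_8));
        System.out.println(deserialize(buf)); // prints: [a, b,c, d]
    }
}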