Use of org.apache.commons.csv.CSVFormat in project Orthanc_Tools by salimkanoun — class AutoQuery, method csvReading.
/**
 * Reads a CSV file (Excel dialect, first record used as header, empty lines
 * ignored) and appends one table row per record. Records with fewer than the
 * eight expected columns are discarded; the user is warned once at the end
 * with the number of discarded lines.
 *
 * @param file  CSV file to read (UTF-8)
 * @param table target JTable; its model must be a DefaultTableModel
 * @throws IOException if the file cannot be opened or read
 */
protected void csvReading(File file, JTable table) throws IOException {
	CSVFormat csvFileFormat = CSVFormat.EXCEL.withFirstRecordAsHeader().withIgnoreEmptyLines();
	int discarded = 0;
	// try-with-resources: the parser owns the underlying file reader and must be closed
	try (CSVParser csvParser = CSVParser.parse(file, StandardCharsets.UTF_8, csvFileFormat)) {
		DefaultTableModel model = (DefaultTableModel) table.getModel();
		int line = 0;
		// Scan the file record by record
		for (CSVRecord record : csvParser) {
			try {
				// Expected column layout: name, first name, patient ID, accession
				// number, date range, modality, study description
				String name = record.get(0);
				String prenom = record.get(1);
				String id = record.get(2);
				String accession = record.get(3);
				String dateFrom = record.get(4);
				String dateTo = record.get(5);
				String modality = record.get(6);
				String studyDescription = record.get(7);
				model.addRow(new Object[] { name, prenom, id, accession, dateFrom, dateTo, modality, studyDescription });
			} catch (NullPointerException | ArrayIndexOutOfBoundsException e) {
				// Malformed record: skip it but keep processing the rest of the file
				System.out.println("Error in line " + line + " discarding");
				discarded++;
			}
			line++;
		}
	}
	if (discarded > 0)
		JOptionPane.showMessageDialog(null, discarded + " lines discarded, see console for more details", "Wrong Input", JOptionPane.WARNING_MESSAGE);
}
Use of org.apache.commons.csv.CSVFormat in project symja_android_library by axkr — class ImportString, method evaluate.
/**
 * Evaluates ImportString(string [, format]): parses the given string according
 * to the requested format (JSON, ExpressionJSON, Table/CSV, String, Text) and
 * returns the resulting expression, or F.NIL on invalid arguments or errors.
 */
@Override
public IExpr evaluate(final IAST ast, EvalEngine engine) {
    // First argument must be the string payload to import
    if (!(ast.arg1() instanceof IStringX)) {
        return F.NIL;
    }
    String str1 = ((IStringX) ast.arg1()).toString();
    // Optional second argument selects the format; defaults to plain text
    Extension format = Extension.TXT;
    if (ast.size() > 2) {
        if (!(ast.arg2() instanceof IStringX)) {
            return F.NIL;
        }
        format = Extension.importExtension(((IStringX) ast.arg2()).toString());
    }
    try {
        switch(format) {
            case JSON:
                return JSONConvert.importJSON(str1);
            case EXPRESSIONJSON:
                return ExpressionJSONConvert.importExpressionJSON(str1);
            case TABLE:
                AST2Expr ast2Expr = new AST2Expr(engine.isRelaxedSyntax(), engine);
                final Parser parser = new Parser(engine.isRelaxedSyntax(), true);
                CSVFormat csvFormat = CSVFormat.RFC4180.withDelimiter(',');
                // parse(...) returns a CSVParser that wraps the reader; close it
                // deterministically instead of leaking it
                try (CSVParser csvParser = csvFormat.parse(new StringReader(str1))) {
                    IASTAppendable rowList = F.ListAlloc(256);
                    for (CSVRecord record : csvParser) {
                        // Each CSV cell is parsed as a symja expression
                        IASTAppendable columnList = F.ListAlloc(record.size());
                        for (String string : record) {
                            final ASTNode node = parser.parse(string);
                            IExpr temp = ast2Expr.convert(node);
                            columnList.append(temp);
                        }
                        rowList.append(columnList);
                    }
                    return rowList;
                }
            case STRING:
                return ofString(str1, engine);
            case TXT:
                return ofText(str1, engine);
            default:
        }
    } catch (SyntaxError se) {
        LOGGER.log(engine.getLogLevel(), "ImportString: syntax error!", se);
    } catch (Exception ex) {
        LOGGER.log(engine.getLogLevel(), "ImportString", ex);
    }
    return F.NIL;
}
Use of org.apache.commons.csv.CSVFormat in project beam by apache — class SqlTransformRunner, method runUsingSqlTransform.
/**
 * This is the default method in BeamTpcds.main method. Run job using SqlTransform.query() method.
 *
 * <p>Builds one pipeline per TPC-DS query, runs them concurrently on a fixed
 * thread pool, and writes each query's result rows as a single .txt shard
 * under the configured results directory.
 *
 * @param args Command line arguments
 * @throws Exception if option parsing, query reading, or result collection fails
 */
public static void runUsingSqlTransform(String[] args) throws Exception {
    TpcdsOptions tpcdsOptions = PipelineOptionsFactory.fromArgs(args).withValidation().as(TpcdsOptions.class);
    String dataSize = TpcdsParametersReader.getAndCheckDataSize(tpcdsOptions);
    String[] queryNames = TpcdsParametersReader.getAndCheckQueryNames(tpcdsOptions);
    int nThreads = TpcdsParametersReader.getAndCheckTpcParallel(tpcdsOptions);
    // Using ExecutorService and CompletionService to fulfill multi-threading functionality
    ExecutorService executor = Executors.newFixedThreadPool(nThreads);
    CompletionService<TpcdsRunResult> completion = new ExecutorCompletionService<>(executor);
    // Make an array of pipelines, each pipeline is responsible for running a corresponding query.
    Pipeline[] pipelines = new Pipeline[queryNames.length];
    // Input data is pipe-delimited text with empty fields treated as NULL
    CSVFormat csvFormat = CSVFormat.MYSQL.withDelimiter('|').withNullString("");
    for (int i = 0; i < queryNames.length; i++) {
        // For each query, get a copy of pipelineOptions from command line arguments.
        TpcdsOptions tpcdsOptionsCopy = PipelineOptionsFactory.fromArgs(args).withValidation().as(TpcdsOptions.class);
        // Set a unique job name using the time stamp so that multiple different pipelines can run
        // together.
        tpcdsOptionsCopy.setJobName(queryNames[i] + "result" + System.currentTimeMillis());
        pipelines[i] = Pipeline.create(tpcdsOptionsCopy);
        String queryString = QueryReader.readQuery(queryNames[i]);
        PCollectionTuple tables = getTables(pipelines[i], csvFormat, queryNames[i]);
        try {
            tables.apply(SqlTransform.query(queryString)).apply(MapElements.into(TypeDescriptors.strings()).via(Row::toString)).apply(TextIO.write().to(tpcdsOptions.getResultsDirectory() + "/" + dataSize + "/" + pipelines[i].getOptions().getJobName()).withSuffix(".txt").withNumShards(1));
        } catch (Exception e) {
            // Log the failure with its stack trace; the pipeline is still submitted
            // below so the completion count matches queryNames.length.
            LOG.error("{} failed to execute", queryNames[i], e);
        }
        completion.submit(new TpcdsRun(pipelines[i]));
    }
    executor.shutdown();
    printExecutionSummary(completion, queryNames.length);
}
Use of org.apache.commons.csv.CSVFormat in project jgnash by ccavanaugh — class CsvExport, method exportAccountTree.
/**
 * Exports the full account tree of the given engine to a CSV file at the given
 * path (extension forced to .csv). Rows are indented by account depth and
 * sorted by tree position; a UTF-8 BOM is written first to ease spreadsheet imports.
 */
public static void exportAccountTree(@NotNull final Engine engine, @NotNull final Path path) {
    Objects.requireNonNull(engine);
    Objects.requireNonNull(path);
    // Normalize the target file name so it always ends in .csv
    final String csvFileName = FileUtils.stripFileExtension(path.toString()) + ".csv";
    final CSVFormat format = CSVFormat.EXCEL.withQuoteMode(QuoteMode.ALL);
    try (final OutputStreamWriter streamWriter = new OutputStreamWriter(Files.newOutputStream(Paths.get(csvFileName)), StandardCharsets.UTF_8);
            final CSVPrinter printer = new CSVPrinter(new BufferedWriter(streamWriter), format)) {
        // Byte order mark helps spreadsheet applications detect UTF-8
        streamWriter.write(BYTE_ORDER_MARK);
        printer.printRecord(ResourceUtils.getString("Column.Account"), ResourceUtils.getString("Column.Code"), ResourceUtils.getString("Column.Entries"), ResourceUtils.getString("Column.Balance"), ResourceUtils.getString("Column.ReconciledBalance"), ResourceUtils.getString("Column.Currency"), ResourceUtils.getString("Column.Type"));
        // Sort by depth then account code (name breaks ties when no code is set)
        final List<Account> accounts = engine.getAccountList();
        accounts.sort(Comparators.getAccountByTreePosition(Comparators.getAccountByCode()));
        final CurrencyNode defaultCurrency = engine.getDefaultCurrency();
        final LocalDate asOfDate = LocalDate.now();
        for (final Account account : accounts) {
            final String indentedName = SPACE.repeat((account.getDepth() - 1) * INDENT) + account.getName();
            final String treeBalance = account.getTreeBalance(asOfDate, defaultCurrency).toPlainString();
            final String reconciledTreeBalance = account.getReconciledTreeBalance().toPlainString();
            printer.printRecord(indentedName, String.valueOf(account.getAccountCode()), String.valueOf(account.getTransactionCount()), treeBalance, reconciledTreeBalance, account.getCurrencyNode().getSymbol(), account.getAccountType().toString());
        }
    } catch (final IOException e) {
        Logger.getLogger(CsvExport.class.getName()).log(Level.SEVERE, e.getLocalizedMessage(), e);
    }
}
Use of org.apache.commons.csv.CSVFormat in project midpoint by Evolveum — class ImportController, method parseColumnsAsVariablesFromFile.
/**
 * Parses the CSV file referenced by the report data and returns one VariablesMap
 * per record, keyed by column name. Column names come either from the compiled
 * collection's view definition or from the file's first record. Cell values
 * containing the configured multivalue delimiter are split into a list.
 *
 * @param reportData report data object carrying the CSV file path
 * @return one VariablesMap per CSV record
 * @throws IOException if the file cannot be opened or parsed
 * @throws IllegalArgumentException if the object type or headers cannot be determined
 */
public List<VariablesMap> parseColumnsAsVariablesFromFile(ReportDataType reportData) throws IOException {
    List<String> headers = new ArrayList<>();
    CSVFormat csvFormat = support.createCsvFormat();
    if (compiledCollection != null) {
        // Headers are derived from the configured view columns
        Class<ObjectType> type = compiledCollection.getTargetClass(reportService.getPrismContext());
        if (type == null) {
            throw new IllegalArgumentException("Couldn't define type of imported objects");
        }
        PrismObjectDefinition<?> def = reportService.getPrismContext().getSchemaRegistry().findItemDefinitionByCompileTimeClass(type, PrismObjectDefinition.class);
        for (GuiObjectColumnType column : columns) {
            Validate.notNull(column.getName(), "Name of column is null");
            String label = GenericSupport.getLabel(column, def, localizationService);
            headers.add(label);
        }
    } else {
        csvFormat = csvFormat.withFirstRecordAsHeader();
    }
    if (support.isHeader()) {
        if (!headers.isEmpty()) {
            String[] arrayHeader = new String[headers.size()];
            arrayHeader = headers.toArray(arrayHeader);
            csvFormat = csvFormat.withHeader(arrayHeader);
        }
        csvFormat = csvFormat.withSkipHeaderRecord(true);
    } else {
        if (headers.isEmpty()) {
            throw new IllegalArgumentException("Couldn't find headers please " + "define them via view element or write them to csv file and set " + "header element in file format configuration to true.");
        }
        csvFormat = csvFormat.withSkipHeaderRecord(false);
    }
    // Open the reader only once the format is fully configured, and close both
    // reader and parser deterministically (previously they were leaked).
    try (Reader reader = Files.newBufferedReader(Paths.get(reportData.getFilePath()));
            CSVParser csvParser = new CSVParser(reader, csvFormat)) {
        if (headers.isEmpty()) {
            headers = csvParser.getHeaderNames();
        }
        List<VariablesMap> variablesMaps = new ArrayList<>();
        for (CSVRecord csvRecord : csvParser) {
            VariablesMap variables = new VariablesMap();
            for (String name : headers) {
                String value;
                if (support.isHeader()) {
                    value = csvRecord.get(name);
                } else {
                    value = csvRecord.get(headers.indexOf(name));
                }
                // Empty cells are normalized to null
                if (value != null && value.isEmpty()) {
                    value = null;
                }
                if (value != null && value.contains(support.getMultivalueDelimiter())) {
                    // NOTE(review): String.split treats the delimiter as a regex —
                    // confirm the configured delimiter never contains regex metacharacters
                    String[] realValues = value.split(support.getMultivalueDelimiter());
                    variables.put(name, Arrays.asList(realValues), String.class);
                } else {
                    variables.put(name, value, String.class);
                }
            }
            variablesMaps.add(variables);
        }
        return variablesMaps;
    }
}
Aggregations