Use of org.apache.commons.csv.CSVFormat in project nifi by apache: class TestWriteCSVResult, method testExtraFieldInWriteRecord.
@Test
public void testExtraFieldInWriteRecord() throws IOException {
    final CSVFormat csvFormat = CSVFormat.DEFAULT.withEscape('\\').withQuoteMode(QuoteMode.NONE).withRecordSeparator("\n");
    final List<RecordField> fields = new ArrayList<>();
    fields.add(new RecordField("id", RecordFieldType.STRING.getDataType()));
    final RecordSchema schema = new SimpleRecordSchema(fields);

    final Map<String, Object> values = new HashMap<>();
    values.put("id", "1");
    // "name" is not part of the schema, so write(record) is expected to drop it
    values.put("name", "John");
    final Record record = new MapRecord(schema, values);

    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    final String output;
    try (final WriteCSVResult writer = new WriteCSVResult(csvFormat, schema, new SchemaNameAsAttribute(), baos,
            RecordFieldType.DATE.getDefaultFormat(), RecordFieldType.TIME.getDefaultFormat(),
            RecordFieldType.TIMESTAMP.getDefaultFormat(), true, "ASCII")) {
        writer.beginRecordSet();
        writer.write(record);
        writer.finishRecordSet();
        writer.flush();
        output = baos.toString();
    }

    assertEquals("id\n1\n", output);
}
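For reference, a minimal sketch (not from the NiFi code base) of how the same CSVFormat behaves when used directly with commons-csv's CSVPrinter: with QuoteMode.NONE nothing is quoted, so a delimiter inside a value is emitted with the configured escape character instead.

import java.io.IOException;
import java.io.StringWriter;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;
import org.apache.commons.csv.QuoteMode;

public class QuoteModeNoneSketch {
    public static void main(String[] args) throws IOException {
        CSVFormat format = CSVFormat.DEFAULT.withEscape('\\').withQuoteMode(QuoteMode.NONE).withRecordSeparator("\n");
        StringWriter out = new StringWriter();
        try (CSVPrinter printer = new CSVPrinter(out, format)) {
            printer.printRecord("id", "name");
            printer.printRecord("1", "John, Jr.");
        }
        // With QuoteMode.NONE the comma inside "John, Jr." is escaped rather than quoted:
        // id,name
        // 1,John\, Jr.
        System.out.print(out);
    }
}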
Use of org.apache.commons.csv.CSVFormat in project nifi by apache: class TestWriteCSVResult, method testMissingFieldWriteRawRecord.
@Test
public void testMissingFieldWriteRawRecord() throws IOException {
    final CSVFormat csvFormat = CSVFormat.DEFAULT.withEscape('\\').withQuoteMode(QuoteMode.NONE).withRecordSeparator("\n");
    final List<RecordField> fields = new ArrayList<>();
    fields.add(new RecordField("id", RecordFieldType.STRING.getDataType()));
    fields.add(new RecordField("name", RecordFieldType.STRING.getDataType()));
    final RecordSchema schema = new SimpleRecordSchema(fields);

    // no value is supplied for "name", so the raw record is written with an empty trailing field
    final Map<String, Object> values = new LinkedHashMap<>();
    values.put("id", "1");
    final Record record = new MapRecord(schema, values);

    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    final String output;
    try (final WriteCSVResult writer = new WriteCSVResult(csvFormat, schema, new SchemaNameAsAttribute(), baos,
            RecordFieldType.DATE.getDefaultFormat(), RecordFieldType.TIME.getDefaultFormat(),
            RecordFieldType.TIMESTAMP.getDefaultFormat(), true, "ASCII")) {
        writer.beginRecordSet();
        writer.writeRawRecord(record);
        writer.finishRecordSet();
        writer.flush();
        output = baos.toString();
    }

    assertEquals("id,name\n1,\n", output);
}
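A related sketch, again not part of the NiFi test itself and assuming only commons-csv's CSVParser: parsing the writer's output back with an equivalent format shows the missing field coming back as an empty string.

import java.io.IOException;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;
import org.apache.commons.csv.QuoteMode;

public class MissingFieldRoundTripSketch {
    public static void main(String[] args) throws IOException {
        CSVFormat format = CSVFormat.DEFAULT.withEscape('\\').withQuoteMode(QuoteMode.NONE)
                .withRecordSeparator("\n").withFirstRecordAsHeader();
        try (CSVParser parser = CSVParser.parse("id,name\n1,\n", format)) {
            for (CSVRecord record : parser) {
                System.out.println(record.get("id"));   // 1
                System.out.println(record.get("name")); // empty string, the value that was never set
            }
        }
    }
}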
Use of org.apache.commons.csv.CSVFormat in project nifi by apache: class CSVUtils, method buildCustomFormat.
private static CSVFormat buildCustomFormat(final PropertyContext context) {
    final char valueSeparator = getUnescapedChar(context, VALUE_SEPARATOR);
    CSVFormat format = CSVFormat.newFormat(valueSeparator).withAllowMissingColumnNames().withIgnoreEmptyLines();

    final PropertyValue skipHeaderPropertyValue = context.getProperty(FIRST_LINE_IS_HEADER);
    if (skipHeaderPropertyValue.getValue() != null && skipHeaderPropertyValue.asBoolean()) {
        format = format.withFirstRecordAsHeader();
    }

    format = format.withQuote(getChar(context, QUOTE_CHAR));
    format = format.withEscape(getChar(context, ESCAPE_CHAR));
    format = format.withTrim(context.getProperty(TRIM_FIELDS).asBoolean());

    if (context.getProperty(COMMENT_MARKER).isSet()) {
        format = format.withCommentMarker(getChar(context, COMMENT_MARKER));
    }
    if (context.getProperty(NULL_STRING).isSet()) {
        format = format.withNullString(CSVUtils.unescape(context.getProperty(NULL_STRING).getValue()));
    }

    final PropertyValue quoteValue = context.getProperty(QUOTE_MODE);
    if (quoteValue != null) {
        final QuoteMode quoteMode = QuoteMode.valueOf(quoteValue.getValue());
        format = format.withQuoteMode(quoteMode);
    }

    final PropertyValue trailingDelimiterValue = context.getProperty(TRAILING_DELIMITER);
    if (trailingDelimiterValue != null) {
        final boolean trailingDelimiter = trailingDelimiterValue.asBoolean();
        format = format.withTrailingDelimiter(trailingDelimiter);
    }

    final PropertyValue recordSeparator = context.getProperty(RECORD_SEPARATOR);
    if (recordSeparator != null) {
        final String separator = unescape(recordSeparator.getValue());
        format = format.withRecordSeparator(separator);
    }

    return format;
}
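A minimal usage sketch of a format similar to what buildCustomFormat might produce; the NiFi property constants and helper methods above are not reproduced, and the separator, quote, escape, and header settings shown here are assumptions for illustration.

import java.io.IOException;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class CustomFormatSketch {
    public static void main(String[] args) throws IOException {
        // roughly what buildCustomFormat could return for separator ';', quote '"',
        // escape '\\', trim enabled and FIRST_LINE_IS_HEADER = true
        CSVFormat format = CSVFormat.newFormat(';')
                .withAllowMissingColumnNames()
                .withIgnoreEmptyLines()
                .withFirstRecordAsHeader()
                .withQuote('"')
                .withEscape('\\')
                .withTrim(true);
        try (CSVParser parser = CSVParser.parse("a;b\n1; 2\n", format)) {
            for (CSVRecord record : parser) {
                System.out.println(record.get("a") + " / " + record.get("b")); // 1 / 2 (the space after ';' is trimmed)
            }
        }
    }
}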
Use of org.apache.commons.csv.CSVFormat in project symja_android_library by axkr: class Import, method evaluate.
@Override
public IExpr evaluate(final IAST ast, EvalEngine engine) {
    Validate.checkSize(ast, 3);
    // both arguments must be strings: the file name and the import format
    if (!(ast.arg1() instanceof IStringX)) {
        throw new WrongNumberOfArguments(ast, 1, ast.size() - 1);
    }
    if (!(ast.arg2() instanceof IStringX)) {
        throw new WrongNumberOfArguments(ast, 2, ast.size() - 1);
    }
    IStringX arg1 = (IStringX) ast.arg1();
    IStringX arg2 = (IStringX) ast.arg2();
    FileReader reader = null;
    try {
        reader = new FileReader(arg1.toString());
        if (arg2.contentEquals("Table")) {
            AST2Expr ast2Expr = AST2Expr.CONST;
            if (engine.isRelaxedSyntax()) {
                ast2Expr = AST2Expr.CONST_LC;
            }
            final Parser parser = new Parser(engine.isRelaxedSyntax(), true);
            // parse the file as a whitespace-separated table and convert every cell to an IExpr
            CSVFormat csvFormat = CSVFormat.RFC4180.withDelimiter(' ');
            Iterable<CSVRecord> records = csvFormat.parse(reader);
            IAST rowList = F.List();
            for (CSVRecord record : records) {
                IAST columnList = F.List();
                for (String string : record) {
                    final ASTNode node = parser.parse(string);
                    IExpr temp = ast2Expr.convert(node, engine);
                    columnList.append(temp);
                }
                rowList.append(columnList);
            }
            return rowList;
        }
    } catch (IOException ioe) {
        engine.printMessage("Import: file " + arg1.toString() + " not found!");
    } finally {
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException e) {
                // ignore
            }
        }
    }
    return F.NIL;
}
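The commons-csv portion of this can be sketched in isolation (file I/O and the Symja-specific conversion omitted; the input string is a made-up example): RFC4180 with a space delimiter splits each line of a whitespace-separated table into cells, which the method above then hands to the Symja expression parser.

import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class SpaceDelimitedTableSketch {
    public static void main(String[] args) throws IOException {
        CSVFormat csvFormat = CSVFormat.RFC4180.withDelimiter(' ');
        try (Reader reader = new StringReader("1 2 3\n4 5 6\n");
             CSVParser parser = csvFormat.parse(reader)) {
            for (CSVRecord record : parser) {
                for (String cell : record) {
                    System.out.print(cell + " "); // each cell would be parsed into an IExpr in Import.evaluate
                }
                System.out.println();
            }
        }
    }
}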
Use of org.apache.commons.csv.CSVFormat in project gephi by gephi: class SpreadsheetUtils, method configureCSVParser.
public static CSVParser configureCSVParser(File file, Character fieldSeparator, Charset charset, boolean withFirstRecordAsHeader) throws IOException {
    if (fieldSeparator == null) {
        fieldSeparator = ',';
    }

    CSVFormat csvFormat = CSVFormat.DEFAULT
            .withDelimiter(fieldSeparator)
            .withEscape('\\')
            .withIgnoreEmptyLines(true)
            .withNullString("")
            .withIgnoreSurroundingSpaces(true)
            .withTrim(true);

    if (withFirstRecordAsHeader) {
        csvFormat = csvFormat.withFirstRecordAsHeader().withAllowMissingColumnNames(false).withIgnoreHeaderCase(false);
    } else {
        csvFormat = csvFormat.withHeader((String[]) null).withSkipHeaderRecord(false);
    }

    boolean hasBOM = false;
    try (FileInputStream is = new FileInputStream(file)) {
        CharsetToolkit charsetToolkit = new CharsetToolkit(is);
        hasBOM = charsetToolkit.hasUTF8Bom() || charsetToolkit.hasUTF16BEBom() || charsetToolkit.hasUTF16LEBom();
    } catch (IOException e) {
        // NOOP
    }

    FileInputStream fileInputStream = new FileInputStream(file);
    InputStreamReader is = new InputStreamReader(fileInputStream, charset);
    if (hasBOM) {
        try {
            is.read();
        } catch (IOException e) {
            // should never happen, as a file with no content
            // but with a BOM has at least one char
        }
    }

    return new CSVParser(is, csvFormat);
}
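A hypothetical caller of this utility (the file name and the "Id"/"Label" headers are made up for illustration, and the import for Gephi's SpreadsheetUtils class is omitted): the returned parser should be closed, since it keeps the underlying InputStreamReader open.

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class SpreadsheetUtilsUsageSketch {
    public static void main(String[] args) throws IOException {
        // "nodes.csv" and the "Id"/"Label" headers are assumptions, not part of the Gephi code above
        try (CSVParser parser = SpreadsheetUtils.configureCSVParser(
                new File("nodes.csv"), ';', StandardCharsets.UTF_8, true)) {
            for (CSVRecord record : parser) {
                String id = record.get("Id");       // header lookups work because withFirstRecordAsHeader() was applied
                String label = record.get("Label");
                System.out.println(id + " -> " + label);
            }
        }
    }
}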