Use of org.supercsv.cellprocessor.constraint.UniqueHashCode in project voltdb by VoltDB.
The class Symbols, method loadFile:
public void loadFile(String filename) {
    // Schema for the CSV file: one cell processor per column, in order.
    final CellProcessor[] processors = new CellProcessor[] {
            new UniqueHashCode(), // Symbol
            new NotNull(),        // Name
            new NotNull(),        // LastSale
            new NotNull(),        // MarketCap
            new NotNull(),        // ADR TSO
            new NotNull(),        // IPOyear
            new NotNull(),        // Sector
            new NotNull(),        // Industry
            new NotNull(),        // Summary Quote
            new Optional()        // blank column
    };
    ICsvMapReader mapReader = null;
    try {
        mapReader = new CsvMapReader(new FileReader(filename), CsvPreference.STANDARD_PREFERENCE);
        // The header columns are used as the keys to the map returned by read().
        final String[] header = mapReader.getHeader(true);
        Map<String, Object> tuple;
        int rowsRead = 0;
        while ((tuple = mapReader.read(header, processors)) != null) {
            Symbol s = new Symbol();
            s.symbol = (String) tuple.get("Symbol");
            String price = (String) tuple.get("LastSale");
            if (price.equals("n/a")) {
                price = "20";
            }
            BigDecimal priceBD = new BigDecimal(price);
            s.price = priceBD.multiply(BD10000).intValue();
            symbols.add(s);
            rowsRead++;
        }
        System.out.printf("Read %d rows from CSV file at: %s\n", rowsRead, filename);
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(-1);
    } finally {
        if (mapReader != null) {
            try {
                mapReader.close();
            } catch (Exception e) {
                // ignore errors on close
            }
        }
    }
}
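In this schema, UniqueHashCode guards the Symbol column: it remembers the hashCode of every value it has processed and rejects a repeated value, which makes mapReader.read() fail on a duplicate symbol. A minimal isolated sketch of that behavior, assuming Super CSV 2.x (the demo class name and the CsvContext line/row/column numbers are placeholders, not part of the VoltDB source):

import org.supercsv.cellprocessor.constraint.UniqueHashCode;
import org.supercsv.cellprocessor.ift.CellProcessor;
import org.supercsv.exception.SuperCsvConstraintViolationException;
import org.supercsv.util.CsvContext;

public class UniqueHashCodeDemo {
    public static void main(String[] args) {
        CellProcessor unique = new UniqueHashCode();

        // First occurrence of a value passes through unchanged.
        unique.execute("AAPL", new CsvContext(1, 1, 1));

        try {
            // A repeated value is rejected; inside loadFile this would surface
            // from mapReader.read() for a duplicate Symbol cell.
            unique.execute("AAPL", new CsvContext(2, 2, 1));
        } catch (SuperCsvConstraintViolationException expected) {
            System.out.println("duplicate rejected: " + expected.getMessage());
        }
    }
}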
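The excerpt also relies on a few members of the enclosing Symbols class that are not shown. The sketch below gives a shape that would compile with the method above; it is inferred from the snippet, not copied from the VoltDB source (the names Symbol, symbols, and BD10000 come from the excerpt, while the field types and the scale-by-10,000 interpretation of price are assumptions):

import java.io.FileReader;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.supercsv.cellprocessor.Optional;
import org.supercsv.cellprocessor.constraint.NotNull;
import org.supercsv.cellprocessor.constraint.UniqueHashCode;
import org.supercsv.cellprocessor.ift.CellProcessor;
import org.supercsv.io.CsvMapReader;
import org.supercsv.io.ICsvMapReader;
import org.supercsv.prefs.CsvPreference;

public class Symbols {
    // One parsed row of the CSV file.
    static class Symbol {
        String symbol;
        int price; // LastSale scaled by 10,000 and stored as an integer
    }

    // Scale factor applied to LastSale before truncating to an int.
    static final BigDecimal BD10000 = new BigDecimal(10000);

    // Accumulates every row read by loadFile.
    final List<Symbol> symbols = new ArrayList<Symbol>();

    // loadFile(String filename) as shown above
}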