Use of org.supercsv.io.CsvMapReader in project photon-model by VMware:
class AWSCsvBillParser, method parseDetailedCsvBill.
/**
 * Parses an AWS detailed (hourly) billing CSV from the given stream and pushes the parsed
 * data to the supplied consumers.
 *
 * Rows are assumed to arrive grouped/ordered by usage-start hour; each time a row with a
 * new usage-start hour is seen, the accumulated per-account totals are flushed to
 * {@code hourlyStatsConsumer} for the previous hour's interval.
 *
 * @param inputStream raw CSV bill content; read as UTF-8 and closed by try-with-resources
 * @param ignorableInvoiceCharge invoice charges to skip (interpreted by {@code readRow})
 * @param configuredAccounts account ids of interest (interpreted by {@code readRow})
 * @param hourlyStatsConsumer invoked with the running per-account totals and the hour
 *        interval whenever a new usage-start hour begins, and once more for the final batch
 * @param monthlyStatsConsumer invoked exactly once at the end with the full month's totals
 * @throws IOException if reading or CSV parsing of the stream fails
 */
private void parseDetailedCsvBill(InputStream inputStream, Collection<String> ignorableInvoiceCharge, Set<String> configuredAccounts, BiConsumer<Map<String, AwsAccountDetailDto>, String> hourlyStatsConsumer, Consumer<Map<String, AwsAccountDetailDto>> monthlyStatsConsumer) throws IOException {
// Skip AWS's trailing comment lines (e.g. the "Don't see your tags..." footer).
final CsvPreference STANDARD_SKIP_COMMENTS = new CsvPreference.Builder(CsvPreference.STANDARD_PREFERENCE).skipComments(new CommentStartsWith(AWS_SKIP_COMMENTS)).build();
try (InputStreamReader reader = new InputStreamReader(inputStream, "UTF-8");
ICsvMapReader mapReader = new CsvMapReader(reader, STANDARD_SKIP_COMMENTS)) {
final String[] header = mapReader.getHeader(true);
List<CellProcessor> processorList = new ArrayList<>();
final CellProcessor[] basicProcessors = getDetailedProcessors(header);
processorList.addAll(Arrays.asList(basicProcessors));
List<String> tagHeaders = new ArrayList<>();
// Add new cell-processors for each extra tag column.
// Tag columns (user:...) appear after the fixed columns, so any header beyond the
// basic processor count is treated as an optional tag column.
int numberOfTags = header.length - basicProcessors.length;
if (numberOfTags > 0) {
for (int i = 0; i < numberOfTags; i++) {
processorList.add(new Optional());
tagHeaders.add(header[basicProcessors.length + i]);
}
}
CellProcessor[] cellProcessorArray = new CellProcessor[processorList.size()];
// Accumulates per-account totals across the whole bill; flushed (but not cleared
// here) to the hourly consumer at each hour boundary, and to the monthly consumer
// at the end. NOTE(review): whether the consumer resets state between batches is
// decided by the caller - confirm against hourlyStatsConsumer implementations.
Map<String, AwsAccountDetailDto> monthlyBill = new HashMap<>();
cellProcessorArray = processorList.toArray(cellProcessorArray);
Map<String, Object> rowMap;
Long prevRowTime = null;
Long prevRowEndTime;
String interval = null;
while ((rowMap = mapReader.read(header, cellProcessorArray)) != null) {
LocalDateTime currRowLocalDateTime = (LocalDateTime) rowMap.get(DetailedCsvHeaders.USAGE_START_DATE);
Long curRowTime = getMillisForHour(currRowLocalDateTime);
// Flush only on a genuine hour change; intervals containing '-' are skipped
// (presumably multi-hour/monthly summary intervals - see createInterval).
if (prevRowTime != null && curRowTime != null && !prevRowTime.equals(curRowTime) && !StringUtils.contains(interval, "-")) {
// This indicates that we have processed all rows belonging to a corresponding hour in the
// current month bill. Consume the batch
hourlyStatsConsumer.accept(monthlyBill, interval);
}
try {
readRow(rowMap, monthlyBill, tagHeaders, ignorableInvoiceCharge, configuredAccounts);
} catch (Exception e) {
// A malformed row is logged and skipped; parsing continues with the next row.
this.logger.warning(String.format("Got error while parsing a row in aws bill of %s", getStringFieldValue(rowMap, DetailedCsvHeaders.PAYER_ACCOUNT_ID) + e));
}
// Rows without a usage-start hour (e.g. summary rows) keep the previous interval.
if (curRowTime != null) {
prevRowTime = curRowTime;
prevRowEndTime = getMillisForHour((LocalDateTime) rowMap.get(DetailedCsvHeaders.USAGE_END_DATE));
interval = createInterval(prevRowTime, prevRowEndTime);
}
}
// Consume the final batch of parsed rows
hourlyStatsConsumer.accept(monthlyBill, interval);
monthlyStatsConsumer.accept(monthlyBill);
}
}
Use of org.supercsv.io.CsvMapReader in project photon-model by VMware:
class TestUtils, method extractAndParseCsvFile.
/**
 * Unzips the AWS bill archive at {@code filePath} (into its parent directory) and parses
 * the extracted CSV into one map per data row, keyed by header column name.
 *
 * @param filePath path to the zipped bill; the unzipped CSV path is derived by stripping
 *        the final extension (e.g. {@code bill.csv.zip} -> {@code bill.csv})
 * @return all parsed rows, in file order
 * @throws IOException if unzipping, reading, or CSV parsing fails
 */
private static List<Map<String, Object>> extractAndParseCsvFile(Path filePath) throws IOException {
    List<Map<String, Object>> csvRows = new ArrayList<>();
    String AWS_SKIP_COMMENTS = "Don't see your tags in the report";
    AWSCsvBillParser.unzip(filePath.toString(), filePath.getParent().toString());
    // Strip the trailing ".zip" to get the path of the file the unzip step produced.
    String unzippedCsvFilePathStr = filePath.toString().substring(0, filePath.toString().lastIndexOf('.'));
    final CsvPreference STANDARD_SKIP_COMMENTS = new CsvPreference.Builder(CsvPreference.STANDARD_PREFERENCE).skipComments(new CommentStartsWith(AWS_SKIP_COMMENTS)).build();
    try (InputStreamReader reader = new InputStreamReader(new FileInputStream(Paths.get(unzippedCsvFilePathStr).toFile()), "UTF-8");
            ICsvMapReader mapReader = new CsvMapReader(reader, STANDARD_SKIP_COMMENTS)) {
        final String[] header = mapReader.getHeader(true);
        List<CellProcessor> processorList = new ArrayList<>();
        final CellProcessor[] basicProcessors = AWSCsvBillParser.getDetailedProcessors(header);
        processorList.addAll(Arrays.asList(basicProcessors));
        // Add a permissive cell-processor for each extra (tag) column beyond the fixed set.
        int numberOfTags = header.length - basicProcessors.length;
        for (int i = 0; i < numberOfTags; i++) {
            processorList.add(new org.supercsv.cellprocessor.Optional());
        }
        CellProcessor[] cellProcessorArray = processorList.toArray(new CellProcessor[processorList.size()]);
        Map<String, Object> row;
        while ((row = mapReader.read(header, cellProcessorArray)) != null) {
            csvRows.add(row);
        }
        // Removed the original catch (Exception e) { throw e; } - a bare rethrow with no
        // logging or wrapping is a no-op; try-with-resources already closes the readers.
        return csvRows;
    }
}
Use of org.supercsv.io.CsvMapReader in project openscoring:
class CsvUtil, method readTable.
/**
 * Reads a CSV table of evaluation requests from {@code reader}.
 *
 * If the first header column is "id" (case-insensitive), that column supplies each row's
 * request id and is removed from the row's argument map; otherwise ids are null.
 *
 * @param reader source of CSV text; consumed and closed by this method
 * @param format CSV dialect to parse with
 * @return the parsed table: optional id column name plus one EvaluationRequest per row
 * @throws IOException if reading or CSV parsing fails
 */
public static Table<EvaluationRequest> readTable(BufferedReader reader, CsvPreference format) throws IOException {
    Table<EvaluationRequest> table = new Table<>();
    List<EvaluationRequest> requests = new ArrayList<>();
    // try-with-resources guarantees the parser (and the underlying reader) is closed even
    // when a read fails part-way; the original leaked it on any exception.
    try (CsvMapReader parser = new CsvMapReader(reader, format)) {
        String[] header = parser.getHeader(true);
        if (header.length > 0 && ("id").equalsIgnoreCase(header[0])) {
            table.setId(header[0]);
        }
        while (true) {
            Map<String, String> arguments = parser.read(header);
            if (arguments == null) {
                break;
            }
            // When no id column was detected, table.getId() is null and remove(null)
            // is a harmless no-op on the map, yielding a null request id.
            String id = arguments.remove(table.getId());
            EvaluationRequest request = new EvaluationRequest(id);
            request.setArguments(arguments);
            requests.add(request);
        }
    }
    table.setRows(requests);
    return table;
}
Use of org.supercsv.io.CsvMapReader in project mots by motech-implementations:
class CommunityHealthWorkerService, method processChwCsv.
/**
 * Processes a CSV file which contains a CHW list and returns a map of errors.
 *
 * Each row is validated (duplicate phone/CHW id, blank fields, unknown community,
 * missing phone for selected workers); valid rows create or update a
 * {@link CommunityHealthWorker}. Invalid rows are recorded in the error map and skipped.
 *
 * @param chwCsvFile CSV file with CHW list
 * @param selected whether imported workers should be marked selected (selected workers
 *        must have a phone number)
 * @return map with CSV line numbers as keys and error descriptions as values
 * @throws IOException in case of file issues
 */
@SuppressWarnings("PMD.CyclomaticComplexity")
@PreAuthorize(RoleNames.HAS_UPLOAD_CSV_ROLE)
public Map<Integer, String> processChwCsv(MultipartFile chwCsvFile, Boolean selected) throws IOException {
  Set<String> phoneNumberSet = new HashSet<>();
  Set<String> chwIdSet = new HashSet<>();
  Map<Integer, String> errorMap = new HashMap<>();
  // try-with-resources: the original never closed the reader, leaking the upload stream.
  try (ICsvMapReader csvMapReader = new CsvMapReader(new InputStreamReader(chwCsvFile.getInputStream()), CsvPreference.STANDARD_PREFERENCE)) {
    final String[] header = csvMapReader.getHeader(true);
    final CellProcessor[] processors = getProcessors();
    Map<String, Object> csvRow;
    while ((csvRow = csvMapReader.read(header, processors)) != null) {
      LOGGER.debug(String.format("lineNo=%s, rowNo=%s, chw=%s", csvMapReader.getLineNumber(), csvMapReader.getRowNumber(), csvRow));
      String phoneNumber = Objects.toString(csvRow.get("Mobile"), null);
      String chwId = Objects.toString(csvRow.get("CHW ID"), null);
      // Validate: reject in-file duplicates and rows with blank mandatory fields.
      if (phoneNumberSet.contains(phoneNumber)) {
        errorMap.put(csvMapReader.getLineNumber(), "Phone number is duplicated in CSV");
        continue;
      }
      if (chwIdSet.contains(chwId)) {
        errorMap.put(csvMapReader.getLineNumber(), "CHW ID is duplicated in CSV");
        continue;
      }
      if (validateBlankFieldsInCsv(csvMapReader.getLineNumber(), csvRow, errorMap)) {
        continue;
      }
      // Track seen values so later duplicates in the same file are flagged.
      if (phoneNumber != null) {
        phoneNumberSet.add(phoneNumber);
      }
      if (chwId != null) {
        chwIdSet.add(chwId);
      }
      String community = Objects.toString(csvRow.get("Community"), null);
      String facility = Objects.toString(csvRow.get("PHU"), null);
      Community chwCommunity = communityRepository.findByNameAndFacilityName(community, facility);
      if (chwCommunity == null) {
        errorMap.put(csvMapReader.getLineNumber(), String.format("There is no community %s in facility %s in MOTS", community, facility));
        continue;
      }
      // Update the existing worker if the CHW id is known, otherwise create a fresh one
      // with defaults (English, not selected).
      Optional<CommunityHealthWorker> existingHealthWorker = healthWorkerRepository.findByChwId(csvRow.get("CHW ID").toString());
      CommunityHealthWorker communityHealthWorker;
      if (existingHealthWorker.isPresent()) {
        communityHealthWorker = existingHealthWorker.get();
      } else {
        communityHealthWorker = new CommunityHealthWorker();
        communityHealthWorker.setPreferredLanguage(Language.ENGLISH);
        communityHealthWorker.setSelected(false);
      }
      // Selected workers (newly or already selected) must be reachable by phone.
      if ((selected || communityHealthWorker.getSelected()) && StringUtils.isBlank(phoneNumber)) {
        errorMap.put(csvMapReader.getLineNumber(), "Phone number is empty");
        continue;
      }
      communityHealthWorker.setChwId(csvRow.get("CHW ID").toString());
      communityHealthWorker.setFirstName(csvRow.get("First_Name").toString());
      communityHealthWorker.setSecondName(csvRow.get("Second_Name").toString());
      communityHealthWorker.setOtherName(Objects.toString(csvRow.get("Other_Name"), null));
      // Year of birth is derived from the "Age" column relative to the current year.
      communityHealthWorker.setYearOfBirth(csvRow.get("Age") != null ? LocalDate.now().getYear() - Integer.valueOf(Objects.toString(csvRow.get("Age"), null)) : null);
      communityHealthWorker.setGender(Gender.getByDisplayName(csvRow.get("Gender").toString()));
      communityHealthWorker.setLiteracy(Literacy.getByDisplayName(csvRow.get("Read_Write").toString()));
      communityHealthWorker.setEducationLevel(EducationLevel.getByDisplayName(csvRow.get("Education").toString()));
      communityHealthWorker.setPhoneNumber(phoneNumber);
      communityHealthWorker.setCommunity(chwCommunity);
      communityHealthWorker.setHasPeerSupervisor(csvRow.get("Peer_Supervisor").equals("Yes"));
      communityHealthWorker.setWorking(csvRow.get("Working").equals("Yes"));
      if (selected && !communityHealthWorker.getSelected()) {
        selectHealthWorker(communityHealthWorker);
      } else {
        healthWorkerRepository.save(communityHealthWorker);
      }
    }
  }
  return errorMap;
}
Use of org.supercsv.io.CsvMapReader in project waltz by khartec:
class ScenarioRatingImporter, method parseData.
/**
 * Loads the named classpath resource as CSV and maps each row to a ScenarioRatingRow.
 *
 * @param filename classpath resource name of the CSV file (quote '"', comma-delimited,
 *        LF line ends, empty lines ignored)
 * @return one ScenarioRatingRow per data row, in file order
 * @throws IOException if reading or CSV parsing fails
 */
private List<ScenarioRatingRow> parseData(String filename) throws IOException {
    CsvPreference prefs = new CsvPreference.Builder('"', ',', "\n")
            .ignoreEmptyLines(true)
            .build();
    List<ScenarioRatingRow> roadmapRows = new ArrayList<>();
    // try-with-resources replaces the original try/finally + null-check + manual close;
    // it also closes the underlying reader if CsvMapReader construction itself fails,
    // which the old finally block did not cover.
    try (InputStreamReader reader = new InputStreamReader(
                 ScenarioRatingImporter.class.getClassLoader().getResourceAsStream(filename));
         ICsvMapReader mapReader = new CsvMapReader(reader, prefs)) {
        String[] header = mapReader.getHeader(true);
        Map<String, String> row;
        while ((row = mapReader.read(header)) != null) {
            roadmapRows.add(ImmutableScenarioRatingRow.builder()
                    .roadmap(row.get("Roadmap"))
                    .scenario(row.get("Scenario"))
                    .column(row.get("Column"))
                    .row(row.get("Row"))
                    .assetCode(row.get("Asset Code"))
                    .rating(row.get("Rating"))
                    .description(row.get("Description"))
                    .providedBy(row.get("Last Updated By"))
                    .build());
        }
    }
    return roadmapRows;
}
Aggregations