Search in sources:

Example 1 with CommentStartsWith

Use of org.supercsv.comment.CommentStartsWith in the project photon-model by VMware.

From the class AWSCsvBillParser, method parseDetailedCsvBill:

/**
 * Parses a detailed AWS CSV bill, streaming rows into per-account detail DTOs.
 *
 * <p>Rows are accumulated into {@code monthlyBill}; every time the usage-start hour
 * changes, the batch collected so far is handed to {@code hourlyStatsConsumer}
 * together with the interval label of the just-finished hour. After the last row,
 * the final hourly batch and the complete monthly map are consumed.
 *
 * @param inputStream             raw (already unzipped) CSV bill content
 * @param ignorableInvoiceCharge  invoice charge descriptions to skip while reading rows
 * @param configuredAccounts      account ids that are configured for collection
 * @param hourlyStatsConsumer     receives the bill map plus the interval string for each completed hour
 * @param monthlyStatsConsumer    receives the fully populated monthly bill map at the end
 * @throws IOException if the CSV stream cannot be read
 */
private void parseDetailedCsvBill(InputStream inputStream, Collection<String> ignorableInvoiceCharge, Set<String> configuredAccounts, BiConsumer<Map<String, AwsAccountDetailDto>, String> hourlyStatsConsumer, Consumer<Map<String, AwsAccountDetailDto>> monthlyStatsConsumer) throws IOException {
    // Skip AWS' trailing comment lines (e.g. the "Don't see your tags..." footer).
    final CsvPreference skipCommentsPreference = new CsvPreference.Builder(CsvPreference.STANDARD_PREFERENCE)
            .skipComments(new CommentStartsWith(AWS_SKIP_COMMENTS)).build();
    try (InputStreamReader reader = new InputStreamReader(inputStream, "UTF-8");
            ICsvMapReader mapReader = new CsvMapReader(reader, skipCommentsPreference)) {
        final String[] header = mapReader.getHeader(true);
        final CellProcessor[] basicProcessors = getDetailedProcessors(header);
        List<CellProcessor> processorList = new ArrayList<>(Arrays.asList(basicProcessors));
        List<String> tagHeaders = new ArrayList<>();
        // Every header column beyond the basic set is a user-defined tag column:
        // parse each as an optional cell and remember its header name.
        for (int i = basicProcessors.length; i < header.length; i++) {
            processorList.add(new Optional());
            tagHeaders.add(header[i]);
        }
        CellProcessor[] cellProcessorArray = processorList.toArray(new CellProcessor[0]);
        Map<String, AwsAccountDetailDto> monthlyBill = new HashMap<>();
        Map<String, Object> rowMap;
        Long prevRowTime = null;
        Long prevRowEndTime;
        String interval = null;
        while ((rowMap = mapReader.read(header, cellProcessorArray)) != null) {
            LocalDateTime currRowLocalDateTime = (LocalDateTime) rowMap.get(DetailedCsvHeaders.USAGE_START_DATE);
            Long curRowTime = getMillisForHour(currRowLocalDateTime);
            if (prevRowTime != null && curRowTime != null && !prevRowTime.equals(curRowTime)
                    && !StringUtils.contains(interval, "-")) {
                // The usage hour changed: all rows of the previous hour have been
                // read, so hand the batch collected so far to the hourly consumer.
                hourlyStatsConsumer.accept(monthlyBill, interval);
            }
            try {
                readRow(rowMap, monthlyBill, tagHeaders, ignorableInvoiceCharge, configuredAccounts);
            } catch (Exception e) {
                // Fixed: the exception was previously concatenated into the %s argument,
                // mangling the message. Log account id and exception separately.
                this.logger.warning(String.format(
                        "Got error while parsing a row in aws bill of %s: %s",
                        getStringFieldValue(rowMap, DetailedCsvHeaders.PAYER_ACCOUNT_ID), e));
            }
            if (curRowTime != null) {
                prevRowTime = curRowTime;
                prevRowEndTime = getMillisForHour((LocalDateTime) rowMap.get(DetailedCsvHeaders.USAGE_END_DATE));
                interval = createInterval(prevRowTime, prevRowEndTime);
            }
        }
        // Consume the final batch of parsed rows, then the monthly aggregate.
        hourlyStatsConsumer.accept(monthlyBill, interval);
        monthlyStatsConsumer.accept(monthlyBill);
    }
}
Also used : LocalDateTime(org.joda.time.LocalDateTime) InputStreamReader(java.io.InputStreamReader) Optional(org.supercsv.cellprocessor.Optional) HashMap(java.util.HashMap) CommentStartsWith(org.supercsv.comment.CommentStartsWith) ArrayList(java.util.ArrayList) IOException(java.io.IOException) SuperCsvCellProcessorException(org.supercsv.exception.SuperCsvCellProcessorException) CsvPreference(org.supercsv.prefs.CsvPreference) AwsAccountDetailDto(com.vmware.photon.controller.model.adapters.aws.dto.AwsAccountDetailDto) ICsvMapReader(org.supercsv.io.ICsvMapReader) CsvMapReader(org.supercsv.io.CsvMapReader) CellProcessor(org.supercsv.cellprocessor.ift.CellProcessor) ICsvMapReader(org.supercsv.io.ICsvMapReader)

Example 2 with CommentStartsWith

Use of org.supercsv.comment.CommentStartsWith in the project photon-model by VMware.

From the class TestUtils, method extractAndParseCsvFile:

/**
 * Unzips the AWS bill archive at {@code filePath} and parses the resulting CSV
 * into one map per row, keyed by the CSV header names.
 *
 * @param filePath path to the zipped bill; the unzipped file is expected next to it,
 *                 named as the zip path minus its extension
 * @return all parsed CSV rows, in file order
 * @throws IOException if unzipping or reading the CSV fails
 */
private static List<Map<String, Object>> extractAndParseCsvFile(Path filePath) throws IOException {
    List<Map<String, Object>> csvRows = new ArrayList<>();
    // Local variable, so lowerCamelCase (was UPPER_SNAKE_CASE).
    String awsSkipComments = "Don't see your tags in the report";
    AWSCsvBillParser.unzip(filePath.toString(), filePath.getParent().toString());
    // Unzipped CSV path = zip path with its extension stripped.
    String unzippedCsvFilePathStr = filePath.toString().substring(0, filePath.toString().lastIndexOf('.'));
    final CsvPreference skipCommentsPreference = new CsvPreference.Builder(CsvPreference.STANDARD_PREFERENCE)
            .skipComments(new CommentStartsWith(awsSkipComments)).build();
    try (InputStreamReader reader = new InputStreamReader(
            new FileInputStream(Paths.get(unzippedCsvFilePathStr).toFile()), "UTF-8");
            ICsvMapReader mapReader = new CsvMapReader(reader, skipCommentsPreference)) {
        final String[] header = mapReader.getHeader(true);
        final CellProcessor[] basicProcessors = AWSCsvBillParser.getDetailedProcessors(header);
        List<CellProcessor> processorList = new ArrayList<>(Arrays.asList(basicProcessors));
        // Columns beyond the basic set are user tag columns; parse them as optional cells.
        for (int i = basicProcessors.length; i < header.length; i++) {
            processorList.add(new org.supercsv.cellprocessor.Optional());
        }
        CellProcessor[] cellProcessorArray = processorList.toArray(new CellProcessor[0]);
        Map<String, Object> row;
        while ((row = mapReader.read(header, cellProcessorArray)) != null) {
            csvRows.add(row);
        }
        return csvRows;
    }
    // Removed the pointless `catch (Exception e) { throw e; }` rethrow — it added
    // nothing; try-with-resources already closes the readers on any exception.
}
Also used : InputStreamReader(java.io.InputStreamReader) AmazonEC2ClientBuilder(com.amazonaws.services.ec2.AmazonEC2ClientBuilder) CommentStartsWith(org.supercsv.comment.CommentStartsWith) ArrayList(java.util.ArrayList) FileInputStream(java.io.FileInputStream) IOException(java.io.IOException) CsvPreference(org.supercsv.prefs.CsvPreference) ICsvMapReader(org.supercsv.io.ICsvMapReader) CsvMapReader(org.supercsv.io.CsvMapReader) CellProcessor(org.supercsv.cellprocessor.ift.CellProcessor) Map(java.util.Map) ICsvMapReader(org.supercsv.io.ICsvMapReader)

Aggregations

IOException (java.io.IOException)2 InputStreamReader (java.io.InputStreamReader)2 ArrayList (java.util.ArrayList)2 CellProcessor (org.supercsv.cellprocessor.ift.CellProcessor)2 CommentStartsWith (org.supercsv.comment.CommentStartsWith)2 CsvMapReader (org.supercsv.io.CsvMapReader)2 ICsvMapReader (org.supercsv.io.ICsvMapReader)2 CsvPreference (org.supercsv.prefs.CsvPreference)2 AmazonEC2ClientBuilder (com.amazonaws.services.ec2.AmazonEC2ClientBuilder)1 AwsAccountDetailDto (com.vmware.photon.controller.model.adapters.aws.dto.AwsAccountDetailDto)1 FileInputStream (java.io.FileInputStream)1 HashMap (java.util.HashMap)1 Map (java.util.Map)1 LocalDateTime (org.joda.time.LocalDateTime)1 Optional (org.supercsv.cellprocessor.Optional)1 SuperCsvCellProcessorException (org.supercsv.exception.SuperCsvCellProcessorException)1