Use of org.apache.commons.csv.CSVPrinter in project webapp by elimu-ai: class StoryBookCsvExportController, method handleRequest.
/**
 * Exports all story books as a CSV download. Each row holds one story book;
 * the last column contains the book's chapters (with their paragraphs)
 * serialized as a JSON array.
 *
 * @param response the HTTP response; content type and length are set here
 * @param outputStream the response body stream the CSV bytes are written to
 * @throws IOException if building or writing the CSV content fails
 */
@RequestMapping(value = "/storybooks.csv", method = RequestMethod.GET)
public void handleRequest(HttpServletResponse response, OutputStream outputStream) throws IOException {
    logger.info("handleRequest");

    List<StoryBook> storyBooks = storyBookDao.readAllOrdered();
    logger.info("storyBooks.size(): " + storyBooks.size());

    CSVFormat csvFormat = CSVFormat.DEFAULT.withHeader("id", "title", "description", "content_license", "attribution_url", "reading_level", "cover_image_id", "chapters");
    StringWriter stringWriter = new StringWriter();
    // try-with-resources closes (and flushes) the printer even if a record fails to print;
    // the original leaked the printer and flushed redundantly on every iteration.
    try (CSVPrinter csvPrinter = new CSVPrinter(stringWriter, csvFormat)) {
        for (StoryBook storyBook : storyBooks) {
            logger.info("storyBook.getTitle(): \"" + storyBook.getTitle() + "\"");

            // The cover image is optional; export only its ID when present.
            Long coverImageId = null;
            if (storyBook.getCoverImage() != null) {
                coverImageId = storyBook.getCoverImage().getId();
            }

            // Store chapters as JSON objects
            JSONArray chaptersJsonArray = new JSONArray();
            List<StoryBookChapter> storyBookChapters = storyBookChapterDao.readAll(storyBook);
            logger.info("storyBookChapters.size(): " + storyBookChapters.size());
            for (StoryBookChapter storyBookChapter : storyBookChapters) {
                logger.info("storyBookChapter.getId(): " + storyBookChapter.getId());

                StoryBookChapterGson storyBookChapterGson = JpaToGsonConverter.getStoryBookChapterGson(storyBookChapter);
                // TODO: move this code block to JpaToGsonConverter?
                // Strip the chapter image down to its ID so the export stays compact.
                if (storyBookChapterGson.getImage() != null) {
                    ImageGson imageGsonWithIdOnly = new ImageGson();
                    imageGsonWithIdOnly.setId(storyBookChapterGson.getImage().getId());
                    storyBookChapterGson.setImage(imageGsonWithIdOnly);
                }

                // Store paragraphs as JSON objects
                List<StoryBookParagraphGson> storyBookParagraphs = new ArrayList<>();
                for (StoryBookParagraph storyBookParagraph : storyBookParagraphDao.readAll(storyBookChapter)) {
                    logger.info("storyBookParagraph.getId(): " + storyBookParagraph.getId());
                    StoryBookParagraphGson storyBookParagraphGson = JpaToGsonConverter.getStoryBookParagraphGson(storyBookParagraph);
                    // Words are not needed in the export; drop them to keep the JSON small.
                    storyBookParagraphGson.setWords(null);
                    storyBookParagraphs.add(storyBookParagraphGson);
                }
                logger.info("storyBookParagraphs.size(): " + storyBookParagraphs.size());
                storyBookChapterGson.setStoryBookParagraphs(storyBookParagraphs);

                String json = new Gson().toJson(storyBookChapterGson);
                JSONObject jsonObject = new JSONObject(json);
                logger.info("jsonObject: " + jsonObject);
                chaptersJsonArray.put(jsonObject);
            }
            logger.info("chaptersJsonArray: " + chaptersJsonArray);

            csvPrinter.printRecord(storyBook.getId(), storyBook.getTitle(), storyBook.getDescription(), storyBook.getContentLicense(), storyBook.getAttributionUrl(), storyBook.getReadingLevel(), coverImageId, chaptersJsonArray);
        }
    }
    String csvFileContent = stringWriter.toString();

    response.setContentType("text/csv");
    // Encode explicitly as UTF-8 so Content-Length matches the bytes actually sent,
    // independent of the platform default charset. (getBytes(String) throws
    // UnsupportedEncodingException, a subclass of the declared IOException.)
    byte[] bytes = csvFileContent.getBytes("UTF-8");
    response.setContentLength(bytes.length);
    // Propagate write failures instead of swallowing them; the method already
    // declares "throws IOException" and the original catch only logged the error.
    outputStream.write(bytes);
    outputStream.flush();
    outputStream.close();
}
Use of org.apache.commons.csv.CSVPrinter in project QueryAnalysis by Wikidata: class OutputHandlerTSV, method initialize.
/**
 * Creates the file specified in the constructor and writes the header.
 *
 * @param fileToWrite location of the file to write the received values to
 * @param queryHandlerFactoryToSet the query handler factory to supply the query handler to generate the output with
 * @throws IOException if the output file could not be written to
 */
public void initialize(String fileToWrite, QueryHandlerFactory queryHandlerFactoryToSet) throws IOException {
    if (!Main.gzipOutput) {
        outputFile = fileToWrite + ".tsv";
        bufferedWriter = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(outputFile, false), StandardCharsets.UTF_8));
    } else {
        outputFile = fileToWrite + ".tsv.gz";
        try {
            bufferedWriter = new BufferedWriter(new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(outputFile, false)), StandardCharsets.UTF_8));
        } catch (IOException e) {
            logger.error("Somehow we are unable to write the output to " + outputFile, e);
            // Rethrow instead of continuing: the original swallowed this exception and
            // then dereferenced the still-null bufferedWriter below, causing an NPE.
            throw e;
        }
    }
    // TSV header: one column per computed query metric.
    List<String> header = new ArrayList<>();
    header.add("Valid");
    header.add("First");
    header.add("UniqueId");
    header.add("OriginalId");
    header.add("SourceCategory");
    header.add("ToolName");
    header.add("ToolVersion");
    header.add("ExampleQueryStringComparison");
    header.add("ExampleQueryParsedComparison");
    header.add("StringLengthWithComments");
    header.add("QuerySize");
    header.add("VariableCountHead");
    header.add("VariableCountPattern");
    header.add("TripleCountWithService");
    header.add("TripleCountWithoutService");
    header.add("QueryType");
    header.add("QueryComplexity");
    header.add("NonSimplePropertyPaths");
    header.add("SubjectsAndObjects");
    header.add("Predicates");
    header.add("Categories");
    header.add("Coordinates");
    header.add("UsedSparqlFeatures");
    header.add("PrimaryLanguage");
    header.add("ServiceCalls");
    header.add("original_line(filename_line)");
    // Tab-separated format with a fixed '\n' record separator and '"' quoting.
    // toArray(new String[0]) is the idiomatic (and on modern JVMs fastest) form.
    csvPrinter = new CSVPrinter(bufferedWriter, CSVFormat.newFormat('\t').withHeader(header.toArray(new String[0])).withRecordSeparator('\n').withQuote('"'));
    this.queryHandlerFactory = queryHandlerFactoryToSet;
}
Use of org.apache.commons.csv.CSVPrinter in project QueryAnalysis by Wikidata: class OutputHandlerAnonymizer, method initialize.
/**
 * Creates the output file and writes the TSV header for anonymized queries.
 *
 * @param fileToWrite the file to write the anonymized queries to
 * @param queryHandlerFactoryToSet the query handler class to use for checking query validity
 * @throws IOException if the output file could not be written to
 */
public void initialize(String fileToWrite, QueryHandlerFactory queryHandlerFactoryToSet) throws IOException {
    if (!Main.gzipOutput) {
        outputFile = fileToWrite + ".tsv";
        bufferedWriter = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(outputFile, false), StandardCharsets.UTF_8));
    } else {
        outputFile = fileToWrite + ".tsv.gz";
        try {
            bufferedWriter = new BufferedWriter(new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(outputFile, false)), StandardCharsets.UTF_8));
        } catch (IOException e) {
            logger.error("Somehow we are unable to write the output to " + outputFile, e);
            // Rethrow instead of continuing: the original swallowed this exception and
            // then dereferenced the still-null bufferedWriter below, causing an NPE.
            throw e;
        }
    }
    List<String> header = new ArrayList<>();
    header.add("anonymizedQuery");
    header.add("timestamp");
    header.add("sourceCategory");
    header.add("user_agent");
    // Tab-separated format with a fixed '\n' record separator and '"' quoting.
    csvPrinter = new CSVPrinter(bufferedWriter, CSVFormat.newFormat('\t').withHeader(header.toArray(new String[0])).withRecordSeparator('\n').withQuote('"'));
    this.queryHandlerFactory = queryHandlerFactoryToSet;
}
Use of org.apache.commons.csv.CSVPrinter in project thingsboard by thingsboard: class CassandraDbHelper, method dumpCfIfExists.
/**
 * Dumps every row of the given column family (table) to a temporary CSV file.
 *
 * @param ks keyspace metadata used to check whether the table exists
 * @param session session used to run the SELECT
 * @param cfName name of the column family to dump
 * @param columns column names to extract from each row
 * @param defaultValues fallback values substituted per column when a cell is absent
 * @param dumpPrefix prefix for the temporary dump file's name
 * @param printHeader whether to emit the column names as a CSV header row
 * @return the path of the dump file, or {@code null} if the table does not exist
 * @throws Exception if creating or writing the dump file fails
 */
public static Path dumpCfIfExists(KeyspaceMetadata ks, GuavaSession session, String cfName, String[] columns, String[] defaultValues, String dumpPrefix, boolean printHeader) throws Exception {
    // Guard clause: nothing to dump when the table is missing.
    if (ks.getTable(cfName) == null) {
        return null;
    }
    Path dumpFile = Files.createTempFile(dumpPrefix, null);
    Files.deleteIfExists(dumpFile);
    CSVFormat format = printHeader ? CSV_DUMP_FORMAT.withHeader(columns) : CSV_DUMP_FORMAT;
    try (CSVPrinter printer = new CSVPrinter(Files.newBufferedWriter(dumpFile), format)) {
        Statement selectAll = SimpleStatement.newInstance("SELECT * FROM " + cfName);
        // NOTE(review): the return value of setPageSize is discarded here, as in the
        // original; if statements are immutable in this driver version, the page size
        // may not actually take effect — confirm against the driver API.
        selectAll.setPageSize(1000);
        for (Row row : session.execute(selectAll)) {
            if (row != null) {
                dumpRow(row, columns, defaultValues, printer);
            }
        }
    }
    return dumpFile;
}
Use of org.apache.commons.csv.CSVPrinter in project thingsboard by thingsboard: class CassandraDbHelper, method appendToEndOfLine.
/**
 * Appends the given value as an extra last column to every record of the dump
 * file, rewriting the file in place via a temporary file.
 *
 * @param targetDumpFile the CSV dump file to rewrite
 * @param toAppend the value appended to the end of every record
 * @throws Exception if reading, writing or replacing the dump file fails
 */
public static void appendToEndOfLine(Path targetDumpFile, String toAppend) throws Exception {
    Path tmp = Files.createTempFile(null, null);
    try {
        // One try-with-resources for both ends of the copy; a plain loop replaces the
        // original forEach lambda, so checked IOExceptions propagate directly instead
        // of being wrapped in a RuntimeException.
        try (CSVParser csvParser = new CSVParser(Files.newBufferedReader(targetDumpFile), CSV_DUMP_FORMAT);
             CSVPrinter csvPrinter = new CSVPrinter(Files.newBufferedWriter(tmp), CSV_DUMP_FORMAT)) {
            for (var record : csvParser) {
                List<String> newRecord = new ArrayList<>();
                record.forEach(newRecord::add);
                newRecord.add(toAppend);
                csvPrinter.printRecord(newRecord);
            }
        }
        Files.move(tmp, targetDumpFile, StandardCopyOption.REPLACE_EXISTING);
    } finally {
        // The original leaked the temp file on failure; remove it whenever the move
        // did not happen (deleteIfExists is a no-op after a successful move).
        Files.deleteIfExists(tmp);
    }
}
Aggregations