Usage of org.odftoolkit.odfdom.doc.OdfDocument in the OpenRefine project: class OdsImporter, method createParserUIInitializationData.
@Override
public JSONObject createParserUIInitializationData(ImportingJob job, List<JSONObject> fileRecords, String format) {
    // Build the importer UI options, adding one record per sheet of the first
    // uploaded ODF file so the user can pick which sheets to import.
    JSONObject options = super.createParserUIInitializationData(job, fileRecords, format);
    JSONArray sheetRecords = new JSONArray();
    JSONUtilities.safePut(options, "sheetRecords", sheetRecords);
    if (fileRecords.isEmpty()) {
        // Nothing uploaded yet; avoid IndexOutOfBoundsException from get(0).
        return options;
    }
    OdfDocument odfDoc = null;
    try {
        JSONObject firstFileRecord = fileRecords.get(0);
        File file = ImportingUtilities.getFile(job, firstFileRecord);
        // try-with-resources: the original opened this stream and never closed it.
        try (InputStream is = new FileInputStream(file)) {
            odfDoc = OdfDocument.loadDocument(is);
        }
        List<OdfTable> tables = odfDoc.getTableList();
        boolean hasData = false; // only the first non-empty sheet is pre-selected
        for (OdfTable sheet : tables) {
            int rows = sheet.getRowCount();
            JSONObject sheetRecord = new JSONObject();
            JSONUtilities.safePut(sheetRecord, "name", sheet.getTableName());
            JSONUtilities.safePut(sheetRecord, "rows", rows);
            if (!hasData && rows > 0) {
                JSONUtilities.safePut(sheetRecord, "selected", true);
                hasData = true;
            } else if (hasData) {
                JSONUtilities.safePut(sheetRecord, "selected", false);
            }
            // NB: an empty sheet before the first non-empty one gets no
            // "selected" key at all — preserved from the original behavior.
            JSONUtilities.append(sheetRecords, sheetRecord);
        }
    } catch (FileNotFoundException e) {
        logger.info("File not found", e);
    } catch (Exception e) {
        // ODF throws *VERY* wide exceptions
        logger.info("Error reading ODF spreadsheet", e);
    } finally {
        if (odfDoc != null) {
            odfDoc.close();
        }
    }
    return options;
}
Usage of org.odftoolkit.odfdom.doc.OdfDocument in the structr project: class ODFExporter, method exportImage.
/**
 * Replaces the placeholder image in the node's result document with the
 * Image identified by {@code uuid}, rewriting the matching draw:image
 * file path inside the ODF package and re-saving the document in place.
 */
public static void exportImage(final ODFExporter thisNode, final String uuid) {
    final File output = thisNode.getResultDocument();
    OdfDocument doc = null;
    try {
        final App app = StructrApp.getInstance();
        final Image result = app.nodeQuery(Image.class).and(GraphObject.id, uuid).getFirst();
        if (result == null) {
            // Original dereferenced result unconditionally and would NPE here.
            logger.warn("No image found for UUID {}, nothing to export", uuid);
            return;
        }
        String imageName = result.getProperty(new StringProperty("name"));
        String contentType = result.getProperty(new StringProperty("contentType"));
        String templateImagePath = null;
        doc = OdfDocument.loadDocument(output.getFileOnDisk().getAbsolutePath());
        NodeList nodes = doc.getContentRoot().getElementsByTagName(ODF_IMAGE_PARENT_NAME);
        // Walk every image frame; all frames whose name matches are retargeted
        // (intentionally not breaking after the first hit).
        for (int i = 0; i < nodes.getLength(); i++) {
            Node currentNode = nodes.item(i);
            NamedNodeMap attrs = currentNode.getAttributes();
            Node fieldName = attrs.getNamedItem(ODF_IMAGE_ATTRIBUTE_PARENT_IMAGE_NAME);
            if (fieldName != null && fieldName.getTextContent().equals(imageName)) {
                NamedNodeMap childAttrs = currentNode.getFirstChild().getAttributes();
                Node filePath = childAttrs.getNamedItem(ODF_IMAGE_ATTRIBUTE_FILE_PATH);
                templateImagePath = filePath.getTextContent();
                filePath.setTextContent(ODF_IMAGE_DIRECTORY + imageName);
            }
        }
        OdfPackage pkg = doc.getPackage();
        if (templateImagePath != null && !templateImagePath.isEmpty()) {
            // Drop the stale template image so the package does not keep both.
            pkg.remove(templateImagePath);
        }
        pkg.insert(new URI(result.getFileOnDisk().getAbsolutePath()), ODF_IMAGE_DIRECTORY + imageName, contentType);
        pkg.save(output.getFileOnDisk().getAbsolutePath());
        pkg.close();
    } catch (Exception e) {
        logger.error("Error while exporting image to document", e);
    } finally {
        // Original only closed on the success path, leaking the document
        // (and its package) whenever anything above threw.
        if (doc != null) {
            doc.close();
        }
    }
}
Usage of org.odftoolkit.odfdom.doc.OdfDocument in the OpenRefine project: class OdsImporter, method createParserUIInitializationData (Jackson-based variant).
@Override
public ObjectNode createParserUIInitializationData(ImportingJob job, List<ObjectNode> fileRecords, String format) {
    // Build the importer UI options: one sheet record per sheet of every
    // uploaded ODF file, keyed by "fileName#sheetIndex" so the parser can
    // map a selection back to its source file.
    ObjectNode options = super.createParserUIInitializationData(job, fileRecords, format);
    ArrayNode sheetRecords = ParsingUtilities.mapper.createArrayNode();
    JSONUtilities.safePut(options, "sheetRecords", sheetRecords);
    try {
        for (ObjectNode fileRecord : fileRecords) {
            File file = ImportingUtilities.getFile(job, fileRecord);
            // Original leaked the stream, and reassigned a single odfDoc
            // variable each iteration so only the LAST document was ever
            // closed — close both per file instead.
            OdfDocument odfDoc = null;
            try (InputStream is = new FileInputStream(file)) {
                odfDoc = OdfDocument.loadDocument(is);
                List<OdfTable> tables = odfDoc.getTableList();
                for (int i = 0; i < tables.size(); i++) {
                    OdfTable sheet = tables.get(i);
                    int rows = sheet.getRowCount();
                    ObjectNode sheetRecord = ParsingUtilities.mapper.createObjectNode();
                    JSONUtilities.safePut(sheetRecord, "name", file.getName() + "#" + sheet.getTableName());
                    JSONUtilities.safePut(sheetRecord, "fileNameAndSheetIndex", file.getName() + "#" + i);
                    JSONUtilities.safePut(sheetRecord, "rows", rows);
                    // Unlike the legacy variant, every non-empty sheet is pre-selected.
                    JSONUtilities.safePut(sheetRecord, "selected", rows > 0);
                    JSONUtilities.append(sheetRecords, sheetRecord);
                }
            } finally {
                if (odfDoc != null) {
                    odfDoc.close();
                }
            }
        }
    } catch (FileNotFoundException e) {
        logger.info("File not found", e);
    } catch (Exception e) {
        // ODF throws *VERY* wide exceptions
        logger.info("Error reading ODF spreadsheet", e);
    }
    return options;
}
Usage of org.odftoolkit.odfdom.doc.OdfDocument in the OpenRefine project: class OdsImporter, method parseOneFile (org.json-based variant).
@Override
public void parseOneFile(Project project, ProjectMetadata metadata, ImportingJob job, String fileSource, InputStream inputStream, int limit, JSONObject options, List<Exception> exceptions) {
    // Parse every sheet selected in options ("sheets" index array) into the project.
    OdfDocument odfDoc;
    try {
        odfDoc = OdfDocument.loadDocument(inputStream);
    } catch (Exception e) {
        // odfdom declares very broad checked exceptions; record and abort this file.
        exceptions.add(e);
        return;
    }
    try {
        List<OdfTable> tables = odfDoc.getTableList();
        int[] sheets = JSONUtilities.getIntArray(options, "sheets");
        for (int sheetIndex : sheets) {
            final OdfTable table = tables.get(sheetIndex);
            final int lastRow = table.getRowCount();
            TableDataReader dataReader = new TableDataReader() {
                int nextRow = 0;
                // Recon cache shared across rows so identical cell values
                // resolve to the same reconciliation object.
                Map<String, Recon> reconMap = new HashMap<String, Recon>();

                @Override
                public List<Object> getNextRowOfCells() throws IOException {
                    // NOTE(review): `> lastRow` admits index == getRowCount(),
                    // one past the last row; likewise `<= lastCell` below reads
                    // one cell past getCellCount(). Looks off-by-one — confirm
                    // against odfdom's getRowByIndex/getCellByIndex semantics
                    // before tightening, since they may auto-create entries.
                    if (nextRow > lastRow) {
                        return null;
                    }
                    List<Object> cells = new ArrayList<Object>();
                    OdfTableRow row = table.getRowByIndex(nextRow++);
                    if (row != null) {
                        int lastCell = row.getCellCount();
                        for (int cellIndex = 0; cellIndex <= lastCell; cellIndex++) {
                            Cell cell = null;
                            OdfTableCell sourceCell = row.getCellByIndex(cellIndex);
                            if (sourceCell != null) {
                                cell = extractCell(sourceCell, reconMap);
                            }
                            cells.add(cell);
                        }
                    }
                    return cells;
                }
            };
            TabularImportingParserBase.readTable(project, metadata, job, dataReader, fileSource + "#" + table.getTableName(), limit, options, exceptions);
        }
    } finally {
        // Original never released the document; readTable consumes each
        // dataReader synchronously, so closing here is safe.
        odfDoc.close();
    }
}
Usage of org.odftoolkit.odfdom.doc.OdfDocument in the OpenRefine project: class OdsImporter, method parseOneFile (Jackson-based variant).
@Override
public void parseOneFile(Project project, ProjectMetadata metadata, ImportingJob job, String fileSource, InputStream inputStream, int limit, ObjectNode options, List<Exception> exceptions) {
    // Parse the sheets of THIS file that were selected in the UI; selections
    // are encoded as "fileName#sheetIndex" strings in options["sheets"].
    OdfDocument odfDoc;
    try {
        odfDoc = OdfDocument.loadDocument(inputStream);
    } catch (Exception e) {
        // odfdom declares very broad checked exceptions; record and abort this file.
        exceptions.add(e);
        return;
    }
    try {
        List<OdfTable> tables = odfDoc.getTableList();
        ArrayNode sheets = JSONUtilities.getArray(options, "sheets");
        for (int i = 0; i < sheets.size(); i++) {
            ObjectNode sheetObj = JSONUtilities.getObjectElement(sheets, i);
            // Value is "fileName#sheetIndex". NOTE(review): split("#") would
            // misparse a file name that itself contains '#'; a lastIndexOf-based
            // split would be safer — verify against the record producer.
            String[] fileNameAndSheetIndex = sheetObj.get("fileNameAndSheetIndex").asText().split("#");
            if (!fileNameAndSheetIndex[0].equals(fileSource)) {
                continue; // selection belongs to a different uploaded file
            }
            final OdfTable table = tables.get(Integer.parseInt(fileNameAndSheetIndex[1]));
            final int lastRow = table.getRowCount();
            TableDataReader dataReader = new TableDataReader() {
                int nextRow = 0;
                // Recon cache shared across rows so identical cell values
                // resolve to the same reconciliation object.
                Map<String, Recon> reconMap = new HashMap<String, Recon>();

                @Override
                public List<Object> getNextRowOfCells() throws IOException {
                    // NOTE(review): `> lastRow` admits index == getRowCount(),
                    // one past the last row; likewise `<= lastCell` below. Looks
                    // off-by-one — confirm odfdom's getRowByIndex/getCellByIndex
                    // semantics before tightening (they may auto-create entries).
                    if (nextRow > lastRow) {
                        return null;
                    }
                    List<Object> cells = new ArrayList<Object>();
                    OdfTableRow row = table.getRowByIndex(nextRow++);
                    int maxCol = 0;
                    if (row != null) {
                        int lastCell = row.getCellCount();
                        for (int cellIndex = 0; cellIndex <= lastCell; cellIndex++) {
                            Cell cell = null;
                            OdfTableCell sourceCell = row.getCellByIndex(cellIndex);
                            if (sourceCell != null) {
                                cell = extractCell(sourceCell, reconMap);
                            }
                            cells.add(cell);
                            if (cell != null && cellIndex > maxCol) {
                                maxCol = cellIndex;
                            }
                        }
                    }
                    // Right-truncate trailing null cells.
                    return cells.subList(0, maxCol + 1);
                }
            };
            TabularImportingParserBase.readTable(project, metadata, job, dataReader, fileSource + "#" + table.getTableName(), limit, options, exceptions);
        }
    } finally {
        // Original never released the document; readTable consumes each
        // dataReader synchronously, so closing here is safe.
        odfDoc.close();
    }
}
Aggregations