Use of org.apache.poi.hslf.exceptions.HSLFException in project poi by apache.
Class HSLFSlideShowImpl, method updateAndWriteDependantRecords.
/**
 * This is a helper function, needed when adding new position dependent
 * records or when finally writing the slideshow to a file.
 *
 * @param os the stream to write to; if null, only the references are updated
 * @param interestingRecords a map of interesting records (PersistPtrHolder and UserEditAtom)
 *        referenced by their RecordType. Only the very last of each type will be saved to the map.
 *        May be null, if not needed.
 * @throws IOException if writing to the stream fails
 */
public void updateAndWriteDependantRecords(OutputStream os, Map<RecordTypes, PositionDependentRecord> interestingRecords) throws IOException {
    // For position dependent records, hold where they were and now are.
    // As we go along, update, and hand over, to any position dependent
    // records we happen across.
    Map<Integer, Integer> oldToNewPositions = new HashMap<Integer, Integer>();

    // First pass - figure out where all the position dependent
    // records are going to end up, in the new scheme.
    // (Annoyingly, some PowerPoint files have PersistPtrHolders
    // that reference slides after the PersistPtrHolder.)
    UserEditAtom usr = null;
    PersistPtrHolder ptr = null;
    CountingOS cos = new CountingOS();
    for (Record record : _records) {
        // all top level records are position dependent
        assert (record instanceof PositionDependentRecord);
        PositionDependentRecord pdr = (PositionDependentRecord) record;
        int oldPos = pdr.getLastOnDiskOffset();
        int newPos = cos.size();
        pdr.setLastOnDiskOffset(newPos);
        if (oldPos != UNSET_OFFSET) {
            // new records don't need a mapping, as they aren't in a relation yet
            oldToNewPositions.put(oldPos, newPos);
        }

        // Grab interesting records as they come past;
        // this will only save the very last record of each type
        RecordTypes saveme = null;
        int recordType = (int) record.getRecordType();
        if (recordType == RecordTypes.PersistPtrIncrementalBlock.typeID) {
            saveme = RecordTypes.PersistPtrIncrementalBlock;
            ptr = (PersistPtrHolder) pdr;
        } else if (recordType == RecordTypes.UserEditAtom.typeID) {
            saveme = RecordTypes.UserEditAtom;
            usr = (UserEditAtom) pdr;
        }
        if (interestingRecords != null && saveme != null) {
            interestingRecords.put(saveme, pdr);
        }

        // Dummy write out, so the position winds on properly
        record.writeOut(cos);
    }
    cos.close();

    if (usr == null || ptr == null) {
        throw new HSLFException("UserEditAtom or PersistPtr can't be determined.");
    }

    Map<Integer, Integer> persistIds = new HashMap<Integer, Integer>();
    for (Map.Entry<Integer, Integer> entry : ptr.getSlideLocationsLookup().entrySet()) {
        persistIds.put(oldToNewPositions.get(entry.getValue()), entry.getKey());
    }

    HSLFSlideShowEncrypted encData = new HSLFSlideShowEncrypted(getDocumentEncryptionAtom());

    for (Record record : _records) {
        assert (record instanceof PositionDependentRecord);
        // We've already figured out their new location, and told them that.
        // Tell them of the positions of the other records though.
        PositionDependentRecord pdr = (PositionDependentRecord) record;
        Integer persistId = persistIds.get(pdr.getLastOnDiskOffset());
        if (persistId == null) {
            persistId = 0;
        }

        // For now, we're only handling PositionDependentRecords that
        // happen at the top level.
        // In future, we'll need to handle them everywhere, but that's
        // a bit trickier.
        pdr.updateOtherRecordReferences(oldToNewPositions);

        // Whatever happens, write out that record tree
        if (os != null) {
            record.writeOut(encData.encryptRecord(os, persistId, record));
        }
    }
    encData.close();

    // Update and write out the Current User atom
    int oldLastUserEditAtomPos = (int) currentUser.getCurrentEditOffset();
    Integer newLastUserEditAtomPos = oldToNewPositions.get(oldLastUserEditAtomPos);
    if (newLastUserEditAtomPos == null || usr.getLastOnDiskOffset() != newLastUserEditAtomPos) {
        throw new HSLFException("Couldn't find the new location of the last UserEditAtom that used to be at " + oldLastUserEditAtomPos);
    }
    currentUser.setCurrentEditOffset(usr.getLastOnDiskOffset());
}
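A minimal caller-side sketch, assuming an HSLFSlideShowImpl instance named slideshow (the variable name and surrounding setup are hypothetical; the null-stream and map semantics come straight from the javadoc above). Passing null as the stream only refreshes the offsets, while the map collects the last PersistPtrHolder and UserEditAtom encountered:

Map<RecordTypes, PositionDependentRecord> interesting =
        new HashMap<RecordTypes, PositionDependentRecord>();
// null stream: update offsets and cross references without writing any bytes
slideshow.updateAndWriteDependantRecords(null, interesting);
// only the very last record of each type ends up in the map
PersistPtrHolder ptr = (PersistPtrHolder) interesting.get(RecordTypes.PersistPtrIncrementalBlock);
UserEditAtom usr = (UserEditAtom) interesting.get(RecordTypes.UserEditAtom);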
Use of org.apache.poi.hslf.exceptions.HSLFException in project poi by apache.
Class HSLFTextParagraph, method refreshRecords.
/**
 * Writes the textbox records back to the document record.
 */
private static void refreshRecords(List<HSLFTextParagraph> paragraphs) {
    TextHeaderAtom headerAtom = paragraphs.get(0)._headerAtom;
    RecordContainer _txtbox = headerAtom.getParentRecord();
    if (_txtbox instanceof EscherTextboxWrapper) {
        try {
            // a dummy write (null stream) refreshes the wrapper's cached records
            ((EscherTextboxWrapper) _txtbox).writeOut(null);
        } catch (IOException e) {
            throw new HSLFException("failed dummy write", e);
        }
    }
}
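HSLFException extends RuntimeException, so callers of the usermodel API are not forced to declare or catch it; a caller that wants to survive a failed dummy write can still catch it and inspect the wrapped IOException. A hypothetical sketch (the textBox variable and the recovery action are assumptions, and whether a given operation reaches refreshRecords depends on the call path):

try {
    textBox.setText("updated text"); // may trigger refreshRecords(...) internally
} catch (HSLFException e) {
    // the original IOException, if any, is preserved as the cause
    Throwable cause = e.getCause();
    System.err.println("could not refresh textbox records: " + cause);
}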
Use of org.apache.poi.hslf.exceptions.HSLFException in project poi by apache.
Class HSLFTextShape, method afterInsert.
/**
 * When a textbox is added to a sheet we need to tell the upper-level
 * <code>PPDrawing</code> about it.
 *
 * @param sh the sheet we are adding to
 */
@Override
protected void afterInsert(HSLFSheet sh) {
    super.afterInsert(sh);
    storeText();
    EscherTextboxWrapper thisTxtbox = getEscherTextboxWrapper();
    if (thisTxtbox != null) {
        getSpContainer().addChildRecord(thisTxtbox.getEscherRecord());
        PPDrawing ppdrawing = sh.getPPDrawing();
        ppdrawing.addTextboxWrapper(thisTxtbox);
        // Ensure the escher layer knows about the added records
        try {
            thisTxtbox.writeOut(null);
        } catch (IOException e) {
            throw new HSLFException(e);
        }
        boolean isInitialAnchor = getAnchor().equals(new Rectangle2D.Double());
        boolean isFilledTxt = !"".equals(getText());
        if (isInitialAnchor && isFilledTxt) {
            resizeToFitText();
        }
    }
    for (HSLFTextParagraph htp : _paragraphs) {
        htp.setShapeId(getShapeId());
    }
    sh.onAddTextShape(this);
}
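afterInsert(...) is a framework callback rather than something user code calls directly; it runs while a shape is being added to a sheet. A minimal sketch of the triggering call path, assuming the HSLF usermodel API from the same module (the anchor values are arbitrary):

HSLFSlideShow ppt = new HSLFSlideShow();
HSLFSlide slide = ppt.createSlide();
HSLFTextBox box = slide.createTextBox(); // afterInsert(...) is invoked during this call
box.setText("Hello, World!");
box.setAnchor(new Rectangle2D.Double(50, 50, 200, 40));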
Use of org.apache.poi.hslf.exceptions.HSLFException in project poi by apache.
Class PersistPtrHolder, method normalizePersistDirectory.
private void normalizePersistDirectory() {
    TreeMap<Integer, Integer> orderedSlideLocations = new TreeMap<Integer, Integer>(_slideLocations);

    @SuppressWarnings("resource")
    BufAccessBAOS bos = new BufAccessBAOS(); // NOSONAR
    byte[] intbuf = new byte[4];
    int lastPersistEntry = -1;
    int lastSlideId = -1;
    for (Entry<Integer, Integer> me : orderedSlideLocations.entrySet()) {
        int nextSlideId = me.getKey();
        int offset = me.getValue();
        try {
            if (lastSlideId + 1 == nextSlideId) {
                // use existing PersistDirectoryEntry, need to increase entry count
                assert (lastPersistEntry != -1);
                int infoBlock = LittleEndian.getInt(bos.getBuf(), lastPersistEntry);
                int entryCnt = cntPersistFld.getValue(infoBlock);
                infoBlock = cntPersistFld.setValue(infoBlock, entryCnt + 1);
                LittleEndian.putInt(bos.getBuf(), lastPersistEntry, infoBlock);
            } else {
                // start a new PersistDirectoryEntry
                lastPersistEntry = bos.size();
                int infoBlock = persistIdFld.setValue(0, nextSlideId);
                infoBlock = cntPersistFld.setValue(infoBlock, 1);
                LittleEndian.putInt(intbuf, 0, infoBlock);
                bos.write(intbuf);
            }
            // Add to the ptrData offset lookup hash
            LittleEndian.putInt(intbuf, 0, offset);
            bos.write(intbuf);
            lastSlideId = nextSlideId;
        } catch (IOException e) {
            // ByteArrayOutputStream is very unlikely to throw an IOException (maybe because of OOM ...)
            throw new HSLFException(e);
        }
    }

    // Save the new ptr data
    _ptrData = bos.toByteArray();

    // Update the atom header
    LittleEndian.putInt(_header, 4, bos.size());
}
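The infoBlock packing above uses two BitField constants defined elsewhere in PersistPtrHolder: in the persist directory format of [MS-PPT], the low 20 bits carry the first persist id of a run and the high 12 bits carry the count of consecutive offsets that follow. A minimal sketch of that packing, assuming those masks (the field names mirror the snippet; the ids are arbitrary):

import org.apache.poi.util.BitField;

BitField persistIdFld  = new BitField(0x000FFFFF); // bits 0-19: first persist id of the run
BitField cntPersistFld = new BitField(0xFFF00000); // bits 20-31: number of consecutive offsets

int infoBlock = persistIdFld.setValue(0, 3);       // run starts at persist id 3
infoBlock = cntPersistFld.setValue(infoBlock, 2);  // two consecutive offsets follow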
Use of org.apache.poi.hslf.exceptions.HSLFException in project poi by apache.
Class WMF, method getData.
@Override
public byte[] getData() {
    try {
        byte[] rawdata = getRawData();
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        InputStream is = new ByteArrayInputStream(rawdata);

        Header header = new Header();
        header.read(rawdata, CHECKSUM_SIZE * getUIDInstanceCount());
        long len = is.skip(header.getSize() + (long) CHECKSUM_SIZE * getUIDInstanceCount());
        assert (len == header.getSize() + CHECKSUM_SIZE * getUIDInstanceCount());

        // Prepend an Aldus placeable header so the result is a standalone WMF file
        ImageHeaderWMF aldus = new ImageHeaderWMF(header.getBounds());
        aldus.write(out);

        // The picture bytes are zlib-deflated; inflate them into the output
        InflaterInputStream inflater = new InflaterInputStream(is);
        byte[] chunk = new byte[4096];
        int count;
        while ((count = inflater.read(chunk)) >= 0) {
            out.write(chunk, 0, count);
        }
        inflater.close();
        return out.toByteArray();
    } catch (IOException e) {
        throw new HSLFException(e);
    }
}
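A hypothetical caller-side sketch (the file names are assumptions): getData() is what consumers reach through HSLFPictureData when extracting embedded WMF pictures, and the HSLFException surfaces any inflation failure as an unchecked error. Assumes java.nio.file.Files/Paths and org.apache.poi.sl.usermodel.PictureData are imported:

try (FileInputStream fis = new FileInputStream("deck.ppt")) {
    HSLFSlideShow ppt = new HSLFSlideShow(fis);
    int idx = 0;
    for (HSLFPictureData pict : ppt.getPictureData()) {
        if (pict.getType() == PictureData.PictureType.WMF) {
            Files.write(Paths.get("picture" + idx++ + ".wmf"), pict.getData());
        }
    }
}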