Example use of org.apache.derby.iapi.store.raw.PageTimeStamp in the Apache Derby project:
the logRecordDataPortion method of the StoredPage class.
/**
 * Write the data portion (field headers + field data) of the record in
 * {@code slot} to {@code out}, producing the record image used by the log.
 *
 * The record's on-page header has already been written by the caller; this
 * method skips past it and copies each field out of {@code pageData},
 * filtering fields according to {@code validColumns} and {@code flag}.
 *
 * @param slot          page slot of the record being logged
 * @param flag          log-operation flags (e.g. BasePage.LOG_RECORD_FOR_PURGE,
 *                      BasePage.LOG_RECORD_FOR_UPDATE)
 * @param recordHeader  the record's header, already read from the page
 * @param validColumns  columns to include; null means all columns.  Columns
 *                      not set here are logged as nonexistent fields.
 * @param out           stream receiving the log image
 * @param headRowHandle handle of the row's head page record; used to queue
 *                      post-commit reclamation of orphaned long-column chains
 * @throws StandardException standard Derby error policy
 * @throws IOException       error writing to {@code out}
 */
private void logRecordDataPortion(int slot, int flag, StoredRecordHeader recordHeader, FormatableBitSet validColumns, OutputStream out, RecordHandle headRowHandle) throws StandardException, IOException {
    int offset = getRecordOffset(slot);

    // now skip over the original record header before writing the data
    int oldHeaderLength = recordHeader.size();
    offset += oldHeaderLength;

    // write out the record data (field header + data, repeated) from the page data
    int startField = recordHeader.getFirstField();
    int endField = startField + recordHeader.getNumberFields();
    int validColumnsSize = (validColumns == null) ? 0 : validColumns.getLength();

    for (int fieldId = startField; fieldId < endField; fieldId++) {
        // position the page input stream at this field's on-page header
        rawDataIn.setPosition(offset);

        // get the field header information from the page
        int fieldStatus = StoredFieldHeader.readStatus(rawDataIn);
        int fieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, fieldStatus, slotFieldSize);

        // Skip fields the caller did not ask for (not set in validColumns),
        // and — for purges — every field except overflow pointers for long
        // columns (those must be logged so the chain can be reclaimed).
        if (((validColumns != null) && !(validColumnsSize > fieldId && validColumns.isSet(fieldId))) || ((flag & BasePage.LOG_RECORD_FOR_PURGE) != 0 && !StoredFieldHeader.isOverflow(fieldStatus))) {
            // not logging this field's data: move the page offset past it
            offset += StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize);
            offset += fieldDataLength;

            // write a non-existent field in its place so field numbering
            // in the log image stays aligned with the record
            fieldStatus = StoredFieldHeader.setInitial();
            fieldStatus = StoredFieldHeader.setNonexistent(fieldStatus);
            StoredFieldHeader.write(out, fieldStatus, 0, slotFieldSize);
            continue;
        }

        // An update is about to overwrite an overflow (long-column) pointer:
        // schedule post-commit reclamation of the column chain it points to.
        // If this is a temp container, don't do anything.
        if (((flag & BasePage.LOG_RECORD_FOR_UPDATE) != 0) && headRowHandle != null && StoredFieldHeader.isOverflow(fieldStatus) && owner.isTemporaryContainer() == false) {
            // remember the page offset so the stream can be restored below
            int saveOffset = rawDataIn.getPosition();
            long overflowPage = CompressedNumber.readLong((InputStream) rawDataIn);
            int overflowId = CompressedNumber.readInt((InputStream) rawDataIn);

            // Remember the time stamp on the first page of the column
            // chain. This is to prevent the case where the post commit
            // work gets fired twice, in that case, the second time it is
            // fired, this overflow page may not be part of this row chain
            // that is being updated.
            Page firstPageOnColumnChain = getOverflowPage(overflowPage);
            PageTimeStamp ts = firstPageOnColumnChain.currentTimeStamp();
            firstPageOnColumnChain.unlatch();

            RawTransaction rxact = (RawTransaction) owner.getTransaction();
            ReclaimSpace work = new ReclaimSpace(ReclaimSpace.COLUMN_CHAIN, headRowHandle,
                fieldId, // long column about to be orphaned by update
                overflowPage, // page where the long column starts
                overflowId, // record id of the beginning of the long column
                ts, rxact.getDataFactory(), true);
            rxact.addPostCommitWork(work);

            // Just to be safe, reset the data stream to where the field
            // header reads left it
            rawDataIn.setPosition(saveOffset);
        }

        // write the field header for the log; advance past the on-page header
        offset += StoredFieldHeader.write(out, fieldStatus, fieldDataLength, slotFieldSize);

        if (fieldDataLength != 0) {
            // write the actual field data straight from the page buffer
            out.write(pageData, offset, fieldDataLength);
            offset += fieldDataLength;
        }
    }
}
Aggregations