Use of org.apache.derby.iapi.store.raw.Page in project derby by apache: the class BaseContainerHandle, method addPage.
/*
** Methods from ContainerHandle
*/
/**
 * Allocate a new page in this container.
 * <p>
 * The returned page is latched and is observing this handle.
 *
 * @return the newly added, latched page
 *
 * @see BaseContainer#addPage
 * @see ContainerHandle#addPage
 * @exception StandardException Standard Derby error policy
 */
public Page addPage() throws StandardException {
    // verify the handle is open for update before touching the container
    checkUpdateOpen();
    return container.addPage(this, false);
}
Use of org.apache.derby.iapi.store.raw.Page in project derby by apache: the class FileContainer, method switchToMultiInsertPageMode.
/**
 * Switch this container from tracking a single last-inserted page to
 * tracking four of them, pre-allocating three fresh pages so that
 * concurrent inserters can be spread across different pages.
 * <p>
 * No-op if the container is already in multi-insert-page mode.
 *
 * @param handle handle used to allocate the new pages
 * @exception StandardException Standard Derby error policy
 */
private synchronized void switchToMultiInsertPageMode(BaseContainerHandle handle) throws StandardException {
    if (lastInsertedPage.length != 1) {
        // already in multi-insert-page mode; nothing to do
        return;
    }

    long previousLast = lastInsertedPage[0];
    lastInsertedPage = new long[4];
    lastInsertedPage[0] = previousLast;

    // Fill slots 3 down to 1 with newly allocated pages; each page is
    // unlatched immediately since only its page number is recorded.
    for (int slot = 3; slot > 0; slot--) {
        Page newPage = addPage(handle, false);
        lastInsertedPage[slot] = newPage.getPageNumber();
        newPage.unlatch();
    }
}
Use of org.apache.derby.iapi.store.raw.Page in project derby by apache: the class InsertOperation, method restoreLoggedRow.
/*
* LogicalUndoable methods
*/
/**
 * Restore the row stored in the optional data of the log record.
 * <p>
 * The optional data was written by the page in the same on-page format it
 * stores records in, so only a page knows how to turn it back into a
 * storable row. The page where the insert originally went is fetched for
 * that purpose, even though the row may no longer be present on it.
 *
 * @param row destination array for the restored row's columns
 * @param in  log stream positioned at the optional data
 *
 * @exception IOException       error reading from log stream
 * @exception StandardException Standard Derby error policy
 */
public void restoreLoggedRow(Object[] row, LimitObjectInput in) throws StandardException, IOException {
    BasePage page = null;
    try {
        page = (BasePage) getContainer().getPage(getPageId().getPageNumber());
        page.restoreRecordFromStream(in, row);
    } finally {
        // always release the page latch, even if restore fails
        if (page != null) {
            page.unlatch();
        }
    }
}
Use of org.apache.derby.iapi.store.raw.Page in project derby by apache: the class StoredPage, method logRecordDataPortion.
/**
 * Write the data portion (field headers + field data) of the record at
 * the given slot to the output stream, for inclusion in a log record.
 * <p>
 * Fields excluded by validColumns — and, for purges, fields that are not
 * overflow pointers for long columns — are logged as non-existent fields
 * rather than copied. For updates that orphan a long-column overflow
 * chain, post-commit reclaim work is queued for the chain.
 *
 * @param slot          slot number of the record on this page
 * @param flag          bit flags; BasePage.LOG_RECORD_FOR_PURGE and
 *                      BasePage.LOG_RECORD_FOR_UPDATE are examined here
 * @param recordHeader  header of the record being logged (already written
 *                      by the caller; this method skips past it)
 * @param validColumns  columns to log, or null for all columns
 * @param out           stream the logged field data is written to
 * @param headRowHandle handle of the head row; used when queueing
 *                      post-commit reclaim of an orphaned column chain
 *
 * @exception StandardException Standard Derby error policy
 * @exception IOException       error writing to the output stream
 */
private void logRecordDataPortion(int slot, int flag, StoredRecordHeader recordHeader, FormatableBitSet validColumns, OutputStream out, RecordHandle headRowHandle) throws StandardException, IOException {
    int offset = getRecordOffset(slot);
    // now skip over the original record header before writing the data
    int oldHeaderLength = recordHeader.size();
    offset += oldHeaderLength;
    // write out the record data (field header + data + ...) from the page data
    int startField = recordHeader.getFirstField();
    int endField = startField + recordHeader.getNumberFields();
    int validColumnsSize = (validColumns == null) ? 0 : validColumns.getLength();
    for (int fieldId = startField; fieldId < endField; fieldId++) {
        // position the page's raw input stream at this field's on-page offset
        rawDataIn.setPosition(offset);
        // get the field header information from the page
        int fieldStatus = StoredFieldHeader.readStatus(rawDataIn);
        int fieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, fieldStatus, slotFieldSize);
        // Skip this field if it is not in validColumns, or — when logging
        // for purges — unless the field is an overflow pointer for a long column.
        if (((validColumns != null) && !(validColumnsSize > fieldId && validColumns.isSet(fieldId))) || ((flag & BasePage.LOG_RECORD_FOR_PURGE) != 0 && !StoredFieldHeader.isOverflow(fieldStatus))) {
            // nope, move page offset along past the skipped field
            offset += StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize);
            offset += fieldDataLength;
            // write a non-existent field in its place
            fieldStatus = StoredFieldHeader.setInitial();
            fieldStatus = StoredFieldHeader.setNonexistent(fieldStatus);
            StoredFieldHeader.write(out, fieldStatus, 0, slotFieldSize);
            continue;
        }
        // An update is about to orphan this overflow (long column) chain:
        // queue post-commit work to reclaim its space.
        // If temp container, don't do anything.
        if (((flag & BasePage.LOG_RECORD_FOR_UPDATE) != 0) && headRowHandle != null && StoredFieldHeader.isOverflow(fieldStatus) && owner.isTemporaryContainer() == false) {
            // remember the page offset so the stream can be restored below
            int saveOffset = rawDataIn.getPosition();
            long overflowPage = CompressedNumber.readLong((InputStream) rawDataIn);
            int overflowId = CompressedNumber.readInt((InputStream) rawDataIn);
            // Remember the time stamp on the first page of the column
            // chain. This is to prevent the case where the post commit
            // work gets fired twice; in that case, the second time it is
            // fired, this overflow page may no longer be part of this row
            // chain that is being updated.
            Page firstPageOnColumnChain = getOverflowPage(overflowPage);
            PageTimeStamp ts = firstPageOnColumnChain.currentTimeStamp();
            firstPageOnColumnChain.unlatch();
            RawTransaction rxact = (RawTransaction) owner.getTransaction();
            ReclaimSpace work = new ReclaimSpace(ReclaimSpace.COLUMN_CHAIN, headRowHandle, // long column about to be orphaned by update
            fieldId, // page where the long column starts
            overflowPage, // record Id of the beginning of the long column
            overflowId, ts, rxact.getDataFactory(), true);
            rxact.addPostCommitWork(work);
            // Just to be safe, reset the data stream to where it was
            rawDataIn.setPosition(saveOffset);
        }
        // write the field header for the log
        offset += StoredFieldHeader.write(out, fieldStatus, fieldDataLength, slotFieldSize);
        if (fieldDataLength != 0) {
            // write the actual field data straight from the page buffer
            out.write(pageData, offset, fieldDataLength);
            offset += fieldDataLength;
        }
    }
}
Use of org.apache.derby.iapi.store.raw.Page in project derby by apache: the class BTreePostCommit, method purgeRowLevelCommittedDeletes.
/**
 * Attempt to reclaim committed deleted rows from the page with row locking.
 * <p>
 * Gets an exclusive latch on the page, then loops backward through the
 * page searching for deleted rows which are committed. This routine is
 * called only from post-commit processing, so it will never see rows
 * deleted by the current transaction. For each deleted row on the page it
 * attempts to get an exclusive lock on the deleted row, NOWAIT. If that
 * succeeds — and since this transaction did not delete the row — the row
 * must have been deleted by a committed transaction, so it is safe to
 * purge, and it is purged from the page.
 * <p>
 * The latch on the leaf page containing the purged rows must be kept until
 * after the transaction has been committed or aborted in order to insure
 * proper undo of the purges can take place. Otherwise another transaction
 * could use the space freed by the purge and then prevent the purge from
 * being able to undo. That is why no unlatch happens in this method.
 *
 * @param open_btree The already open btree, which has been locked with IX
 *                   table lock, to use to get latch on page.
 *
 * @exception StandardException Standard exception policy.
 */
private final void purgeRowLevelCommittedDeletes(OpenBTree open_btree) throws StandardException {
    // getNoWait() returns null either when the latch cannot be obtained
    // or when the requested page no longer exists; in both cases this
    // post-commit work is simply skipped.
    LeafControlRow leaf = (LeafControlRow) ControlRow.getNoWait(open_btree, page_number);
    if (leaf == null)
        return;

    BTreeLockingPolicy lockingPolicy = open_btree.getLockingPolicy();

    // reclaimable candidates = total records - control row - non-deleted records
    int purgeCandidates = leaf.page.recordCount() - 1 - leaf.page.nonDeletedRecordCount();
    if (purgeCandidates <= 0)
        return;

    DataValueDescriptor[] template = open_btree.getRuntimeMem().get_template(open_btree.getRawTran());
    Page leafPage = leaf.page;

    // RowLocation column is the last column of the template.
    FetchDescriptor rowLocFetchDesc = RowUtil.getFetchDescriptorConstant(template.length - 1);

    // Walk backward so a purge never shifts a slot we have yet to visit
    // (slot 0 is the control row and is never purged).
    for (int slot = leafPage.recordCount() - 1; slot > 0; slot--) {
        if (!leafPage.isDeletedAtSlot(slot))
            continue;

        // NOWAIT lock granted means the delete is committed — safe to purge.
        if (lockingPolicy.lockScanCommittedDeletedRow(open_btree, leaf, template, rowLocFetchDesc, slot)) {
            // the row is a committed deleted row, purge it
            leafPage.purgeAtSlot(slot, 1, true);
            // Tell scans positioned on this page to reposition, because
            // the row they are positioned on may have disappeared.
            leafPage.setRepositionNeeded();
        }
    }
}
Aggregations